diff --git a/.azure-pipelines/code-scan-neural-insights.yaml b/.azure-pipelines/code-scan-neural-insights.yaml deleted file mode 100644 index eaf741c7ec1..00000000000 --- a/.azure-pipelines/code-scan-neural-insights.yaml +++ /dev/null @@ -1,59 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_insights - - setup.py - - .azure-pipelines/code-scan-neural-insights.yml - -pool: - vmImage: "ubuntu-latest" - -variables: - CODE_SCAN_LOG_PATH: ".azure-pipelines/scripts/codeScan/scanLog" - -stages: - - stage: DocStyleCodeScan - displayName: DocStyle Code Scan - dependsOn: [] - jobs: - - job: DocStyle - displayName: DocStyle - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "pydocstyle" - uploadPath: "pydocstyle.log" - scanModule: "neural_insights" - - - stage: BanditCodeScan - displayName: Bandit Code Scan - dependsOn: [] - jobs: - - job: Bandit - displayName: Bandit - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "bandit" - uploadPath: "bandit.log" - scanModule: "neural_insights" - - - stage: PylintCodeScan - displayName: Pylint Code Scan - dependsOn: [] - jobs: - - job: Pylint - displayName: Pylint - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "pylint" - uploadPath: "pylint.json" - scanModule: "neural_insights" diff --git a/.azure-pipelines/code-scan-neural-solution.yaml b/.azure-pipelines/code-scan-neural-solution.yaml deleted file mode 100644 index 301c7010465..00000000000 --- a/.azure-pipelines/code-scan-neural-solution.yaml +++ /dev/null @@ -1,59 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_solution - - setup.py - - .azure-pipelines/code-scan-neural-solution.yml - -pool: - vmImage: "ubuntu-latest" - -variables: - CODE_SCAN_LOG_PATH: ".azure-pipelines/scripts/codeScan/scanLog" - -stages: - - stage: DocStyleCodeScan - displayName: DocStyle Code Scan - dependsOn: [] - jobs: - - job: DocStyle - displayName: DocStyle - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "pydocstyle" - uploadPath: "pydocstyle.log" - scanModule: "neural_solution" - - - stage: BanditCodeScan - displayName: Bandit Code Scan - dependsOn: [] - jobs: - - job: Bandit - displayName: Bandit - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "bandit" - uploadPath: "bandit.log" - scanModule: "neural_solution" - - - stage: PylintCodeScan - displayName: Pylint Code Scan - dependsOn: [] - jobs: - - job: Pylint - displayName: Pylint - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "pylint" - uploadPath: "pylint.json" - scanModule: "neural_solution" diff --git a/.azure-pipelines/code-scan.yml b/.azure-pipelines/code-scan.yml index afc894ee034..1f3f3beb647 100644 --- a/.azure-pipelines/code-scan.yml +++ b/.azure-pipelines/code-scan.yml @@ -44,15 +44,3 @@ stages: parameters: codeScanFileName: "bandit" uploadPath: "bandit.log" - - - stage: PylintCodeScan - displayName: Pylint Code Scan - dependsOn: [] - jobs: - - job: Pylint - displayName: Pylint - steps: - - template: template/code-scan-template.yml - parameters: - codeScanFileName: "pylint" - uploadPath: "pylint.json" diff --git a/.azure-pipelines/docker/Dockerfile.devel b/.azure-pipelines/docker/Dockerfile.devel index 30e6bf3ec11..2f3aab3ce72 100644 --- 
a/.azure-pipelines/docker/Dockerfile.devel +++ b/.azure-pipelines/docker/Dockerfile.devel @@ -36,7 +36,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \ RUN ln -sf $(which python3) /usr/bin/python -RUN python -m pip --no-cache-dir install --upgrade pip +RUN python -m pip install pip==24.0 RUN python -m pip install --no-cache-dir setuptools RUN pip list diff --git a/.azure-pipelines/docker/DockerfileCodeScan.devel b/.azure-pipelines/docker/DockerfileCodeScan.devel index f6359248a7c..611fe02e235 100644 --- a/.azure-pipelines/docker/DockerfileCodeScan.devel +++ b/.azure-pipelines/docker/DockerfileCodeScan.devel @@ -30,7 +30,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \ RUN ln -sf $(which python3) /usr/bin/python -RUN python -m pip install --no-cache-dir pylint==2.12.1\ +RUN python -m pip install --no-cache-dir \ bandit\ pyspelling\ pydocstyle diff --git a/.azure-pipelines/model-test-3x.yml b/.azure-pipelines/model-test-3x.yml index f247deb96fb..55320d9247c 100644 --- a/.azure-pipelines/model-test-3x.yml +++ b/.azure-pipelines/model-test-3x.yml @@ -10,7 +10,7 @@ pr: include: - neural_compressor/common - neural_compressor/torch - - examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm + - examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only - setup.py - requirements_pt.txt - .azure-pipelines/scripts/models @@ -112,7 +112,7 @@ stages: displayName: "Publish report" - script: | if [ $(is_perf_reg) == 'true' ]; then - echo "[Performance Regression] Some model performance regression occurred, please check artifacts and reports." + echo "Some benchmark regression occurred or the reference data need to be updated, please check artifacts and reports." exit 1 fi - displayName: "Specify performance regression" + displayName: "Specify regression" diff --git a/.azure-pipelines/model-test.yml b/.azure-pipelines/model-test.yml index cc9e431b608..b73321a8640 100644 --- a/.azure-pipelines/model-test.yml +++ b/.azure-pipelines/model-test.yml @@ -40,33 +40,20 @@ parameters: displayName: Run ONNX models? type: boolean default: true - - name: MXNet_Model - displayName: Run MXNet models? - type: boolean - default: false - name: TensorFlowModelList type: object default: - resnet50v1.5 - ssd_resnet50_v1 -# - ssd_mobilenet_v1_ckpt -# - inception_v1 -# - darknet19 -# - resnet-101 - name: PyTorchModelList type: object default: - - resnet18 - resnet18_fx - name: ONNXModelList type: object default: - resnet50-v1-12 - - name: MXNetModelList - type: object - default: - - resnet50v1 stages: - stage: TensorFlowModels @@ -114,21 +101,6 @@ stages: modelName: ${{ model }} framework: "onnxrt" - - stage: MXNetModels - displayName: Run MXNet Model - pool: MODEL_PERF_TEST - dependsOn: [] - condition: and(succeeded(), eq('${{ parameters.MXNet_Model }}', 'true')) - jobs: - - ${{ each model in parameters.MXNetModelList }}: - - job: - displayName: ${{ model }} - steps: - - template: template/model-template.yml - parameters: - modelName: ${{ model }} - framework: "mxnet" - - stage: GenerateLogs displayName: Generate Report pool: @@ -191,7 +163,7 @@ stages: displayName: "Publish report" - script: | if [ $(is_perf_reg) == 'true' ]; then - echo "[Performance Regression] Some model performance regression occurred, please check artifacts and reports." + echo "Some benchmark regression occurred or the reference data need to be updated, please check artifacts and reports." 
exit 1 fi - displayName: "Specify performance regression" + displayName: "Specify regression" diff --git a/.azure-pipelines/scripts/codeScan/pydocstyle/pydocstyle.sh b/.azure-pipelines/scripts/codeScan/pydocstyle/pydocstyle.sh index db0a818db33..96f95bcc68b 100644 --- a/.azure-pipelines/scripts/codeScan/pydocstyle/pydocstyle.sh +++ b/.azure-pipelines/scripts/codeScan/pydocstyle/pydocstyle.sh @@ -17,11 +17,6 @@ log_dir="$work_dir/../scanLog" mkdir -p $log_dir scan_path="scan_path.txt" -if [ "${scan_module}" = "neural_solution" ]; then - scan_path="scan_path_neural_solution.txt" -elif [ "${scan_module}" = "neural_insights" ]; then - scan_path="scan_path_neural_insights.txt" -fi exit_code=0 for line in $(cat ${work_dir}/${scan_path}) diff --git a/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path.txt b/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path.txt index b524f1f61db..ed2c4ccafca 100644 --- a/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path.txt +++ b/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path.txt @@ -15,3 +15,15 @@ /neural-compressor/neural_compressor/strategy /neural-compressor/neural_compressor/training.py /neural-compressor/neural_compressor/utils +/neural-compressor/neural_compressor/common +/neural-compressor/neural_compressor/tensorflow +/neural-compressor/neural_compressor/torch/algorithms/layer_wise +/neural-compressor/neural_compressor/torch/algorithms/mixed_precision +/neural-compressor/neural_compressor/torch/algorithms/mx_quant +/neural-compressor/neural_compressor/torch/algorithms/pt2e_quant +/neural-compressor/neural_compressor/torch/algorithms/smooth_quant +/neural-compressor/neural_compressor/torch/algorithms/static_quant +/neural-compressor/neural_compressor/torch/algorithms/weight_only +/neural-compressor/neural_compressor/torch/export +/neural-compressor/neural_compressor/torch/quantization +/neural-compressor/neural_compressor/torch/utils diff --git a/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path_neural_insights.txt b/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path_neural_insights.txt deleted file mode 100644 index 8a008fd19fb..00000000000 --- a/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path_neural_insights.txt +++ /dev/null @@ -1,4 +0,0 @@ -/neural-compressor/neural_insights/components -/neural-compressor/neural_insights/utils -/neural-compressor/neural_insights/web - diff --git a/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path_neural_solution.txt b/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path_neural_solution.txt deleted file mode 100644 index 07a5b1483d7..00000000000 --- a/.azure-pipelines/scripts/codeScan/pydocstyle/scan_path_neural_solution.txt +++ /dev/null @@ -1,4 +0,0 @@ -/neural-compressor/neural_solution/backend -/neural-compressor/neural_solution/frontend -/neural-compressor/neural_solution/utils - diff --git a/.azure-pipelines/scripts/codeScan/pylint/pylint.sh b/.azure-pipelines/scripts/codeScan/pylint/pylint.sh deleted file mode 100644 index 9103947e965..00000000000 --- a/.azure-pipelines/scripts/codeScan/pylint/pylint.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -for var in "$@" -do - case $var in - --scan_module=*) - scan_module=$(echo $var |cut -f2 -d=) - ;; - esac -done - -source /neural-compressor/.azure-pipelines/scripts/change_color.sh -RESET="echo -en \\E[0m \\n" # close color - -log_dir="/neural-compressor/.azure-pipelines/scripts/codeScan/scanLog" -mkdir -p $log_dir - -apt-get install -y --no-install-recommends --fix-missing \ - autoconf \ - build-essential - -pip install -r 
/neural-compressor/requirements.txt -pip install cmake - -pip install torch \ - horovod \ - google \ - autograd \ - ofa \ - fvcore \ - pymoo \ - onnxruntime_extensions \ - peft \ - tf_slim \ - transformers \ - accelerate \ - flask==2.1.3 \ - xgboost \ - datasets \ - prettytable \ - psutil \ - py-cpuinfo \ - pyyaml \ - pydantic \ - protobuf - -if [ "${scan_module}" = "neural_solution" ]; then - cd /neural-compressor - python setup.py install - - echo "Install Neural Solution ... " - bash /neural-compressor/.azure-pipelines/scripts/install_neural_solution.sh - -elif [ "${scan_module}" = "neural_insights" ]; then - cd /neural-compressor - python setup.py install - - echo "Install Neural Insights ... " - bash /neural-compressor/.azure-pipelines/scripts/install_neural_insights.sh - -fi - -echo "[DEBUG] list pipdeptree..." -pip install pipdeptree -pipdeptree - -python -m pylint -f json --disable=R,C,W,E1129 --enable=line-too-long --max-line-length=120 --extension-pkg-whitelist=numpy --ignored-classes=TensorProto,NodeProto \ ---ignored-modules=tensorflow,keras,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch,intel_extension_for_tensorflow,torchinfo,horovod,transformers \ -/neural-compressor/${scan_module} > $log_dir/pylint.json - -exit_code=$? - -$BOLD_YELLOW && echo " ----------------- Current pylint cmd start --------------------------" && $RESET -echo "python -m pylint -f json --disable=R,C,W,E1129 --enable=line-too-long --max-line-length=120 --extension-pkg-whitelist=numpy --ignored-classes=TensorProto,NodeProto --ignored-modules=tensorflow,torch,torch.quantization,torch.tensor,torchvision,fairseq,mxnet,onnx,onnxruntime,intel_extension_for_pytorch,intel_extension_for_tensorflow,torchinfo,horovod,transformers -/neural-compressor/${scan_module}>$log_dir/pylint.json" -$BOLD_YELLOW && echo " ----------------- Current pylint cmd end --------------------------" && $RESET - -$BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------" && $RESET -cat $log_dir/pylint.json -$BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET - -if [ ${exit_code} -ne 0 ]; then - $BOLD_RED && echo "Error!! Please Click on the artifact button to download and view Pylint error details." && $RESET - exit 1 -fi -$BOLD_PURPLE && echo "Congratulations, Pylint check passed!" && $LIGHT_PURPLE && echo " You can click on the artifact button to see the log details." && $RESET -exit 0 diff --git a/.azure-pipelines/scripts/fwk_version.sh b/.azure-pipelines/scripts/fwk_version.sh index b0ea9fa0b4f..82845e8982e 100644 --- a/.azure-pipelines/scripts/fwk_version.sh +++ b/.azure-pipelines/scripts/fwk_version.sh @@ -1,27 +1,10 @@ #!/bin/bash echo "export FWs version..." 
-test_mode=$1 - -if [ "$test_mode" == "coverage" ] || [ "$test_mode" == "latest" ]; then - export tensorflow_version='2.15.0-official' - export pytorch_version='2.3.0+cpu' - export torchvision_version='0.18.0+cpu' - export ipex_version='2.3.0+cpu' - export onnx_version='1.16.0' - export onnxruntime_version='1.18.0' - export mxnet_version='1.9.1' -else - export tensorflow_version='2.15.0-official' - export pytorch_version='2.2.1+cpu' - export torchvision_version='0.17.1+cpu' - export ipex_version='2.2.0+cpu' - export onnx_version='1.15.0' - export onnxruntime_version='1.17.1' - export mxnet_version='1.9.1' -fi - - - - - +export tensorflow_version='2.15.0-official' +export pytorch_version='2.3.0+cpu' +export torchvision_version='0.18.0+cpu' +export ipex_version='2.3.0+cpu' +export onnx_version='1.16.0' +export onnxruntime_version='1.18.0' +export mxnet_version='1.9.1' diff --git a/.azure-pipelines/scripts/install_nc.sh b/.azure-pipelines/scripts/install_nc.sh index d3cee07609c..6a80419259f 100644 --- a/.azure-pipelines/scripts/install_nc.sh +++ b/.azure-pipelines/scripts/install_nc.sh @@ -2,21 +2,21 @@ echo -e "\n Install Neural Compressor ... " cd /neural-compressor -if [[ $1 = *"3x_pt" ]]; then +if [[ $1 = *"3x_pt"* ]]; then + if [[ $1 != *"3x_pt_fp8"* ]]; then + echo -e "\n Install torch CPU ... " + pip install torch==2.3.0 --index-url https://download.pytorch.org/whl/cpu + fi python -m pip install --no-cache-dir -r requirements_pt.txt python setup.py pt bdist_wheel - pip install dist/neural_compressor*.whl --force-reinstall + pip install --no-deps dist/neural_compressor*.whl --force-reinstall elif [[ $1 = *"3x_tf"* ]]; then python -m pip install --no-cache-dir -r requirements_tf.txt python setup.py tf bdist_wheel pip install dist/neural_compressor*.whl --force-reinstall -elif [[ $1 = *"3x_ort" ]]; then - python -m pip install --no-cache-dir -r requirements_ort.txt - python setup.py ort bdist_wheel - pip install dist/neural_compressor*.whl --force-reinstall else python -m pip install --no-cache-dir -r requirements.txt - python setup.py 2x bdist_wheel + python setup.py bdist_wheel pip install dist/neural_compressor*.whl --force-reinstall fi diff --git a/.azure-pipelines/scripts/install_neural_insights.sh b/.azure-pipelines/scripts/install_neural_insights.sh deleted file mode 100644 index daa8887f635..00000000000 --- a/.azure-pipelines/scripts/install_neural_insights.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -echo "Install Neural Insights ... " -cd /neural-compressor -python -m pip install --no-cache-dir -r neural_insights/requirements.txt -python setup.py neural_insights bdist_wheel -pip install dist/neural_insights*.whl -pip list \ No newline at end of file diff --git a/.azure-pipelines/scripts/install_neural_solution.sh b/.azure-pipelines/scripts/install_neural_solution.sh deleted file mode 100644 index d0139c85132..00000000000 --- a/.azure-pipelines/scripts/install_neural_solution.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -echo "Install Open MPI ..." -wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.5.tar.gz -gunzip -c openmpi-4.1.5.tar.gz | tar xf - -cd openmpi-4.1.5 -./configure --prefix=/usr/local -make all install -MPI_DIR=/usr/local/lib/openmpi -export PATH=$MPI_DIR/bin:$PATH -export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH -echo "Current Path ... " -echo $PATH - -echo "Current LD_LIBRARY_PATH ... " -echo $LD_LIBRARY_PATH - -echo "check mpicc ..." -which mpicc -which mpirun - -echo "Install Neural Solution ... 
" -cd /neural-compressor -python -m pip install --no-cache-dir -r neural_solution/requirements.txt -python setup.py neural_solution sdist bdist_wheel -pip install dist/neural_solution*.whl -pip list \ No newline at end of file diff --git a/.azure-pipelines/scripts/models/env_setup.sh b/.azure-pipelines/scripts/models/env_setup.sh index 2c356d7307b..adf4f01df67 100644 --- a/.azure-pipelines/scripts/models/env_setup.sh +++ b/.azure-pipelines/scripts/models/env_setup.sh @@ -100,12 +100,6 @@ if [[ "${fwk_ver}" != "latest" ]]; then elif [[ "${framework}" == "onnxrt" ]]; then pip install onnx==1.15.0 pip install onnxruntime==${fwk_ver} - elif [[ "${framework}" == "mxnet" ]]; then - pip install numpy==1.23.5 - echo "re-install pycocotools resolve the issue with numpy..." - pip uninstall pycocotools -y - pip install --no-cache-dir pycocotools - pip install mxnet==${fwk_ver} fi fi @@ -118,9 +112,6 @@ if [ -f "requirements.txt" ]; then sed -i '/tensorflow==/d;/tensorflow$/d' requirements.txt sed -i '/^intel-tensorflow/d' requirements.txt fi - if [ "${framework}" == "mxnet" ]; then - sed -i '/mxnet==/d;/mxnet$/d;/mxnet-mkl==/d;/mxnet-mkl$/d' requirements.txt - fi if [ "${framework}" == "pytorch" ]; then sed -i '/torch==/d;/torch$/d;/torchvision==/d;/torchvision$/d' requirements.txt fi diff --git a/.azure-pipelines/scripts/models/generate_report.sh b/.azure-pipelines/scripts/models/generate_report.sh index f4194585b6d..e13118fcffe 100644 --- a/.azure-pipelines/scripts/models/generate_report.sh +++ b/.azure-pipelines/scripts/models/generate_report.sh @@ -245,13 +245,9 @@ function generate_html_core { if((new_result == nan && previous_result == nan) || new_result == "unknown"){ printf(""); } else{ - if(new_result == nan) { - job_status = "fail" - status_png = "background-color:#FFD2D2"; - printf("", status_png); - } else{ - printf(""); - } + job_status = "fail" + status_png = "background-color:#FFD2D2"; + printf("", status_png); } } } diff --git a/.azure-pipelines/scripts/models/run_model_trigger_common.sh b/.azure-pipelines/scripts/models/run_model_trigger_common.sh index 8cb5f8c9855..28201dca309 100644 --- a/.azure-pipelines/scripts/models/run_model_trigger_common.sh +++ b/.azure-pipelines/scripts/models/run_model_trigger_common.sh @@ -80,8 +80,6 @@ if [ "${mode}" == "env_setup" ]; then elif [ "${mode}" == "tuning" ]; then if [ "${framework}" == "onnxrt" ]; then output_model=${log_dir}/${model}/${framework}-${model}-tune.onnx - elif [ "${framework}" == "mxnet" ]; then - output_model=${log_dir}/${model}/resnet50_v1 elif [ "${framework}" == "tensorflow" ]; then output_model=${log_dir}/${model}/${framework}-${model}-tune.pb fi @@ -140,8 +138,6 @@ elif [ "${mode}" == "int8_benchmark" ]; then $BOLD_YELLOW && echo "====== run benchmark int8 =======" && $RESET if [[ "${framework}" == "onnxrt" ]]; then model_name="${log_dir}/${model}/${framework}-${model}-tune.onnx" - elif [[ "${framework}" == "mxnet" ]]; then - model_name="${log_dir}/${model}" elif [[ "${framework}" == "tensorflow" ]]; then model_name="${log_dir}/${model}/${framework}-${model}-tune.pb" elif [[ "${framework}" == "pytorch" ]]; then diff --git a/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh b/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh deleted file mode 100644 index 21be4c96031..00000000000 --- a/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -set -eo pipefail -# get parameters -PATTERN='[-a-zA-Z0-9_]*=' - -for i in "$@" -do - case $i in - --model=*) - 
model=`echo $i | sed "s/${PATTERN}//"`;; - --mode=*) - mode=`echo $i | sed "s/${PATTERN}//"`;; - --USE_TUNE_ACC=*) - USE_TUNE_ACC=`echo $i | sed "s/${PATTERN}//"`;; - --PERF_STABLE_CHECK=*) - PERF_STABLE_CHECK=`echo $i | sed "s/${PATTERN}//"`;; - --BUILD_BUILDID=*) - BUILD_BUILDID=`echo $i | sed "s/${PATTERN}//"`;; - *) - echo "Parameter $i not recognized."; exit 1;; - esac -done - -echo "specify FWs version..." -source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest' -FRAMEWORK="mxnet" -FRAMEWORK_VERSION=${mxnet_version} - -inc_new_api=false -# ======== set up config for mxnet models ======== -if [ "${model}" == "resnet50v1" ]; then - model_src_dir="image_recognition/cnn_models/quantization/ptq" - dataset_location="/tf_dataset/mxnet/val_256_q90.rec" - input_model="/tf_dataset/mxnet/resnet50_v1" - yaml="cnn.yaml" - strategy="mse" - batch_size=1 - new_benchmark=false - tuning_cmd="bash run_tuning.sh --topology=resnet50_v1 --dataset_location=${dataset_location} --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=resnet50_v1 --dataset_location=${dataset_location} --batch_size=1 --iters=500 --mode=benchmark" -fi - - -/bin/bash run_model_trigger_common.sh \ - --yaml=${yaml} \ - --framework=${FRAMEWORK} \ - --fwk_ver=${FRAMEWORK_VERSION} \ - --model=${model} \ - --model_src_dir=${model_src_dir} \ - --dataset_location=${dataset_location} \ - --input_model=${input_model} \ - --batch_size=${batch_size} \ - --strategy=${strategy} \ - --new_benchmark=${new_benchmark} \ - --tuning_cmd="${tuning_cmd}" \ - --benchmark_cmd="${benchmark_cmd}" \ - --inc_new_api="${inc_new_api}" \ - --mode=${mode} \ - --USE_TUNE_ACC=${USE_TUNE_ACC} \ - --PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \ - --BUILD_BUILDID=${BUILD_BUILDID} diff --git a/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh b/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh index 32bd2eb0109..f0675fe505e 100644 --- a/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh @@ -53,15 +53,15 @@ elif [ "${model}" == "resnet18_fx" ]; then tuning_cmd="bash run_quant.sh --topology=resnet18 --dataset_location=${dataset_location} --input_model=${input_model}" benchmark_cmd="bash run_benchmark.sh --topology=resnet18 --dataset_location=${dataset_location} --mode=performance --batch_size=${batch_size} --iters=500" elif [ "${model}" == "opt_125m_woq_gptq_int4" ]; then - model_src_dir="nlp/huggingface_models/language-modeling/quantization/llm" + model_src_dir="nlp/huggingface_models/language-modeling/quantization/weight_only" inc_new_api=3x_pt tuning_cmd="bash run_quant.sh --topology=opt_125m_woq_gptq_int4" elif [ "${model}" == "opt_125m_woq_gptq_int4_dq_bnb" ]; then - model_src_dir="nlp/huggingface_models/language-modeling/quantization/llm" + model_src_dir="nlp/huggingface_models/language-modeling/quantization/weight_only" inc_new_api=3x_pt tuning_cmd="bash run_quant.sh --topology=opt_125m_woq_gptq_int4_dq_bnb" elif [ "${model}" == "opt_125m_woq_gptq_int4_dq_ggml" ]; then - model_src_dir="nlp/huggingface_models/language-modeling/quantization/llm" + model_src_dir="nlp/huggingface_models/language-modeling/quantization/weight_only" inc_new_api=3x_pt tuning_cmd="bash run_quant.sh --topology=opt_125m_woq_gptq_int4_dq_ggml" fi @@ -72,6 +72,7 @@ FRAMEWORK="pytorch" source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest' if [[ "${inc_new_api}" == "3x"* ]]; then FRAMEWORK_VERSION="latest" + export 
LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH else FRAMEWORK_VERSION=${pytorch_version} TORCH_VISION_VERSION=${torchvision_version} diff --git a/.azure-pipelines/scripts/ut/3x/collect_log_3x.sh b/.azure-pipelines/scripts/ut/3x/collect_log_3x.sh index 386ec397c81..03f4fd02dbf 100644 --- a/.azure-pipelines/scripts/ut/3x/collect_log_3x.sh +++ b/.azure-pipelines/scripts/ut/3x/collect_log_3x.sh @@ -25,7 +25,8 @@ git config --global --add safe.directory /neural-compressor git fetch git checkout master rm -rf build dist *egg-info -echo y | pip uninstall neural_compressor_${1} +binary_index="${1%_fp8}" +echo y | pip uninstall neural_compressor_${binary_index} cd /neural-compressor/.azure-pipelines-pr/scripts && bash install_nc.sh ${1} coverage erase diff --git a/.azure-pipelines/scripts/ut/3x/coverage.3x_ort b/.azure-pipelines/scripts/ut/3x/coverage.3x_ort deleted file mode 100644 index 1404dccbaee..00000000000 --- a/.azure-pipelines/scripts/ut/3x/coverage.3x_ort +++ /dev/null @@ -1,15 +0,0 @@ -[run] -branch = True - -[report] -include = - */neural_compressor/common/* - */neural_compressor/onnxrt/* -exclude_lines = - pragma: no cover - raise NotImplementedError - raise TypeError - if self.device == "gpu": - if device == "gpu": - except ImportError: - except Exception as e: \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/3x/coverage.3x_pt b/.azure-pipelines/scripts/ut/3x/coverage.3x_pt index 34fc7f29fcf..dd4991f5fa7 100644 --- a/.azure-pipelines/scripts/ut/3x/coverage.3x_pt +++ b/.azure-pipelines/scripts/ut/3x/coverage.3x_pt @@ -5,6 +5,9 @@ branch = True include = */neural_compressor/common/* */neural_compressor/torch/* +omit = + */neural_compressor/torch/algorithms/fp8_quant/* + */neural_compressor/torch/amp/* exclude_lines = pragma: no cover raise NotImplementedError diff --git a/.azure-pipelines/scripts/ut/3x/coverage.3x_pt_fp8 b/.azure-pipelines/scripts/ut/3x/coverage.3x_pt_fp8 new file mode 100644 index 00000000000..9b12b354d83 --- /dev/null +++ b/.azure-pipelines/scripts/ut/3x/coverage.3x_pt_fp8 @@ -0,0 +1,14 @@ +[run] +branch = True + +[report] +include = + */neural_compressor/torch/algorithms/fp8_quant/* +exclude_lines = + pragma: no cover + raise NotImplementedError + raise TypeError + if self.device == "gpu": + if device == "gpu": + except ImportError: + except Exception as e: \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/3x/run_3x_ort.sh b/.azure-pipelines/scripts/ut/3x/run_3x_ort.sh deleted file mode 100644 index 5f8550ea742..00000000000 --- a/.azure-pipelines/scripts/ut/3x/run_3x_ort.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -python -c "import neural_compressor as nc" -test_case="run 3x ONNXRT" -echo "${test_case}" - -# install requirements -echo "set up UT env..." -pip install -r /neural-compressor/test/3x/onnxrt/requirements.txt -pip install pytest-cov -pip install pytest-html -pip list - -export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.3x_ort -inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])') -cd /neural-compressor/test/3x || exit 1 -rm -rf torch -rm -rf tensorflow - -LOG_DIR=/neural-compressor/log_dir -mkdir -p ${LOG_DIR} -ut_log_name=${LOG_DIR}/ut_3x_ort.log -pytest --cov="${inc_path}" -vs --disable-warnings --html=report.html --self-contained-html . 
2>&1 | tee -a ${ut_log_name} - -cp report.html ${LOG_DIR}/ - -if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then - echo "Find errors in pytest case, please check the output..." - echo "Please search for '== FAILURES ==' or '== ERRORS =='" - exit 1 -fi - -# if ut pass, collect the coverage file into artifacts -cp .coverage ${LOG_DIR}/.coverage - -echo "UT finished successfully! " \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/3x/run_3x_pt.sh b/.azure-pipelines/scripts/ut/3x/run_3x_pt.sh index b91bc182c7c..fba15ce6c4e 100644 --- a/.azure-pipelines/scripts/ut/3x/run_3x_pt.sh +++ b/.azure-pipelines/scripts/ut/3x/run_3x_pt.sh @@ -5,6 +5,7 @@ echo "${test_case}" # install requirements echo "set up UT env..." +export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH pip install -r /neural-compressor/test/3x/torch/requirements.txt pip install pytest-cov pip install pytest-html @@ -14,7 +15,8 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverag inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])') cd /neural-compressor/test/3x || exit 1 rm -rf tensorflow -rm -rf onnxrt +rm -rf torch/algorithms/fp8_quant +rm -rf torch/quantization/fp8_quant LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR} diff --git a/.azure-pipelines/scripts/ut/3x/run_3x_pt_fp8.sh b/.azure-pipelines/scripts/ut/3x/run_3x_pt_fp8.sh new file mode 100644 index 00000000000..753dd8ac440 --- /dev/null +++ b/.azure-pipelines/scripts/ut/3x/run_3x_pt_fp8.sh @@ -0,0 +1,42 @@ +#!/bin/bash +python -c "import neural_compressor as nc" +test_case="run 3x Torch Habana FP8" +echo "${test_case}" + +# install requirements +echo "set up UT env..." +export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH +sed -i '/^intel_extension_for_pytorch/d' /neural-compressor/test/3x/torch/requirements.txt +pip install -r /neural-compressor/test/3x/torch/requirements.txt +pip install git+https://github.com/HabanaAI/DeepSpeed.git@1.16.0 +pip install pytest-cov +pip install pytest-html +pip install pytest-html-merger +pip list + +export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.3x_pt_fp8 +inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])') +cd /neural-compressor/test/3x || exit 1 + +LOG_DIR=/neural-compressor/log_dir +mkdir -p ${LOG_DIR} +ut_log_name=${LOG_DIR}/ut_3x_pt_fp8.log +pytest --cov="${inc_path}" -vs --disable-warnings --html=report_1.html --self-contained-html torch/quantization/weight_only/test_load.py 2>&1 | tee -a ${ut_log_name} +pytest --cov="${inc_path}" -vs --disable-warnings --html=report_2.html --self-contained-html torch/quantization/weight_only/test_rtn.py 2>&1 | tee -a ${ut_log_name} +# pytest --cov="${inc_path}" -vs --disable-warnings --html=report_3.html --self-contained-html torch/quantization/weight_only/test_autoround.py 2>&1 | tee -a ${ut_log_name} +pytest --cov="${inc_path}" -vs --disable-warnings --html=report_4.html --self-contained-html torch/quantization/fp8_quant 2>&1 | tee -a ${ut_log_name} + +mkdir -p report && mv *.html report +pytest_html_merger -i ./report -o ./report.html +cp report.html ${LOG_DIR}/ + +if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then + echo "Find errors in pytest case, please check the output..." 
+ echo "Please search for '== FAILURES ==' or '== ERRORS =='" + exit 1 +fi + +# if ut pass, collect the coverage file into artifacts +cp .coverage ${LOG_DIR}/.coverage + +echo "UT finished successfully! " \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/3x/run_3x_tf.sh b/.azure-pipelines/scripts/ut/3x/run_3x_tf.sh index d1aee3a98cb..1032e6dc6f1 100644 --- a/.azure-pipelines/scripts/ut/3x/run_3x_tf.sh +++ b/.azure-pipelines/scripts/ut/3x/run_3x_tf.sh @@ -16,20 +16,38 @@ inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__ cd /neural-compressor/test/3x || exit 1 rm -rf torch rm -rf onnxrt -rm -rf tensorflow/quantization/ptq/newapi mv tensorflow/keras ../3x_keras -mv tensorflow/quantization/itex ./3x_itex +mv tensorflow/quantization/ptq/newapi ../3x_newapi LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR} ut_log_name=${LOG_DIR}/ut_3x_tf.log + +# test for tensorflow ut pytest --cov="${inc_path}" -vs --disable-warnings --html=report_tf_quant.html --self-contained-html ./tensorflow/quantization 2>&1 | tee -a ${ut_log_name} rm -rf tensorflow/quantization +pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_tf_test_quantize_model.html --self-contained-html ./tensorflow/test_quantize_model.py 2>&1 | tee -a ${ut_log_name} +rm -rf tensorflow/test_quantize_model.py pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_tf.html --self-contained-html . 2>&1 | tee -a ${ut_log_name} +# test for tensorflow new api ut +pip uninstall tensorflow -y +pip install /tf_dataset/tf_binary/230928/tensorflow*.whl +pip install cmake +pip install protobuf==3.20.3 +pip install horovod==0.27.0 +pip list +rm -rf tensorflow/* +mkdir -p tensorflow/quantization/ptq +mv ../3x_newapi tensorflow/quantization/ptq/newapi +find . -name "test*.py" | sed "s,\.\/,python -m pytest --cov=${inc_path} --cov-append -vs --disable-warnings ,g" > run.sh +cat run.sh +bash run.sh 2>&1 | tee -a ${ut_log_name} + +# test for itex ut rm -rf tensorflow/* mv ../3x_keras tensorflow/keras -mv ../3x_itex tensorflow/quantization/itex +pip uninstall tensorflow -y pip install intel-extension-for-tensorflow[cpu] pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_keras.html --self-contained-html ./tensorflow 2>&1 | tee -a ${ut_log_name} diff --git a/.azure-pipelines/scripts/ut/3x/run_3x_tf_new_api.sh b/.azure-pipelines/scripts/ut/3x/run_3x_tf_new_api.sh deleted file mode 100644 index 218e32a9b3a..00000000000 --- a/.azure-pipelines/scripts/ut/3x/run_3x_tf_new_api.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -python -c "import neural_compressor as nc" -test_case="run 3x New TF API" -echo "${test_case}" - -# install requirements -echo "set up UT env..." 
-pip install -r /neural-compressor/test/3x/tensorflow/requirements.txt -pip install pytest-html -pip install pytest-html-merger - -pip uninstall tensorflow -y -pip install /tf_dataset/tf_binary/230928/tensorflow*.whl -pip install cmake -pip install protobuf==3.20.3 -pip install horovod==0.27.0 -pip list - -cd /neural-compressor/test/3x || exit 1 -mv tensorflow/quantization/ptq/newapi ../3x_newapi -rm -rf ./* - -LOG_DIR=/neural-compressor/log_dir -mkdir -p ${LOG_DIR} -ut_log_name=${LOG_DIR}/ut_3x_new_tf.log - -mkdir -p tensorflow/quantization/ptq -mv ../3x_newapi tensorflow/quantization/ptq/newapi - -pytest -vs --disable-warnings --html=report_new_tf_quant_one_case.html --self-contained-html ./tensorflow/quantization/ptq/newapi/test_big_saved_model.py 2>&1 | tee -a ${ut_log_name} -rm -rf tensorflow/quantization/ptq/newapi/test_big_saved_model.py -pytest -vs --disable-warnings --html=report_new_tf_quant.html --self-contained-html ./tensorflow/quantization/ptq/newapi 2>&1 | tee -a ${ut_log_name} - -mkdir -p report -mv *.html report -pytest_html_merger -i ./report -o ./report.html - -cp report.html ${LOG_DIR}/ - -if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then - echo "Find errors in pytest case, please check the output..." - echo "Please search for '== FAILURES ==' or '== ERRORS =='" - exit 1 -fi - -echo "UT finished successfully! " \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index d5876b07cef..3715c485631 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -92,7 +92,7 @@ elif [[ $(echo "${test_case}" | grep -c "tf pruning") != 0 ]]; then fi if [[ $(echo "${test_case}" | grep -c "api") != 0 ]] || [[ $(echo "${test_case}" | grep -c "adaptor") != 0 ]]; then - pip install auto-round + pip install git+https://github.com/intel/auto-round.git@e24b9074af6cdb099e31c92eb81b7f5e9a4a244e fi # test deps diff --git a/.azure-pipelines/scripts/ut/run_basic_others.sh b/.azure-pipelines/scripts/ut/run_basic_others.sh index df7acf52cf7..301d6fd14d1 100644 --- a/.azure-pipelines/scripts/ut/run_basic_others.sh +++ b/.azure-pipelines/scripts/ut/run_basic_others.sh @@ -14,7 +14,6 @@ cd /neural-compressor/test || exit 1 find . -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh sed -i '/ adaptor\//d' run.sh sed -i '/ tfnewapi\//d' run.sh -sed -i '/ neural_coder\//d' run.sh sed -i '/ itex\//d' run.sh sed -i '/ pruning_with_pt/d' run.sh sed -i '/ pruning_with_tf/d' run.sh diff --git a/.azure-pipelines/scripts/ut/run_itrex.sh b/.azure-pipelines/scripts/ut/run_itrex.sh index 74ff01c7062..2bbbf958398 100644 --- a/.azure-pipelines/scripts/ut/run_itrex.sh +++ b/.azure-pipelines/scripts/ut/run_itrex.sh @@ -4,6 +4,10 @@ source /neural-compressor/.azure-pipelines/scripts/change_color.sh python -c "import neural_compressor as nc;print(nc.version.__version__)" echo "run itrex ut..." 
+# install inc 3x deps +pip install -r /neural-compressor/requirements_pt.txt +export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH + # prepare itrex git clone https://github.com/intel/intel-extension-for-transformers.git /intel-extension-for-transformers cd /intel-extension-for-transformers && git rev-parse --short HEAD @@ -15,6 +19,8 @@ sed -i '/neural-compressor.git/d' /intel-extension-for-transformers/tests/requir pip install -r /intel-extension-for-transformers/tests/requirements.txt # workaround pip install onnx==1.15.0 +echo "pip list itrex ut deps..." +pip list LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR} ut_log_name=${LOG_DIR}/ut_itrex.log diff --git a/.azure-pipelines/scripts/ut/run_ncoder.sh b/.azure-pipelines/scripts/ut/run_ncoder.sh deleted file mode 100644 index 3c487eebd7b..00000000000 --- a/.azure-pipelines/scripts/ut/run_ncoder.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -python -c "import neural_compressor as nc;print(nc.version.__version__)" -echo "run coder" - -echo "no FWKs need to be installed..." -echo "no requirements need to be installed..." - -cd /neural-compressor/test || exit 1 -find ./neural_coder -name "test*.py" | sed 's,\.\/,python ,g' | sed 's/$/ --verbose/' > run.sh - -LOG_DIR=/neural-compressor/log_dir -mkdir -p ${LOG_DIR} -ut_log_name=${LOG_DIR}/ut_neural_coder.log - -echo "cat run.sh..." -sort run.sh -o run.sh -cat run.sh | tee ${ut_log_name} -echo "------UT start-------" -bash -x run.sh 2>&1 | tee -a ${ut_log_name} -echo "------UT end -------" - -if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then - echo "Find errors in UT test, please check the output..." - exit 1 -fi -echo "UT finished successfully! " \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/run_neural_insights.sh b/.azure-pipelines/scripts/ut/run_neural_insights.sh deleted file mode 100644 index 5f0f2fe5521..00000000000 --- a/.azure-pipelines/scripts/ut/run_neural_insights.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -python -c "import neural_compressor as nc;print(nc.version.__version__)" -echo "run neural insights ut..." - -# Install Neural Solution -bash /neural-compressor/.azure-pipelines/scripts/install_neural_insights.sh - -# Install requirements for test -cd /neural-compressor/neural_insights/test || exit 1 -if [ -f "requirements.txt" ]; then - n=0 - until [ "$n" -ge 3 ] - do - python -m pip install --no-cache-dir -r requirements.txt && break - n=$((n+1)) - sleep 5 - done - pip list -else - echo "Not found requirements.txt file." -fi - -cd /neural-compressor/neural_insights || exit 1 -find ./test -name "test*.py" | sed 's,\.\/,python ,g' | sed 's/$/ --verbose/' > run.sh - -LOG_DIR=/neural-compressor/log_dir -mkdir -p ${LOG_DIR} -ut_log_name=${LOG_DIR}/ut_neural_insights.log - -echo "cat run.sh..." -sort run.sh -o run.sh -cat run.sh | tee ${ut_log_name} -echo "------UT start-------" -bash -x run.sh 2>&1 | tee -a ${ut_log_name} -echo "------UT end -------" - -if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then - echo "Find errors in UT test, please check the output..." - exit 1 -fi -echo "UT finished successfully! 
" \ No newline at end of file diff --git a/.azure-pipelines/scripts/ut/run_neural_solution.sh b/.azure-pipelines/scripts/ut/run_neural_solution.sh deleted file mode 100644 index 42041e4a087..00000000000 --- a/.azure-pipelines/scripts/ut/run_neural_solution.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -python -c "import neural_compressor as nc;print(nc.version.__version__)" -echo "run neural solution ut..." - -echo "no FWKs need to be installed..." - -# Install Neural Solution -bash /neural-compressor/.azure-pipelines/scripts/install_neural_solution.sh - -# Install requirements for test -cd /neural-compressor/neural_solution/test || exit 1 -if [ -f "requirements.txt" ]; then - n=0 - until [ "$n" -ge 3 ] - do - python -m pip install --no-cache-dir -r requirements.txt && break - n=$((n+1)) - sleep 5 - done - pip list -else - echo "Not found requirements.txt file." -fi - -cd /neural-compressor/neural_solution || exit 1 -find ./test -name "test*.py" | sed 's,\.\/,python ,g' | sed 's/$/ --verbose/' > run.sh - -LOG_DIR=/neural-compressor/log_dir -mkdir -p ${LOG_DIR} -ut_log_name=${LOG_DIR}/ut_neural_solution.log - -echo "cat run.sh..." -sort run.sh -o run.sh -cat run.sh | tee ${ut_log_name} -echo "------UT start-------" -bash -x run.sh 2>&1 | tee -a ${ut_log_name} -echo "------UT end -------" - -if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then - echo "Find errors in UT test, please check the output..." - exit 1 -fi -echo "UT finished successfully! " \ No newline at end of file diff --git a/.azure-pipelines/template/docker-template.yml b/.azure-pipelines/template/docker-template.yml index e7b563bcea7..51103c39e21 100644 --- a/.azure-pipelines/template/docker-template.yml +++ b/.azure-pipelines/template/docker-template.yml @@ -16,6 +16,9 @@ parameters: - name: repo type: string default: "https://github.com/intel/neural-compressor" + - name: imageSource + type: string + default: "build" steps: - task: Bash@3 @@ -24,7 +27,7 @@ steps: script: | docker ps -a if [[ $(docker ps -a | grep -i '${{ parameters.containerName }}'$) ]]; then - docker start $(docker ps -aq) + docker start $(docker ps -aq --filter "name=${{ parameters.containerName }}") echo "remove left files through container ..." docker exec ${{ parameters.containerName }} bash -c "ls -a /neural-compressor && rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* && ls -a /neural-compressor || true" fi @@ -33,7 +36,7 @@ steps: - ${{ if eq(parameters.dockerConfigName, 'commonDockerConfig') }}: - script: | rm -fr ${BUILD_SOURCESDIRECTORY} || sudo rm -fr ${BUILD_SOURCESDIRECTORY} || true - echo y | docker system prune --all + echo y | docker image prune -a displayName: "Clean workspace" - checkout: self @@ -45,7 +48,7 @@ steps: rm -fr ${BUILD_SOURCESDIRECTORY} || sudo rm -fr ${BUILD_SOURCESDIRECTORY} || true mkdir ${BUILD_SOURCESDIRECTORY} chmod 777 ${BUILD_SOURCESDIRECTORY} - echo y | docker system prune --all + echo y | docker image prune -a displayName: "Clean workspace" - checkout: none @@ -57,19 +60,25 @@ steps: git checkout master displayName: "Checkout out master" - - script: | - if [[ ! $(docker images | grep -i ${{ parameters.repoName }}:${{ parameters.repoTag }}) ]]; then - docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/${{parameters.dockerFileName}}.devel -t ${{ parameters.repoName }}:${{ parameters.repoTag }} . 
- fi - docker images | grep -i ${{ parameters.repoName }} - if [[ $? -ne 0 ]]; then - echo "NO Such Repo" - exit 1 - fi - displayName: "Build develop docker image" + - ${{ if eq(parameters.imageSource, 'build') }}: + - script: | + if [[ ! $(docker images | grep -i ${{ parameters.repoName }}:${{ parameters.repoTag }}) ]]; then + docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/${{parameters.dockerFileName}}.devel -t ${{ parameters.repoName }}:${{ parameters.repoTag }} . + fi + docker images | grep -i ${{ parameters.repoName }} + if [[ $? -ne 0 ]]; then + echo "NO Such Repo" + exit 1 + fi + displayName: "Build develop docker image" + + - ${{ if eq(parameters.imageSource, 'pull') }}: + - script: | + docker pull vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + displayName: "Pull habana docker image" - script: | - docker stop $(docker ps -aq) + docker stop $(docker ps -aq --filter "name=${{ parameters.containerName }}") docker rm -vf ${{ parameters.containerName }} || true env | sort displayName: "Clean docker container" @@ -79,8 +88,15 @@ steps: inputs: targetType: "inline" script: | - docker run -dit --disable-content-trust --privileged --name=${{ parameters.containerName }} --shm-size="2g" \ - -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor -v /tf_dataset:/tf_dataset -v /tf_dataset2:/tf_dataset2 ${{ parameters.repoName }}:${{ parameters.repoTag }} + if [[ "${{ parameters.imageSource }}" == "build" ]]; then + docker run -dit --disable-content-trust --privileged --name=${{ parameters.containerName }} --shm-size="2g" \ + -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor -v /tf_dataset:/tf_dataset -v /tf_dataset2:/tf_dataset2 \ + ${{ parameters.repoName }}:${{ parameters.repoTag }} + else + docker run -dit --disable-content-trust --privileged --name=${{ parameters.containerName }} --shm-size="2g" \ + --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --ipc=host \ + -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest + fi echo "Show the container list after docker run ... 
" docker ps -a displayName: "Docker run - ${{ parameters.containerName }} Container" diff --git a/.azure-pipelines/template/ut-template.yml b/.azure-pipelines/template/ut-template.yml index b7fecacd3d7..d8908d22a35 100644 --- a/.azure-pipelines/template/ut-template.yml +++ b/.azure-pipelines/template/ut-template.yml @@ -17,6 +17,9 @@ parameters: - name: utContainerName type: string default: "utTest" + - name: imageSource + type: string + default: "build" steps: - template: docker-template.yml @@ -27,6 +30,7 @@ steps: dockerFileName: "Dockerfile" containerName: ${{ parameters.utContainerName }} repo: ${{ parameters.repo }} + imageSource: ${{ parameters.imageSource }} - script: | docker exec ${{ parameters.utContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts \ diff --git a/.azure-pipelines/ut-3x-ort.yml b/.azure-pipelines/ut-3x-ort.yml deleted file mode 100644 index 42636df2314..00000000000 --- a/.azure-pipelines/ut-3x-ort.yml +++ /dev/null @@ -1,109 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_compressor/common - - neural_compressor/onnxrt - - test/3x/onnxrt - - test/3x/common - - setup.py - - requirements_ort.txt - - .azure-pipelines/scripts/ut/3x/run_3x_ort.sh - -pool: ICX-16C - -variables: - IMAGE_NAME: "neural-compressor" - IMAGE_TAG: "py310" - UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir - DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir - ARTIFACT_NAME: "UT_coverage_report_3x_ort" - REPO: $(Build.Repository.Uri) - -stages: - - stage: ONNXRT - displayName: Unit Test 3x ONNXRT - dependsOn: [] - jobs: - - job: - displayName: Unit Test 3x ONNXRT - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "3x/run_3x_ort" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_3x" - - - - stage: ONNXRT_baseline - displayName: Unit Test 3x ONNXRT baseline - dependsOn: [] - jobs: - - job: - displayName: Unit Test 3x ONNXRT baseline - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "gitCloneDockerConfig" - utScriptFileName: "3x/run_3x_ort" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_3x_baseline" - repo: $(REPO) - - - stage: Coverage - displayName: "Coverage Compare" - pool: - vmImage: "ubuntu-latest" - dependsOn: [ONNXRT, ONNXRT_baseline] - jobs: - - job: CollectDatafiles - steps: - - script: | - if [[ ! $(docker images | grep -i ${IMAGE_NAME}:${IMAGE_TAG}) ]]; then - docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/Dockerfile.devel -t ${IMAGE_NAME}:${IMAGE_TAG} . - fi - docker images | grep -i ${IMAGE_NAME} - if [[ $? 
-ne 0 ]]; then - echo "NO Such Repo" - exit 1 - fi - displayName: "Build develop docker image" - - - task: DownloadPipelineArtifact@2 - inputs: - artifact: - patterns: '*_coverage/.coverage' - path: $(DOWNLOAD_PATH) - - - script: | - echo "--- create container ---" - docker run -d -it --name="collectLogs" -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor ${IMAGE_NAME}:${IMAGE_TAG} /bin/bash - echo "--- docker ps ---" - docker ps - echo "--- collect logs ---" - docker exec collectLogs /bin/bash +x -c "cd /neural-compressor/.azure-pipelines/scripts \ - && bash install_nc.sh 3x_ort \ - && bash ut/3x/collect_log_3x.sh 3x_ort" - displayName: "Collect UT Coverage" - - - task: PublishPipelineArtifact@1 - condition: succeededOrFailed() - inputs: - targetPath: $(UPLOAD_PATH) - artifact: $(ARTIFACT_NAME) - publishLocation: "pipeline" - - - task: Bash@3 - condition: always() - inputs: - targetType: "inline" - script: | - docker exec collectLogs bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true" - displayName: "Docker clean up" diff --git a/.azure-pipelines/ut-3x-pt-fp8.yml b/.azure-pipelines/ut-3x-pt-fp8.yml new file mode 100644 index 00000000000..6f36ddecc64 --- /dev/null +++ b/.azure-pipelines/ut-3x-pt-fp8.yml @@ -0,0 +1,110 @@ +trigger: none + +pr: + autoCancel: true + drafts: false + branches: + include: + - master + paths: + include: + - .azure-pipelines/scripts/ut/3x/run_3x_pt_fp8.sh + - .azure-pipelines/ut-3x-pt-fp8.yml + - neural_compressor/common + - neural_compressor/torch + - test/3x/torch/algorithms/fp8_quant + - test/3x/torch/quantization/fp8_quant + - setup.py + - requirements_pt.txt + +pool: GAUDI + +variables: + IMAGE_NAME: "neural-compressor" + IMAGE_TAG: "py310" + UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir + DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir + ARTIFACT_NAME: "UT_coverage_report_3x_pt_fp8" + REPO: $(Build.Repository.Uri) + +stages: + - stage: Torch_habana + displayName: Torch 3x Habana FP8 + dependsOn: [] + jobs: + - job: + displayName: Torch 3x Habana FP8 + steps: + - template: template/ut-template.yml + parameters: + imageSource: "pull" + dockerConfigName: "commonDockerConfig" + utScriptFileName: "3x/run_3x_pt_fp8" + uploadPath: $(UPLOAD_PATH) + utArtifact: "ut_3x" + + - stage: Torch_habana_baseline + displayName: Torch 3x Habana FP8 baseline + dependsOn: [] + jobs: + - job: + displayName: Torch 3x Habana FP8 baseline + steps: + - template: template/ut-template.yml + parameters: + imageSource: "pull" + dockerConfigName: "gitCloneDockerConfig" + utScriptFileName: "3x/run_3x_pt_fp8" + uploadPath: $(UPLOAD_PATH) + utArtifact: "ut_3x_baseline" + + - stage: Coverage + displayName: "Coverage Compare" + pool: + vmImage: "ubuntu-latest" + dependsOn: [Torch_habana, Torch_habana_baseline] + jobs: + - job: CollectDatafiles + steps: + - script: | + if [[ ! $(docker images | grep -i ${IMAGE_NAME}:${IMAGE_TAG}) ]]; then + docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/Dockerfile.devel -t ${IMAGE_NAME}:${IMAGE_TAG} . + fi + docker images | grep -i ${IMAGE_NAME} + if [[ $? 
-ne 0 ]]; then + echo "NO Such Repo" + exit 1 + fi + displayName: "Build develop docker image" + + - task: DownloadPipelineArtifact@2 + inputs: + artifact: + patterns: '*_coverage/.coverage' + path: $(DOWNLOAD_PATH) + + - script: | + echo "--- create container ---" + docker run -d -it --name="collectLogs" -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor ${IMAGE_NAME}:${IMAGE_TAG} /bin/bash + echo "--- docker ps ---" + docker ps + echo "--- collect logs ---" + docker exec collectLogs /bin/bash +x -c "cd /neural-compressor/.azure-pipelines/scripts \ + && bash install_nc.sh 3x_pt_fp8 \ + && bash ut/3x/collect_log_3x.sh 3x_pt_fp8" + displayName: "Collect UT Coverage" + + - task: PublishPipelineArtifact@1 + condition: succeededOrFailed() + inputs: + targetPath: $(UPLOAD_PATH) + artifact: $(ARTIFACT_NAME) + publishLocation: "pipeline" + + - task: Bash@3 + condition: always() + inputs: + targetType: "inline" + script: | + docker exec collectLogs bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true" + displayName: "Docker clean up" diff --git a/.azure-pipelines/ut-3x-tf.yml b/.azure-pipelines/ut-3x-tf.yml index 0fdc0c02f26..df852e28000 100644 --- a/.azure-pipelines/ut-3x-tf.yml +++ b/.azure-pipelines/ut-3x-tf.yml @@ -41,20 +41,6 @@ stages: uploadPath: $(UPLOAD_PATH) utArtifact: "ut_3x" - - stage: NewTF - displayName: Unit Test 3x New TF API - dependsOn: [] - jobs: - - job: - displayName: Unit Test 3x New TF API - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "3x/run_3x_tf_new_api" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_3x_tf_new_api" - - stage: TensorFlow_baseline displayName: Unit Test 3x TensorFlow baseline dependsOn: [] diff --git a/.azure-pipelines/ut-basic-no-cover.yml b/.azure-pipelines/ut-basic-no-cover.yml deleted file mode 100644 index 9a0fbb190b5..00000000000 --- a/.azure-pipelines/ut-basic-no-cover.yml +++ /dev/null @@ -1,109 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_compressor - - test - - setup.py - - requirements.txt - - .azure-pipelines/scripts/ut - exclude: - - test/neural_coder - - test/3x - - neural_compressor/common - - neural_compressor/torch - - neural_compressor/tensorflow - - neural_compressor/onnxrt - - .azure-pipelines/scripts/ut/3x - -pool: ICX-16C - -variables: - IMAGE_NAME: "neural-compressor" - IMAGE_TAG: "py310" - UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir - DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir - ARTIFACT_NAME: "UT_report" - REPO: $(Build.Repository.Uri) - -stages: - - stage: Adaptor - displayName: Unit Test FWKs adaptor - dependsOn: [] - jobs: - - job: - displayName: Test FWKs adaptor - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "run_basic_adaptor" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_adaptor" - utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" - - - stage: API - displayName: Unit Test User facing API - dependsOn: [] - jobs: - - job: - displayName: Test User facing API - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "run_basic_api" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_api" - utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" - - - stage: Pruning - displayName: Unit Test Pruning - dependsOn: [] - jobs: - - job: - displayName: Test PyTorch Pruning - steps: - - 
template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "run_basic_pt_pruning" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_pt-pruning" - utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" - - job: - displayName: Test TensorFlow Pruning - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "run_basic_tf_pruning" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_tf-pruning" - utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" - - - stage: Others - displayName: Unit Test other basic case - dependsOn: [] - jobs: - - job: - displayName: Test other basic case - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: "commonDockerConfig" - utScriptFileName: "run_basic_others" - uploadPath: $(UPLOAD_PATH) - utArtifact: "ut_others" - utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" diff --git a/.azure-pipelines/ut-basic.yml b/.azure-pipelines/ut-basic.yml index a6e34a466ca..2249f0c2590 100644 --- a/.azure-pipelines/ut-basic.yml +++ b/.azure-pipelines/ut-basic.yml @@ -14,7 +14,6 @@ pr: - requirements.txt - .azure-pipelines/scripts/ut exclude: - - test/neural_coder - test/3x - neural_compressor/common - neural_compressor/torch diff --git a/.azure-pipelines/ut-itrex.yml b/.azure-pipelines/ut-itrex.yml index 574c8a32677..2f038270234 100644 --- a/.azure-pipelines/ut-itrex.yml +++ b/.azure-pipelines/ut-itrex.yml @@ -13,10 +13,6 @@ pr: - requirements.txt - .azure-pipelines/scripts/ut/run_itrex.sh - .azure-pipelines/ut-itrex.yml - exclude: - - neural_compressor/common - - neural_compressor/torch - - neural_compressor/tensorflow pool: MODEL_PERF_TEST diff --git a/.azure-pipelines/ut-ncoder.yml b/.azure-pipelines/ut-ncoder.yml deleted file mode 100644 index a1512ae4803..00000000000 --- a/.azure-pipelines/ut-ncoder.yml +++ /dev/null @@ -1,33 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_coder - - test/neural_coder - - setup.py - -pool: ICX-16C - -variables: - UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir - -stages: -- stage: - displayName: Unit Test for Neural Coder - jobs: - - job: - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: 'commonDockerConfig' - utScriptFileName: 'run_ncoder' - uploadPath: $(UPLOAD_PATH) - utArtifact: 'ut_ncoder' - utTestMode: "no-coverage" - utContainerName: "utTest-ncoder" diff --git a/.azure-pipelines/ut-neural-insights.yaml b/.azure-pipelines/ut-neural-insights.yaml deleted file mode 100644 index b73026c623b..00000000000 --- a/.azure-pipelines/ut-neural-insights.yaml +++ /dev/null @@ -1,33 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_insights - - setup.py - - .azure-pipelines/ut-neural-insights.yaml - -pool: ICX-16C - -variables: - UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir - -stages: - - stage: - displayName: Unit Test for Neural Insights - jobs: - - job: - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: 'commonDockerConfig' - utScriptFileName: 'run_neural_insights' - uploadPath: $(UPLOAD_PATH) - utArtifact: 'ut_neural-insights' - utTestMode: "no-coverage" - utContainerName: "utTest-nInsights" diff --git a/.azure-pipelines/ut-neural-solution.yaml b/.azure-pipelines/ut-neural-solution.yaml deleted file mode 100644 index df717899b57..00000000000 --- 
a/.azure-pipelines/ut-neural-solution.yaml +++ /dev/null @@ -1,33 +0,0 @@ -trigger: none - -pr: - autoCancel: true - drafts: false - branches: - include: - - master - paths: - include: - - neural_solution - - setup.py - - .azure-pipelines/ut-neural-solution.yaml - -pool: ICX-16C - -variables: - UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir - -stages: - - stage: - displayName: Unit Test for Neural Solution - jobs: - - job: - steps: - - template: template/ut-template.yml - parameters: - dockerConfigName: 'commonDockerConfig' - utScriptFileName: 'run_neural_solution' - uploadPath: $(UPLOAD_PATH) - utArtifact: 'ut_neural-solution' - utTestMode: "no-coverage" - utContainerName: "utTest-nSolution" diff --git a/.coverage b/.coverage deleted file mode 100644 index 02b5b52790b..00000000000 Binary files a/.coverage and /dev/null differ diff --git a/.github/checkgroup.yml b/.github/checkgroup.yml index 697e70799c4..9f566749da0 100644 --- a/.github/checkgroup.yml +++ b/.github/checkgroup.yml @@ -11,29 +11,6 @@ subprojects: - "Code-Scan" - "Code-Scan (Bandit Code Scan Bandit)" - "Code-Scan (DocStyle Code Scan DocStyle)" - - "Code-Scan (Pylint Code Scan Pylint)" - - - id: "Code Scan Neural-Insights Tests workflow" - paths: - - "neural_insights/**" - - "setup.py" - - ".azure-pipelines/code-scan-neural-insights.yml" - checks: - - "Code-Scan-Neural-Insights" - - "Code-Scan-Neural-Insights (Bandit Code Scan Bandit)" - - "Code-Scan-Neural-Insights (DocStyle Code Scan DocStyle)" - - "Code-Scan-Neural-Insights (Pylint Code Scan Pylint)" - - - id: "Code Scan Neural-Solution Tests workflow" - paths: - - "neural_solution/**" - - "setup.py" - - ".azure-pipelines/code-scan-neural-solution.yml" - checks: - - "Code-Scan-Neural-Solution" - - "Code-Scan-Neural-Solution (Bandit Code Scan Bandit)" - - "Code-Scan-Neural-Solution (DocStyle Code Scan DocStyle)" - - "Code-Scan-Neural-Solution (Pylint Code Scan Pylint)" - id: "Model Tests workflow" paths: @@ -51,13 +28,8 @@ subprojects: - "Model-Test" - "Model-Test (Generate Report GenerateReport)" - "Model-Test (Run ONNX Model resnet50-v1-12)" - - "Model-Test (Run PyTorch Model resnet18)" - "Model-Test (Run PyTorch Model resnet18_fx)" - - "Model-Test (Run TensorFlow Model darknet19)" - - "Model-Test (Run TensorFlow Model inception_v1)" - - "Model-Test (Run TensorFlow Model resnet-101)" - "Model-Test (Run TensorFlow Model resnet50v1.5)" - - "Model-Test (Run TensorFlow Model ssd_mobilenet_v1_ckpt)" - "Model-Test (Run TensorFlow Model ssd_resnet50_v1)" - id: "Model Tests 3x workflow" @@ -82,7 +54,6 @@ subprojects: - "setup.py" - "requirements.txt" - ".azure-pipelines/scripts/ut/**" - - "!test/neural_coder/**" - "!test/3x/**" - "!neural_compressor/common/**" - "!neural_compressor/torch/**" @@ -107,28 +78,6 @@ subprojects: - "UT-Basic (Unit Test other basic case Test other basic case)" - "UT-Basic (Unit Test other cases baseline Test other cases baseline)" - - id: "Unit Tests basic no coverage workflow" - paths: - - "neural_compressor/**" - - "test/**" - - "setup.py" - - "requirements.txt" - - ".azure-pipelines/scripts/ut/**" - - "!test/neural_coder/**" - - "!test/3x/**" - - "!neural_compressor/common/**" - - "!neural_compressor/torch/**" - - "!neural_compressor/tensorflow/**" - - "!neural_compressor/onnxrt/**" - - "!.azure-pipelines/scripts/ut/3x/**" - checks: - - "UT-Basic-No-Coverage" - - "UT-Basic-No-Coverage (Unit Test FWKs adaptor Test FWKs adaptor)" - - "UT-Basic-No-Coverage (Unit Test Pruning Test PyTorch Pruning)" - - "UT-Basic-No-Coverage (Unit Test Pruning Test 
TensorFlow Pruning)" - - "UT-Basic-No-Coverage (Unit Test User facing API Test User facing API)" - - "UT-Basic-No-Coverage (Unit Test other basic case Test other basic case)" - - id: "Unit Tests ITREX workflow" paths: - "neural_compressor/**" @@ -142,28 +91,6 @@ subprojects: checks: - "UT-ITREX" - - id: "Unit Tests Neural-Insights workflow" - paths: - - "neural_insights/**" - - "setup.py" - checks: - - "UT-Neural-Insights" - - - id: "Unit Tests Neural-Solution workflow" - paths: - - "neural_solution/**" - - "setup.py" - checks: - - "UT-Neural-Solution" - - - id: "Unit Tests Neural-Coder workflow" - paths: - - "neural_coder/**" - - "test/neural_coder/**" - - "setup.py" - checks: - - "UT-Coder" - - id: "Unit Tests 3x-TensorFlow workflow" paths: - "neural_compressor/common/**" @@ -191,16 +118,3 @@ subprojects: - "UT-3x-Torch (Coverage Compare CollectDatafiles)" - "UT-3x-Torch (Unit Test 3x Torch Unit Test 3x Torch)" - "UT-3x-Torch (Unit Test 3x Torch baseline Unit Test 3x Torch baseline)" - - - id: "Unit Tests 3x-ONNXRT workflow" - paths: - - "neural_compressor/common/**" - - "neural_compressor/onnxrt/**" - - "test/3x/onnxrt/**" - - "setup.py" - - "requirements_ort.txt" - checks: - - "UT-3x-ONNXRT" - - "UT-3x-ONNXRT (Coverage Compare CollectDatafiles)" - - "UT-3x-ONNXRT (Unit Test 3x ONNXRT Unit Test 3x ONNXRT)" - - "UT-3x-ONNXRT (Unit Test 3x ONNXRT baseline Unit Test 3x ONNXRT baseline)" diff --git a/.github/workflows/Scanner_Bdba.yml b/.github/workflows/Scanner_Bdba.yml index 2351406c9a7..4bef08dfc88 100644 --- a/.github/workflows/Scanner_Bdba.yml +++ b/.github/workflows/Scanner_Bdba.yml @@ -1,4 +1,5 @@ name: Scanner BDBA +permissions: read-all on: workflow_dispatch: diff --git a/.github/workflows/Scanner_Coverity.yml b/.github/workflows/Scanner_Coverity.yml index 50f44da4bf5..a95489e5442 100644 --- a/.github/workflows/Scanner_Coverity.yml +++ b/.github/workflows/Scanner_Coverity.yml @@ -1,4 +1,5 @@ name: Scanner Coverity +permissions: read-all on: workflow_dispatch: diff --git a/.github/workflows/Scanner_McAfee.yml b/.github/workflows/Scanner_McAfee.yml index 3d449ddaccf..30b882bcf47 100644 --- a/.github/workflows/Scanner_McAfee.yml +++ b/.github/workflows/Scanner_McAfee.yml @@ -1,4 +1,5 @@ name: Virus Scan +permissions: read-all on: workflow_dispatch: diff --git a/.github/workflows/probot.yml b/.github/workflows/probot.yml index 290af2a86a7..fc74b74134d 100644 --- a/.github/workflows/probot.yml +++ b/.github/workflows/probot.yml @@ -1,4 +1,5 @@ name: Probot +permissions: read-all on: pull_request: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index ec06b4bada1..f71b29376bd 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -1,4 +1,5 @@ name: Publish +permissions: {} on: push: @@ -7,9 +8,10 @@ on: jobs: build: - runs-on: ubuntu-latest - + permissions: + pull-requests: write + contents: write steps: - uses: actions/checkout@v3 - name: Build Online Document diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19e48389c04..d93d64aba33 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,16 +2,6 @@ ci: autofix_prs: true autoupdate_schedule: quarterly -exclude: | - (?x)^( - conda_meta/.+| - neural_insights/gui.+| - neural_insights/test.+| - neural_solution/frontend/gRPC/proto/neural_solution_pb2.py| - neural_coder/extensions/.+| - neural_coder/examples/.+ - )$ - repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 @@ -60,13 +50,7 @@ repos: - id: insert-license files: | (?x)^( - 
neural_solution/.*(py|yaml|yml|sh)| - neural_compressor/.*(py|yaml|yml|sh)| - neural_insights/.*(py|yaml|yml|sh) - )$ - exclude: | - (?x)^( - neural_solution/test/.* + neural_compressor/.*(py|yaml|yml|sh) )$ args: [ @@ -144,7 +128,8 @@ repos: examples/.*(txt|patch)| examples/onnxrt/nlp/huggingface_model/text_generation/llama/quantization/ptq_static/prompt.json| examples/notebook/dynas/ResNet50_Quantiation_Search_Supernet_NAS.ipynb| - examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb + examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb| + neural_compressor/torch/algorithms/fp8_quant/internal/diffusion_evaluation/SR_evaluation/imagenet1000_clsidx_to_labels.txt )$ - repo: https://github.com/astral-sh/ruff-pre-commit diff --git a/README.md b/README.md index 608080432bd..f4694e991e9 100644 --- a/README.md +++ b/README.md @@ -2,114 +2,119 @@ Intel® Neural Compressor =========================== -

An open-source Python library supporting popular model compression techniques on all mainstream deep learning frameworks (TensorFlow, PyTorch, ONNX Runtime, and MXNet)

+

An open-source Python library supporting popular model compression techniques on all mainstream deep learning frameworks (TensorFlow, PyTorch, and ONNX Runtime)

[![python](https://img.shields.io/badge/python-3.8%2B-blue)](https://github.com/intel/neural-compressor) -[![version](https://img.shields.io/badge/release-2.5-green)](https://github.com/intel/neural-compressor/releases) +[![version](https://img.shields.io/badge/release-3.0-green)](https://github.com/intel/neural-compressor/releases) [![license](https://img.shields.io/badge/license-Apache%202-blue)](https://github.com/intel/neural-compressor/blob/master/LICENSE) [![coverage](https://img.shields.io/badge/coverage-85%25-green)](https://github.com/intel/neural-compressor) [![Downloads](https://static.pepy.tech/personalized-badge/neural-compressor?period=total&units=international_system&left_color=grey&right_color=green&left_text=downloads)](https://pepy.tech/project/neural-compressor) -[Architecture](./docs/source/design.md#architecture)   |   [Workflow](./docs/source/design.md#workflow)   |   [LLMs Recipes](./docs/source/llm_recipes.md)   |   [Results](./docs/source/validated_model_list.md)   |   [Documentations](https://intel.github.io/neural-compressor) +[Architecture](./docs/source/3x/design.md#architecture)   |   [Workflow](./docs/source/3x/design.md#workflows)   |   [LLMs Recipes](./docs/source/llm_recipes.md)   |   [Results](./docs/source/validated_model_list.md)   |   [Documentations](https://intel.github.io/neural-compressor) ---
-Intel® Neural Compressor aims to provide popular model compression techniques such as quantization, pruning (sparsity), distillation, and neural architecture search on mainstream frameworks such as [TensorFlow](https://www.tensorflow.org/), [PyTorch](https://pytorch.org/), [ONNX Runtime](https://onnxruntime.ai/), and [MXNet](https://mxnet.apache.org/), +Intel® Neural Compressor aims to provide popular model compression techniques such as quantization, pruning (sparsity), distillation, and neural architecture search on mainstream frameworks such as [TensorFlow](https://www.tensorflow.org/), [PyTorch](https://pytorch.org/), and [ONNX Runtime](https://onnxruntime.ai/), as well as Intel extensions such as [Intel Extension for TensorFlow](https://github.com/intel/intel-extension-for-tensorflow) and [Intel Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch). In particular, the tool provides the key features, typical examples, and open collaborations as below: -* Support a wide range of Intel hardware such as [Intel Xeon Scalable Processors](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html), [Intel Xeon CPU Max Series](https://www.intel.com/content/www/us/en/products/details/processors/xeon/max-series.html), [Intel Data Center GPU Flex Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/flex-series.html), and [Intel Data Center GPU Max Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/max-series.html) with extensive testing; support AMD CPU, ARM CPU, and NVidia GPU through ONNX Runtime with limited testing +* Support a wide range of Intel hardware such as [Intel Gaudi Al Accelerators](https://www.intel.com/content/www/us/en/products/details/processors/ai-accelerators/gaudi-overview.html), [Intel Core Ultra Processors](https://www.intel.com/content/www/us/en/products/details/processors/core-ultra.html), [Intel Xeon Scalable Processors](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html), [Intel Xeon CPU Max Series](https://www.intel.com/content/www/us/en/products/details/processors/xeon/max-series.html), [Intel Data Center GPU Flex Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/flex-series.html), and [Intel Data Center GPU Max Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/max-series.html) with extensive testing; +support AMD CPU, ARM CPU, and NVidia GPU through ONNX Runtime with limited testing; support NVidia GPU for some WOQ algorithms like AutoRound and HQQ. 
-* Validate popular LLMs such as [LLama2](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [Falcon](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [GPT-J](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [Bloom](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [OPT](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), and more than 10,000 broad models such as [Stable Diffusion](/examples/pytorch/nlp/huggingface_models/text-to-image/quantization), [BERT-Large](/examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_static/fx), and [ResNet50](/examples/pytorch/image_recognition/torchvision_models/quantization/ptq/cpu/fx) from popular model hubs such as [Hugging Face](https://huggingface.co/), [Torch Vision](https://pytorch.org/vision/stable/index.html), and [ONNX Model Zoo](https://github.com/onnx/models#models), by leveraging zero-code optimization solution [Neural Coder](/neural_coder#what-do-we-offer) and automatic [accuracy-driven](/docs/source/design.md#workflow) quantization strategies +* Validate popular LLMs such as [LLama2](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [Falcon](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [GPT-J](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [Bloom](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), [OPT](/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/llm), and more than 10,000 broad models such as [Stable Diffusion](/examples/pytorch/nlp/huggingface_models/text-to-image/quantization), [BERT-Large](/examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_static/fx), and [ResNet50](/examples/pytorch/image_recognition/torchvision_models/quantization/ptq/cpu/fx) from popular model hubs such as [Hugging Face](https://huggingface.co/), [Torch Vision](https://pytorch.org/vision/stable/index.html), and [ONNX Model Zoo](https://github.com/onnx/models#models), with automatic [accuracy-driven](/docs/source/design.md#workflow) quantization strategies * Collaborate with cloud marketplaces such as [Google Cloud Platform](https://console.cloud.google.com/marketplace/product/bitnami-launchpad/inc-tensorflow-intel?project=verdant-sensor-286207), [Amazon Web Services](https://aws.amazon.com/marketplace/pp/prodview-yjyh2xmggbmga#pdp-support), and [Azure](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/bitnami.inc-tensorflow-intel), software platforms such as [Alibaba Cloud](https://www.intel.com/content/www/us/en/developer/articles/technical/quantize-ai-by-oneapi-analytics-on-alibaba-cloud.html), [Tencent TACO](https://new.qq.com/rain/a/20221202A00B9S00) and [Microsoft Olive](https://github.com/microsoft/Olive), and open AI ecosystem such as [Hugging Face](https://huggingface.co/blog/intel), [PyTorch](https://pytorch.org/tutorials/recipes/intel_neural_compressor_for_pytorch.html), [ONNX](https://github.com/onnx/models#models), [ONNX Runtime](https://github.com/microsoft/onnxruntime), and [Lightning AI](https://github.com/Lightning-AI/lightning/blob/master/docs/source-pytorch/advanced/post_training_quantization.rst) ## What's New -* [2024/03] A new SOTA approach [AutoRound](https://github.com/intel/auto-round) Weight-Only Quantization on [Intel Gaudi2 AI accelerator](https://habana.ai/products/gaudi2/) is available for LLMs. 
+* [2024/07] From 3.0 release, framework extension API is recommended to be used for quantization. +* [2024/07] Performance optimizations and usability improvements on [client-side](./docs/source/3x/client_quant.md). ## Installation +### Install Framework +#### Install torch for CPU +```Shell +pip install torch --index-url https://download.pytorch.org/whl/cpu +``` +#### Use Docker Image with torch installed for HPU +https://docs.habana.ai/en/latest/Installation_Guide/Bare_Metal_Fresh_OS.html#bare-metal-fresh-os-single-click + +> **Note**: +> There is a version mapping between Intel Neural Compressor and Gaudi Software Stack, please refer to this [table](./docs/source/3x/gaudi_version_map.md) and make sure to use a matched combination. + +#### Install torch/intel_extension_for_pytorch for Intel GPU +https://intel.github.io/intel-extension-for-pytorch/index.html#installation + +#### Install torch for other platform +https://pytorch.org/get-started/locally + +#### Install tensorflow +```Shell +pip install tensorflow +``` ### Install from pypi ```Shell -pip install neural-compressor +# Install 2.X API + Framework extension API + PyTorch dependency +pip install neural-compressor[pt] +# Install 2.X API + Framework extension API + TensorFlow dependency +pip install neural-compressor[tf] ``` -> **Note**: -> Further installation methods can be found under [Installation Guide](https://github.com/intel/neural-compressor/blob/master/docs/source/installation_guide.md). check out our [FAQ](https://github.com/intel/neural-compressor/blob/master/docs/source/faq.md) for more details. +> **Note**: +> Further installation methods can be found under [Installation Guide](./docs/source/installation_guide.md). check out our [FAQ](./docs/source/faq.md) for more details. ## Getting Started -Setting up the environment: +Setting up the environment: ```bash pip install "neural-compressor>=2.3" "transformers>=4.34.0" torch torchvision ``` After successfully installing these packages, try your first quantization program. -### Weight-Only Quantization (LLMs) -Following example code demonstrates Weight-Only Quantization on LLMs, it supports Intel CPU, Intel Gauid2 AI Accelerator, Nvidia GPU, best device will be selected automatically. +### [FP8 Quantization](./examples/3.x_api/pytorch/cv/fp8_quant/) +Following example code demonstrates FP8 Quantization, it is supported by Intel Gaudi2 AI Accelerator. -To try on Intel Gaudi2, docker image with Gaudi Software Stack is recommended, please refer to following script for environment setup. More details can be found in [Gaudi Guide](https://docs.habana.ai/en/latest/Installation_Guide/Bare_Metal_Fresh_OS.html#launch-docker-image-that-was-built). +To try on Intel Gaudi2, docker image with Gaudi Software Stack is recommended, please refer to following script for environment setup. More details can be found in [Gaudi Guide](https://docs.habana.ai/en/latest/Installation_Guide/Bare_Metal_Fresh_OS.html#launch-docker-image-that-was-built). 
```bash -docker run -it --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --ipc=host vault.habana.ai/gaudi-docker/1.14.0/ubuntu22.04/habanalabs/pytorch-installer-2.1.1:latest - -# Check the container ID -docker ps - -# Login into container -docker exec -it bash - -# Install the optimum-habana -pip install --upgrade-strategy eager optimum[habana] - -# Install INC/auto_round -pip install neural-compressor auto_round +# Run a container with an interactive shell +docker run -it --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --ipc=host vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest ``` Run the example: ```python -from transformers import AutoModel, AutoTokenizer - -from neural_compressor.config import PostTrainingQuantConfig -from neural_compressor.quantization import fit -from neural_compressor.adaptor.torch_utils.auto_round import get_dataloader - -model_name = "EleutherAI/gpt-neo-125m" -float_model = AutoModel.from_pretrained(model_name) -tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) -dataloader = get_dataloader(tokenizer, seqlen=2048) - -woq_conf = PostTrainingQuantConfig( - approach="weight_only", - op_type_dict={ - ".*": { # match all ops - "weight": { - "dtype": "int", - "bits": 4, - "algorithm": "AUTOROUND", - }, - } - }, +from neural_compressor.torch.quantization import ( + FP8Config, + prepare, + convert, ) -quantized_model = fit(model=float_model, conf=woq_conf, calib_dataloader=dataloader) +import torchvision.models as models + +model = models.resnet18() +qconfig = FP8Config(fp8_config="E4M3") +model = prepare(model, qconfig) +# customer defined calibration +calib_func(model) +model = convert(model) ``` -**Note:** -To try INT4 model inference, please directly use [Intel Extension for Transformers](https://github.com/intel/intel-extension-for-transformers), which leverages Intel Neural Compressor for model quantization. +### Weight-Only Large Language Model Loading (LLMs) -### Static Quantization (Non-LLMs) +Following example code demonstrates weight-only large language model loading on Intel Gaudi2 AI Accelerator. ```python -from torchvision import models +from neural_compressor.torch.quantization import load + +model_name = "TheBloke/Llama-2-7B-GPTQ" +model = load( + model_name_or_path=model_name, + format="huggingface", + device="hpu", + torch_dtype=torch.bfloat16, +) +``` -from neural_compressor.config import PostTrainingQuantConfig -from neural_compressor.data import DataLoader, Datasets -from neural_compressor.quantization import fit +**Note:** -float_model = models.resnet18() -dataset = Datasets("pytorch")["dummy"](shape=(1, 3, 224, 224)) -calib_dataloader = DataLoader(framework="pytorch", dataset=dataset) -static_quant_conf = PostTrainingQuantConfig() -quantized_model = fit(model=float_model, conf=static_quant_conf, calib_dataloader=calib_dataloader) -``` +Intel Neural Compressor will convert the model format from auto-gptq to hpu format on the first load and save hpu_model.safetensors to the local cache directory for the next load. So it may take a while to load for the first time. 
## Documentation @@ -121,88 +126,69 @@ quantized_model = fit(model=float_model, conf=static_quant_conf, calib_dataloade - Architecture - Workflow - APIs - LLMs Recipes - Examples + Architecture + Workflow + APIs + LLMs Recipes + Examples - Python-based APIs + PyTorch Extension APIs - Quantization - Advanced Mixed Precision - Pruning (Sparsity) - Distillation - - - Orchestration - Benchmarking - Distributed Compression - Model Export - - - - - Neural Coder (Zero-code Optimization) + Overview + Dynamic Quantization + Static Quantization + Smooth Quantization - - - Launcher - JupyterLab Extension - Visual Studio Code Extension - Supported Matrix + Weight-Only Quantization + FP8 Quantization + MX Quantization + Mixed Precision - Advanced Topics + Tensorflow Extension APIs - Adaptor - Strategy - Distillation for Quantization - SmoothQuant - - - Weight-Only Quantization (INT8/INT4/FP4/NF4) - FP8 Quantization - Layer-Wise Quantization + Overview + Static Quantization + Smooth Quantization - Innovations for Productivity + Other Modules - Neural Insights - Neural Solution + Auto Tune + Benchmark -> **Note**: -> Further documentations can be found at [User Guide](https://github.com/intel/neural-compressor/blob/master/docs/source/user_guide.md). +> **Note**: +> From 3.0 release, we recommend to use 3.X API. Compression techniques during training such as QAT, Pruning, Distillation only available in [2.X API](https://github.com/intel/neural-compressor/blob/master/docs/source/2x_user_guide.md) currently. ## Selected Publications/Events -* Blog by Intel: [Neural Compressor: Boosting AI Model Efficiency](https://community.intel.com/t5/Blogs/Tech-Innovation/Artificial-Intelligence-AI/Neural-Compressor-Boosting-AI-Model-Efficiency/post/1604740) (June 2024) +* Blog by Intel: [Neural Compressor: Boosting AI Model Efficiency](https://community.intel.com/t5/Blogs/Tech-Innovation/Artificial-Intelligence-AI/Neural-Compressor-Boosting-AI-Model-Efficiency/post/1604740) (June 2024) * Blog by Intel: [Optimization of Intel AI Solutions for Alibaba Cloud’s Qwen2 Large Language Models](https://www.intel.com/content/www/us/en/developer/articles/technical/intel-ai-solutions-accelerate-alibaba-qwen2-llms.html) (June 2024) * Blog by Intel: [Accelerate Meta* Llama 3 with Intel AI Solutions](https://www.intel.com/content/www/us/en/developer/articles/technical/accelerate-meta-llama3-with-intel-ai-solutions.html) (Apr 2024) * EMNLP'2023 (Under Review): [TEQ: Trainable Equivalent Transformation for Quantization of LLMs](https://openreview.net/forum?id=iaI8xEINAf&referrer=%5BAuthor%20Console%5D) (Sep 2023) * arXiv: [Efficient Post-training Quantization with FP8 Formats](https://arxiv.org/abs/2309.14592) (Sep 2023) * arXiv: [Optimize Weight Rounding via Signed Gradient Descent for the Quantization of LLMs](https://arxiv.org/abs/2309.05516) (Sep 2023) -> **Note**: +> **Note**: > View [Full Publication List](https://github.com/intel/neural-compressor/blob/master/docs/source/publication_list.md). ## Additional Content @@ -212,8 +198,8 @@ quantized_model = fit(model=float_model, conf=static_quant_conf, calib_dataloade * [Legal Information](./docs/source/legal_information.md) * [Security Policy](SECURITY.md) -## Communication +## Communication - [GitHub Issues](https://github.com/intel/neural-compressor/issues): mainly for bug reports, new feature requests, question asking, etc. -- [Email](mailto:inc.maintainers@intel.com): welcome to raise any interesting research ideas on model compression techniques by email for collaborations. 
+- [Email](mailto:inc.maintainers@intel.com): welcome to raise any interesting research ideas on model compression techniques by email for collaborations. - [Discord Channel](https://discord.com/invite/Wxk3J3ZJkU): join the discord channel for more flexible technical discussion. - [WeChat group](/docs/source/imgs/wechat_group.jpg): scan the QA code to join the technical discussion. diff --git a/conda_meta/basic/meta.yaml b/conda_meta/basic/meta.yaml deleted file mode 100644 index c894131132e..00000000000 --- a/conda_meta/basic/meta.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{% set version = "2.6" %} -{% set buildnumber = 0 %} -package: - name: neural-compressor - version: {{version}} -build: - script_env: - - NC_WHL - number: {{buildnumber}} - noarch: python - script: pip install --no-deps {{NC_WHL}} -requirements: - build: - - python - - pip - run: - - python - - pip - - numpy - - pyyaml - - scikit-learn - - schema - - py-cpuinfo - - pandas - - pycocotools - - opencv-python-headless - - psutil - - Pillow - - requests - - prettytable - - packaging - - deprecated -test: - imports: - - neural_compressor -about: - home: https://github.com/intel/neural-compressor - license: Apache 2.0 - license_family: Apache - license_file: ../../LICENSE - description: ' - LEGAL NOTICE: Use of this software package is subject to the software license agreement (as set forth above, in the license section of the installed Conda package and/or the README file) and all notices, disclaimers or license terms for third party or open source software included in or with the software. -

- EULA: Apache 2.0
- Third Party Programs: https://github.com/intel/neural-compressor/blob/master/third-party-programs.txt -

- Intel® Neural Compressor. - ' diff --git a/conda_meta/neural_insights/meta.yaml b/conda_meta/neural_insights/meta.yaml deleted file mode 100644 index add638448ad..00000000000 --- a/conda_meta/neural_insights/meta.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{% set version = "2.6" %} -{% set buildnumber = 0 %} -package: - name: neural-insights - version: {{version}} -build: - script_env: - - NC_WHL - number: {{buildnumber}} - noarch: python - script: pip install --no-deps {{NC_WHL}} - entry_points: - - neural_insights = neural_insights.bin.neural_insights:main -requirements: - build: - - python - - pip - run: - - python - - pip - - neural-compressor>=2.2 - - Flask - - Flask-Cors - - Flask-SocketIO - - gevent - - gevent-websocket - - pywin32 # [win] -test: - imports: - - neural_insights -about: - home: https://github.com/intel/neural-compressor - license: Apache 2.0 - license_family: Apache - license_file: ../../LICENSE - description: ' - LEGAL NOTICE: Use of this software package is subject to the software license agreement (as set forth above, in the license section of the installed Conda package and/or the README file) and all notices, disclaimers or license terms for third party or open source software included in or with the software. -

- EULA: Apache 2.0
- Third Party Programs: https://github.com/intel/neural-compressor/blob/master/third-party-programs.txt -

- Intel® Neural Compressor. - ' diff --git a/conda_meta/neural_solution/meta.yaml b/conda_meta/neural_solution/meta.yaml deleted file mode 100644 index cef0297fb5d..00000000000 --- a/conda_meta/neural_solution/meta.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{% set version = "2.6" %} -{% set buildnumber = 0 %} -package: - name: neural-solution - version: {{version}} -build: - script_env: - - NC_WHL - number: {{buildnumber}} - noarch: python - script: pip install --no-deps {{NC_WHL}} - entry_points: - - neural_solution = neural_solution.bin.neural_solution:main -requirements: - build: - - python - - pip - run: - - python - - pip - - neural-compressor>=2.2 - - pydantic - - fastapi - - uvicorn[standard] - - watchdog - - protobuf - - grpcio - - mpi4py -test: - imports: - - neural_solution -about: - home: https://github.com/intel/neural-compressor - license: Apache 2.0 - license_family: Apache - license_file: ../../LICENSE - description: ' - LEGAL NOTICE: Use of this software package is subject to the software license agreement (as set forth above, in the license section of the installed Conda package and/or the README file) and all notices, disclaimers or license terms for third party or open source software included in or with the software. -

- EULA: Apache 2.0
- Third Party Programs: https://github.com/intel/neural-compressor/blob/master/third-party-programs.txt -

- Intel® Neural Compressor. - ' diff --git a/docs/3x/PT_FP8Quant.md new file mode 100644 index 00000000000..a0ed3352e8e --- /dev/null +++ b/docs/3x/PT_FP8Quant.md @@ -0,0 +1,113 @@ +FP8 Quantization +======= + +1. [Introduction](#introduction) +2. [Supported Parameters](#supported-parameters) +3. [Get Start with FP8 Quantization](#get-start-with-fp8-quantization) +4. [Examples](#examples) + +## Introduction + +Floating point 8 (FP8) is a promising data type for low precision quantization which provides a data distribution that is completely different from INT8, as shown below. +
+ +
+
+Intel Gaudi2, also known as HPU, provides this data type capability for low precision quantization, which includes `E4M3` and `E5M2`. For more information about these two data types, please refer to [link](https://arxiv.org/abs/2209.05433).
+
+Intel Neural Compressor provides general quantization APIs to leverage the HPU FP8 capability, so an 8-bit model with lower memory usage and lower compute cost can be produced through a simple API.
+
+## Supported Parameters
+
+| Attribute | Description | Values |
+|---|---|---|
+| fp8_config | The target data type of FP8 quantization. | E4M3 (default) - As Fig. 2<br>E5M2 - As Fig. 1. |
+| hp_dtype | The high precision data type of non-FP8 operators. | bf16 (default) - torch.bfloat16<br>fp16 - torch.float16.<br>fp32 - torch.float32. |
+| observer | The observer to measure the statistics. | maxabs (default), saves all tensors to files. |
+| allowlist | List of nn.Module names or types to quantize. When setting an empty list, all the supported modules will be quantized by default. See Supported Modules. Not setting the list at all is not recommended as it will set the allowlist to these modules only: torch.nn.Linear, torch.nn.Conv2d, and BMM. | Default = {'names': [], 'types': FP8_WHITE_LIST} |
+| blocklist | List of nn.Module names or types not to quantize. Defaults to empty list, so you may omit it from the config file. | Default = {'names': [], 'types': ()} |
+| mode | The mode, measure or quantize, to run HQT with. | MEASURE - Measure statistics of all modules and emit the results to dump_stats_path.<br>QUANTIZE - Quantize and run the model according to the provided measurements.<br>AUTO (default) - Select from [MEASURE, QUANTIZE] automatically. |
+| dump_stats_path | The path to save and load the measurements. The path is created up until the level before last "/". The string after the last / will be used as prefix to all the measurement files that will be created. | Default = "./hqt_output/measure" |
+| scale_method | The method for calculating the scale from the measurement. | - without_scale - Convert to/from FP8 without scaling.<br>- unit_scale - Always use scale of 1.<br>- maxabs_hw (default) - Scale is calculated to stretch/compress the maxabs measurement to the full-scale of FP8 and then aligned to the corresponding HW accelerated scale.<br>- maxabs_pow2 - Scale is calculated to stretch/compress the maxabs measurement to the full-scale of FP8 and then rounded to the power of 2.<br>- maxabs_hw_opt_weight - Scale of model params (weights) is chosen as the scale that provides minimal mean-square-error between quantized and non-quantized weights, from all possible HW accelerated scales. Scale of activations is calculated the same as maxabs_hw.<br>- act_maxabs_pow2_weights_pcs_opt_pow2 - Scale of model params (weights) is calculated per-channel of the params tensor. The scale per-channel is calculated the same as maxabs_hw_opt_weight. Scale of activations is calculated the same as maxabs_pow2.<br>- act_maxabs_hw_weights_pcs_maxabs_pow2 - Scale of model params (weights) is calculated per-channel of the params tensor. The scale per-channel is calculated the same as maxabs_pow2. Scale of activations is calculated the same as maxabs_hw. |
+| measure_exclude | If this attribute is not defined, the default is OUTPUT. Since most models do not require measuring output tensors, you can exclude it to speed up the measurement process. | NONE - All tensors are measured.<br>OUTPUT (default) - Excludes measurement of output tensors. |
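
To make the table concrete, the snippet below sketches how several of these attributes might be passed to `FP8Config`. Only `fp8_config` appears in the official demo further down, so treat the extra keyword arguments (`scale_method`, `dump_stats_path`) as assumptions to verify against your installed version.

```python
# Illustrative sketch only: assumes FP8Config accepts the table attributes above
# as keyword arguments; only fp8_config is shown in the official demo below.
import torchvision.models as models

from neural_compressor.torch.quantization import FP8Config, convert, prepare

model = models.resnet18()

qconfig = FP8Config(
    fp8_config="E4M3",  # target FP8 data type (E4M3 or E5M2)
    scale_method="maxabs_hw",  # assumed kwarg, see the scale_method row above
    dump_stats_path="./hqt_output/measure",  # assumed kwarg, where measurement files are written
)
model = prepare(model, qconfig)

# Calibrate with user-provided representative data, e.g. calib_func(model),
# then convert the prepared model to FP8.
model = convert(model)
```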
+ +## Get Start with FP8 Quantization + +### Demo Usage + +```python +from neural_compressor.torch.quantization import ( + FP8Config, + prepare, + convert, +) +import torchvision.models as models + +model = models.resnet18() +qconfig = FP8Config(fp8_config="E4M3") +model = prepare(model, qconfig) +# customer defined calibration +calib_func(model) +model = convert(model) +``` + +## Examples + +| Task | Example | +|----------------------|---------| +| Computer Vision (CV) | [Link](../../examples/3.x_api/pytorch/cv/fp8_quant/) | +| Large Language Model (LLM) | [Link](https://github.com/HabanaAI/optimum-habana-fork/tree/habana-main/examples/text-generation#running-with-fp8) | + +> Note: For LLM, Optimum-habana provides higher performance based on modified modeling files, so here the Link of LLM goes to Optimum-habana, which utilize Intel Neural Compressor for FP8 quantization internally. diff --git a/docs/3x/PT_MixPrecision.md b/docs/3x/PT_MixPrecision.md deleted file mode 100644 index c1cd198049b..00000000000 --- a/docs/3x/PT_MixPrecision.md +++ /dev/null @@ -1,103 +0,0 @@ -PyTorch Mixed Precision -======================================== - -1. [Introduction](#introduction) -2. [Mixed Precision Support Matrix](#mixed-precision-support-matrix) -3. [Get Started](#get-start) -4. [Examples](#examples) - -## Introduction - -The recent growth of Deep Learning has driven the development of more complex models that require significantly more compute and memory capabilities. Several low precision numeric formats have been proposed to address the problem. Google's [bfloat16](https://cloud.google.com/tpu/docs/bfloat16) and the [FP16: IEEE](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) half-precision format are two of the most widely used sixteen bit formats. [Mixed precision](https://arxiv.org/abs/1710.03740) training and inference using low precision formats have been developed to reduce compute and bandwidth requirements. - -The 3rd Gen Intel® Xeon® Scalable processor (codenamed Cooper Lake), featuring Intel® Deep Learning Boost, is the first general-purpose x86 CPU to support the bfloat16 format. Specifically, three new bfloat16 instructions are added as a part of the AVX512_BF16 extension within Intel Deep Learning Boost: VCVTNE2PS2BF16, VCVTNEPS2BF16, and VDPBF16PS. The first two instructions allow converting to and from bfloat16 data type, while the last one performs a dot product of bfloat16 pairs. Further details can be found in the [hardware numerics document](https://www.intel.com/content/www/us/en/developer/articles/technical/intel-deep-learning-boost-new-instruction-bfloat16.html) published by Intel. - -The 4th Gen Intel® Xeon® Scalable processor supports FP16 instruction set architecture (ISA) for Intel® -Advanced Vector Extensions 512 (Intel® AVX-512). The new ISA supports a wide range of general-purpose numeric -operations for 16-bit half-precision IEEE-754 floating-point and complements the existing 32-bit and 64-bit floating-point instructions already available in the Intel Xeon processor based products. Further details can be found in the [hardware numerics document](https://www.intel.com/content/www/us/en/content-details/669773/intel-avx-512-fp16-instruction-set-for-intel-xeon-processor-based-products-technology-guide.html) published by Intel. - -

- Architecture -

- -## Mixed Precision Support Matrix - - - - - - - - - - - - - - - - - - - - - - - - -
FrameworkBackendBackend LibraryBackend ValueSupport Device(cpu as default)Support BF16Support FP16
PyTorchFXFBGEMM"default"cpu
- - -### Hardware and Software requests for **BF16** -- PyTorch - 1. Hardware: CPU supports `avx512_bf16` instruction set. - 2. Software: torch >= [1.11.0](https://download.pytorch.org/whl/torch_stable.html). - - -### Hardware and Software requests for **FP16** -- PyTorch - 1. Hardware: CPU supports `avx512_fp16` instruction set. - 2. Software: torch >= [1.11.0](https://download.pytorch.org/whl/torch_stable.html). - - -### Accuracy-driven mixed precision -BF16/FP16 conversion may lead to accuracy drop. Intel® Neural Compressor provides an accuracy-driven tuning function to reduce accuracy loss, -which could fallback converted ops to FP32, if set in config, to get better accuracy. To enable this function, users only to provide -`eval_fn` and `eval_args` for `autotune`. -To be noticed, IPEX backend doesn't support accuracy-driven mixed precision. - -## Get Started with autotune API - -To get a bf16/fp16 model, users can use the `autotune` interface with `MixPrecisionConfig` as follows. - -- BF16: - -```python -from neural_compressor.torch.quantization import MixPrecisionConfig, TuningConfig, autotune - -def eval_acc_fn(model): - ...... - return acc - -# modules might be fallback to fp32 to get better accuracy -custom_tune_config = TuningConfig(config_set=[MixPrecisionConfig(dtype=["bf16", "fp32"])], max_trials=3) -best_model = autotune(model=build_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) -``` - -- FP16: - -```python -from neural_compressor.torch.quantization import MixPrecisionConfig, TuningConfig, autotune - -def eval_acc_fn(model): - ...... - return acc - -# modules might be fallback to fp32 to get better accuracy -custom_tune_config = TuningConfig(config_set=[MixPrecisionConfig(dtype=["fp16", "fp32"])], max_trials=3) -best_model = autotune(model=build_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) -``` - -## Examples - -Example will be added later. 
diff --git a/docs/build_docs/build.sh b/docs/build_docs/build.sh index 032a15890a2..43b2a0dd467 100755 --- a/docs/build_docs/build.sh +++ b/docs/build_docs/build.sh @@ -81,12 +81,10 @@ fi source env_sphinx/bin/activate cp -rf ../docs/ ./source -cp -rf ../neural_coder ./source/docs/source -cp -rf ../neural_insights ./source/docs/source -cp -rf ../neural_solution ./source/docs/source cp -f "../README.md" "./source/docs/source/Welcome.md" cp -f "../SECURITY.md" "./source/docs/source/SECURITY.md" + all_md_files=`find ./source/docs -name "*.md"` for md_file in ${all_md_files} do @@ -94,18 +92,14 @@ do done -sed -i 's/.\/docs\/source\/_static/./g' ./source/docs/source/Welcome.md ./source/docs/source/user_guide.md -sed -i 's/.md/.html/g; s/.\/docs\/source\//.\//g' ./source/docs/source/Welcome.md ./source/docs/source/user_guide.md -sed -i 's/\/examples\/README.html/https:\/\/github.com\/intel\/neural-compressor\/blob\/master\/examples\/README.md/g' ./source/docs/source/user_guide.md -sed -i 's/href=\"\/neural_coder/href=\".\/neural_coder/g' ./source/docs/source/user_guide.md -sed -i 's/https\:\/\/intel.github.io\/neural-compressor\/lates.\/api-doc\/apis.html/https\:\/\/intel.github.io\/neural-compressor\/latest\/docs\/source\/api-doc\/apis.html/g' ./source/docs/source/Welcome.md ./source/docs/source/user_guide.md +sed -i 's/.\/docs\/source\/_static/./g' ./source/docs/source/Welcome.md +sed -i 's/.md/.html/g; s/.\/docs\/source\//.\//g' ./source/docs/source/Welcome.md +#sed -i 's/\/examples\/README.html/https:\/\/github.com\/intel\/neural-compressor\/blob\/master\/examples\/README.md/g' ./source/docs/source/user_guide.md +sed -i 's/https\:\/\/intel.github.io\/neural-compressor\/lates.\/api-doc\/apis.html/https\:\/\/intel.github.io\/neural-compressor\/latest\/docs\/source\/api-doc\/apis.html/g' ./source/docs/source/Welcome.md +sed -i 's/\/examples\/pytorch/https:\/\/github.com\/intel\/neural-compressor\/blob\/master\/examples\/pytorch/g' ./source/docs/source/Welcome.md sed -i 's/examples\/README.html/https:\/\/github.com\/intel\/neural-compressor\/blob\/master\/examples\/README.md/g' ./source/docs/source/Welcome.md -sed -i 's/\"\/neural_coder\/extensions\/screenshots\/extmanager.png/\".\/neural_coder\/extensions\/screenshots\/extmanager.png/g' ./source/docs/source/get_started.md - -sed -i 's/\/neural_coder\/extensions\/neural_compressor_ext_lab\/README.md/.\/neural_coder\/extensions\/neural_compressor_ext_lab\/README.md/g' ./source/docs/source/get_started.md - sed -i 's/\/examples\/README.md/https:\/\/github.com\/intel\/neural-compressor\/blob\/master\/examples\/README.md/g' ./source/docs/source/get_started.md sed -i 's/.\/validated_model_list.md\#/.\/validated_model_list.html\#/g' ./source/docs/source/installation_guide.md @@ -138,8 +132,8 @@ if [[ ${UPDATE_VERSION_FOLDER} -eq 1 ]]; then cp -r ${SRC_FOLDER}/* ${DST_FOLDER} python update_html.py ${DST_FOLDER} ${VERSION} cp -r ./source/docs/source/imgs ${DST_FOLDER}/docs/source - cp -r ./source/docs/source/neural_coder/extensions/neural_compressor_ext_vscode/images ${DST_FOLDER}/docs/source/neural_coder/extensions/neural_compressor_ext_vscode - cp -r ./source/docs/source/neural_coder/extensions/screenshots ${DST_FOLDER}/docs/source/neural_coder/extensions + cp -r ./source/docs/source/3x/imgs ${DST_FOLDER}/docs/source/3x + cp source/_static/index.html ${DST_FOLDER} else @@ -153,8 +147,7 @@ if [[ ${UPDATE_LATEST_FOLDER} -eq 1 ]]; then cp -r ${SRC_FOLDER}/* ${LATEST_FOLDER} python update_html.py ${LATEST_FOLDER} ${VERSION} cp -r ./source/docs/source/imgs 
${LATEST_FOLDER}/docs/source - cp -r ./source/docs/source/neural_coder/extensions/neural_compressor_ext_vscode/images ${LATEST_FOLDER}/docs/source/neural_coder/extensions/neural_compressor_ext_vscode - cp -r ./source/docs/source/neural_coder/extensions/screenshots ${LATEST_FOLDER}/docs/source/neural_coder/extensions + cp -r ./source/docs/source/3x/imgs ${LATEST_FOLDER}/docs/source/3x cp source/_static/index.html ${LATEST_FOLDER} else echo "skip to create ${LATEST_FOLDER}" @@ -164,7 +157,7 @@ echo "Create document is done" if [[ ${CHECKOUT_GH_PAGES} -eq 1 ]]; then git clone -b gh-pages --single-branch https://github.com/intel/neural-compressor.git ${RELEASE_FOLDER} - + if [[ ${UPDATE_VERSION_FOLDER} -eq 1 ]]; then python update_version.py ${ROOT_DST_FOLDER} ${VERSION} cp -rf ${DST_FOLDER} ${RELEASE_FOLDER} diff --git a/docs/source/2x_user_guide.md b/docs/source/2x_user_guide.md new file mode 100644 index 00000000000..941e80d6a39 --- /dev/null +++ b/docs/source/2x_user_guide.md @@ -0,0 +1,76 @@ +2.X API User Guide +=========================== + +Intel® Neural Compressor aims to provide popular model compression techniques such as quantization, pruning (sparsity), distillation, and neural architecture search to help the user optimize their model. The below documents could help you to get familiar with concepts and modules in Intel® Neural Compressor. Learn how to utilize the APIs in Intel® Neural Compressor to conduct quantization, pruning (sparsity), distillation, and neural architecture search on mainstream frameworks. + +## Overview +This part helps user to get a quick understand about design structure and workflow of 2.X Intel® Neural Compressor. We provided broad examples to help users get started. + + + + + + + + + + + + + + +
+Architecture | Workflow | APIs
+Notebook | Examples | Results | Intel oneAPI AI Analytics Toolkit
+
+## Python-based APIs
+Python-based APIs contain more details about the functional APIs in Intel® Neural Compressor,
+which introduce the mechanism of each function and provide a tutorial to help the user apply them in their own cases.
+Please note that support for the Intel Neural Compressor 1.X API will be discontinued in the future.
+So we provide a comprehensive migration document in Code Migration to help the user update their code from the previous 1.X version to the new 2.X version.
+In the 2.X API, it is very important to create the `DataLoader` and `Metrics` for your examples, so we provide detailed introductions (a minimal end-to-end sketch follows the tables below).
+
+Quantization | Advanced Mixed Precision | Pruning (Sparsity) | Distillation
+Orchestration | Benchmarking | Distributed Compression | Model Export
+Code Migration from Intel® Neural Compressor 1.X to Intel® Neural Compressor 2.X
+DataLoader | Metric
+
+## Advanced Topics
+This part provides the advanced topics that help users dive deep into the Intel® Neural Compressor 2.X API.
+
+Adaptor | Strategy | Objective | Calibration
+Add New Data Type | Add New Adaptor
+Distillation for Quantization | SmoothQuant | Weight-Only Quantization | Layer-Wise Quantization
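
To tie these pieces together, below is a minimal 2.X post-training static quantization sketch that builds a `DataLoader` from the built-in dummy dataset and runs `fit`; the torchvision model and dummy data are placeholders, so substitute your own workload, dataloader, and metric.

```python
from torchvision import models

from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.data import DataLoader, Datasets
from neural_compressor.quantization import fit

# FP32 model and a dummy calibration dataloader built with the 2.X data API.
float_model = models.resnet18()
dataset = Datasets("pytorch")["dummy"](shape=(1, 3, 224, 224))
calib_dataloader = DataLoader(framework="pytorch", dataset=dataset)

# Post-training static quantization with the default accuracy-driven tuning config.
static_quant_conf = PostTrainingQuantConfig()
quantized_model = fit(model=float_model, conf=static_quant_conf, calib_dataloader=calib_dataloader)
```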
diff --git a/docs/3x/PT_DynamicQuant.md b/docs/source/3x/PT_DynamicQuant.md similarity index 100% rename from docs/3x/PT_DynamicQuant.md rename to docs/source/3x/PT_DynamicQuant.md diff --git a/docs/3x/PT_MXQuant.md b/docs/source/3x/PT_MXQuant.md similarity index 98% rename from docs/3x/PT_MXQuant.md rename to docs/source/3x/PT_MXQuant.md index 1cfb17ff30b..42e12d039a6 100644 --- a/docs/3x/PT_MXQuant.md +++ b/docs/source/3x/PT_MXQuant.md @@ -95,7 +95,7 @@ user_model = convert(model=user_model) ## Examples -- PyTorch [huggingface models](/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx) +- PyTorch [huggingface models](/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant) ## Reference diff --git a/docs/source/3x/PT_MixedPrecision.md b/docs/source/3x/PT_MixedPrecision.md new file mode 100644 index 00000000000..3fbd1db6bbf --- /dev/null +++ b/docs/source/3x/PT_MixedPrecision.md @@ -0,0 +1,111 @@ +PyTorch Mixed Precision +======================================== + +1. [Introduction](#introduction) +2. [Mixed Precision Support Matrix](#mixed-precision-support-matrix) +3. [Get Started](#get-start) +4. [Examples](#examples) + +## Introduction + +The recent growth of Deep Learning has driven the development of more complex models that require significantly more compute and memory capabilities. Several low precision numeric formats have been proposed to address the problem. +Google's [bfloat16](https://cloud.google.com/tpu/docs/bfloat16) and the [FP16: IEEE](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) half-precision format are two of the most widely used sixteen bit formats. [Mixed precision](https://arxiv.org/abs/1710.03740) training and inference using low precision formats have been developed to reduce compute and bandwidth requirements. + +The 3rd Gen Intel® Xeon® Scalable processor (codenamed Cooper Lake), featuring Intel® Deep Learning Boost, is the first general-purpose x86 CPU to support the bfloat16 format. Specifically, three new bfloat16 instructions are added as a part of the AVX512_BF16 extension within Intel Deep Learning Boost: VCVTNE2PS2BF16, VCVTNEPS2BF16, and VDPBF16PS. The first two instructions allow converting to and from bfloat16 data type, while the last one performs a dot product of bfloat16 pairs. +Further details can be found in the [Hardware Numerics Document](https://www.intel.com/content/www/us/en/developer/articles/technical/intel-deep-learning-boost-new-instruction-bfloat16.html) published by Intel. + +The 4th Gen Intel® Xeon® Scalable processor supports FP16 instruction set architecture (ISA) for Intel® Advanced Vector Extensions 512 (Intel® AVX-512). The new ISA supports a wide range of general-purpose numeric operations for 16-bit half-precision IEEE-754 floating-point and complements the existing 32-bit and 64-bit floating-point instructions already available in the Intel Xeon processor based products. +Further details can be found in the [Intel AVX512 FP16 Guide](https://www.intel.com/content/www/us/en/content-details/669773/intel-avx-512-fp16-instruction-set-for-intel-xeon-processor-based-products-technology-guide.html) published by Intel. + +The latest Intel Xeon processors deliver flexibility of Intel Advanced Matrix Extensions (Intel AMX) ,an accelerator that improves the performance of deep learning(DL) training and inference, making it ideal for workloads like NLP, recommender systems, and image recognition. 
Developers can code AI functionality to take advantage of the Intel AMX instruction set, and they can code non-AI functionality to use the processor instruction set architecture (ISA). Intel has integrated the Intel® oneAPI Deep Neural Network Library (oneDNN), its oneAPI DL engine, into PyTorch. +Further details can be found in the [Intel AMX Document](https://www.intel.com/content/www/us/en/content-details/785250/accelerate-artificial-intelligence-ai-workloads-with-intel-advanced-matrix-extensions-intel-amx.html) published by Intel. +

+ Architecture +

+ +## Mixed Precision Support Matrix + + + + + + + + + + + + + + + + + + + + + + + + +
+| Framework | Backend | Backend Library | Backend Value | Support Device(cpu as default) | Support BF16 | Support FP16 |
+|---|---|---|---|---|---|---|
+| PyTorch | FX | FBGEMM | "default" | cpu | &#10004; | &#10004; |
+ + +### Hardware and Software requests for **BF16** +- PyTorch + 1. Hardware: CPU supports `avx512_bf16` instruction set. + 2. Software: torch >= [1.11.0](https://download.pytorch.org/whl/torch_stable.html). + + +### Hardware and Software requests for **FP16** +- PyTorch + 1. Hardware: CPU supports `avx512_fp16` instruction set. + 2. Software: torch >= [1.11.0](https://download.pytorch.org/whl/torch_stable.html). +> Note: To run FP16 on Intel-AMX, please set the environment variable `ONEDNN_MAX_CPU_ISA`: +> ```export ONEDNN_MAX_CPU_ISA=AVX512_CORE_AMX_FP16``` + + + +### Accuracy-driven mixed precision +BF16/FP16 conversion may lead to accuracy drop. Intel® Neural Compressor provides an accuracy-driven tuning function to reduce accuracy loss, +which could fallback converted ops to FP32, if set in config, to get better accuracy. To enable this function, users only to provide +`eval_fn` and `eval_args` for `autotune`. +To be noticed, IPEX backend doesn't support accuracy-driven mixed precision. + +## Get Started with autotune API + +To get a bf16/fp16 model, users can use the `autotune` interface with `MixedPrecisionConfig` as follows. + +- BF16: + +```python +from neural_compressor.torch.quantization import MixedPrecisionConfig, TuningConfig, autotune + +def eval_acc_fn(model): + ...... + return acc + +# modules might be fallback to fp32 to get better accuracy +custom_tune_config = TuningConfig(config_set=[MixedPrecisionConfig(dtype=["bf16", "fp32"])], max_trials=3) +best_model = autotune(model=build_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) +``` + +- FP16: + +```python +from neural_compressor.torch.quantization import MixedPrecisionConfig, TuningConfig, autotune + +def eval_acc_fn(model): + ...... + return acc + +# modules might be fallback to fp32 to get better accuracy +custom_tune_config = TuningConfig(config_set=[MixedPrecisionConfig(dtype=["fp16", "fp32"])], max_trials=3) +best_model = autotune(model=build_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) +``` + +## Examples + +Users can also refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/cv/mixed_precision +) on how to quantize a model with Mixed Precision. diff --git a/docs/3x/PT_SmoothQuant.md b/docs/source/3x/PT_SmoothQuant.md similarity index 98% rename from docs/3x/PT_SmoothQuant.md rename to docs/source/3x/PT_SmoothQuant.md index 9e4ae3eb62f..e3a7262dcde 100644 --- a/docs/3x/PT_SmoothQuant.md +++ b/docs/source/3x/PT_SmoothQuant.md @@ -46,7 +46,7 @@ run_fn(prepared_model) q_model = convert(prepared_model) ``` -To get more information, please refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm). +To get more information, please refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant). ## Validated Models diff --git a/docs/3x/PT_StaticQuant.md b/docs/source/3x/PT_StaticQuant.md similarity index 91% rename from docs/3x/PT_StaticQuant.md rename to docs/source/3x/PT_StaticQuant.md index ec967a780d4..d687e83c1f6 100644 --- a/docs/3x/PT_StaticQuant.md +++ b/docs/source/3x/PT_StaticQuant.md @@ -1,6 +1,5 @@ PyTorch Static Quantization ======================================== - 1. [Introduction](#introduction) 2. 
[Get Started](#get-started) \ 2.1 [Static Quantization with IPEX Backend](#static-quantization-with-ipex-backend) \ @@ -9,6 +8,7 @@ PyTorch Static Quantization 2.1.3 [Model Examples](#model-examples) \ 2.2 [Static Quantization with PT2E Backend](#static-quantization-with-pt2e-backend) \ 2.2.1 [Usage Sample with PT2E](#usage-sample-with-pt2e) + 2.2.2 [Model Examples with PT2E](#model-examples-with-pt2e) ## Introduction @@ -68,7 +68,7 @@ q_model = convert(prepared_model) #### Model Examples -Users could refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm) on how to quantize a new model. +Users could refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex) on how to quantize a new model. ### Static Quantization with PT2E Backend @@ -102,3 +102,7 @@ opt_model = torch.compile(q_model) ``` > Note: The `set_local` of `StaticQuantConfig` will be supported after the torch 2.4 release. + +#### Model Examples with PT2E + +Users could refer to [cv examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/cv/static_quant) and [llm examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e) on how to quantize a new model. diff --git a/docs/3x/PT_WeightOnlyQuant.md b/docs/source/3x/PT_WeightOnlyQuant.md similarity index 90% rename from docs/3x/PT_WeightOnlyQuant.md rename to docs/source/3x/PT_WeightOnlyQuant.md index 37cc934592a..1578b57f8c9 100644 --- a/docs/3x/PT_WeightOnlyQuant.md +++ b/docs/source/3x/PT_WeightOnlyQuant.md @@ -28,7 +28,6 @@ Besides, as mentioned in many papers[1][2], activation quantization is the main Theoretically, round-to-nearest (RTN) is the most straightforward way to quantize weight using scale maps. However, when the number of bits is small (e.g. 3), the MSE loss is larger than expected. A group size is introduced to reduce elements using the same scale to improve accuracy. - ## Supported Matrix | Algorithms/Backend | PyTorch eager mode | @@ -58,12 +57,14 @@ Theoretically, round-to-nearest (RTN) is the most straightforward way to quantiz WeightOnlyQuant quantization for PyTorch is using prepare and convert [APIs](./PyTorch.md#quantization-apis). #### Common arguments + | Config | Capability | |---|---| | dtype (str)| ['int', 'nf4', 'fp4'] | | bits (int)| [1, ..., 8] | | group_size (int)| [-1, 1, ..., $C_{in}$] | | use_sym (bool)| [True, False] | +| quant_lm_head (bool)| [False, True] | | use_double_quant (bool) | [True, False] | | double_quant_dtype (str) | ['int'] | | double_quant_bits (int) | [1, ..., bits] | @@ -71,12 +72,14 @@ WeightOnlyQuant quantization for PyTorch is using prepare and convert [APIs](./P | double_quant_group_size (int) | [-1, 1, ..., $C_{in}$] | Notes: + - *group_size = -1* refers to **per output channel quantization**. Taking a linear layer (input channel = $C_{in}$, output channel = $C_{out}$) for instance, when *group size = -1*, quantization will calculate total $C_{out}$ quantization parameters. Otherwise, when *group_size = gs* quantization parameters are calculate with every $gs$ elements along with the input channel, leading to total $C_{out} \times (C_{in} / gs)$ quantization parameters. - 4-bit NormalFloat(NF4) is proposed in QLoRA[7]. 
'fp4' includes [fp4_e2m1](../../neural_compressor/adaptor/torch_utils/weight_only.py#L37) and [fp4_e2m1_bnb](https://github.com/TimDettmers/bitsandbytes/blob/18e827d666fa2b70a12d539ccedc17aa51b2c97c/bitsandbytes/functional.py#L735). By default, fp4 refers to fp4_e2m1_bnb. -- Only RTN and GPTQ support double quant. - +- *quant_lm_head* defaults to False. This means that, except for transformer blocks, the last layer in transformer models will not be quantized by default. The last layer may be named "lm_head", "output_layer" or "embed_out". +- Only RTN and GPTQ support double quant. #### RTN + | rtn_args | comments | default value | |----------|-------------|-------------------------------------------------------------------| | group_dim (int) | Dimension for grouping | 1 | @@ -86,6 +89,7 @@ Notes: | model_path (str) | Model path that is used to load state_dict per layer | | > **Notes:** `model_path` is only used when use_layer_wise=True. `layer-wise` is stay-tuned. + ``` python # Quantization code from neural_compressor.torch.quantization import prepare, convert, RTNConfig @@ -96,6 +100,7 @@ model = convert(model) ``` #### GPTQ + | gptq_args | comments | default value | |----------|-------------|-------------------------------------------------------------------| | use_mse_search (bool) | Enables mean squared error (MSE) search | False @@ -107,6 +112,7 @@ model = convert(model) | block_size (int) | Execute GPTQ quantization per block, block shape = [C_out, block_size] | 128 | | static_groups (bool) | Whether to calculate group wise quantization parameters in advance. This option mitigate actorder's extra computational requirements. | False. | > **Note:** `model_path` is only used when use_layer_wise=True. `layer-wise` is stay-tuned. + ``` python # Quantization code from neural_compressor.torch.quantization import prepare, convert, GPTQConfig @@ -118,6 +124,7 @@ model = convert(model) ``` #### AutoRound + | autoround_args | comments | default value | |----------|-------------|-------------------------------------------------------------------| | enable_full_range (bool) | Whether to enable full range quantization | False @@ -138,6 +145,7 @@ model = convert(model) | not_use_best_mse (bool) | Whether to use mean squared error | False | | dynamic_max_gap (int) | The dynamic maximum gap | -1 | | scale_dtype (str) | The data type of quantization scale to be used, different kernels have different choices | "float16" | + ``` python # Quantization code from neural_compressor.torch.quantization import prepare, convert, AutoRoundConfig @@ -149,6 +157,7 @@ model = convert(model) ``` #### AWQ + | awq_args | comments | default value | |----------|-------------|-------------------------------------------------------------------| | group_dim (int) | Dimension for grouping | 1 | @@ -159,6 +168,7 @@ model = convert(model) | use_auto_clip (bool) | Enables clip range search | True | | folding(bool) | Allow insert mul before linear when the scale cannot be absorbed by last layer | False. | > **Notes:** `layer-wise` is stay-tuned. 
+ ``` python # Quantization code from neural_compressor.torch.quantization import prepare, convert, AWQConfig @@ -170,6 +180,7 @@ model = convert(model) ``` #### TEQ + | teq_args | comments | default value | |----------|-------------|-------------------------------------------------------------------| | group_dim (int) | Dimension for grouping | 1 | @@ -179,6 +190,7 @@ model = convert(model) | use_double_quant (bool) | Enables double quantization | False | | folding(bool) | Allow insert mul before linear when the scale cannot be absorbed by last layer | False | > **Notes:** `layer-wise` is stay-tuned. + ``` python # Quantization code from neural_compressor.torch.quantization import prepare, convert, TEQConfig @@ -190,12 +202,13 @@ model = convert(model) ``` #### HQQ + | hqq_args | comments | default value | |----------|-------------|-------------------------------------------------------------------| | quant_zero (bool) | Whether to quantize zero point | True | | quant_scale (bool) | Whether to quantize scale | False | | scale_quant_group_size (int) | The group size for quantizing scale | 128 | -| skip_lm_head (bool) | Whether to skip for quantizing lm_head | True | + ``` python # Quantization code from neural_compressor.torch.quantization import prepare, convert, HQQConfig @@ -205,10 +218,13 @@ model = prepare(model, quant_config) run_fn(model) # calibration model = convert(model) ``` + ### Specify Quantization Rules + Intel(R) Neural Compressor supports specifying quantization rules by operator name or operator type. Users can set `local` in a dict or use the `set_local` method of the config class to achieve this. 1. Example of setting `local` from a dict + ```python quant_config = { "rtn": { @@ -226,7 +242,9 @@ quant_config = { } } ``` + 2. Example of using `set_local` + ```python quant_config = RTNConfig() lm_head_config = RTNConfig(dtype="fp32") @@ -234,7 +252,9 @@ quant_config.set_local("lm_head", lm_head_config) ``` ### Saving and Loading + The saved_results folder contains two files: quantized_model.pt and qconfig.json, and the generated model is a quantized model. The quantized model will include WeightOnlyLinear modules. To support low memory inference, Intel(R) Neural Compressor implemented WeightOnlyLinear, a torch.nn.Module, to compress the fake quantized fp32 model. Since torch does not provide flexible data type storage, WeightOnlyLinear packs low-bit data into a larger data type, such as torch.int8 and torch.int32. Low-bit data includes weights and zero points. When using WeightOnlyLinear for inference, it will restore the compressed data to float32 and run the torch linear function. + ```python # Quantization code from neural_compressor.torch.quantization import prepare, convert, RTNConfig @@ -255,10 +275,38 @@ loaded_model = load( ) # Please note that the original_model parameter passes the original model. ``` +## Layer Wise Quantization + +As the size of LLMs continues to grow, loading the entire model into a single GPU card or the RAM of a client machine becomes impractical. To address this challenge, we introduce Layer-wise Quantization (LWQ), a method that quantizes LLMs layer by layer or block by block. This approach significantly reduces memory consumption. The diagram below illustrates the LWQ process. + + + +*Figure 1: The process of layer-wise quantization for a PyTorch model. Grey indicates empty parameters and blue indicates parameters that need to be quantized.
Every rectangle inside the model represents one layer.* + + +Currently, we support LWQ for `RTN`, `AutoRound`, and `GPTQ`. + +Here, we take the `RTN` algorithm as an example to demonstrate the usage of LWQ. + +```python +from neural_compressor.torch.quantization import RTNConfig, convert, prepare +from neural_compressor.torch import load_empty_model + +model_state_dict_path = "/path/to/model/state/dict" +float_model = load_empty_model(model_state_dict_path) +quant_config = RTNConfig(use_layer_wise=True) +prepared_model = prepare(float_model, quant_config) +quantized_model = convert(prepared_model) +``` + +## Efficient Usage on Client-Side + +For client machines with limited RAM and cores, we offer optimizations to reduce computational overhead and minimize memory usage. For detailed information, please refer to [Quantization on Client](https://github.com/intel/neural-compressor/blob/master/docs/source/3x/client_quant.md). + ## Examples -Users can also refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm) on how to quantize a model with WeightOnlyQuant. +Users can also refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only) on how to quantize a model with WeightOnlyQuant. ## Reference @@ -272,6 +320,6 @@ Users can also refer to [examples](https://github.com/intel/neural-compressor/bl [5]. Cheng, Wenhua, et al. "Optimize Weight Rounding via Signed Gradient Descent for the Quantization of LLMs" arXiv preprint arXiv:2309.05516 (2023). -[6]. Badri, Hicham and Shaji, Appu. "Half-Quadratic Quantization of Large Machine Learning Models." [Online] Available: https://mobiusml.github.io/hqq_blog/ (2023). +[6]. Badri, Hicham and Shaji, Appu. "Half-Quadratic Quantization of Large Machine Learning Models." [Online] Available: <https://mobiusml.github.io/hqq_blog/> (2023). [7]. Dettmers, Tim, et al. "Qlora: Efficient finetuning of quantized llms." arXiv preprint arXiv:2305.14314 (2023). diff --git a/docs/3x/PyTorch.md b/docs/source/3x/PyTorch.md similarity index 85% rename from docs/3x/PyTorch.md rename to docs/source/3x/PyTorch.md index b8c4ea2c7c5..a3004f6bcfb 100644 --- a/docs/3x/PyTorch.md +++ b/docs/source/3x/PyTorch.md @@ -194,6 +194,21 @@ def load(output_dir="./saved_results", model=None): ✔ link + + MX Quantization + Microscaling Data Formats for +Deep Learning + PyTorch eager mode + ✔ + link + + + Mixed Precision + Mixed precision + PyTorch eager mode + ✔ + link + Quantization Aware Training Quantization Aware Training @@ -223,3 +238,24 @@ def load(output_dir="./saved_results", model=None): + +2. How to set a different configuration for a specific op_name or op_type? + > INC extends a `set_local` method based on the global configuration object to set custom configuration. + + ```python + def set_local(self, operator_name_or_list: Union[List, str, Callable], config: BaseConfig) -> BaseConfig: + """Set custom configuration based on the global configuration object. + + Args: + operator_name_or_list (Union[List, str, Callable]): specific operator + config (BaseConfig): specific configuration + """ + ``` + + > Demo: + + ```python + quant_config = RTNConfig() # Initialize global configuration with default bits=4 + quant_config.set_local(".*mlp.*", RTNConfig(bits=8)) # For layers with "mlp" in their names, set bits=8 + quant_config.set_local("Conv1d", RTNConfig(dtype="fp32")) # For Conv1d layers, do not quantize them.
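+    # The local rules above take effect for the matched operators and override the global RTNConfig
+    # (assumed behavior of set_local, as described in the FAQ entry above); the resulting quant_config
+    # is then passed to prepare()/convert() as usual, e.g. model = convert(prepare(model, quant_config)).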
+ ``` diff --git a/docs/3x/TF_Quant.md b/docs/source/3x/TF_Quant.md similarity index 98% rename from docs/3x/TF_Quant.md rename to docs/source/3x/TF_Quant.md index d80c25ecada..9314a3c8200 100644 --- a/docs/3x/TF_Quant.md +++ b/docs/source/3x/TF_Quant.md @@ -13,7 +13,7 @@ TensorFlow Quantization `neural_compressor.tensorflow` supports quantizing both TensorFlow and Keras model with or without accuracy aware tuning. -For the detailed quantization fundamentals, please refer to the document for [Quantization](../quantization.md). +For the detailed quantization fundamentals, please refer to the document for [Quantization](quantization.md). ## Get Started diff --git a/docs/3x/TF_SQ.md b/docs/source/3x/TF_SQ.md similarity index 97% rename from docs/3x/TF_SQ.md rename to docs/source/3x/TF_SQ.md index 5225138e502..1d3a08836b5 100644 --- a/docs/3x/TF_SQ.md +++ b/docs/source/3x/TF_SQ.md @@ -50,4 +50,4 @@ best_model = autotune( ## Examples -Users can also refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/tensorflow/nlp/large_language_models\quantization\ptq\smoothquant) on how to apply smooth quant to a TensorFlow model with `neural_compressor.tensorflow`. +Users can also refer to [examples](https://github.com/intel/neural-compressor/blob/master/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant) on how to apply smooth quant to a TensorFlow model with `neural_compressor.tensorflow`. diff --git a/docs/3x/TensorFlow.md b/docs/source/3x/TensorFlow.md similarity index 82% rename from docs/3x/TensorFlow.md rename to docs/source/3x/TensorFlow.md index 5634a524f14..6e4936f2c63 100644 --- a/docs/3x/TensorFlow.md +++ b/docs/source/3x/TensorFlow.md @@ -2,12 +2,16 @@ TensorFlow =============== -1. [Introduction](#introduction) -2. [API for TensorFlow](#api-for-tensorflow) -3. [Support Matrix](#support-matrix) - 3.1 [Quantization Scheme](#quantization-scheme) - 3.2 [Quantization Approaches](#quantization-approaches) - 3.3 [Backend and Device](#backend-and-device) +- [TensorFlow](#tensorflow) + - [Introduction](#introduction) + - [API for TensorFlow](#api-for-tensorflow) + - [Support Matrix](#support-matrix) + - [Quantization Scheme](#quantization-scheme) + - [Quantization Approaches](#quantization-approaches) + - [Post Training Static Quantization](#post-training-static-quantization) + - [Smooth Quantization](#smooth-quantization) + - [Mixed Precision](#mixed-precision) + - [Backend and Device](#backend-and-device) ## Introduction @@ -23,7 +27,7 @@ Intel(R) Neural Compressor provides `quantize_model` and `autotune` as main inte **quantize_model** -The design philosophy of the `quantize_model` interface is easy-of-use. With minimal parameters requirement, including `model`, `quant_config`, `calib_dataloader` and `calib_iteration`, it offers a straightforward choice of quantizing TF model in one-shot. +The design philosophy of the `quantize_model` interface is ease of use. With minimal parameter requirements, including `model`, `quant_config`, `calib_dataloader`, `calib_iteration`, it offers a straightforward way to quantize a TF model in one shot. ```python def quantize_model( @@ -31,6 +35,7 @@ def quantize_model( quant_config: Union[BaseConfig, list], calib_dataloader: Callable = None, calib_iteration: int = 100, + calib_func: Callable = None, ): ``` `model` should be a string of the model's location, the object of Keras model or INC TF model wrapper class.
@@ -41,6 +46,9 @@ def quantize_model( `calib_iteration` is used to decide how many iterations the calibration process will be run. +`calib_func` is a substitute for `calib_dataloader` when the built-in calibration function of INC does not work for model inference. + + Here is a simple example of using `quantize_model` interface with a dummy calibration dataloader and the default `StaticQuantConfig`: ```python from neural_compressor.tensorflow import StaticQuantConfig, quantize_model @@ -68,6 +76,7 @@ def autotune( eval_args: Optional[Tuple[Any]] = None, calib_dataloader: Callable = None, calib_iteration: int = 100, + calib_func: Callable = None, ) -> Optional[BaseModel]: ``` `model` should be a string of the model's location, the object of Keras model or INC TF model wrapper class. @@ -82,6 +91,8 @@ def autotune( `calib_iteration` is used to decide how many iterations the calibration process will be run. +`calib_func` is a substitute for `calib_dataloader` when the built-in calibration function of INC does not work for model inference. + Here is a simple example of using `autotune` interface with different quantization rules defined by a list of `StaticQuantConfig`: ```python from neural_compressor.common.base_tuning import TuningConfig @@ -145,9 +156,16 @@ The supported Quantization methods for TensorFlow and Keras are listed below: TensorFlow/Intel TensorFlow - Smooth Quantization(SQ) - weights - calibration + Smooth Quantization(SQ) + weights + calibration + Tensorflow + TensorFlow/Intel TensorFlow + + + Mixed Precision(MP) + weights and activations + NA Tensorflow TensorFlow/Intel TensorFlow @@ -168,6 +186,10 @@ Smooth Quantization (SQ) is an advanced quantization technique designed to optim Refer to the [SQ Guide](./TF_SQ.md) for detailed information. +##### Mixed Precision +Mixed Precision (MP) is enabled with Post Training Static Quantization. Once `BF16` is supported on the machine, the matched operators will be automatically converted. + + #### Backend and Device Intel(R) Neural Compressor supports TF GPU with [ITEX-XPU](https://github.com/intel/intel-extension-for-tensorflow). We will automatically run model on GPU by checking if it has been installed. diff --git a/docs/3x/autotune.md b/docs/source/3x/autotune.md similarity index 100% rename from docs/3x/autotune.md rename to docs/source/3x/autotune.md diff --git a/docs/source/3x/benchmark.md b/docs/source/3x/benchmark.md new file mode 100644 index 00000000000..571e0f83f80 --- /dev/null +++ b/docs/source/3x/benchmark.md @@ -0,0 +1,61 @@ +Benchmark +--- + +1. [Introduction](#introduction) + +2. [Supported Matrix](#supported-matrix) + +3. [Usage](#usage) + +## Introduction + +Intel Neural Compressor provides a command `incbench` to launch the Intel CPU performance benchmark. + +To get the peak performance on Intel Xeon CPUs, we should avoid crossing NUMA nodes in one instance. +Therefore, by default, `incbench` will trigger 1 instance on the first NUMA node. + +## Supported Matrix + +| Platform | Status | +|:---:|:---:| +| Linux | ✔ | +| Windows | ✔ | + +## Usage + +| Parameters | Default | Comments | +|:----------------------:|:------------------------:|:-------------------------------------:| +| num_instances | 1 | Number of instances | +| num_cores_per_instance | None | Number of cores in each instance | +| C, cores | 0-${num_cores_on_NUMA-1} | Decides the visible core range | +| cross_memory | False | Whether to allocate memory across NUMA nodes | + +> Note: cross_memory should be set to True only when memory is insufficient.
+ +### General Use Cases + +1. `incbench main.py`: run 1 instance on NUMA:0. +2. `incbench --num_i 2 main.py`: run 2 instances on NUMA:0. +3. `incbench --num_c 2 main.py`: run multiple instances with 2 cores per instance on NUMA:0. +4. `incbench -C 24-47 main.py`: run 1 instance on COREs:24-47. +5. `incbench -C 24-47 --num_c 4 main.py`: run multiple instances with 4 COREs per instance on COREs:24-47. + +> Note: + > - `num_i` works the same as `num_instances` + > - `num_c` works the same as `num_cores_per_instance` + +### Dump Throughput and Latency Summary + +To merge benchmark results from multiple instances, `incbench` automatically checks log file messages for "throughput" and "latency" information matching the following patterns. + +```python +throughput_pattern = r"[T,t]hroughput:\s*([0-9]*\.?[0-9]+)\s*([a-zA-Z/]*)" +latency_pattern = r"[L,l]atency:\s*([0-9]*\.?[0-9]+)\s*([a-zA-Z/]*)" +``` + +#### Demo usage + +```python +print("Throughput: {:.3f} samples/sec".format(throughput)) +print("Latency: {:.3f} ms".format(latency * 10**3)) +``` diff --git a/docs/source/3x/client_quant.md b/docs/source/3x/client_quant.md new file mode 100644 index 00000000000..9921560c798 --- /dev/null +++ b/docs/source/3x/client_quant.md @@ -0,0 +1,40 @@ +Quantization on Client +========================================== + +1. [Introduction](#introduction) +2. [Get Started](#get-started) + +## Introduction + +For the `RTN` and `GPTQ` algorithms, we provide default algorithm configurations for different processor types (`client` and `server`). Generally, lightweight configurations are tailored specifically for client devices to enhance performance and efficiency. + + +## Get Started + +Here, we take the `RTN` algorithm as an example to demonstrate the usage on a client machine. + +```python +from neural_compressor.torch.quantization import get_default_rtn_config, convert, prepare +from neural_compressor.torch import load_empty_model + +model_state_dict_path = "/path/to/model/state/dict" +float_model = load_empty_model(model_state_dict_path) +quant_config = get_default_rtn_config() +prepared_model = prepare(float_model, quant_config) +quantized_model = convert(prepared_model) +``` + +> [!TIP] +> By default, the appropriate configuration is determined based on hardware information, but users can explicitly specify `processor_type` as either `client` or `server` when calling `get_default_rtn_config`. + + +For Windows machines, run the following command to utilize all available cores automatically: + +```bash +python main.py +``` + +> [!TIP] +> For Linux systems, users need to configure the environment variables appropriately to achieve optimal performance. For example, set `OMP_NUM_THREADS` explicitly. For processors with hybrid architecture (including both P-cores and E-cores), it is recommended to bind tasks to all P-cores using `taskset`. + +RTN quantization is a quick process, finishing in tens of seconds and using several GB of RAM when working with 7B models, e.g., [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). However, for higher accuracy, the GPTQ algorithm is recommended, though be prepared for a longer quantization time. diff --git a/docs/source/3x/design.md b/docs/source/3x/design.md new file mode 100644 index 00000000000..73d4d2e5568 --- /dev/null +++ b/docs/source/3x/design.md @@ -0,0 +1,16 @@ +Design +===== + +## Architecture + + + Architecture + + +## Workflows + +Intel® Neural Compressor provides two workflows: Quantization and Auto-tune.
+ + Workflow + diff --git a/docs/source/3x/gaudi_version_map.md b/docs/source/3x/gaudi_version_map.md new file mode 100644 index 00000000000..65695479acb --- /dev/null +++ b/docs/source/3x/gaudi_version_map.md @@ -0,0 +1,16 @@ + +### Version mapping between Intel Neural Compressor and Gaudi Software Stack ###

| Intel Neural Compressor | Gaudi Software Stack |
|:---:|:---:|
| v3.0 | v1.17 |
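A quick way to check which Neural Compressor release is installed locally, so it can be matched against the mapping table above, is sketched below; this snippet is an illustrative addition rather than part of the original file, and it assumes the package exposes its version string as `neural_compressor.__version__`:

```python
# Minimal sketch: print the installed Intel Neural Compressor version for
# comparison with the version-mapping table above.
import neural_compressor

print(neural_compressor.__version__)
```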
diff --git a/docs/source/3x/imgs/architecture.png b/docs/source/3x/imgs/architecture.png new file mode 100644 index 00000000000..8d99b780422 Binary files /dev/null and b/docs/source/3x/imgs/architecture.png differ diff --git a/docs/3x/imgs/data_format.png b/docs/source/3x/imgs/data_format.png similarity index 100% rename from docs/3x/imgs/data_format.png rename to docs/source/3x/imgs/data_format.png diff --git a/docs/source/3x/imgs/lwq.png b/docs/source/3x/imgs/lwq.png new file mode 100644 index 00000000000..b2e75bc5d8e Binary files /dev/null and b/docs/source/3x/imgs/lwq.png differ diff --git a/docs/3x/imgs/mx_workflow.png b/docs/source/3x/imgs/mx_workflow.png similarity index 100% rename from docs/3x/imgs/mx_workflow.png rename to docs/source/3x/imgs/mx_workflow.png diff --git a/docs/3x/imgs/smoothquant.png b/docs/source/3x/imgs/smoothquant.png similarity index 100% rename from docs/3x/imgs/smoothquant.png rename to docs/source/3x/imgs/smoothquant.png diff --git a/docs/3x/imgs/sq_convert.png b/docs/source/3x/imgs/sq_convert.png similarity index 100% rename from docs/3x/imgs/sq_convert.png rename to docs/source/3x/imgs/sq_convert.png diff --git a/docs/3x/imgs/sq_pc.png b/docs/source/3x/imgs/sq_pc.png similarity index 100% rename from docs/3x/imgs/sq_pc.png rename to docs/source/3x/imgs/sq_pc.png diff --git a/docs/3x/imgs/INC3_WORKFLOW.png b/docs/source/3x/imgs/workflow.png similarity index 100% rename from docs/3x/imgs/INC3_WORKFLOW.png rename to docs/source/3x/imgs/workflow.png diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/dataset/.gitkeep b/docs/source/3x/llm_recipes.md similarity index 100% rename from neural_solution/examples/custom_models_optimized/tf_example1/dataset/.gitkeep rename to docs/source/3x/llm_recipes.md diff --git a/docs/3x/quantization.md b/docs/source/3x/quantization.md similarity index 99% rename from docs/3x/quantization.md rename to docs/source/3x/quantization.md index b26c49470a9..26ba158d54f 100644 --- a/docs/3x/quantization.md +++ b/docs/source/3x/quantization.md @@ -396,7 +396,7 @@ For supported quantization methods for `accuracy aware tuning` and the detailed User could refer to below chart to understand the whole tuning flow. -accuracy aware tuning working flow +accuracy aware tuning working flow diff --git a/docs/source/CONTRIBUTING.md b/docs/source/CONTRIBUTING.md index 58c703a20cd..4decbfb6d0d 100644 --- a/docs/source/CONTRIBUTING.md +++ b/docs/source/CONTRIBUTING.md @@ -48,10 +48,10 @@ Intel® Neural Compressor use [Azure DevOps](https://learn.microsoft.com/en-us/a And generally use [Azure Cloud Instance](https://azure.microsoft.com/en-us/pricing/purchase-options/pay-as-you-go) to deploy pipelines, e.g. Standard E16s v5. 
| Test Name | Test Scope | Test Pass Criteria | |-------------------------------|-----------------------------------------------|---------------------------| -| Code Scan | Pylint/Bandit/CopyRight/DocStyle/SpellCheck | PASS | +| Code Scan | Bandit/CopyRight/DocStyle/SpellCheck | PASS | | [DCO](https://github.com/apps/dco/) | Use `git commit -s` to sign off | PASS | | Unit Test | Pytest scripts under [test](/test) | PASS (No failure, No core dump, No segmentation fault, No coverage drop) | -| Model Test | Pytorch + TensorFlow + ONNX Runtime + MXNet | PASS (Functionality pass, FP32/INT8 No performance regression) | +| Model Test | Pytorch + TensorFlow + ONNX Runtime | PASS (Functionality pass, FP32/INT8 No performance regression) | ## Support diff --git a/docs/source/NAS.md b/docs/source/NAS.md deleted file mode 100644 index e922b971c97..00000000000 --- a/docs/source/NAS.md +++ /dev/null @@ -1,86 +0,0 @@ -# Neural Architecture Search - -1. [Introduction](#introduction) - - 1.1. [Basic NAS](#basic-nas) - - 1.2. [Dynamic NAS](#dynamic-nas) - -2. [NAS Support Matrix](#nas-support-matrix) -3. [Get Started with NAS API](#get-started-with-nas-api) - - 3.1. [Basic Usage](#basic-usage) - - 3.2. [Advanced Usage (Custom NAS)](#advanced-usage-custom-nas) - -4. [Examples](#examples) - -## Introduction -Neural Architecture Search (NAS) is the process of automating the design of artificial neural networks (ANN) architecture. NAS has been used to design networks that are on par with or outperform hand-designed architectures. Intel® Neural Compressor has supported two different NAS methods: Basic NAS and Dynamic NAS. - -### Basic NAS -Our Basic NAS method leverages a specific search algorithm from built-in search algorithms (grid search, random search, and Bayesian optimization are supported in Intel® Neural Compressor now) or user-defined search algorithms to propose the model architecture based on the given search space, then performs the train evaluation process to evaluate the potential of the proposed model architecture, after several iterations of such procedure, best-performing model architectures which lie in Pareto front will be returned. - -### Dynamic NAS -Dynamic Neural Architecture Search (DyNAS) is a super-network-based NAS approach that uses the metric predictors for predicting the metrics of the model architecture, it is >4x more sample efficient than typical one-shot predictor-based NAS approaches. -
-The flow of the DyNAS approach is shown in the following figure. In the first phase of the search, a small population of sub-networks is randomly sampled from the super-network and evaluated (validation measurement) to provide the initial training set for the inner predictor loop. After the predictors are trained, a multi-objective evolutionary search is performed in the predictor objective space. After this extensive search is performed, the best-performing sub-network configurations are selected to be the next iteration's validation population. The cycle continues until the search concludes when the user-defined evaluation count is met. -
-![DyNAS Workflow](./imgs/dynas.png) - -## NAS Support Matrix - -|NAS Algorithm |PyTorch |TensorFlow | -|------------------|:--------:|:---------:| -|Basic NAS |✔ |Not supported yet| -|Dynamic NAS |✔ |Not supported yet| - -## Get Started with NAS API - -### Basic Usage - -#### 1. Python code + YAML - -Simplest launcher code if NAS configuration is defined in user-defined yaml. - -```python -from neural_compressor.experimental import NAS - -agent = NAS("/path/to/user/yaml") -results = agent.search() -``` - -#### 2. Python code only - -NAS class also support `NASConfig` class as it's argument. - -```python -from neural_compressor.conf.config import NASConfig -from neural_compressor.experimental import NAS - -config = NASConfig(approach="dynas", search_algorithm="nsga2") -config.dynas.supernet = "ofa_mbv3_d234_e346_k357_w1.2" -config.dynas.metrics = ["acc", "macs"] -config.dynas.population = 50 -config.dynas.num_evals = 250 -config.dynas.results_csv_path = "search_results.csv" -config.dynas.batch_size = 64 -config.dynas.dataset_path = "/datasets/imagenet-ilsvrc2012" # example -agent = NAS(config) -results = agent.search() -``` - -### Advanced Usage (Custom NAS) - -Intel® Neural Compressor NAS API is defined under `neural_compressor.experimental.nas`, which takes a user defined yaml file or a [NASConfig](../../neural_compressor/conf/config.py#NASConfig) object as input. The user defined yaml or the [NASConfig](../../neural_compressor/conf/config.py#NASConfig) object defines necessary configuration of the NAS process. The [NAS](../../neural_compressor/experimental/nas/nas.py#NAS) class aims to create an object according to the defined NAS approach in the configuration, please note this NAS approach should be registered in the Intel® Neural Compressor. - -Currently, Intel® Neural Compressor supported two built-in NAS methods: [Basic NAS](../../neural_compressor/experimental/nas/basic_nas.py#BasicNAS) and [Dynamic NAS](../../neural_compressor/experimental/nas/dynas.py#DyNAS). Both methods are inherited from a base class called [NASBase](../../neural_compressor/experimental/nas/nas.py#NASBase). User can also customize their own NAS approach in Intel® Neural Compressor just by decorating their NAS approach class with function [nas_registry](../../neural_compressor/experimental/nas/nas_utils.py#nas_registry) as well as following the API in [NASBase](../../neural_compressor/experimental/nas/nas.py#NASBase), like the way used in the two built-in NAS methods. - -## Examples - -Following examples are supported in Intel® Neural Compressor: - -- DyNAS MobileNetV3 supernet Example: - - [DyNAS MobileNetV3 supernet Example](../../examples/notebook/dynas/MobileNetV3_Supernet_NAS.ipynb): DyNAS with MobileNetV3 supernet on ImageNet dataset. -- DyNAS Transformer LT supernet Example: - - [DyNAS Transformer LT supernet Example](../../examples/notebook/dynas/Transformer_LT_Supernet_NAS.ipynb): DyNAS with Transformer LT supernet on WMT En-De dataset. diff --git a/docs/source/adaptor.md b/docs/source/adaptor.md index 6765c5405de..b8af7a934fb 100644 --- a/docs/source/adaptor.md +++ b/docs/source/adaptor.md @@ -144,9 +144,6 @@ Onnxruntime already has [quantization tools](https://github.com/microsoft/onnxru tensorboard=False, fp32_baseline=False): ...... - def diagnosis_helper(self, fp32_model, int8_model, tune_cfg=None, save_path=None): - ...... - def save(self, model, path): ...... 
``` diff --git a/docs/source/api-doc/api_2.rst b/docs/source/api-doc/api_2.rst new file mode 100644 index 00000000000..b5528a0426a --- /dev/null +++ b/docs/source/api-doc/api_2.rst @@ -0,0 +1,29 @@ +2.0 API +#### + +**User facing APIs:** + +.. toctree:: + :maxdepth: 1 + + quantization.rst + mix_precision.rst + training.rst + benchmark.rst + config.rst + objective.rst + + +**Advanced APIs:** + +.. toctree:: + :maxdepth: 1 + + compression.rst + strategy.rst + model.rst + +**API document example:** + +.. toctree:: + api_doc_example.rst diff --git a/docs/source/api-doc/api_3.rst b/docs/source/api-doc/api_3.rst new file mode 100644 index 00000000000..7c01e073f0b --- /dev/null +++ b/docs/source/api-doc/api_3.rst @@ -0,0 +1,27 @@ +3.0 API +#### + +**PyTorch Extension API:** + +.. toctree:: + :maxdepth: 1 + + torch_quantization_common.rst + torch_quantization_config.rst + torch_quantization_autotune.rst + +**Tensorflow Extension API:** + +.. toctree:: + :maxdepth: 1 + + tf_quantization_common.rst + tf_quantization_config.rst + tf_quantization_autotune.rst + +**Other Modules:** + +.. toctree:: + :maxdepth: 1 + + benchmark.rst diff --git a/docs/source/api-doc/apis.rst b/docs/source/api-doc/apis.rst index 63d8f2f5ca8..15f92f83501 100644 --- a/docs/source/api-doc/apis.rst +++ b/docs/source/api-doc/apis.rst @@ -1,29 +1,12 @@ APIs #### -**User facing APIs:** - .. toctree:: :maxdepth: 1 - quantization.rst - mix_precision.rst - training.rst - benchmark.rst - config.rst - objective.rst - - -**Advanced APIs:** + api_3.rst .. toctree:: :maxdepth: 1 - compression.rst - strategy.rst - model.rst - -**API document example:** - -.. toctree:: - api_doc_example.rst + api_2.rst diff --git a/docs/source/api-doc/tf_quantization_autotune.rst b/docs/source/api-doc/tf_quantization_autotune.rst new file mode 100644 index 00000000000..241b7e42c77 --- /dev/null +++ b/docs/source/api-doc/tf_quantization_autotune.rst @@ -0,0 +1,6 @@ +Tensorflow Quantization AutoTune +============ + +.. autoapisummary:: + + neural_compressor.tensorflow.quantization.autotune diff --git a/docs/source/api-doc/tf_quantization_common.rst b/docs/source/api-doc/tf_quantization_common.rst new file mode 100644 index 00000000000..3b39d2c79cb --- /dev/null +++ b/docs/source/api-doc/tf_quantization_common.rst @@ -0,0 +1,6 @@ +Tensorflow Quantization Base API +################################# + +.. autoapisummary:: + + neural_compressor.tensorflow.quantization.quantize diff --git a/docs/source/api-doc/tf_quantization_config.rst b/docs/source/api-doc/tf_quantization_config.rst new file mode 100644 index 00000000000..4f5c757c31c --- /dev/null +++ b/docs/source/api-doc/tf_quantization_config.rst @@ -0,0 +1,6 @@ +Tensorflow Quantization Config +============ + +.. autoapisummary:: + + neural_compressor.tensorflow.quantization.config diff --git a/docs/source/api-doc/torch_quantization_autotune.rst b/docs/source/api-doc/torch_quantization_autotune.rst new file mode 100644 index 00000000000..3466ead4a09 --- /dev/null +++ b/docs/source/api-doc/torch_quantization_autotune.rst @@ -0,0 +1,6 @@ +Pytorch Quantization AutoTune +============ + +.. autoapisummary:: + + neural_compressor.torch.quantization.autotune diff --git a/docs/source/api-doc/torch_quantization_common.rst b/docs/source/api-doc/torch_quantization_common.rst new file mode 100644 index 00000000000..d2ad03b933d --- /dev/null +++ b/docs/source/api-doc/torch_quantization_common.rst @@ -0,0 +1,6 @@ +Pytorch Quantization Base API +################################# + +.. 
autoapisummary:: + + neural_compressor.torch.quantization.quantize diff --git a/docs/source/api-doc/torch_quantization_config.rst b/docs/source/api-doc/torch_quantization_config.rst new file mode 100644 index 00000000000..cc60be355d6 --- /dev/null +++ b/docs/source/api-doc/torch_quantization_config.rst @@ -0,0 +1,6 @@ +Pytorch Quantization Config +============ + +.. autoapisummary:: + + neural_compressor.torch.quantization.config diff --git a/docs/source/dataset.md b/docs/source/dataset.md deleted file mode 100644 index 0695d78a3ac..00000000000 --- a/docs/source/dataset.md +++ /dev/null @@ -1,165 +0,0 @@ -Dataset -======= - -1. [Introduction](#introduction) - -2. [Supported Framework Dataset Matrix](#supported-framework-dataset-matrix) - -3. [Get start with Dataset API](#get-start-with-dataset-api) - -4. [Examples](#examples) - -## Introduction - -To adapt to its internal dataloader API, Intel® Neural Compressor implements some built-in datasets. - -A dataset is a container which holds all data that can be used by the dataloader, and have the ability to be fetched by index or created as an iterator. One can implement a specific dataset by inheriting from the Dataset class by implementing `__iter__` method or `__getitem__` method, while implementing `__getitem__` method, `__len__` method is recommended. - -Users can use Neural Compressor built-in dataset objects as well as register their own datasets. - -## Supported Framework Dataset Matrix - -#### TensorFlow - -| Dataset | Parameters | Comments | Usage | -| :------ | :------ | :------ | :------ | -| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageRecord(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/validation-000-of-100
root/validation-001-of-100
...
root/validation-099-of-100
The file name needs to follow this pattern: '* - * -of- *' | **In yaml file:**
dataset:
   ImageRecord:
     root: /path/to/root
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImageRecord'] (root=root, transform=transform, filter=None)
| -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
dataset will read name and label of each image from image_list file, if user set image_list to None, it will read from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORecord(root, num_cores, transform, filter) | **root** (str): Root directory of dataset
**num_cores** (int, default=28):The number of input Datasets to interleave from in parallel
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Root is a full path to tfrecord file, which contains the file name.
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORecord:
     root: /path/to/tfrecord
     num_cores: 28
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['COCORecord'] (root, num_cores=28, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| COCONpy(root, npy_dir, anno_dir) | **root** (str): Root directory of dataset
**npy_dir** (str, default='val2017'): npy file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory | Please arrange data in this way:
/root/npy_dir/1.jpg.npy
/root/npy_dir/2.jpg.npy
...
/root/npy_dir/n.jpg.npy
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     npy_dir: /path/to/npy
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['COCONpy'] (root, npy_dir, anno_dir)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple):shape of total samples, the first dimension should be the sample count of the dataset. support create multi shape tensors, use list of tuples for each tuple in the list, will create a such size tensor.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**label** (bool, default=True):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: True
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple):create single or multi input tensors list represent the sample shape of the dataset, eg and image size should be represented as (224, 224, 3), tuple contains multiple list and represent multi input tensors.
**label_shape** (list or tuple):create single or multi label tensors list represent the sample shape of the label, eg and label size should be represented as (1,), tuple contains multiple list and represent multi label tensors. In yaml usage, it offers (1,) as the default value.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     label_shape: [1]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | -| style_transfer(content_folder, style_folder, crop_ratio, resize_shape, image_format, transform, filter) | **content_folder** (str):Root directory of content images
**style_folder** (str):Root directory of style images
**crop_ratio** (float, default=0.1):cropped ratio to each side
**resize_shape** (tuple, default=(256, 256)):target size of image
**image_format** (str, default='jpg'): target image format
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Dataset used for style transfer task. This Dataset is to construct a dataset from two specific image holders representing content image folder and style image folder. | **In yaml file:**
dataset:
   style_transfer:
     content_folder: /path/to/content_folder
     style_folder: /path/to/style_folder
     crop_ratio: 0.1
     resize_shape: [256, 256]
     image_format: 'jpg'
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['style_transfer'] (content_folder, style_folder, crop_ratio, resize_shape, image_format, transform=transform, filter=None) | -| TFRecordDataset(root, transform, filter) | **root** (str): filename of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions |Root is a full path to tfrecord file, which contains the file name. | **In yaml file:**
dataset:
   TFRecordDataset:
     root: /path/to/tfrecord
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['TFRecordDataset'] (root, transform=transform) | -| bert(root, label_file, task, transform, filter) | **root** (str): path of dataset
**label_file** (str): path of label file
**task** (str, default='squad'): task type of model
**model_type** (str, default='bert'): model type, support 'bert'.
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset supports tfrecord data, please refer to [Guide](../examples/tensorflow/nlp/bert_large_squad/quantization/ptq/README.md) to create tfrecord file first. | **In yaml file:**
dataset:
   bert:
     root: /path/to/root
     label_file: /path/to/label_file
     task: squad
     model_type: bert
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['bert'] (root, label_file, transform=transform) | -| sparse_dummy_v2(dense_shape, label_shape, sparse_ratio, low, high, dtype, transform, filter) | **dense_shape** (list or tuple):create single or multi sparse tensors, tuple represent the sample shape of the dataset, eg and image size should be represented as (224, 224, 3), tuple contains multiple list and represent multi input tensors.
**label_shape** (list or tuple):create single or multi label tensors list represent the sample shape of the label, eg and label size should be represented as (1,), tuple contains multiple list and represent multi label tensors. In yaml usage, it offers (1,) as the default value.
**sparse_ratio** (float, default=0.5): the ratio of sparsity, support [0, 1].
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   sparse_dummy_v2:
     dense_shape: [224, 224, 3]
     label_shape: [1]
     sparse_ratio: 0.5
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['sparse_dummy_v2'] (dense_shape, label_shape, sparse_ratio, low, high, dtype, transform=None, filter=None) | - -#### PyTorch - -| Dataset | Parameters | Comments | Usage | -| :------ | :------ | :------ | :------ | -| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if image_list is set to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Multiple tensors of different shapes are supported by passing a list of tuples, and one tensor is created per tuple.
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**label** (bool, default=True):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: True
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): create single or multiple input tensors. A list represents the sample shape of the dataset, e.g. an image could be represented as (224, 224, 3); a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): create single or multiple label tensors. A list represents the sample shape of the label, e.g. a label could be represented as (1,); a tuple of multiple lists represents multiple label tensors. In yaml usage, (1,) is offered as the default value.
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     label_shape: [1]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | -| bert(dataset, task, model_type, transform, filter) | **dataset** (list): list of data
**task** (str): the task of the model, support "classifier", "squad"
**model_type** (str, default='bert'): model type, support 'distilbert', 'bert', 'xlnet', 'xlm'
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This Dataset is constructed from the Bert TensorDataset and is not a full implementation from yaml config. The original repo link is: https://github.com/huggingface/transformers. When you want to use this Dataset, you should add it before you initialize your DataLoader. | **In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['bert'] (dataset, task, model_type, transform=transform, filter=None)
YAML configuration is not supported for this dataset yet | -| sparse_dummy_v2(dense_shape, label_shape, sparse_ratio, low, high, dtype, transform, filter) | **dense_shape** (list or tuple): create single or multiple sparse tensors. A list represents the sample shape of the dataset, e.g. an image could be represented as (224, 224, 3); a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): create single or multiple label tensors. A list represents the sample shape of the label, e.g. a label could be represented as (1,); a tuple of multiple lists represents multiple label tensors. In yaml usage, (1,) is offered as the default value.
**sparse_ratio** (float, default=0.5): the ratio of sparsity; the supported range is [0, 1].
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   sparse_dummy_v2:
     dense_shape: [224, 224, 3]
     label_shape: [1]
     sparse_ratio: 0.5
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['sparse_dummy_v2'] (dense_shape, label_shape, sparse_ratio, low, high, dtype, transform=None, filter=None) | - -#### MXNet - -| Dataset | Parameters | Comments | Usage | -| :------ | :------ | :------ | :------ | -| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if image_list is set to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Multiple tensors of different shapes are supported by passing a list of tuples, and one tensor is created per tuple.
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**label** (bool, default=True):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: True
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): create single or multiple input tensors. A list represents the sample shape of the dataset, e.g. an image could be represented as (224, 224, 3); a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): create single or multiple label tensors. A list represents the sample shape of the label, e.g. a label could be represented as (1,); a tuple of multiple lists represents multiple label tensors. In yaml usage, (1,) is offered as the default value.
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     label_shape: [1]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | -| sparse_dummy_v2(dense_shape, label_shape, sparse_ratio, low, high, dtype, transform, filter) | **dense_shape** (list or tuple): create single or multiple sparse tensors. A list represents the sample shape of the dataset, e.g. an image could be represented as (224, 224, 3); a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): create single or multiple label tensors. A list represents the sample shape of the label, e.g. a label could be represented as (1,); a tuple of multiple lists represents multiple label tensors. In yaml usage, (1,) is offered as the default value.
**sparse_ratio** (float, default=0.5): the ratio of sparsity; the supported range is [0, 1].
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   sparse_dummy_v2:
     dense_shape: [224, 224, 3]
     label_shape: [1]
     sparse_ratio: 0.5
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['sparse_dummy_v2'] (dense_shape, label_shape, sparse_ratio, low, high, dtype, transform=None, filter=None) | - -#### ONNXRT - -| Dataset | Parameters | Comments | Usage | -| :------ | :------ | :------ | :------ | -| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if image_list is set to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Multiple tensors of different shapes are supported by passing a list of tuples, and one tensor is created per tuple.
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**label** (bool, default=True):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: True
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): create single or multiple input tensors. A list represents the sample shape of the dataset, e.g. an image could be represented as (224, 224, 3); a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): create single or multiple label tensors. A list represents the sample shape of the label, e.g. a label could be represented as (1,); a tuple of multiple lists represents multiple label tensors. In yaml usage, (1,) is offered as the default value.
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     label_shape: [1]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | -| GLUE(data_dir, model_name_or_path, max_seq_length, do_lower_case, task, model_type, dynamic_length, evaluate, transform, filter) | **data_dir** (str): The input data dir
**model_name_or_path** (str): Path to pre-trained student model or shortcut name,
**max_seq_length** (int, default=128): The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.
**do_lower_case** (bool, default=True): Whether or not to lowercase the input.
**task** (str): The name of the task to fine-tune. Choices include mrpc, qqp, qnli, rte, sts-b, cola, mnli, wnli.
**model_type** (str, default='bert'): model type, support 'distilbert', 'bert', 'mobilebert', 'roberta'.
**dynamic_length** (bool, default=False): Whether to use dynamic sequence length.
**evaluate** (bool, default=True): Whether to do evaluation or training.
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Refer to [this example](/examples/onnxrt/language_translation/bert) on how to prepare dataset | **In yaml file:**
dataset:
   bert:
     data_dir: /path/to/data
     model_name_or_path: bert-base-uncased
(transform and filter are not set in the range of dataset)
**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['bert'] (data_dir='/path/to/data/', model_name_or_path='bert-base-uncased', max_seq_length=128, task='mrpc', model_type='bert', dynamic_length=True, transform=None, filter=None) | -| sparse_dummy_v2(dense_shape, label_shape, sparse_ratio, low, high, dtype, transform, filter) | **dense_shape** (list or tuple): create single or multiple sparse tensors. A list represents the sample shape of the dataset, e.g. an image could be represented as (224, 224, 3); a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): create single or multiple label tensors. A list represents the sample shape of the label, e.g. a label could be represented as (1,); a tuple of multiple lists represents multiple label tensors. In yaml usage, (1,) is offered as the default value.
**sparse_ratio** (float, default=0.5): the ratio of sparsity; the supported range is [0, 1].
**low** (list or float, default=-128.): scale the tensor value range from [0, 1] to [0, low], or to [low, 0] if low < 0. If a float, the same low value is applied to all tensors; if a list, its length should match the shape list.
**high** (list or float, default=127.): shift the tensor values by adding high to each element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   sparse_dummy_v2:
     dense_shape: [224, 224, 3]
     label_shape: [1]
     sparse_ratio: 0.5
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.data import Datasets
datasets = Datasets(framework)
dataset = datasets['sparse_dummy_v2'] (dense_shape, label_shape, sparse_ratio, low, high, dtype, transform=None, filter=None) | - -## Get start with Dataset API - -### Config dataloader in a yaml file - -```yaml -quantization: - approach: post_training_static_quant - calibration: - dataloader: - dataset: - COCORaw: - root: /path/to/calibration/dataset - filter: - LabelBalance: - size: 1 - transform: - Resize: - size: 300 - -evaluation: - accuracy: - metric: - ... - dataloader: - batch_size: 16 - dataset: - COCORaw: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 300 - performance: - dataloader: - batch_size: 16 - dataset: - dummy_v2: - input_shape: [224, 224, 3] -``` - -## User-specific dataset - -Users can register their own datasets as follows: - -```python -class Dataset(object): - def __init__(self, args): - # init code here - - def __getitem__(self, idx): - # use idx to get data and label - return data, label - - def __len__(self): - return len - -``` - -After defining the dataset class, pass it to the quantizer: - -```python -from neural_compressor.experimental import Quantization, common - -quantizer = Quantization(yaml_file) -quantizer.calib_dataloader = common.DataLoader( - dataset -) # user can pass more optional args to dataloader such as batch_size and collate_fn -quantizer.model = graph -quantizer.eval_func = eval_func -q_model = quantizer.fit() -``` - -## Examples - -- Refer to this [example](https://github.com/intel/neural-compressor/tree/v1.14.2/examples/onnxrt/object_detection/onnx_model_zoo/DUC/quantization/ptq) to learn how to define a customised dataset. - -- Refer to this [HelloWorld example](/examples/helloworld/tf_example6) to learn how to configure a built-in dataset. diff --git a/docs/source/diagnosis.md b/docs/source/diagnosis.md deleted file mode 100644 index 9e70d695489..00000000000 --- a/docs/source/diagnosis.md +++ /dev/null @@ -1,197 +0,0 @@ -# Diagnosis -1. [Diagnosis Introduction](#diagnosis-introduction) -2. [Supported Feature Matrix](#supported-feature-matrix) -3. [Get Started](#get-started) -4. [Example](#example) -5. [Step by Step Diagnosis Example with TensorFlow](https://github.com/intel/neural-compressor/tree/master/neural_insights/docs/source/tf_accuracy_debug.md) -5. [Step by Step Diagnosis Example with ONNXRT](https://github.com/intel/neural-compressor/tree/master/neural_insights/docs/source/onnx_accuracy_debug.md) - -# Diagnosis Introduction -The diagnosis feature provides methods to debug the accuracy loss during quantization and profile the performance gap during benchmark. -There are 2 ways to diagnose a model with Intel® Neural Compressor. First is non-GUI mode that is described below and second is GUI mode with [Neural Insights](https://github.com/intel/neural-compressor/tree/master/neural_insights) component. - -The workflow is described in the diagram below. First we have to configure scripts with diagnosis, then run them and check diagnosis info in the terminal. Test if the result is satisfying and repeat the steps if needed. -![workflow](./imgs/workflow.jpg) - -# Supported Feature Matrix - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Types | Diagnosis data | Framework | Backend |
| :--- | :--- | :--- | :--- |
| Post-Training Static Quantization (PTQ) | weights and activations | TensorFlow | TensorFlow/Intel TensorFlow |
| Post-Training Static Quantization (PTQ) | weights and activations | ONNX Runtime | QLinearops/QDQ |
| Benchmark Profiling | OP execute duration | TensorFlow | TensorFlow/Intel TensorFlow |
| Benchmark Profiling | OP execute duration | ONNX Runtime | QLinearops/QDQ |
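Both diagnosis paths in the matrix above are switched on through a single flag on the corresponding config object described in the steps below. A minimal sketch, where `model`, `dataloader` and `eval_func` are placeholders for the objects prepared later on this page:

```python
# Hedged sketch: enable diagnosis for quantization and benchmarking (2.X API).
# `model`, `dataloader` and `eval_func` are placeholders prepared elsewhere.
from neural_compressor import quantization
from neural_compressor.config import PostTrainingQuantConfig, BenchmarkConfig

quant_config = PostTrainingQuantConfig(diagnosis=True)  # dump weights/activations summaries
bench_config = BenchmarkConfig(diagnosis=True)  # dump per-OP execution durations during benchmark
q_model = quantization.fit(model, quant_config, calib_dataloader=dataloader, eval_func=eval_func)
```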
- -# Get Started -## Install Intel® Neural Compressor -First you need to install Intel® Neural Compressor. -```shell -git clone https://github.com/intel/neural-compressor.git -cd neural-compressor -pip install -r requirements.txt -python setup.py install -``` - -## Modify script -Modify quantization/benchmark script to run diagnosis by adding argument `diagnosis` set to `True` to `PostTrainingQuantConfig`/`BenchmarkConfig` as shown below. - -### Quantization diagnosis -```python -config = PostTrainingQuantConfig(diagnosis=True, ...) -``` - -### Benchmark diagnosis -```python -config = BenchmarkConfig(diagnosis=True, ...) -``` - -# Example -Below it is explained how to run diagnosis for ONNX ResNet50 model. - -## Prepare dataset - -Download dataset [ILSVR2012 validation Imagenet dataset](http://www.image-net.org/challenges/LSVRC/2012/downloads). - -Download label: -```shell -wget http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz -tar -xvzf caffe_ilsvrc12.tar.gz val.txt -``` - -## Run quantization script -Then execute script with quantization API in another terminal with --diagnose flag. -```shell -python examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/main.py \ - --model_path=/path/to/resnet50_v1.onnx/ \ - --dataset_location=/path/to/ImageNet/ \ - --label_path=/path/to/val.txt/ - --tune - --diagnose -``` - -## Run benchmark script -To run profiling execute script with parameters shown in the command below. -```shell -python examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/main.py \ - --model_path=/path/to/resnet50_v1.onnx/ \ - --dataset_location=/path/to/ImageNet/ \ - --label_path=/path/to/val.txt/ - --mode=performance \​ - --benchmark \​ - --diagnose -``` - - -## See quantization data - -After script's execution you will see the results in your terminal. -In the activations summary you can see a table with OP name, MSE (mean squared error), activation minimum and maximum sorted by MSE. - -![activations](./imgs/terminal-ops.jpg) - -In the weights summary table there are parameters like minimum, maximum, mean, standard deviation and variance for input model. The table is also sorted by MSE. - -![weights](./imgs/terminal-weights.jpg) - -## How to do diagnosis -Neural Compressor diagnosis mode provides weights and activation data that includes several useful metrics for diagnosing potential losses of model accuracy. - -### Parameter description -Data is presented in the terminal in form of table where each row describes single OP in the model. We present such quantities measures like: - -**MSE - Mean Squared Error** - it is a metric that measures how big is the difference between input and optimized model's weights for specific OP. - -$$ -MSE = \sum_{i=1}^{n}(x_i-y_i)^2 -$$ - -**Input model min** - minimum value of the input OP tensor data - -$$ -\min{\vec{x}} -$$ - -**Input model max** - maximum value of the input OP tensor data - -$$ -\max{\vec{x}} -$$ - -**Input model mean** - mean value of the input OP tensor data - -$$ -\mu =\frac{1}{n} \sum_{i=1}^{n} x_{i} -$$ - -**Input model standard deviation** - standard deviation of the input OP tensor data - -$$ -\sigma =\sqrt{\frac{1}{n}\sum\limits_{i=1}^n (x_i - \mu)} -$$ - -**Input model variance** - variance of the input OP tensor data - -$$ -Var = \sigma^2 -$$ - -where,
-$x_i$ - input OP tensor data,
-$y_i$ - optimized OP tensor data,
-$\mu_x$ - input model mean,
-$\sigma_x$ - input model variance - -### Diagnosis suggestions -1. Check the nodes with MSE order. High MSE usually means higher possibility of accuracy loss happened during the quantization, so fallback those Ops may get some accuracy back. -2. Check the Min-Max data range. An dispersed data range usually means higher accuracy loss, so we can also try to full back those Ops. -3. Check with the other data and find some outliers, and try to fallback some Ops and test for the quantization accuracy. - -*Note: We can't always trust the debug rules, it's only a reference, sometimes the accuracy regression is hard to explain.* - -### Fallback setting example -```python -from neural_compressor import quantization, PostTrainingQuantConfig - -op_name_dict = {"v0/cg/conv0/conv2d/Conv2D": {"activation": {"dtype": ["fp32"]}}} -config = PostTrainingQuantConfig( - diagnosis=True, - op_name_dict=op_name_dict, -) -q_model = quantization.fit( - model, - config, - calib_dataloader=dataloader, - eval_func=eval, -) -``` - -## See profiling data - -In profiling section there is a table with nodes sorted by total execution time. It is possible to check which operations take the most time. - -![profiling](./imgs/terminal-profiling.jpg) diff --git a/docs/source/distillation.md b/docs/source/distillation.md deleted file mode 100644 index 7e2d6b063ff..00000000000 --- a/docs/source/distillation.md +++ /dev/null @@ -1,129 +0,0 @@ -Distillation -============ - -1. [Introduction](#introduction) - - 1.1. [Knowledge Distillation](#knowledge-distillation) - - 1.2. [Intermediate Layer Knowledge Distillation](#intermediate-layer-knowledge-distillation) - - 1.3. [Self Distillation](#self-distillation) - -2. [Distillation Support Matrix](#distillation-support-matrix) -3. [Get Started with Distillation API ](#get-started-with-distillation-api) -4. [Examples](#examples) - -## Introduction - -Distillation is one of popular approaches of network compression, which transfers knowledge from a large model to a smaller one without loss of validity. As smaller models are less expensive to evaluate, they can be deployed on less powerful hardware (such as a mobile device). Graph shown below is the workflow of the distillation, the teacher model will take the same input that feed into the student model to produce the output that contains knowledge of the teacher model to instruct the student model. -
- -Architecture - -Intel® Neural Compressor supports Knowledge Distillation, Intermediate Layer Knowledge Distillation and Self Distillation algorithms. - -### Knowledge Distillation -Knowledge distillation is proposed in [Distilling the Knowledge in a Neural Network](https://arxiv.org/abs/1503.02531). It leverages the logits (the input of softmax in the classification tasks) of teacher and student model to minimize the the difference between their predicted class distributions, this can be done by minimizing the below loss function. - -$$L_{KD} = D(z_t, z_s)$$ - -Where $D$ is a distance measurement, e.g. Euclidean distance and Kullback–Leibler divergence, $z_t$ and $z_s$ are the logits of teacher and student model, or predicted distributions from softmax of the logits in case the distance is measured in terms of distribution. - -### Intermediate Layer Knowledge Distillation - -There are more information contained in the teacher model beside its logits, for example, the output features of the teacher model's intermediate layers often been used to guide the student model, as in [Patient Knowledge Distillation for BERT Model Compression](https://arxiv.org/pdf/1908.09355) and [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984). The general loss function for this approach can be summarized as follow. - -$$L_{KD} = \sum\limits_i D(T_t^{n_i}(F_t^{n_i}), T_s^{m_i}(F_s^{m_i}))$$ - -Where $D$ is a distance measurement as before, $F_t^{n_i}$ the output feature of the $n_i$'s layer of the teacher model, $F_s^{m_i}$ the output feature of the $m_i$'s layer of the student model. Since the dimensions of $F_t^{n_i}$ and $F_s^{m_i}$ are usually different, the transformations $T_t^{n_i}$ and $T_s^{m_i}$ are needed to match dimensions of the two features. Specifically, the transformation can take the forms like identity, linear transformation, 1X1 convolution etc. - -### Self Distillation - -Self-distillation ia a one-stage training method where the teacher model and student models can be trained together. It attaches several attention modules and shallow classifiers at different depths of neural networks and distills knowledge from the deepest classifier to the shallower classifiers. Different from the conventional knowledge distillation methods where the knowledge of the teacher model is transferred to another student model, self-distillation can be considered as knowledge transfer in the same model, from the deeper layers to the shallower layers. -The additional classifiers in self-distillation allow the neural network to work in a dynamic manner, which leads to a much higher acceleration. -
- -Architecture - -Architecture from paper [Self-Distillation: Towards Efficient and Compact Neural Networks](https://ieeexplore.ieee.org/document/9381661) - -## Distillation Support Matrix - -|Distillation Algorithm |PyTorch |TensorFlow | -|------------------------------------------------|:--------:|:---------:| -|Knowledge Distillation |✔ |✔ | -|Intermediate Layer Knowledge Distillation |✔ |Will be supported| -|Self Distillation |✔ |✖ | - -## Get Started with Distillation API - -User can pass the customized training/evaluation functions to `Distillation` for flexible scenarios. In this case, distillation process can be done by pre-defined hooks in Neural Compressor. User needs to put those hooks inside the training function. - -Neural Compressor defines several hooks for user pass - -``` -on_train_begin() : Hook executed before training begins -on_after_compute_loss(input, student_output, student_loss) : Hook executed after each batch inference of student model -on_epoch_end() : Hook executed at each epoch end -``` - -Following section shows how to use hooks in user pass-in training function: - -```python -def training_func_for_nc(model): - compression_manager.on_train_begin() - for epoch in range(epochs): - compression_manager.on_epoch_begin(epoch) - for i, batch in enumerate(dataloader): - compression_manager.on_step_begin(i) - ...... - output = model(batch) - loss = ...... - loss = compression_manager.on_after_compute_loss(batch, output, loss) - loss.backward() - compression_manager.on_before_optimizer_step() - optimizer.step() - compression_manager.on_step_end() - compression_manager.on_epoch_end() - compression_manager.on_train_end() - -... -``` - -In this case, the launcher code for Knowledge Distillation is like the following: - -```python -from neural_compressor.training import prepare_compression -from neural_compressor.config import DistillationConfig, KnowledgeDistillationLossConfig - -distil_loss_conf = KnowledgeDistillationLossConfig() -conf = DistillationConfig(teacher_model=teacher_model, criterion=distil_loss_conf) -criterion = nn.CrossEntropyLoss() -optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) -compression_manager = prepare_compression(model, conf) -model = compression_manager.model - -model = training_func_for_nc(model) -eval_func(model) -``` - -For Intermediate Layer Knowledge Distillation or Self Distillation, the only difference to above launcher code is that `distil_loss_conf` should be set accordingly as shown below. More detailed settings can be found in this [example](../../examples/pytorch/nlp/huggingface_models/text-classification/optimization_pipeline/distillation_for_quantization/fx/run_glue_no_trainer.py#L510) for Intermediate Layer Knowledge Distillation and this [example](../../examples/pytorch/image_recognition/torchvision_models/self_distillation/eager/main.py#L344) for Self Distillation. - -```python -from neural_compressor.config import ( - IntermediateLayersKnowledgeDistillationLossConfig, - SelfKnowledgeDistillationLossConfig, -) - -# for Intermediate Layer Knowledge Distillation -distil_loss_conf = IntermediateLayersKnowledgeDistillationLossConfig(layer_mappings=layer_mappings) - -# for Self Distillation -distil_loss_conf = SelfKnowledgeDistillationLossConfig(layer_mappings=layer_mappings) -``` -## Examples -[Distillation PyTorch Examples](../../examples/README.md#distillation-1) -
-[Distillation TensorFlow Examples](../../examples/README.md#distillation) -
-[Distillation Examples Results](./validated_model_list.md#validated-knowledge-distillation-examples) diff --git a/docs/source/faq.md b/docs/source/faq.md index b13f1784365..7f2e732022a 100644 --- a/docs/source/faq.md +++ b/docs/source/faq.md @@ -17,3 +17,12 @@ ImportError: libGL.so.1: cannot open shared object file: No such file or directo #### Issue 4: Conda package *neural-compressor-full* (this binary is only available from v1.13 to v2.1.1) dependency conflict may pending on conda installation for a long time. **Solution:** run *conda install sqlalchemy=1.4.27 alembic=1.7.7 -c conda-forge* before install *neural-compressor-full*. +#### Issue 5: +If you run 3X torch extension API inside a docker container, then you may encounter the following error: +```shell +ValueError: No threading layer could be loaded. +HINT: +Intel TBB is required, try: +$ conda/pip install tbb +``` +**Solution:** It's actually already installed by `requirements_pt.txt`, so just need to set up with `export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH`. diff --git a/docs/source/get_started.md b/docs/source/get_started.md index 61c22912c41..0ba1e10d111 100644 --- a/docs/source/get_started.md +++ b/docs/source/get_started.md @@ -2,35 +2,87 @@ 1. [Quick Samples](#quick-samples) -2. [Validated Models](#validated-models) +2. [Feature Matrix](#feature-matrix) ## Quick Samples -### Quantization with Python API ```shell -# Install Intel Neural Compressor and TensorFlow -pip install neural-compressor -pip install tensorflow -# Prepare fp32 model -wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/mobilenet_v1_1.0_224_frozen.pb +# Install Intel Neural Compressor +pip install neural-compressor-pt ``` ```python -from neural_compressor.data import DataLoader, Datasets -from neural_compressor.config import PostTrainingQuantConfig +from transformers import AutoModelForCausalLM +from neural_compressor.torch.quantization import RTNConfig, prepare, convert -dataset = Datasets("tensorflow")["dummy"](shape=(1, 224, 224, 3)) -dataloader = DataLoader(framework="tensorflow", dataset=dataset) +user_model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m") +quant_config = RTNConfig() +prepared_model = prepare(model=user_model, quant_config=quant_config) +quantized_model = convert(model=prepared_model) +``` -from neural_compressor.quantization import fit +## Feature Matrix +Intel Neural Compressor 3.X extends PyTorch and TensorFlow's APIs to support compression techniques. +The below table provides a quick overview of the APIs available in Intel Neural Compressor 3.X. +The Intel Neural Compressor 3.X mainly focuses on quantization-related features, especially for algorithms that benefit LLM accuracy and inference. +It also provides some common modules across different frameworks. For example, Auto-tune support accuracy driven quantization and mixed precision, benchmark aimed to measure the multiple instances performance of the quantized model. -q_model = fit( - model="./mobilenet_v1_1.0_224_frozen.pb", - conf=PostTrainingQuantConfig(), - calib_dataloader=dataloader, -) -``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Section | Topics |
| :--- | :--- |
| Overview | Architecture, Workflow, APIs, LLMs Recipes, Examples |
| PyTorch Extension APIs | Overview, Static Quantization, Dynamic Quantization, Smooth Quantization, Weight-Only Quantization, MX Quantization, Mixed Precision |
| TensorFlow Extension APIs | Overview, Static Quantization, Smooth Quantization |
| Other Modules | Auto Tune, Benchmark |
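As a concrete illustration of the accuracy-driven Auto Tune module listed above, a hedged sketch follows; the `autotune`/`TuningConfig` entry points and their signatures are assumptions to be checked against the Auto Tune documentation, and `user_model`/`evaluate` are placeholders (`user_model` as in the Quick Samples snippet).

```python
# Hedged sketch of accuracy-driven tuning with the 3.X PyTorch extension API.
# Assumption: `autotune` and `TuningConfig` are exposed next to RTNConfig; verify
# the exact names and signatures in the Auto Tune documentation.
from neural_compressor.torch.quantization import RTNConfig, TuningConfig, autotune

def eval_fn(model) -> float:
    return evaluate(model)  # placeholder: return task accuracy for `model`

tune_config = TuningConfig(config_set=[RTNConfig(bits=[4, 8])])  # search over 4-bit and 8-bit RTN
best_model = autotune(model=user_model, tune_config=tune_config, eval_fn=eval_fn)
```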
-## Validated Models -Intel® Neural Compressor validated the quantization for 10K+ models from popular model hubs (e.g., HuggingFace Transformers, Torchvision, TensorFlow Model Hub, ONNX Model Zoo). -Over 30 pruning, knowledge distillation and model export samples are also available. -More details for validated typical models are available [here](/examples/README.md). +> **Note**: +> From 3.0 release, we recommend to use 3.X API. Compression techniques during training such as QAT, Pruning, Distillation only available in [2.X API](https://github.com/intel/neural-compressor/blob/master/docs/source/2x_user_guide.md) currently. diff --git a/docs/source/imgs/dynas.png b/docs/source/imgs/dynas.png deleted file mode 100644 index e82a1c03160..00000000000 Binary files a/docs/source/imgs/dynas.png and /dev/null differ diff --git a/docs/source/imgs/release_data.png b/docs/source/imgs/release_data.png deleted file mode 100644 index 5e06eb067e1..00000000000 Binary files a/docs/source/imgs/release_data.png and /dev/null differ diff --git a/docs/source/imgs/tensorboard_baseline_v0_cg_conv0.png b/docs/source/imgs/tensorboard_baseline_v0_cg_conv0.png deleted file mode 100644 index daa3036c516..00000000000 Binary files a/docs/source/imgs/tensorboard_baseline_v0_cg_conv0.png and /dev/null differ diff --git a/docs/source/imgs/tensorboard_tune_1_v0_cg_conv0.png b/docs/source/imgs/tensorboard_tune_1_v0_cg_conv0.png deleted file mode 100644 index 6086a6eb837..00000000000 Binary files a/docs/source/imgs/tensorboard_tune_1_v0_cg_conv0.png and /dev/null differ diff --git a/docs/source/imgs/tensorboard_v0_cg_conv0_histogram.png b/docs/source/imgs/tensorboard_v0_cg_conv0_histogram.png deleted file mode 100644 index 6f5d52a5cb8..00000000000 Binary files a/docs/source/imgs/tensorboard_v0_cg_conv0_histogram.png and /dev/null differ diff --git a/docs/source/imgs/terminal-ops.jpg b/docs/source/imgs/terminal-ops.jpg deleted file mode 100644 index 32bfee29110..00000000000 Binary files a/docs/source/imgs/terminal-ops.jpg and /dev/null differ diff --git a/docs/source/imgs/terminal-profiling.jpg b/docs/source/imgs/terminal-profiling.jpg deleted file mode 100644 index 642a64ab446..00000000000 Binary files a/docs/source/imgs/terminal-profiling.jpg and /dev/null differ diff --git a/docs/source/imgs/terminal-weights.jpg b/docs/source/imgs/terminal-weights.jpg deleted file mode 100644 index 3fe8501017b..00000000000 Binary files a/docs/source/imgs/terminal-weights.jpg and /dev/null differ diff --git a/docs/source/imgs/tutorial.png b/docs/source/imgs/tutorial.png deleted file mode 100644 index 1c57041b0ce..00000000000 Binary files a/docs/source/imgs/tutorial.png and /dev/null differ diff --git a/docs/source/imgs/workflow.jpg b/docs/source/imgs/workflow.jpg deleted file mode 100644 index c40f02b99b9..00000000000 Binary files a/docs/source/imgs/workflow.jpg and /dev/null differ diff --git a/docs/source/infrastructure.md b/docs/source/infrastructure.md index 96f0ea3fca8..cfa1912c571 100644 --- a/docs/source/infrastructure.md +++ b/docs/source/infrastructure.md @@ -182,19 +182,6 @@ Intel® Neural Compressor has unified interfaces which dispatch tasks to differe -
-
- -[Neural architecture search](NAS.md): -|Approach |Framework | -|------------------------------------------------|:-----------:| -|Basic |PyTorch | -|DyNas |PyTorch | - -
-
- - [Mixed precision](mixed_precision.md): |Framework | | |--------------|:-----------:| diff --git a/docs/source/installation_guide.md b/docs/source/installation_guide.md index d73f885d403..ca33138b2a4 100644 --- a/docs/source/installation_guide.md +++ b/docs/source/installation_guide.md @@ -27,43 +27,49 @@ The following prerequisites and requirements must be satisfied for a successful > Notes: > - If you get some build issues, please check [frequently asked questions](faq.md) at first. -### Install from Binary -- Install from Pypi - ```Shell - # install stable basic version from pypi - pip install neural-compressor - ``` - ```Shell - # [Experimental] install stable basic + PyTorch framework extension API from pypi - pip install neural-compressor[pt] - ``` - ```Shell - # [Experimental] install stable basic + TensorFlow framework extension API from pypi - pip install neural-compressor[tf] - ``` +### Install Framework +#### Install torch for CPU +```Shell +pip install torch --index-url https://download.pytorch.org/whl/cpu +``` +#### Use Docker Image with torch installed for HPU +https://docs.habana.ai/en/latest/Installation_Guide/Bare_Metal_Fresh_OS.html#bare-metal-fresh-os-single-click -- Install from test Pypi - ```Shell - # install nightly version - git clone https://github.com/intel/neural-compressor.git - cd neural-compressor - pip install -r requirements.txt - # install nightly basic version from pypi - pip install -i https://test.pypi.org/simple/ neural-compressor - ``` +#### Install torch/intel_extension_for_pytorch for Intel GPU +https://intel.github.io/intel-extension-for-pytorch/index.html#installation -- Install from Conda - ```Shell - # install on Linux OS - conda install opencv-python-headless -c fastai - conda install neural-compressor -c conda-forge -c intel - ``` - ```Shell - # install on Windows OS - conda install pycocotools -c esri - conda install opencv-python-headless -c fastai - conda install neural-compressor -c conda-forge -c intel - ``` +#### Install torch for other platform +https://pytorch.org/get-started/locally + +#### Install tensorflow +```Shell +pip install tensorflow +``` + +### Install from Binary +- Install from Pypi +```Shell +# Install 2.X API + Framework extension API + PyTorch dependency +pip install neural-compressor[pt] +``` +```Shell +# Install 2.X API + Framework extension API + TensorFlow dependency +pip install neural-compressor[tf] +``` +```Shell +# Install 2.X API + Framework extension API +# With this install CMD, some dependencies for framework extension API not installed, +# you can install them separately by `pip install -r requirements_pt.txt` or `pip install -r requirements_tf.txt`. 
+pip install neural-compressor +``` +```Shell +# Framework extension API + PyTorch dependency +pip install neural-compressor-pt +``` +```Shell +# Framework extension API + TensorFlow dependency +pip install neural-compressor-tf +``` ### Install from Source @@ -71,8 +77,9 @@ The following prerequisites and requirements must be satisfied for a successful git clone https://github.com/intel/neural-compressor.git cd neural-compressor pip install -r requirements.txt - # build with basic functionality python setup.py install + [optional] pip install -r requirements_pt.txt # for PyTorch framework extension API + [optional] pip install -r requirements_tf.txt # for TensorFlow framework extension API ``` ### Install from AI Kit @@ -88,15 +95,20 @@ The AI Kit is distributed through many common channels, including from Intel's w ## System Requirements ### Validated Hardware Environment + +#### Intel® Neural Compressor supports HPUs based on heterogeneous architecture with two compute engines (MME and TPC): +* Intel Gaudi AI Accelerators (Gaudi2) + #### Intel® Neural Compressor supports CPUs based on [Intel 64 architecture or compatible processors](https://en.wikipedia.org/wiki/X86-64): -* Intel Xeon Scalable processor (formerly Skylake, Cascade Lake, Cooper Lake, Ice Lake, and Sapphire Rapids) -* Intel Xeon CPU Max Series (formerly Sapphire Rapids HBM) +* Intel Xeon Scalable processor (Skylake, Cascade Lake, Cooper Lake, Ice Lake, and Sapphire Rapids) +* Intel Xeon CPU Max Series (Sapphire Rapids HBM) +* Intel Core Ultra Processors (Meteor Lake) #### Intel® Neural Compressor supports GPUs built on Intel's Xe architecture: -* Intel Data Center GPU Flex Series (formerly Arctic Sound-M) -* Intel Data Center GPU Max Series (formerly Ponte Vecchio) +* Intel Data Center GPU Flex Series (Arctic Sound-M) +* Intel Data Center GPU Max Series (Ponte Vecchio) #### Intel® Neural Compressor quantized ONNX models support multiple hardware vendors through ONNX Runtime: @@ -112,7 +124,6 @@ The AI Kit is distributed through many common channels, including from Intel's w Framework TensorFlow - Intel
TensorFlow Intel®
Extension for
TensorFlow* PyTorch Intel®
Extension for
PyTorch* @@ -122,25 +133,26 @@ The AI Kit is distributed through many common channels, including from Intel's w Version - 2.15.0
- 2.14.1
- 2.13.1
- 2.14.0
- 2.13.0
- 2.14.0.1
+ + 2.16.1
+ 2.15.0
+ 2.14.1
+ + 2.15.0.0
+ 2.14.0.1
2.13.0.0
- 2.2.1
- 2.1.0
- 2.0.1
- 2.2.0
- 2.1.100
- 2.0.100
- 1.17.1
- 1.16.3
- 1.15.1
+ + 2.3.0
+ 2.2.2
+ 2.1.1
+ + 2.3.0
+ 2.2.0
+ 2.1.100
+ + 1.18.0
+ 1.17.3
+ 1.16.3
- -> **Note:** -> Set the environment variable ``TF_ENABLE_ONEDNN_OPTS=1`` to enable oneDNN optimizations if you are using TensorFlow before v2.9. oneDNN is the default for TensorFlow since [v2.9](https://github.com/tensorflow/tensorflow/releases/tag/v2.9.0) ([Intel Cascade Lake](https://www.intel.com/content/www/us/en/products/platforms/details/cascade-lake.html) and newer CPUs). diff --git a/docs/source/llm_recipes.md b/docs/source/llm_recipes.md index 5f04242516b..8a9c17e7cd7 100644 --- a/docs/source/llm_recipes.md +++ b/docs/source/llm_recipes.md @@ -17,8 +17,8 @@ This document aims to publish the specific recipes we achieved for the popular L | EleutherAI/gpt-j-6b | ✔ | ✔ | ✔ | | facebook/opt-1.3b | ✔ | ✔ | ✔ | | facebook/opt-30b | ✔ | ✔ | ✔ | -| meta-llama/Llama-2-7b-hf | ✔ | ✔ | ✔ | -| meta-llama/Llama-2-13b-hf | ✔ | ✔ | ✔ | +| meta-llama/Llama-2-7b-hf | WIP | ✔ | ✔ | +| meta-llama/Llama-2-13b-hf | WIP | ✔ | ✔ | | meta-llama/Llama-2-70b-hf | ✔ | ✔ | ✔ | | tiiuae/falcon-7b | ✔ | ✔ | ✔ | | tiiuae/falcon-40b | ✔ | ✔ | ✔ | @@ -29,7 +29,7 @@ This document aims to publish the specific recipes we achieved for the popular L | databricks/dolly-v2-12b | ✖ | ✔ | ✖ | | EleutherAI/gpt-neox-20b | ✖ | ✔ | ✔ | | mistralai/Mistral-7B-v0.1 | ✖ | ✔ | ✔ | -| THUDM/chatglm2-6b | WIP | ✔ | ✔ | +| THUDM/chatglm2-6b | WIP | ✔ | WIP | | THUDM/chatglm3-6b | WIP | ✔ | ✔ | **Detail recipes can be found [HERE](https://github.com/intel/intel-extension-for-transformers/blob/main/examples/huggingface/pytorch/text-generation/quantization/llm_quantization_recipes.md).** @@ -40,8 +40,8 @@ This document aims to publish the specific recipes we achieved for the popular L > - The WIP recipes will be published soon. ## Large Language Models Accuracy - - + +
@@ -63,212 +63,210 @@ This document aims to publish the specific recipes we achieved for the popular L - - + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - + + + + - - - - + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - + + - - + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - + + - - - - + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - + + + + + + + + - -
Model lambada_openaiRatio ACC Ratio
baichuan-inc/Baichuan-13B-Chat 67.57%68.23%1.009867.57%1.000067.84%1.0040NANA67.86%1.004367.55%0.999767.46%0.9984N/AN/A
baichuan-inc/Baichuan2-13B-Chat 71.51%70.89%0.991371.53%1.000371.76%1.0035NANA75.51%1.055971.57%1.000871.45%0.999270.87%0.9911
baichuan-inc/Baichuan2-7B-Chat 67.67%67.96%1.004367.59%0.998867.24%0.993667.42%0.996367.51%0.997667.61%0.999168.08%1.006167.18%0.9928
bigscience/bloom-1b7 46.34%47.99%1.035646.38%1.000946.19%0.9968NANA47.97%1.035246.21%0.997247.00%1.0142N/AN/A
databricks/dolly-v2-12b 64.35%NANA64.10%0.9961NANANANAN/AN/A63.92%0.9933N/AN/AN/AN/A
EleutherAI/gpt-j-6b 68.31%68.33%1.000368.00%0.995568.27%0.9994 68.23% 0.998868.79%1.007068.43%1.001867.40%0.9867
EleutherAI/gpt-neox-20b 72.33%NANA72.25%0.998971.96%0.9949NANAN/AN/A72.29%0.999472.15%0.9975N/AN/A
facebook/opt-1.3b 57.89%57.54%0.994058.08%1.003358.57%1.0117NANA57.35%0.990758.12%1.004058.01%1.0021N/AN/A
facebook/opt-30b 71.49% 71.51% 1.000371.51%1.000371.53%1.0006 71.82% 1.004672.11%1.008771.43%0.9992
meta-llama/Llama-2-13b-hf 76.77%76.25%0.993276.75%0.999777.43%1.008676.75%0.9997N/AN/A76.89%1.001676.96%1.0025N/AN/A
meta-llama/Llama-2-70b-hf 79.64%79.55%0.998979.57%0.999180.09%1.005779.97%1.004179.53%0.998679.62%0.999780.05%1.0051N/AN/A
meta-llama/Llama-2-7b-hf 73.92%73.45%0.993673.96%1.000573.45%0.993673.49%0.9942N/AN/A73.90%0.999773.51%0.9945N/AN/A
mistralai/Mistral-7B-v0.1 75.90%NANAN/AN/A 75.80% 0.998776.13%1.003075.61%0.996275.37%0.993075.82%0.9989
THUDM/chatglm2-6b 53.23%NANA53.19%0.999252.77%0.991453.35%1.0023N/AN/A53.00%0.9957N/AN/AN/AN/A
THUDM/chatglm3-6b 59.09%NANA59.01%0.9986NANA58.61%0.9919N/AN/A59.03%0.9990N/AN/A58.59%0.9915
tiiuae/falcon-40b 77.22%77.04%0.997777.22%1.000077.94%1.009378.79%1.020377.26%1.000577.18%0.999577.97%1.0097N/AN/A
tiiuae/falcon-7b 74.67%76.44%1.023774.77%1.001375.00%1.0044NANA76.17%1.020174.73%1.000874.79%1.0016N/AN/A
+ diff --git a/docs/source/migration.md b/docs/source/migration.md index ba5f654e41c..b4d087c890f 100644 --- a/docs/source/migration.md +++ b/docs/source/migration.md @@ -195,7 +195,6 @@ QuantizationAwareTrainingConfig( objective="performance", # tuning.objective: same as in the conf.yaml; performance_only=False, # tuning.performance_only: same as in the conf.yaml; ## tuning.random_seed and tuning.tensorboard: these parameters do not need to specially be defined; - ## diagnosis: these parameters do not need to specially be defined; ) ``` diff --git a/docs/source/pythonic_style.md b/docs/source/pythonic_style.md deleted file mode 100644 index d036e9775d5..00000000000 --- a/docs/source/pythonic_style.md +++ /dev/null @@ -1,146 +0,0 @@ -Pythonic Style Access for Configurations -==== - -1. [Introduction](#introduction) -2. [Supported Feature Matrix](#supported-feature-matrix) -3. [Get Started with Pythonic API for Configurations](#get-started-with-pythonic-api-for-configurations) - -## Introduction -To meet the variety of needs arising from various circumstances, INC now provides a -pythonic style access - Pythonic API - for same purpose of either user or framework configurations. - -The Pythonic API for Configuration allows users to specify configurations -directly in their python codes without referring to -a separate YAML file. While we support both simultaneously, -the Pythonic API for Configurations has several advantages over YAML files, -which one can tell from usages in the context below. Hence, we recommend -users to use the Pythonic API for Configurations moving forward. - -## Supported Feature Matrix - -### Pythonic API for User Configurations -| Optimization Techniques | Pythonic API | -|-------------------------|:------------:| -| Quantization | ✔ | -| Pruning | ✔ | -| Distillation | ✔ | -| NAS | ✔ | -### Pythonic API for Framework Configurations - -| Framework | Pythonic API | -|------------|:------------:| -| TensorFlow | ✔ | -| PyTorch | ✔ | -| ONNX | ✔ | -| MXNet | ✔ | - -## Get Started with Pythonic API for Configurations - -### Pythonic API for User Configurations -Now, let's go through the Pythonic API for Configurations in the order of -sections similar as in user YAML files. - -#### Quantization - -To specify quantization configurations, users can use the following -Pythonic API step by step. - -* First, load the ***config*** module -```python -from neural_compressor import config -``` -* Next, assign values to the attributes of *config.quantization* to use specific configurations, and pass the config to *Quantization* API. 
-```python -config.quantization.inputs = ["image"] # list of str -config.quantization.outputs = ["out"] # list of str -config.quantization.backend = "onnxrt_integerops" # support tensorflow, tensorflow_itex, pytorch, pytorch_ipex, pytorch_fx, onnxrt_qlinearops, onnxrt_integerops, onnxrt_qdq, onnxrt_qoperator, mxnet -config.quantization.approach = "post_training_dynamic_quant" # support post_training_static_quant, post_training_dynamic_quant, quant_aware_training -config.quantization.device = "cpu" # support cpu, gpu -config.quantization.op_type_dict = {"Conv": {"weight": {"dtype": ["fp32"]}, "activation": {"dtype": ["fp32"]}}} # dict -config.quantization.strategy = "mse" # support basic, mse, bayesian, random, exhaustive -config.quantization.objective = "accuracy" # support performance, accuracy, modelsize, footprint -config.quantization.timeout = 100 # int, default is 0 -config.quantization.accuracy_criterion.relative = 0.5 # float, default is 0.01 -config.quantization.reduce_range = ( - False # bool. default value depends on hardware, True if cpu supports VNNI instruction, otherwise is False -) -config.quantization.use_bf16 = False # bool -from neural_compressor.experimental import Quantization - -quantizer = Quantization(config) -``` - -#### Distillation -To specify distillation configurations, users can assign values to -the corresponding attributes. -```python -from neural_compressor import config - -config.distillation.optimizer = {"SGD": {"learning_rate": 0.0001}} - -from neural_compressor.experimental import Distillation - -distiller = Distillation(config) -``` -#### Pruning -To specify pruning configurations, users can assign values to the corresponding attributes. -```python -from neural_compressor import config - -config.pruning.weight_compression.initial_sparsity = 0.0 -config.pruning.weight_compression.target_sparsity = 0.9 -config.pruning.weight_compression.max_sparsity_ratio_per_layer = 0.98 -config.pruning.weight_compression.prune_type = "basic_magnitude" -config.pruning.weight_compression.start_epoch = 0 -config.pruning.weight_compression.end_epoch = 3 -config.pruning.weight_compression.start_step = 0 -config.pruning.weight_compression.end_step = 0 -config.pruning.weight_compression.update_frequency = 1.0 -config.pruning.weight_compression.update_frequency_on_step = 1 -config.pruning.weight_compression.prune_domain = "global" -config.pruning.weight_compression.pattern = "tile_pattern_1x1" - -from neural_compressor.experimental import Pruning - -prune = Pruning(config) -``` -#### NAS -To specify nas configurations, users can assign values to the -corresponding attributes. - -```python -from neural_compressor import config - -config.nas.approach = "dynas" -from neural_compressor.experimental import NAS - -nas = NAS(config) -``` - - -#### Benchmark -To specify benchmark configurations, users can assign values to the -corresponding attributes. -```python -from neural_compressor import config - -config.benchmark.warmup = 10 -config.benchmark.iteration = 10 -config.benchmark.cores_per_instance = 10 -config.benchmark.num_of_instance = 10 -config.benchmark.inter_num_of_threads = 10 -config.benchmark.intra_num_of_threads = 10 - -from neural_compressor.experimental import Benchmark - -benchmark = Benchmark(config) -``` -### Pythonic API for Framework Configurations -Now, let's go through the Pythonic API for Configurations in setting up similar framework -capabilities as in YAML files. Users can specify a framework's (eg. 
ONNX Runtime) capability by -assigning values to corresponding attributes. - -```python -config.onnxruntime.precisions = ["int8", "uint8"] -config.onnxruntime.graph_optimization_level = "DISABLE_ALL" # only onnxruntime has graph_optimization_level attribute -``` diff --git a/docs/source/releases_info.md b/docs/source/releases_info.md index a2b2c15c6ca..7d263a0a8db 100644 --- a/docs/source/releases_info.md +++ b/docs/source/releases_info.md @@ -17,8 +17,6 @@ Contact [inc.maintainers@intel.com](mailto:inc.maintainers@intel.com) if you nee The MSE tuning strategy does not work with the PyTorch adaptor layer. This strategy requires a comparison between the FP32 and INT8 tensors to decide which op impacts the final quantization accuracy. The PyTorch adaptor layer does not implement this inspect tensor interface. Therefore, do not choose the MSE tuning strategy for PyTorch models. -The diagnosis function does not work with ONNX Runtime 1.13.1 for QDQ format quantization of ONNX models. It can not dump the output value of QDQ pairs since framework limitation. - ## Incompatible Changes [Neural Compressor v1.2](https://github.com/intel/neural-compressor/tree/v1.2) introduces incompatible changes in user facing APIs. Please refer to [incompatible changes](incompatible_changes.md) to know which incompatible changes are made in v1.2. diff --git a/docs/source/tensorboard.md b/docs/source/tensorboard.md deleted file mode 100644 index 670f7930417..00000000000 --- a/docs/source/tensorboard.md +++ /dev/null @@ -1,205 +0,0 @@ -TensorBoard -=========== -1. [Introduction](#introduction) -2. [Supported Feature Matrix](#supported-feature-matrix) -3. [Get Started with Tensorboard](#get-started-with-tensorboard) -4. [Examples](#examples) - -## Introduction - -TensorBoard is a suite of web applications that provide measurements and visualizations used to inspect and understand your machine learning workflow for [TensorFlow TensorBoard](https://github.com/tensorflow/tensorboard) and [PyTorch TensorBoard](https://github.com/pytorch/pytorch/tree/master/torch/utils/tensorboard). Intel® Neural Compressor performs accuracy-driven quantization; the tuning process quantizes the tensor and performs graph transformation and optimization to achieve optimal performance under accuracy requirement. If you want to observe the behaviors of the optimizations, or if you want to discover why an accuracy target cannot be met, TensorBoard can provide you with some valuable information. You can inspect the graph and tensor after each tuning run. If a model cannot meet accuracy requirements, you can analyze the comparison of FP32 and the INT8 tensor histogram. - -We collect the TensorBoard event summary during evaluation. The first time is on the baseline FP32 model and later on at the end of each tuning runs are based on the quantized model. The TensorBoard log directory is named baseline_acc_ and tune__acc_, to indicate the stage and accuracy of the data that is generated. Users can select their data of interest to observe with TensorBoard. - -## Supported Feature Matrix -| Optimized Framework | Tensorboard Support | -|---------------------|:-------------------:| -| PyTorch | ✔ | -| TensorFlow | ✔ | - -## Get Started with TensorBoard -### PyTorch TensorBoard - -PyTorch TensorBoard implementation includes three steps: - -* Before evaluation in the _pre_eval_hook() where instruments observers are placed in the model. -* During evaluation where observers collect tensor information in a dict data structure. 
-* After evaluation where the graph and tensor information is dumped with the TensorBoard summary writer in _post_eval_hook(). - - -The detailed algorithm can be described by the Pseudo code: -```python - -def evaluate(self, model, dataloader, postprocess=None, \ - metric=None, measurer=None, iteration=-1, tensorboard=False): -# The tensorboard summary is collected in the evaluation function of adaptor - - if tensorboard: - model = self._pre_eval_hook(model) - #evaluation code - .... - acc = metric.result() - if tensorboard: - self._post_eval_hook(model, accuracy=acc, input=input) - -def _pre_eval_hook(self, model): -# Insert observer submodule into each module in whitelist in order to collect tensor information - - class _RecordingObserver(ABC, torch.nn.Module): - # Define the Observer class - - def forward(self, x): - # Record the tensor information in a dict structure - self.output_tensors_dict[self.current_iter] = x.to("cpu") - - @torch.jit.export - def get_tensor_value(self): - return self.output_tensors_dict - - def _observer_forward_hook(module, input, output): - #Forward hook that calls observer on the output - return module.activation_post_process(output) - - def _add_observer_(module, op_list=None, prefix=""): - - #Add observer for each child module - for name, child in module.named_children(): - _add_observer_(child, op_list, op_name) - - if module is a leaf: - module.add_module( - 'activation_post_process', - module.qconfig.activation()) - module.register_forward_hook(_observer_forward_hook) - -def _post_eval_hook(self, model, **args): - # Dump tensor and graph information with TensorBoard summary writer - if self.dump_times == 0: - writer = SummaryWriter('runs/eval/baseline' + - '_acc' + str(accuracy), model) - else: - writer = SummaryWriter('runs/eval/tune_' + - str(self.dump_times) + - '_acc' + str(accuracy), model) - - if args is not None and 'input' in args and self.dump_times == 0: - writer.add_graph(model, args['input']) - - from torch.quantization import get_observer_dict - get_observer_dict(model, observer_dict) - for key in observer_dict: - ...... - op_name = key.strip(".activation_post_process") - summary[op_name + ".output"] = observer_dict[key].get_tensor_value() - - for iter in summary[op_name + ".output"]: - #Record output tensor, for fused op only record the parent op output - ...... - if summary[op_name + ".output"][iter].is_quantized: - writer.add_histogram( - op + "/Output/int8", - torch.dequantize(summary[op_name + - ".output"][iter])) - else: - writer.add_histogram( - op + "/Output/fp32", - summary[op_name + ".output"][iter]) - - state_dict = model.state_dict() - for key in state_dict: - # Record weight tensor, fused child tensorBoard tag will be merge - if state_dict[key].is_quantized: - writer.add_histogram(op + "/int8", - torch.dequantize(state_dict[key])) - else: - writer.add_histogram(op + "/fp32", state_dict[key]) - -``` - - -#### Usage - -1. Add "tensorboard: true" in the yaml file. -2. Run quantization tuning; a "./runs" folder is generated in the working folder. -3. Start tensorboard: - - ``shell - tensorboard --bind_all --logdir_spec baseline:./runs/eval/tune_0_acc0.80,tune_1:././runs/eval/tune_1_acc0.79 - `` - - -### TensorFlow Tensorboard - -TensorFlow TensorBoard implementation includes four steps: - -1. Before evaluation where we create the TensorBoard summary write and write graph, collect FP32 and node names for inspection, and dump the histogram of weights and bias tensor directly from graph_def. -2. 
Run get_tensor_by_name_with_import() where we get data output tensors. -3. Run session.run() to predict and get the inference result of the output tensor list collected in the previous step. -4. Enumerate the output tensor and write the histogram. - -See the [tensorflow.py](https://github.com/intel/neural-compressor/tree/master/neural_compressor/adaptor/tensorflow.py) evaluate() function for details. - -#### Usage - -1. Add "tensorboard: true" in the yaml file. - -2. Run quantization tuning; a "./runs" folder is generated in the working folder. For example: - - ```shell - ls ./runs/eval - baseline_acc_0.776 tune_1_acc_0.095 - ``` - The baseline_acc_0.776 folder contains the FP32 event log and 0.776 is the FP32 accuracy. tune_1_acc_0.095 contains the evaluation event log of the first run of tuning. - -3. Start tensorboard: - - ```shell - tensorboard --bind_all --logdir_spec baseline:./runs_v3/eval/baseline_acc_0.776/,tune_1:./runs_v3/eval/tune_1_acc_0.095/ - ``` -## Examples - -### PyTorch Examples - -```shell - examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/run_tuning_dump_tensor.sh -``` - -### TensorFlow Examples - -1. Add "tensorboard: true" into examples/tensorflow/image_recognition/inceptionv3.yaml. In order to demonstrate the usage of TensorBoard, remove the following lines which are added to skip the quantization of 'v0/cg/conv0/conv2d/Conv2D' to avoid a known limitation. - - ```yaml - op_wise: { - 'v0/cg/conv0/conv2d/Conv2D': { - 'activation': {'dtype': ['fp32']}, - } - } - ``` - -2. Run tuning: - - ```shell - bash run_quant.sh --topology=inception_v3 --dataset_location= \ - --input_model=./inceptionv3_fp32_pretrained_model.pb --output_model=./nc_inceptionv3.pb --config=./inceptionv3_dump_tensor.yaml - ``` - -3. Start TensorBoard - - ```shell - tensorboard --bind_all --logdir_spec baseline:./runs_v3/eval/baseline_acc_0.776/,tune_1:./runs_v3/eval/tune_1_acc_0.095/ - ``` - -4. In order to find the reason why tune_1 got such poor accuracy, we can observe the TensorBoard. - -* From the **GRAPHS** tab, select "baseline/." in the "Run" box and find the first 'Conv2d' op after 'input' op. The op name is "v0/cg/conv0/Relu": - -![TensorBoard Baseline](./imgs/tensorboard_baseline_v0_cg_conv0.png "TensorBoard Baseline") - -* From the **GRAPHS** tab, select "tune_1/." in the "Run" box and find the first 'Conv2d' op after 'input' op. The tensor name is 'v0/cg/conv0/conv2d/Conv2D_eightbit_requantize': - -![TensorBoard Tuning](./imgs/tensorboard_tune_1_v0_cg_conv0.png "TensorBoard Tuning") - - -* Switch to the **HISTOGRAMS** tab. Click the 'v0/cg/conv0' op name in the search box. TensorBoard groups the tensors with the same op name together so you can compare the tensor of baseline 'v0/cg/conv0/Relu' with the tensor of tune_1 'v0/cg/conv0/conv2d/Conv2D_eightbit_requantize_int8.output'. Note that the tensor name can be changed after quantization, so group the tensor by op name and compare. From the chart below, we can see that the histogram of the first conv2d output tensor are different. This is due to a known TensorFlow issue. After filtering the 'v0/cg/conv0/conv2d/Conv2D' op by adding "op_wise" in the yaml file, the issue disappears. 
- -![TensorBoard Histogram](./imgs/tensorboard_v0_cg_conv0_histogram.png "TensorBoard Histogram") diff --git a/docs/source/user_guide.md b/docs/source/user_guide.md deleted file mode 100644 index 662a2ec177c..00000000000 --- a/docs/source/user_guide.md +++ /dev/null @@ -1,101 +0,0 @@ -User Guide -=========================== - -Intel® Neural Compressor aims to provide popular model compression techniques such as quantization, pruning (sparsity), distillation, and neural architecture search to help the user optimize their model. The below documents could help you to get familiar with concepts and modules in Intel® Neural Compressor. Learn how to utilize the APIs in Intel® Neural Compressor to conduct quantization, pruning (sparsity), distillation, and neural architecture search on mainstream frameworks. - -## Overview -This part helps user to get a quick understand about design structure and workflow of Intel® Neural Compressor. We provided broad examples to help users get started. - - - - - - - - - - - - - - -
ArchitectureWorkflowAPIs
NotebookExamplesResultsIntel oneAPI AI Analytics Toolkit
- -## Python-based APIs -Python-based APIs contains more details about the functional APIs in Intel® Neural Compressor, -which introduce the mechanism of each function and provides a tutorial to help the user apply in their own cases. -Please note that we will stop to support Intel Neural Compressor 1.X API in the future. -So we provide a comprehensive migration document in Code Migration to help the user update their code from previous 1.X version to the new 2.X version. -In 2.X API, it's very important to create the `DataLoader` and `Metrics` for your examples, so we provide the detail introductions. - - - - - - - - - - - - - - - - - - - - - - - -
QuantizationAdvanced Mixed PrecisionPruning (Sparsity)Distillation
OrchestrationBenchmarkingDistributed CompressionModel Export
Code Migration from Intel® Neural Compressor 1.X to Intel® Neural Compressor 2.X
DataLoaderMetric
- -## Neural Coder (Zero-code Optimization) -Neural Coder shows our special innovation about zero-code optimization to help user quickly apply Intel® Neural Compressor optimization without coding. - - - - - - - - - -
LauncherJupyterLab ExtensionVisual Studio Code ExtensionSupported Matrix
- -## Advanced Topics -This part provides the advanced topics that help user dive deep into Intel® Neural Compressor. - - - - - - - - - - - - - - - - - - - - -
AdaptorStrategyObjectiveCalibration
DiagnosisAdd New Data TypeAdd New Adaptor
Distillation for QuantizationSmoothQuantWeight-Only QuantizationLayer-Wise Quantization
- -## Innovations for Productivity -We are continue creating some user-friendly applications to improve the productivity. From v2.2 we have `Neural Solution` for distributed quantization and `Neural Insights` for quantization accuracy debugging. - - - - - - - -
Neural SolutionNeural Insights
diff --git a/docs/source/user_yaml.md b/docs/source/user_yaml.md deleted file mode 100644 index 14d4157ab78..00000000000 --- a/docs/source/user_yaml.md +++ /dev/null @@ -1,166 +0,0 @@ -User YAML Configuration Files -===== -1. [Introduction](#introduction) -2. [Supported Feature Matrix](#supported-feature-matrix) -3. [Get Started with User YAML Files](#get-started-with-user-yaml-files) - - -## Introduction - -Intel® Neural Compressor uses YAML files for quick -and user-friendly configurations. There are two types of YAML files - -user YAML files and framework YAML files, which are used in -running user cases and setting up framework capabilities, respectively. - -First, let's take a look at a user YAML file, It defines the model, tuning -strategies, tuning calibrations and evaluations, and performance benchmarking -of the passing model vs. original model. - -## Supported Feature Matrix - -| Optimization Techniques | YAML Configuration Files | -|-------------------------|:------------------------:| -| Quantization | ✔ | -| Pruning | ✔ | -| Distillation | ✔ | - - -## Get started with User YAML Files - - -A complete user YAML file is organized logically into several sections: - -* ***model***: The model specifications define a user model's name, inputs, outputs and framework. - - -```yaml -model: # mandatory. used to specify model specific information. - name: mobilenet_v1 - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor # optional. inputs field is only required in tensorflow. - outputs: num_detections,detection_boxes,detection_scores,detection_classes # optional. outputs field is only required in tensorflow. -``` -* ***quantization***: The quantization specifications define quantization tuning space and related calibrations. To calibrate, users can -specify *sampling_size* (optional) and use the subsection *dataloader* to specify -the dataset location using *root* and transformation using *transform*. To -implement tuning space constraints, users can use the subsection *model_wise* and *op_wise* for specific configurations. - -```yaml -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 20 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: - dataset: - ImageRecord: - root: /path/to/imagenet/ # NOTE: modify to calibration dataset location if needed - transform: - BilinearImagenet: - height: 224 - width: 224 - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - weight: - granularity: per_channel - scheme: asym - dtype: int8 - algorithm: minmax - activation: - granularity: per_tensor - scheme: asym - dtype: int8, fp32 - algorithm: minmax, kl - op_wise: { # optional. tuning constraints on op-wise for advance user to reduce tuning space. - 'conv1': { - 'activation': {'dtype': ['uint8', 'fp32'], - 'algorithm': ['minmax', 'kl'], - 'scheme':['sym']}, - 'weight': {'dtype': ['int8', 'fp32'], - 'algorithm': ['minmax']} - } - } -``` - -* ***pruning***: The pruning specifications define pruning tuning space. To define the training behavior, uses can -use the subsection *train* to specify the training hyper-parameters and the training dataloader. 
-To define the pruning approach, users can use the subsection *approach* to specify -pruning target, choose the type of pruning algorithm, and the way to apply it -during training process. - -```yaml -pruning: - train: - dataloader: - ... - epoch: 40 - optimizer: - Adam: - learning_rate: 1e-06 - beta_1: 0.9 - beta_2: 0.999 - epsilon: 1e-07 - criterion: - SparseCategoricalCrossentropy: - reduction: sum_over_batch_size - from_logits: False - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.54 - start_epoch: 0 - end_epoch: 19 - pruners: - - !Pruner - start_epoch: 0 - end_epoch: 19 - prune_type: basic_magnitude -``` -* ***distillation***: The distillation specifications define distillation's tuning -space. Similar to pruning, to define the training behavior, users can use the -subsection *train* to specify the training hyper-parameters and the training -dataloader and it is optional if users implement *train_func* and set the attribute -of distillation instance to *train_func*. For criterion, Intel® Neural Compressor provides a built-in -knowledge distillation loss class to calculate distillation loss. -```yaml -distillation: - train: - start_epoch: 0 - end_epoch: 90 - iteration: 1000 - frequency: 1 - dataloader: - ... - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - KnowledgeDistillationLoss: - temperature: 1.0 - loss_types: ['CE', 'CE'] - loss_weights: [0.5, 0.5] -``` -* ***evaluation***: The evaluation specifications define the dataloader and metric for accuracy evaluation as well as dataloader -and configurations for performance benchmarking. -```yaml -evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - accuracy: - metric: - ... - dataloader: - ... -``` -* ***tuning***: The tuning specifications define overall tuning targets. Users can -use *accuracy_criterion* to specify the target of accuracy loss percentage and use -*exit_policy* to specify the tuning timeout in seconds. The random -seed can be specified using *random_seed*. 
- -```yaml -tuning: - accuracy_criterion: - relative: 0.01 # the tuning target of accuracy loss percentage: 1% - higher_is_better: True - exit_policy: - timeout: 0 # tuning timeout (seconds), 0 means early stop - random_seed: 9527 # random seed -``` diff --git a/examples/.config/model_params_keras_3x.json b/examples/.config/model_params_keras_3x.json new file mode 100644 index 00000000000..bac8a06b4a3 --- /dev/null +++ b/examples/.config/model_params_keras_3x.json @@ -0,0 +1,18 @@ +{ + "keras": { + "resnetv2_50": { + "model_src_dir": "keras/image_recognition/resnet_v2_50/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset2/models/tensorflow/resnetv2_50_keras/saved_model", + "main_script": "main.py", + "batch_size": 32 + }, + "inception_v3": { + "model_src_dir": "keras/image_recognition/inception_v3/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset2/models/tensorflow/inception_v3_keras/saved_model", + "main_script": "main.py", + "batch_size": 32 + } + } +} diff --git a/examples/.config/model_params_pytorch_3x.json b/examples/.config/model_params_pytorch_3x.json new file mode 100644 index 00000000000..c3ae3f6b5be --- /dev/null +++ b/examples/.config/model_params_pytorch_3x.json @@ -0,0 +1,172 @@ +{ + "pytorch": { + "opt_125m_woq_gptq_int4":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "opt_125m_woq_gptq_int4_dq_bnb":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "opt_125m_woq_gptq_int4_dq_ggml":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "llama2_7b_gptq_int4":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "llama2_7b_gptq_int4_dq_bnb":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "llama2_7b_gptq_int4_dq_ggml":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_woq_rtn_int4":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_woq_rtn_int4_dq_bnb":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_woq_rtn_int4_dq_ggml":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_woq_gptq_int4":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + 
"dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_woq_gptq_int4_dq_bnb":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_woq_gptq_int4_dq_ggml":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/weight_only", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "gpt_j_ipex":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/static_quant/ipex", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "gpt_j_ipex_sq":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/smooth_quant", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "llama2_7b_ipex":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/static_quant/ipex", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "llama2_7b_ipex_sq":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/smooth_quant", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "opt_125m_ipex":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/static_quant/ipex", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "opt_125m_ipex_sq":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/smooth_quant", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 8 + }, + "dlrm_ipex": { + "model_src_dir": "recommendation/dlrm/static_quant/ipex", + "dataset_location": "/mnt/local_disk3/dataset/dlrm/dlrm/input", + "input_model": "/mnt/local_disk3/dataset/dlrm/dlrm/dlrm_weight/tb00_40M.pt", + "main_script": "dlrm_s_pytorch.py", + "batch_size": 16384 + }, + "resnet18_pt2e_static":{ + "model_src_dir": "cv/static_quant", + "dataset_location": "/tf_dataset/pytorch/ImageNet/raw", + "input_model": "", + "main_script": "main.py", + "batch_size": 1 + }, + "resnet18_fp8_static":{ + "model_src_dir": "cv/fp8_quant", + "dataset_location": "/tf_dataset/pytorch/ImageNet/raw", + "input_model": "", + "main_script": "main.py", + "batch_size": 1 + }, + "opt_125m_pt2e_static":{ + "model_src_dir": "nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e", + "dataset_location": "", + "input_model": "", + "main_script": "run_clm_no_trainer.py", + "batch_size": 1 + }, + "sdxl_ipex_sq":{ + "model_src_dir": "diffusion_model/diffusers/stable_diffusion/smooth_quant", + "dataset_location": "", + "input_model": "", + "main_script": "main.py", + "batch_size": 1 + }, + "resnet18_mixed_precision": { + "model_src_dir": "cv/mixed_precision", + "dataset_location": "/tf_dataset/pytorch/ImageNet/raw", + "input_model": "resnet18", + "main_script": "main.py", + "batch_size": 20 + } + } +} diff --git a/examples/.config/model_params_tensorflow_3x.json b/examples/.config/model_params_tensorflow_3x.json new file mode 100644 index 00000000000..e2a052656f8 --- /dev/null +++ b/examples/.config/model_params_tensorflow_3x.json @@ -0,0 +1,147 @@ +{ + "tensorflow": { + "bert_large_squad_model_zoo": { + 
"model_src_dir": "nlp/bert_large_squad_model_zoo/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/bert/data", + "input_model": "/tf_dataset/tensorflow/bert/fp32_bert_squad.pb", + "main_script": "main.py", + "batch_size": 64, + "fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_7_0/fp32_bert_squad.pb" + }, + "opt_125m_sq": { + "model_src_dir": "nlp/large_language_models/quantization/ptq/smoothquant", + "dataset_location": "", + "input_model": "facebook/opt-125m", + "main_script": "main.py", + "batch_size": 16 + }, + "gpt2_medium_sq": { + "model_src_dir": "nlp/large_language_models/quantization/ptq/smoothquant", + "dataset_location": "", + "input_model": "gpt2-medium", + "main_script": "main.py", + "batch_size": 16 + }, + "gpt-j-6B": { + "model_src_dir": "nlp/large_language_models/quantization/ptq/gpt-j", + "dataset_location": "", + "input_model": "/tf_dataset2/models/tensorflow/gpt-j-6B", + "main_script": "main.py", + "batch_size": 1 + }, + "transformer_lt": { + "model_src_dir": "nlp/transformer_lt/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/transformer-lt-official-fp32-inference/transformer_lt_official_fp32_pretrained_model/data", + "input_model": "/tf_dataset/tensorflow/transformer-lt-official-fp32-inference/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb", + "main_script": "main.py", + "batch_size": 64 + }, + "inception_v3": { + "model_src_dir": "image_recognition/inception_v3/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset/pre-trained-models/inceptionv3/fp32/freezed_inceptionv3.pb", + "main_script": "main.py", + "batch_size": 32, + "fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/inceptionv3_fp32_pretrained_model.pb" + }, + "mobilenetv2": { + "model_src_dir": "image_recognition/mobilenet_v2/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_mobilenet_v2.pb", + "main_script": "main.py", + "batch_size": 32 + }, + "resnetv2_50": { + "model_src_dir": "image_recognition/resnet_v2_50/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_resnet_v2_50.pb", + "main_script": "main.py", + "batch_size": 32 + }, + "vgg16": { + "model_src_dir": "image_recognition/vgg16/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_vgg16.pb", + "main_script": "main.py", + "batch_size": 32 + }, + "ViT": { + "model_src_dir": "image_recognition/vision_transformer/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/imagenet", + "input_model": "/tf_dataset/tensorflow/vit/HF-ViT-Base16-Img224-frozen.pb", + "main_script": "main.py", + "batch_size": 32 + }, + "GraphSage": { + "model_src_dir": "graph_networks/graphsage/quantization/ptq", + "dataset_location": "/tf_dataset/dataset/ppi", + "input_model": "/tf_dataset/tensorflow/graphsage/graphsage_frozen_model.pb", + "main_script": "main.py", + "batch_size": 1000 + }, + "yolo_v5": { + "model_src_dir": "object_detection/yolo_v5/quantization/ptq", + "dataset_location": "/tf_dataset2/datasets/coco_yolov5/coco", + "input_model": "/tf_dataset2/models/tensorflow/yolo_v5/yolov5s.pb", + "main_script": "main.py", + "batch_size": 1 + }, + "faster_rcnn_resnet50": { + "model_src_dir": 
"object_detection/faster_rcnn_resnet50/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/coco_val.record", + "input_model": "/tf_dataset/pre-train-model-oob/object_detection/faster_rcnn_resnet50/frozen_inference_graph.pb", + "main_script": "main.py", + "batch_size": 10 + }, + "mask_rcnn_inception_v2": { + "model_src_dir": "object_detection/mask_rcnn_inception_v2/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/coco_val.record", + "input_model": "/tf_dataset/pre-train-model-oob/object_detection/mask_rcnn_inception_v2/frozen_inference_graph.pb", + "main_script": "main.py", + "batch_size": 10 + }, + "mask_rcnn_inception_v2_ckpt": { + "model_src_dir": "object_detection/mask_rcnn_inception_v2/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/coco_val.record", + "input_model": "/tf_dataset/pre-train-model-oob/object_detection/mask_rcnn_inception_v2", + "main_script": "main.py", + "batch_size": 10 + }, + "ssd_mobilenet_v1": { + "model_src_dir": "object_detection/ssd_mobilenet_v1/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/coco_val.record", + "input_model": "/tf_dataset/pre-train-model-oob/object_detection/ssd_mobilenet_v1/frozen_inference_graph.pb", + "main_script": "main.py", + "batch_size": 10 + }, + "ssd_mobilenet_v1_ckpt": { + "model_src_dir": "object_detection/ssd_mobilenet_v1/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/coco_val.record", + "input_model": "/tf_dataset/pre-train-model-oob/object_detection/ssd_mobilenet_v1", + "main_script": "main.py", + "batch_size": 10 + }, + "wide_deep_large_ds": { + "model_src_dir": "recommendation/wide_deep_large_ds/quantization/ptq", + "dataset_location": "/tf_dataset/tensorflow/wide_deep_large_ds/dataset", + "input_model": "/tf_dataset/tensorflow/wide_deep_large_ds/fp32_optimized_graph.pb", + "main_script": "main.py", + "batch_size": 256, + "fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/wide_deep_fp32_pretrained_model.pb" + }, + "3dunet-mlperf": { + "model_src_dir": "semantic_image_segmentation/3dunet-mlperf/quantization/ptq", + "dataset_location": "/tf_dataset2/models/tensorflow/3dunet/build", + "input_model": "/tf_dataset2/models/tensorflow/3dunet/3dunet_dynamic_ndhwc.pb", + "main_script": "main.py", + "batch_size": 100 + }, + "style_transfer": { + "model_src_dir": "style_transfer/arbitrary_style_transfer/quantization/ptq", + "dataset_location": "style_images,content_images", + "input_model": "/tf_dataset/tensorflow/style_transfer/arbitrary_style_transfer/model.ckpt", + "main_script": "main.py", + "batch_size": 1 + } + } +} \ No newline at end of file diff --git a/examples/3.x_api/README.md b/examples/3.x_api/README.md new file mode 100644 index 00000000000..fd79f210533 --- /dev/null +++ b/examples/3.x_api/README.md @@ -0,0 +1,169 @@ +# Examples + +Intel® Neural Compressor validated examples with multiple compression techniques, including quantization, pruning, knowledge distillation and orchestration. Part of the validated cases can be found in the example tables, and the release data is available [here](../docs/source/validated_model_list.md). + + +# PyTorch Examples + +## Quantization + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Model | Domain | Method | Examples
gpt_j | Natural Language Processing | Weight-Only Quantization | link
gpt_j | Natural Language Processing | Static Quantization (IPEX) | link
llama2_7b | Natural Language Processing | Weight-Only Quantization | link
llama2_7b | Natural Language Processing | Static Quantization (IPEX) | link
opt_125m | Natural Language Processing | Static Quantization (IPEX) | link
opt_125m | Natural Language Processing | Static Quantization (PT2E) | link
opt_125m | Natural Language Processing | Weight-Only Quantization | link
resnet18 | Image Recognition | Mixed Precision | link
resnet18 | Image Recognition | Static Quantization | link
+ + +# TensorFlow Examples + +## Quantization + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Model | Domain | Method | Examples
bert_large_squad_model_zoo | Natural Language Processing | Post-Training Static Quantization | link
transformer_lt | Natural Language Processing | Post-Training Static Quantization | link
inception_v3 | Image Recognition | Post-Training Static Quantization | link
mobilenetv2 | Image Recognition | Post-Training Static Quantization | link
resnetv2_50 | Image Recognition | Post-Training Static Quantization | link
vgg16 | Image Recognition | Post-Training Static Quantization | link
ViT | Image Recognition | Post-Training Static Quantization | link
GraphSage | Graph Networks | Post-Training Static Quantization | link
yolo_v5 | Object Detection | Post-Training Static Quantization | link
faster_rcnn_resnet50 | Object Detection | Post-Training Static Quantization | link
mask_rcnn_inception_v2 | Object Detection | Post-Training Static Quantization | link
ssd_mobilenet_v1 | Object Detection | Post-Training Static Quantization | link
wide_deep_large_ds | Recommendation | Post-Training Static Quantization | link
3dunet-mlperf | Semantic Image Segmentation | Post-Training Static Quantization | link
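Every TensorFlow entry in the table above uses post-training static quantization. Below is a minimal sketch of that flow with the 3.X TensorFlow extension API; it is an assumption-laden illustration, not the examples' actual scripts: the calibration dataloader and the Keras model are stand-ins, and exact signatures may differ between releases.

```python
# Hedged sketch: post-training static quantization via the 3.X TensorFlow extension API.
# DummyCalibLoader is a stand-in; the real examples calibrate on ImageNet/COCO-style datasets.
import tensorflow as tf
from neural_compressor.tensorflow import StaticQuantConfig, quantize_model


class DummyCalibLoader:
    batch_size = 1  # the calibration loop reads this attribute

    def __iter__(self):
        for _ in range(10):  # a few random batches just to exercise calibration
            yield tf.random.uniform((1, 224, 224, 3)), tf.constant([0])


model = tf.keras.applications.ResNet50(weights=None)  # placeholder model for illustration
quant_config = StaticQuantConfig()
q_model = quantize_model(model, quant_config, DummyCalibLoader())
```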
+ diff --git a/examples/3.x_api/pytorch/cv/fp8_quant/README.md b/examples/3.x_api/pytorch/cv/fp8_quant/README.md new file mode 100644 index 00000000000..72b8eb282b5 --- /dev/null +++ b/examples/3.x_api/pytorch/cv/fp8_quant/README.md @@ -0,0 +1,28 @@ +# ImageNet FP8 Quantization + +This implements FP8 quantization of popular model architectures, such as ResNet on the ImageNet dataset, which is supported by Intel Gaudi2 AI Accelerator. + +## Requirements + +To try on Intel Gaudi2, docker image with Gaudi Software Stack is recommended, please refer to following script for environment setup. More details can be found in [Gaudi Guide](https://docs.habana.ai/en/latest/Installation_Guide/Bare_Metal_Fresh_OS.html#launch-docker-image-that-was-built). +```bash +# Run a container with an interactive shell +docker run -it --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --ipc=host vault.habana.ai/gaudi-docker/1.17.0/ubuntu22.04/habanalabs/pytorch-installer-2.3.1:latest +``` + +- Install requirements +- `pip install -r requirements.txt` +- Download the ImageNet dataset from http://www.image-net.org/ + - Then, move and extract the training and validation images to labeled subfolders, using [the following shell script](extract_ILSVRC.sh) + +## Quantizaiton + +To quant a model and validate accaracy, run `main.py` with the desired model architecture and the path to the ImageNet dataset: + +```bash +python main.py --pretrained -t -a resnet50 -b 30 /path/to/imagenet +``` +or +```bash +bash run_quant.sh --input_model=resnet50 --dataset_location=/path/to/imagenet +``` diff --git a/examples/3.x_api/pytorch/cv/fp8_quant/extract_ILSVRC.sh b/examples/3.x_api/pytorch/cv/fp8_quant/extract_ILSVRC.sh new file mode 100644 index 00000000000..3ec05e8f328 --- /dev/null +++ b/examples/3.x_api/pytorch/cv/fp8_quant/extract_ILSVRC.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# +# script to extract ImageNet dataset +# ILSVRC2012_img_train.tar (about 138 GB) +# ILSVRC2012_img_val.tar (about 6.3 GB) +# make sure ILSVRC2012_img_train.tar & ILSVRC2012_img_val.tar in your current directory +# +# Adapted from: +# https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md +# https://gist.github.com/BIGBALLON/8a71d225eff18d88e469e6ea9b39cef4 +# +# imagenet/train/ +# ├── n01440764 +# │ ├── n01440764_10026.JPEG +# │ ├── n01440764_10027.JPEG +# │ ├── ...... +# ├── ...... +# imagenet/val/ +# ├── n01440764 +# │ ├── ILSVRC2012_val_00000293.JPEG +# │ ├── ILSVRC2012_val_00002138.JPEG +# │ ├── ...... +# ├── ...... +# +# +# Make imagnet directory +# +mkdir imagenet +# +# Extract the training data: +# +# Create train directory; move .tar file; change directory +mkdir imagenet/train && mv ILSVRC2012_img_train.tar imagenet/train/ && cd imagenet/train +# Extract training set; remove compressed file +tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar +# +# At this stage imagenet/train will contain 1000 compressed .tar files, one for each category +# +# For each .tar file: +# 1. create directory with same name as .tar file +# 2. extract and copy contents of .tar file into directory +# 3. remove .tar file +find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done +# +# This results in a training directory like so: +# +# imagenet/train/ +# ├── n01440764 +# │ ├── n01440764_10026.JPEG +# │ ├── n01440764_10027.JPEG +# │ ├── ...... +# ├── ...... +# +# Change back to original directory +cd ../.. 
+# +# Extract the validation data and move images to subfolders: +# +# Create validation directory; move .tar file; change directory; extract validation .tar; remove compressed file +mkdir imagenet/val && mv ILSVRC2012_img_val.tar imagenet/val/ && cd imagenet/val && tar -xvf ILSVRC2012_img_val.tar && rm -f ILSVRC2012_img_val.tar +# get script from soumith and run; this script creates all class directories and moves images into corresponding directories +wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash +# +# This results in a validation directory like so: +# +# imagenet/val/ +# ├── n01440764 +# │ ├── ILSVRC2012_val_00000293.JPEG +# │ ├── ILSVRC2012_val_00002138.JPEG +# │ ├── ...... +# ├── ...... +# +# +# Check total files after extract +# +# $ find train/ -name "*.JPEG" | wc -l +# 1281167 +# $ find val/ -name "*.JPEG" | wc -l +# 50000 +# \ No newline at end of file diff --git a/examples/3.x_api/pytorch/cv/fp8_quant/main.py b/examples/3.x_api/pytorch/cv/fp8_quant/main.py new file mode 100644 index 00000000000..dfa7515343c --- /dev/null +++ b/examples/3.x_api/pytorch/cv/fp8_quant/main.py @@ -0,0 +1,391 @@ +import argparse +import os +import random +import shutil +import time +import warnings +import sys + +import torch +import torch.nn as nn +import torch.nn.parallel +import torch.distributed as dist +import torch.optim +import torch.multiprocessing as mp +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms as transforms +import torchvision.datasets as datasets +import torchvision.models as models +from neural_compressor.torch.quantization import ( + FP8Config, + prepare, + convert, +) +import habana_frameworks.torch.core as htcore + + +model_names = models.list_models(module=models) + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet18)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 4)') +parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', + help='mini-batch size (default: 256), this is the total ' + 'batch size of all GPUs on the current node when ' + 'using Data Parallel or Distributed Data Parallel') +parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, + metavar='LR', help='initial learning rate', dest='lr') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') +parser.add_argument('-p', '--print-freq', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', + help='evaluate model on validation set') +parser.add_argument('-t', '--tune', dest='tune', action='store_true', + help='tune 
best int8 model on calibration dataset') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--world-size', default=-1, type=int, + help='number of nodes for distributed training') +parser.add_argument('--rank', default=-1, type=int, + help='node rank for distributed training') +parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, + help='url used to set up distributed training') +parser.add_argument('--dist-backend', default='nccl', type=str, + help='distributed backend') +parser.add_argument('--seed', default=None, type=int, + help='seed for initializing training. ') +parser.add_argument('--gpu', default=None, type=int, + help='GPU id to use.') +parser.add_argument('--ppn', default=1, type=int, + help='number of processes on each node of distributed training') +parser.add_argument('--multiprocessing-distributed', action='store_true', + help='Use multi-processing distributed training to launch ' + 'N processes per node, which has N GPUs. This is the ' + 'fastest way to use PyTorch for either single node or ' + 'multi node data parallel training') +parser.add_argument("--calib_iters", default=10, type=int, + help="For calibration only.") +parser.add_argument('-i', "--iter", default=0, type=int, + help='For accuracy measurement only.') +parser.add_argument('-w', "--warmup_iter", default=5, type=int, + help='For benchmark measurement only.') +parser.add_argument('--performance', dest='performance', action='store_true', + help='run benchmark') +parser.add_argument('-r', "--accuracy", dest='accuracy', action='store_true', + help='For accuracy measurement only.') +parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', + help='path to checkpoint tuned by Neural Compressor (default: ./)') +parser.add_argument('--int8', dest='int8', action='store_true', + help='run benchmark') +parser.add_argument('--device', default='hpu', type=str, + help='use hpu device for fp8 quantization') + +best_acc1 = 0 + + +def main(): + args = parser.parse_args() + + if 'mobilenet' in args.arch: + import torchvision.models.quantization as models + else: + import torchvision.models as models + + if args.seed is not None: + random.seed(args.seed) + torch.manual_seed(args.seed) + + if args.pretrained: + print("=> using pre-trained model '{}'".format(args.arch)) + model = models.__dict__[args.arch](pretrained=True) + else: + print("=> creating model '{}'".format(args.arch)) + model = models.__dict__[args.arch]() + + # define loss function (criterion) and optimizer + criterion = nn.CrossEntropyLoss() + + optimizer = torch.optim.SGD(model.parameters(), args.lr, + momentum=args.momentum, + weight_decay=args.weight_decay) + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + checkpoint = torch.load(args.resume) + args.start_epoch = checkpoint['epoch'] + best_acc1 = checkpoint['best_acc1'] + if args.gpu is not None: + # best_acc1 may be from a checkpoint from a different GPU + best_acc1 = best_acc1.to(args.gpu) + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + 
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_dataset = datasets.ImageFolder( + traindir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])) + + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=True, + num_workers=args.workers, pin_memory=True, sampler=None) + + val_dataset = datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])) + + val_loader = torch.utils.data.DataLoader( + val_dataset, + batch_size=args.batch_size, shuffle=False, + num_workers=args.workers, pin_memory=True) + + if args.evaluate: + validate(val_loader, model, criterion, args) + return + + def eval_func(model): + accu = validate(val_loader, model, criterion, args) + return float(accu) + + if args.tune: + qconfig = FP8Config(fp8_config="E4M3") + model = prepare(model, qconfig) + + # Calibrate + # model is moved to HPU device automatically after preparing + with torch.no_grad(): + for i, (images, target) in enumerate(train_loader): + print("Calibrating batch:", i) + if i == args.calib_iters: + break + images = images.to(args.device) + model(images) + htcore.mark_step() + + model = convert(model) + eval_func(model) + # The saving and loading of fp8 quantization are planned in the next release. + + if args.performance or args.accuracy: + model.eval() + if args.int8: + from neural_compressor.utils.pytorch import load + new_model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), + model, + dataloader=val_loader) + else: + new_model = model + if args.performance: + from neural_compressor.config import BenchmarkConfig + from neural_compressor import benchmark + b_conf = BenchmarkConfig(warmup=5, + iteration=args.iter, + cores_per_instance=4, + num_of_instance=1) + benchmark.fit(new_model, b_conf, b_dataloader=val_loader) + if args.accuracy: + validate(val_loader, new_model, criterion, args) + return + + +def train(train_loader, model, criterion, optimizer, epoch, args): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1, + top5, prefix="Epoch: [{}]".format(epoch)) + + # switch to train mode + model.train() + + end = time.time() + for i, (input, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.print(i) + + +def validate(val_loader, model, criterion, args): + batch_time = AverageMeter('Time', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = 
AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, + prefix='Test: ') + + # switch to evaluate mode + model.eval() + + with torch.no_grad(): + for i, (input, target) in enumerate(val_loader): + if i >= args.warmup_iter: + start = time.time() + input = input.to(args.device) + target = target.to(args.device) + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # measure elapsed time + if i >= args.warmup_iter: + batch_time.update(time.time() - start) + + if i % args.print_freq == 0: + progress.print(i) + + if args.iter > 0 and i >= (args.warmup_iter + args.iter - 1): + break + + print('Batch size = %d' % args.batch_size) + print('Accuracy: {top1:.5f} Accuracy@5 {top5:.5f}' + .format(top1=(top1.avg / 100), top5=(top5.avg / 100))) + + return top1.avg/100 + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 'model_best.pth.tar') + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +class ProgressMeter(object): + def __init__(self, num_batches, *meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def print(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print('\t'.join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = '{:' + str(num_digits) + 'd}' + return '[' + fmt + '/' + fmt.format(num_batches) + ']' + + +def adjust_learning_rate(optimizer, epoch, args): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = args.lr * (0.1 ** (epoch // 30)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + main() diff --git a/examples/3.x_api/pytorch/cv/fp8_quant/requirements.txt b/examples/3.x_api/pytorch/cv/fp8_quant/requirements.txt new file mode 100644 index 00000000000..ebd3df6ae7a --- /dev/null +++ b/examples/3.x_api/pytorch/cv/fp8_quant/requirements.txt @@ -0,0 +1,3 @@ +torch +torchvision +neural-compressor \ No newline at end of file diff --git 
a/examples/3.x_api/pytorch/cv/fp8_quant/run_quant.sh b/examples/3.x_api/pytorch/cv/fp8_quant/run_quant.sh
new file mode 100644
index 00000000000..4d0047cf2d1
--- /dev/null
+++ b/examples/3.x_api/pytorch/cv/fp8_quant/run_quant.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+set -x
+
+function main {
+
+  init_params "$@"
+  run_tuning
+
+}
+
+# init params
+function init_params {
+  output_model=saved_results
+  for var in "$@"
+  do
+    case $var in
+      --topology=*)
+          topology=$(echo $var |cut -f2 -d=)
+      ;;
+      --dataset_location=*)
+          dataset_location=$(echo $var |cut -f2 -d=)
+      ;;
+      --input_model=*)
+          input_model=$(echo $var |cut -f2 -d=)
+      ;;
+      --output_model=*)
+          output_model=$(echo $var |cut -f2 -d=)
+      ;;
+      *)
+          echo "Error: No such parameter: ${var}"
+          exit 1
+      ;;
+    esac
+  done
+
+}
+
+# run_tuning
+function run_tuning {
+    if [ "${topology}" = "resnet18_fp8_static" ]; then
+        input_model="resnet18"
+        output_dir="saved_results"
+    fi
+    python main.py \
+            --pretrained \
+            -t \
+            -a ${input_model} \
+            -b 30 \
+            --tuned_checkpoint ${output_model} \
+            ${dataset_location}
+}
+
+main "$@"
diff --git a/examples/3.x_api/pytorch/cv/mixed_precision/README.md b/examples/3.x_api/pytorch/cv/mixed_precision/README.md
new file mode 100644
index 00000000000..ede1837b57a
--- /dev/null
+++ b/examples/3.x_api/pytorch/cv/mixed_precision/README.md
@@ -0,0 +1,48 @@
+Step-by-Step
+============
+
+This document describes the step-by-step instructions for reproducing PyTorch ResNet18 MixedPrecision results with Intel® Neural Compressor.
+
+# Prerequisite
+
+### 1. Environment
+
+PyTorch 1.8 or a higher version is needed with the pytorch_fx backend.
+
+```Shell
+cd examples/3.x_api/pytorch/cv/mixed_precision
+pip install -r requirements.txt
+```
+> Note: Validated PyTorch [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+### 2. Prepare Dataset
+
+Download the raw [ImageNet](http://www.image-net.org/) images to a directory such as /path/to/imagenet. The directory should include the following folders:
+
+```bash
+ls /path/to/imagenet
+train val
+```
+
+# Run
+
+> Note: All torchvision model names can be passed as long as they are included in `torchvision.models`; below are some examples.
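+
+To see the full list of names accepted by `-a`/`--arch`, the short snippet below (an illustrative sketch that only assumes `torchvision` is installed) mirrors the `list_models` call used in `main.py`:
+
+```python
+# Print the torchvision model names accepted by -a/--arch (same call used in main.py)
+import torchvision.models as models
+
+print("\n".join(sorted(models.list_models(module=models))))
+```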
+ +## MixedPrecision +```Shell +bash run_autotune.sh --input_model=resnet18 --dataset_location=/path/to/imagenet +``` + +## Benchmark +```Shell +# run optimized performance +bash run_benchmark.sh --input_model=resnet18 --dataset_location=/path/to/imagenet --mode=performance --batch_size=20 --optimized=true --iters=500 + +# run optimized accuracy +bash run_benchmark.sh --input_model=resnet18 --dataset_location=/path/to/imagenet --mode=accuracy --batch_size=1 --optimized=true +``` + + + + + diff --git a/examples/3.x_api/pytorch/cv/mixed_precision/main.py b/examples/3.x_api/pytorch/cv/mixed_precision/main.py new file mode 100644 index 00000000000..8ef798f9ac3 --- /dev/null +++ b/examples/3.x_api/pytorch/cv/mixed_precision/main.py @@ -0,0 +1,367 @@ +import argparse +import os +import random +import shutil +import time +import warnings +import sys + +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.parallel +import torch.distributed as dist +import torch.optim +import torch.multiprocessing as mp +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms as transforms +import torchvision.datasets as datasets +import torchvision.models as models + +model_names = models.list_models(module=models) + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet18)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 4)') +parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', + help='mini-batch size (default: 256), this is the total ' + 'batch size of all GPUs on the current node when ' + 'using Data Parallel or Distributed Data Parallel') +parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, + metavar='LR', help='initial learning rate', dest='lr') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') +parser.add_argument('-p', '--print-freq', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', + help='evaluate model on validation set') +parser.add_argument('-t', '--tune', dest='tune', action='store_true', + help='tune best optimized model') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--world-size', default=-1, type=int, + help='number of nodes for distributed training') +parser.add_argument('--rank', default=-1, type=int, + help='node rank for distributed training') +parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, + help='url used to set up distributed training') +parser.add_argument('--dist-backend', default='nccl', 
type=str, + help='distributed backend') +parser.add_argument('--seed', default=None, type=int, + help='seed for initializing training. ') +parser.add_argument('--gpu', default=None, type=int, + help='GPU id to use.') +parser.add_argument('--ppn', default=1, type=int, + help='number of processes on each node of distributed training') +parser.add_argument('--multiprocessing-distributed', action='store_true', + help='Use multi-processing distributed training to launch ' + 'N processes per node, which has N GPUs. This is the ' + 'fastest way to use PyTorch for either single node or ' + 'multi node data parallel training') +parser.add_argument('-i', "--iter", default=0, type=int, + help='For accuracy measurement only.') +parser.add_argument('-w', "--warmup_iter", default=5, type=int, + help='For benchmark measurement only.') +parser.add_argument('--performance', dest='performance', action='store_true', + help='run benchmark') +parser.add_argument('-r', "--accuracy", dest='accuracy', action='store_true', + help='For accuracy measurement only.') +parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', + help='path to checkpoint tuned by Neural Compressor (default: ./)') +parser.add_argument('--optimized', dest='optimized', action='store_true', + help='run benchmark') + +best_acc1 = 0 + + +def main(): + args = parser.parse_args() + + if 'mobilenet_v2' in args.arch: + import torchvision.models.quantization as models + else: + import torchvision.models as models + + if args.seed is not None: + random.seed(args.seed) + torch.manual_seed(args.seed) + + if args.pretrained: + print("=> using pre-trained model '{}'".format(args.arch)) + model = models.__dict__[args.arch](pretrained=True) + else: + print("=> creating model '{}'".format(args.arch)) + model = models.__dict__[args.arch]() + + # define loss function (criterion) and optimizer + criterion = nn.CrossEntropyLoss() + + optimizer = torch.optim.SGD(model.parameters(), args.lr, + momentum=args.momentum, + weight_decay=args.weight_decay) + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + checkpoint = torch.load(args.resume) + args.start_epoch = checkpoint['epoch'] + best_acc1 = checkpoint['best_acc1'] + if args.gpu is not None: + # best_acc1 may be from a checkpoint from a different GPU + best_acc1 = best_acc1.to(args.gpu) + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_dataset = datasets.ImageFolder( + traindir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])) + + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=True, + num_workers=args.workers, pin_memory=True, sampler=None) + + val_dataset = datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])) + + val_loader = torch.utils.data.DataLoader( + val_dataset, + batch_size=args.batch_size, shuffle=False, + 
num_workers=args.workers, pin_memory=True) + + if args.evaluate: + validate(val_loader, model, criterion, args) + return + + def eval_func(model): + accu = validate(val_loader, model, criterion, args) + return float(accu) + + if args.tune: + from neural_compressor.torch.quantization import MixedPrecisionConfig, TuningConfig, autotune + custom_tune_config = TuningConfig(config_set=[MixedPrecisionConfig(dtype=["fp16", "fp32"])]) + best_model = autotune(model=model, tune_config=custom_tune_config, eval_fn=eval_func) + torch.save(best_model, args.tuned_checkpoint) + return + + if args.performance or args.accuracy: + model.eval() + if args.optimized: + new_model = torch.load(args.tuned_checkpoint) + else: + new_model = model + if args.performance or args.accuracy: + validate(val_loader, new_model, criterion, args) + return + + +def train(train_loader, model, criterion, optimizer, epoch, args): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1, + top5, prefix="Epoch: [{}]".format(epoch)) + + # switch to train mode + model.train() + + end = time.time() + for i, (input, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.print(i) + + +def validate(val_loader, model, criterion, args): + batch_time = AverageMeter('Time', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, + prefix='Test: ') + + # switch to evaluate mode + model.eval() + + with torch.no_grad(): + latency_list = [] + for i, (input, target) in enumerate(val_loader): + if i >= args.warmup_iter: + start = time.time() + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + perf_start = time.time() + output = model(input) + perf_end = time.time() + latency_list.append(perf_end-perf_start) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # measure elapsed time + if i >= args.warmup_iter: + batch_time.update(time.time() - start) + + if i % args.print_freq == 0: + progress.print(i) + + if args.iter > 0 and i >= (args.warmup_iter + args.iter - 1): + break + + if args.accuracy: + print('Batch size = %d' % args.batch_size) + print('Accuracy: {top1:.5f} Accuracy@5 {top5:.5f}' + .format(top1=(top1.avg / 100), top5=(top5.avg / 100))) + if args.performance: + latency = 
np.array(latency_list[args.warmup_iter:]).mean() / args.batch_size + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. / latency)) + + return top1.avg + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 'model_best.pth.tar') + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +class ProgressMeter(object): + def __init__(self, num_batches, *meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def print(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print('\t'.join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = '{:' + str(num_digits) + 'd}' + return '[' + fmt + '/' + fmt.format(num_batches) + ']' + + +def adjust_learning_rate(optimizer, epoch, args): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = args.lr * (0.1 ** (epoch // 30)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + main() diff --git a/examples/3.x_api/pytorch/cv/mixed_precision/requirements.txt b/examples/3.x_api/pytorch/cv/mixed_precision/requirements.txt new file mode 100644 index 00000000000..46233c08f4a --- /dev/null +++ b/examples/3.x_api/pytorch/cv/mixed_precision/requirements.txt @@ -0,0 +1,4 @@ +neural-compressor +torch>=1.9.0 +torchvision>=0.10.0 +accelerate diff --git a/examples/3.x_api/pytorch/cv/mixed_precision/run_autotune.sh b/examples/3.x_api/pytorch/cv/mixed_precision/run_autotune.sh new file mode 100644 index 00000000000..770671db180 --- /dev/null +++ b/examples/3.x_api/pytorch/cv/mixed_precision/run_autotune.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + tuned_checkpoint=saved_results + batch_size=30 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + extra_cmd="${dataset_location}" + python main.py \ + -a ${input_model}\ + -t\ + --pretrained\ + ${extra_cmd} +} + +main "$@" diff --git 
a/examples/3.x_api/pytorch/cv/mixed_precision/run_benchmark.sh b/examples/3.x_api/pytorch/cv/mixed_precision/run_benchmark.sh
new file mode 100644
index 00000000000..28319cc4ffe
--- /dev/null
+++ b/examples/3.x_api/pytorch/cv/mixed_precision/run_benchmark.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+set -x
+
+function main {
+
+  export ONEDNN_MAX_CPU_ISA=AVX512_CORE_AMX_FP16
+  init_params "$@"
+  run_benchmark
+
+}
+
+# init params
+function init_params {
+  iters=100
+  tuned_checkpoint=saved_results
+  batch_size=20
+  for var in "$@"
+  do
+    case $var in
+      --dataset_location=*)
+          dataset_location=$(echo $var |cut -f2 -d=)
+      ;;
+      --input_model=*)
+          input_model=$(echo $var |cut -f2 -d=)
+      ;;
+      --mode=*)
+          mode=$(echo $var |cut -f2 -d=)
+      ;;
+      --batch_size=*)
+          batch_size=$(echo $var |cut -f2 -d=)
+      ;;
+      --iters=*)
+          iters=$(echo ${var} |cut -f2 -d=)
+      ;;
+      --optimized=*)
+          optimized=$(echo ${var} |cut -f2 -d=)
+      ;;
+      *)
+          echo "Error: No such parameter: ${var}"
+          exit 1
+      ;;
+    esac
+  done
+
+}
+
+
+# run_benchmark
+function run_benchmark {
+    if [[ ${mode} == "accuracy" ]]; then
+        mode_cmd=" --accuracy"
+    elif [[ ${mode} == "performance" ]]; then
+        mode_cmd=" --iter ${iters} --performance "
+    else
+        echo "Error: No such mode: ${mode}"
+        exit 1
+    fi
+
+    if [[ ${optimized} == "true" ]]; then
+        extra_cmd="--optimized ${dataset_location}"
+    else
+        extra_cmd="${dataset_location}"
+    fi
+    if [[ ${mode} == "accuracy" ]]; then
+        python main.py \
+            --pretrained \
+            --tuned_checkpoint ${tuned_checkpoint} \
+            -b ${batch_size} \
+            -a ${input_model} \
+            ${mode_cmd} \
+            ${extra_cmd}
+    elif [[ ${mode} == "performance" ]]; then
+        incbench --num_c 4 main.py \
+            --pretrained \
+            --tuned_checkpoint ${tuned_checkpoint} \
+            -b ${batch_size} \
+            -a ${input_model} \
+            ${mode_cmd} \
+            ${extra_cmd}
+    else
+        echo "Error: No such mode: ${mode}"
+        exit 1
+    fi
+}
+
+main "$@"
diff --git a/examples/3.x_api/pytorch/cv/static_quant/README.md b/examples/3.x_api/pytorch/cv/static_quant/README.md
new file mode 100644
index 00000000000..172f8b0e12f
--- /dev/null
+++ b/examples/3.x_api/pytorch/cv/static_quant/README.md
@@ -0,0 +1,27 @@
+# ImageNet Quantization
+
+This example implements quantization of popular model architectures, such as ResNet, on the ImageNet dataset.
+
+## Requirements
+
+- Install the requirements: `pip install -r requirements.txt`
+- Download the ImageNet dataset from http://www.image-net.org/
+  - Then, move and extract the training and validation images to labeled subfolders, using [the following shell script](extract_ILSVRC.sh)
+
+## Quantization
+
+To quantize a model, run `main.py` with the desired model architecture and the path to the ImageNet dataset:
+
+```bash
+python main.py --pretrained -t -a resnet18 -b 30 [imagenet-folder with train and val folders]
+```
+
+
+## Use Dummy Data
+
+The ImageNet dataset is large and time-consuming to download. To get started quickly, run `main.py` with dummy data by passing "--dummy". Note that the loss and accuracy are meaningless in this case.
+ +```bash +python main.py -a resnet18 --dummy -q -e +``` \ No newline at end of file diff --git a/examples/3.x_api/pytorch/cv/static_quant/extract_ILSVRC.sh b/examples/3.x_api/pytorch/cv/static_quant/extract_ILSVRC.sh new file mode 100644 index 00000000000..3ec05e8f328 --- /dev/null +++ b/examples/3.x_api/pytorch/cv/static_quant/extract_ILSVRC.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# +# script to extract ImageNet dataset +# ILSVRC2012_img_train.tar (about 138 GB) +# ILSVRC2012_img_val.tar (about 6.3 GB) +# make sure ILSVRC2012_img_train.tar & ILSVRC2012_img_val.tar in your current directory +# +# Adapted from: +# https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md +# https://gist.github.com/BIGBALLON/8a71d225eff18d88e469e6ea9b39cef4 +# +# imagenet/train/ +# ├── n01440764 +# │ ├── n01440764_10026.JPEG +# │ ├── n01440764_10027.JPEG +# │ ├── ...... +# ├── ...... +# imagenet/val/ +# ├── n01440764 +# │ ├── ILSVRC2012_val_00000293.JPEG +# │ ├── ILSVRC2012_val_00002138.JPEG +# │ ├── ...... +# ├── ...... +# +# +# Make imagnet directory +# +mkdir imagenet +# +# Extract the training data: +# +# Create train directory; move .tar file; change directory +mkdir imagenet/train && mv ILSVRC2012_img_train.tar imagenet/train/ && cd imagenet/train +# Extract training set; remove compressed file +tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar +# +# At this stage imagenet/train will contain 1000 compressed .tar files, one for each category +# +# For each .tar file: +# 1. create directory with same name as .tar file +# 2. extract and copy contents of .tar file into directory +# 3. remove .tar file +find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done +# +# This results in a training directory like so: +# +# imagenet/train/ +# ├── n01440764 +# │ ├── n01440764_10026.JPEG +# │ ├── n01440764_10027.JPEG +# │ ├── ...... +# ├── ...... +# +# Change back to original directory +cd ../.. +# +# Extract the validation data and move images to subfolders: +# +# Create validation directory; move .tar file; change directory; extract validation .tar; remove compressed file +mkdir imagenet/val && mv ILSVRC2012_img_val.tar imagenet/val/ && cd imagenet/val && tar -xvf ILSVRC2012_img_val.tar && rm -f ILSVRC2012_img_val.tar +# get script from soumith and run; this script creates all class directories and moves images into corresponding directories +wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash +# +# This results in a validation directory like so: +# +# imagenet/val/ +# ├── n01440764 +# │ ├── ILSVRC2012_val_00000293.JPEG +# │ ├── ILSVRC2012_val_00002138.JPEG +# │ ├── ...... +# ├── ...... 
+# +# +# Check total files after extract +# +# $ find train/ -name "*.JPEG" | wc -l +# 1281167 +# $ find val/ -name "*.JPEG" | wc -l +# 50000 +# \ No newline at end of file diff --git a/examples/3.x_api/pytorch/cv/static_quant/main.py b/examples/3.x_api/pytorch/cv/static_quant/main.py new file mode 100644 index 00000000000..3d7af7827e3 --- /dev/null +++ b/examples/3.x_api/pytorch/cv/static_quant/main.py @@ -0,0 +1,437 @@ +import argparse +import os +import random +import shutil +import time +import warnings +import sys + +import torch +import torch.nn as nn +import torch.nn.parallel +import torch.distributed as dist +import torch.optim +import torch.multiprocessing as mp +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms as transforms +import torchvision.datasets as datasets +import torchvision.models as models + +model_names = models.list_models(module=models) + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet18)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 4)') +parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', + help='mini-batch size (default: 256), this is the total ' + 'batch size of all GPUs on the current node when ' + 'using Data Parallel or Distributed Data Parallel') +parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, + metavar='LR', help='initial learning rate', dest='lr') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') +parser.add_argument('-p', '--print-freq', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', + help='evaluate model on validation set') +parser.add_argument('-t', '--tune', dest='tune', action='store_true', + help='tune best int8 model on calibration dataset') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--world-size', default=-1, type=int, + help='number of nodes for distributed training') +parser.add_argument('--rank', default=-1, type=int, + help='node rank for distributed training') +parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, + help='url used to set up distributed training') +parser.add_argument('--dist-backend', default='nccl', type=str, + help='distributed backend') +parser.add_argument('--seed', default=None, type=int, + help='seed for initializing training. 
') +parser.add_argument('--gpu', default=None, type=int, + help='GPU id to use.') +parser.add_argument('--ppn', default=1, type=int, + help='number of processes on each node of distributed training') +parser.add_argument('--multiprocessing-distributed', action='store_true', + help='Use multi-processing distributed training to launch ' + 'N processes per node, which has N GPUs. This is the ' + 'fastest way to use PyTorch for either single node or ' + 'multi node data parallel training') +parser.add_argument('-i', "--iter", default=0, type=int, + help='For accuracy measurement only.') +parser.add_argument('-w', "--warmup_iter", default=5, type=int, + help='For benchmark measurement only.') +parser.add_argument('--performance', dest='performance', action='store_true', + help='run benchmark') +parser.add_argument('-r', "--accuracy", dest='accuracy', action='store_true', + help='For accuracy measurement only.') +parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', + help='path to checkpoint tuned by Neural Compressor (default: ./)') +parser.add_argument('--int8', dest='int8', action='store_true', + help='Load int8 model.') +parser.add_argument("--calib_iters", default=128, type=int, + help="For calibration only.") +parser.add_argument("--iters", default=100, type=int, + help="For benchmark only.") + +best_acc1 = 0 + + +def main(): + args = parser.parse_args() + + if 'mobilenet' in args.arch: + import torchvision.models.quantization as models + else: + import torchvision.models as models + + if args.seed is not None: + random.seed(args.seed) + torch.manual_seed(args.seed) + + if args.pretrained: + print("=> using pre-trained model '{}'".format(args.arch)) + model = models.__dict__[args.arch](pretrained=True) + else: + print("=> creating model '{}'".format(args.arch)) + model = models.__dict__[args.arch]() + + # define loss function (criterion) and optimizer + criterion = nn.CrossEntropyLoss() + + optimizer = torch.optim.SGD(model.parameters(), args.lr, + momentum=args.momentum, + weight_decay=args.weight_decay) + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + checkpoint = torch.load(args.resume) + args.start_epoch = checkpoint['epoch'] + best_acc1 = checkpoint['best_acc1'] + if args.gpu is not None: + # best_acc1 may be from a checkpoint from a different GPU + best_acc1 = best_acc1.to(args.gpu) + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_dataset = datasets.ImageFolder( + traindir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])) + + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=True, + num_workers=args.workers, pin_memory=True, sampler=None) + + val_dataset = datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])) + + val_loader = torch.utils.data.DataLoader( + val_dataset, + 
batch_size=args.batch_size, shuffle=False, + num_workers=args.workers, pin_memory=True) + + if args.evaluate: + validate(val_loader, model, criterion, args) + return + + def eval_func(model): + accu = validate(val_loader, model, criterion, args) + return float(accu) + + if args.tune: + from neural_compressor.torch.export import export + from neural_compressor.torch.quantization import prepare, convert, get_default_static_config + + # Prepare the float model and example inputs for exporting model + x = torch.randn(args.batch_size, 3, 224, 224).contiguous(memory_format=torch.channels_last) + example_inputs = (x,) + + # Specify that the first dimension of each input is that batch size + from torch.export import Dim + print(args.batch_size) + batch = Dim("batch", min=16) + + # Specify that the first dimension of each input is that batch size + dynamic_shapes = {"x": {0: batch}} + + # Export eager model into FX graph model + exported_model = export(model=model, example_inputs=example_inputs, dynamic_shapes=dynamic_shapes) + # Quantize the model + quant_config = get_default_static_config() + + prepared_model = prepare(exported_model, quant_config=quant_config) + # Calibrate + with torch.no_grad(): + for i, (images, target) in enumerate(val_loader): + if i == args.calib_iters: + break + if args.gpu is not None and torch.cuda.is_available(): + images = images.cuda(args.gpu, non_blocking=True) + if torch.backends.mps.is_available(): + images = images.to('mps') + target = target.to('mps') + if torch.cuda.is_available(): + target = target.cuda(args.gpu, non_blocking=True) + # compute output + prepared_model(images) + + q_model = convert(prepared_model) + + if args.tuned_checkpoint: + q_model.save(example_inputs=example_inputs, output_dir = args.tuned_checkpoint) + return + + if args.performance or args.accuracy: + if args.int8: + from neural_compressor.torch.quantization import load + q_model = load(args.tuned_checkpoint) + + # Compile the quantized model and replace the Q/DQ pattern with Q-operator + from torch._inductor import config + + config.freezing = True + opt_model = torch.compile(q_model) + new_model = opt_model + else: + new_model = model + new_model.eval() + if args.performance: + benchmark(val_loader, new_model, args) + return + if args.accuracy: + validate(val_loader, new_model, criterion, args) + return + + +def benchmark(val_loader, model, args): + + total_iters = args.iters + warmup_iters = args.warmup_iter + for i, (images, target) in enumerate(val_loader): + if args.gpu is not None and torch.cuda.is_available(): + images = images.cuda(args.gpu, non_blocking=True) + if torch.backends.mps.is_available(): + images = images.to('mps') + break + + with torch.no_grad(): + for i in range(total_iters): + if i == total_iters: + break + if i == warmup_iters: + start = time.time() + + # model inference + model(images) + + if i % args.print_freq == 0: + print(f"benchmarking... 
{i+1}/{total_iters}") + + end = time.time() + latency = (end - start) / ((total_iters - warmup_iters) * args.batch_size) + throughput = ((total_iters - warmup_iters) * args.batch_size) / (end - start) + print("Latency: {:.3f} ms".format(latency * 10**3)) + print("Throughput: {:.3f} samples/sec".format(throughput)) + +def train(train_loader, model, criterion, optimizer, epoch, args): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1, + top5, prefix="Epoch: [{}]".format(epoch)) + + # switch to train mode + model.train() + + end = time.time() + for i, (input, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.print(i) + + +def validate(val_loader, model, criterion, args): + batch_time = AverageMeter('Time', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, + prefix='Test: ') + + # switch to evaluate mode + # model.eval() + + with torch.no_grad(): + for i, (input, target) in enumerate(val_loader): + if i >= args.warmup_iter: + start = time.time() + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # measure elapsed time + if i >= args.warmup_iter: + batch_time.update(time.time() - start) + + if i % args.print_freq == 0: + progress.print(i) + + if args.iter > 0 and i >= (args.warmup_iter + args.iter - 1): + break + + print('Batch size = %d' % args.batch_size) + print('Accuracy: {top1:.5f} Accuracy@5 {top5:.5f}' + .format(top1=(top1.avg / 100), top5=(top5.avg / 100))) + + return top1.avg/100 + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 'model_best.pth.tar') + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return 
fmtstr.format(**self.__dict__) + + +class ProgressMeter(object): + def __init__(self, num_batches, *meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def print(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print('\t'.join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = '{:' + str(num_digits) + 'd}' + return '[' + fmt + '/' + fmt.format(num_batches) + ']' + + +def adjust_learning_rate(optimizer, epoch, args): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = args.lr * (0.1 ** (epoch // 30)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + main() + diff --git a/examples/3.x_api/pytorch/cv/static_quant/requirements.txt b/examples/3.x_api/pytorch/cv/static_quant/requirements.txt new file mode 100644 index 00000000000..ebd3df6ae7a --- /dev/null +++ b/examples/3.x_api/pytorch/cv/static_quant/requirements.txt @@ -0,0 +1,3 @@ +torch +torchvision +neural-compressor \ No newline at end of file diff --git a/examples/3.x_api/pytorch/cv/static_quant/run_benchmark.sh b/examples/3.x_api/pytorch/cv/static_quant/run_benchmark.sh new file mode 100644 index 00000000000..6f6b69c35df --- /dev/null +++ b/examples/3.x_api/pytorch/cv/static_quant/run_benchmark.sh @@ -0,0 +1,103 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=16 + tuned_checkpoint=saved_results + echo ${max_eval_samples} + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + extra_cmd='' + + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy " + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --performance --iters "${iters} + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + echo $extra_cmd + + + echo $extra_cmd + + if [ "${topology}" = "resnet18_pt2e_static" ]; then + model_name_or_path="resnet18" + fi + + if [[ ${mode} == "accuracy" ]]; then + python main.py \ + --pretrained \ + -a resnet18 \ + -b 30 \ + --tuned_checkpoint ${tuned_checkpoint} \ + ${dataset_location} \ + ${extra_cmd} \ + ${mode_cmd} + elif [[ ${mode} == "performance" ]]; then + incbench --num_cores_per_instance 4 \ + main.py 
\
+            --pretrained \
+            -a resnet18 \
+            -b 30 \
+            --tuned_checkpoint ${tuned_checkpoint} \
+            ${dataset_location} \
+            ${extra_cmd} \
+            ${mode_cmd}
+    else
+        echo "Error: No such mode: ${mode}"
+        exit 1
+    fi
+}
+
+main "$@"
diff --git a/examples/3.x_api/pytorch/cv/static_quant/run_quant.sh b/examples/3.x_api/pytorch/cv/static_quant/run_quant.sh
new file mode 100644
index 00000000000..1f4588e933c
--- /dev/null
+++ b/examples/3.x_api/pytorch/cv/static_quant/run_quant.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -x
+
+function main {
+
+  init_params "$@"
+  run_tuning
+
+}
+
+# init params
+function init_params {
+  tuned_checkpoint="saved_results"
+  for var in "$@"
+  do
+    case $var in
+      --topology=*)
+          topology=$(echo $var |cut -f2 -d=)
+      ;;
+      --dataset_location=*)
+          dataset_location=$(echo $var |cut -f2 -d=)
+      ;;
+      --input_model=*)
+          input_model=$(echo $var |cut -f2 -d=)
+      ;;
+      --output_model=*)
+          tuned_checkpoint=$(echo $var |cut -f2 -d=)
+      ;;
+      *)
+          echo "Error: No such parameter: ${var}"
+          exit 1
+      ;;
+    esac
+  done
+
+}
+
+# run_tuning
+function run_tuning {
+    if [ "${topology}" = "resnet18_pt2e_static" ]; then
+        model_name_or_path="resnet18"
+    fi
+    python main.py \
+        --pretrained \
+        -t \
+        -a resnet18 \
+        -b 30 \
+        --tuned_checkpoint ${tuned_checkpoint} \
+        ${dataset_location}
+}
+
+main "$@"
diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/README.md b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/README.md
new file mode 100644
index 00000000000..6b37038d0dc
--- /dev/null
+++ b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/README.md
@@ -0,0 +1,83 @@
+Step-by-Step
+============
+This document describes the step-by-step instructions to run the [stable diffusion XL model](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) using Smooth Quantization to accelerate inference while maintaining the quality of the output image.
+
+# Prerequisite
+
+## Environment
+Python 3.9 or a higher version is recommended.
+
+```shell
+pip install -r requirements.txt
+```
+**Note**: IPEX and torch require the nightly version (2.4) for compatibility. Please refer to [installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=cpu&version=main&os=linux%2fwsl2&package=source).
+
+# Run
+
+To quantize the model:
+```bash
+python sdxl_smooth_quant.py --model_name_or_path stabilityai/stable-diffusion-xl-base-1.0 --quantize --alpha 0.44 --output_dir "./saved_results"
+```
+or
+```bash
+sh run_quant.sh --alpha=0.44
+```
+To load a quantized model:
+```bash
+python sdxl_smooth_quant.py --model_name_or_path stabilityai/stable-diffusion-xl-base-1.0 --quantize --load --int8
+```
+or
+```bash
+sh run_quant.sh --int8=true
+```
+
+# Results
+## Image Generated
+
+With the caption `"A brown and white dog runs on some brown grass near a Frisbee that is just sailing above the ground."`, the results of the FP32 model and the INT8 model are shown on the left and right, respectively.
+
+

+| bf16 | int8 |
+|------|------|
+| ![bf16](./images/fp32.jpg) | ![int8](./images/int8.jpg) |

+ +## CLIP evaluation +We have also evaluated CLIP scores on 5000 samples from COCO2014 validation dataset for FP32 model and INT8 model. CLIP results are listed below. + +| Precision | FP32 | INT8 | +|----------------------|-------|-------| +| CLIP on COCO2014 val | 32.05 | 31.77 | + +We're using the mlperf_sd_inference [repo](https://github.com/ahmadki/mlperf_sd_inference) to evaluate CLIP scores. In order to support evaluation on quantized model, +we made some modification on the script (`main.py`). Please use as following: +```bash +git clone https://github.com/ahmadki/mlperf_sd_inference.git +cd mlperf_sd_inference +mv ../main.py ./ +``` +After setting the environment as instructed in the repo, you can execute the modified `main.py` script to generate images: +```bash +python main.py \ + --model-id stabilityai/stable-diffusion-xl-base-1.0 \ + --quantized-unet ./saved_results \ # quantized model saving path, should include `qconfig.json` and `quantized_model.pt` + --precision fp32 \ + --guidance 8.0 \ + --steps 20 \ + --iters 200 \ # change to 5000 for the full 5k dataset + --latent-path latents.pt \ + --base-output-dir ./output +``` +Then you can compute CLIP score using the images generated by the quantized model: +```bash +mv ./output/stabilityai--stable-diffusion-xl-base-1.0__euler__20__8.0__fp32/* ./output/ # switch directory +rm -rf ./output/stabilityai--stable-diffusion-xl-base-1.0__euler__20__8.0__fp32/ + +python clip/clip_score.py \ + --tsv-file captions_5k.tsv \ + --image-folder ./output \ # folder with the generated images + --device "cpu" +``` +Or you can use the bash script for all steps above: +```bash +sh run_benchmark.sh --mode=accuracy --int8=true +``` diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/images/fp32.jpg b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/images/fp32.jpg new file mode 100644 index 00000000000..387eed9a802 Binary files /dev/null and b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/images/fp32.jpg differ diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/images/int8.jpg b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/images/int8.jpg new file mode 100644 index 00000000000..9a6d146894e Binary files /dev/null and b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/images/int8.jpg differ diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/latents.pt b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/latents.pt new file mode 100644 index 00000000000..208dbc48a1c Binary files /dev/null and b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/latents.pt differ diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/main.py b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/main.py new file mode 100644 index 00000000000..1f5b72fd0f0 --- /dev/null +++ b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/main.py @@ -0,0 +1,484 @@ +import os +import logging +import tempfile +import shutil +import argparse +import pandas as pd +import time +import torch +import intel_extension_for_pytorch as ipex +from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from diffusers import ( + DDPMScheduler, + DDIMScheduler, + 
EulerDiscreteScheduler, + EulerAncestralDiscreteScheduler, + StableDiffusionXLPipeline, + StableDiffusionXLImg2ImgPipeline, +) +from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import ( + deprecate, retrieve_timesteps, rescale_noise_cfg, + PipelineImageInput, StableDiffusionXLPipelineOutput +) + + +class StableDiffusionXLPipelineSQ(StableDiffusionXLPipeline): + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. 
Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = 'cpu' + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. 
Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None: + image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + image_embeds = image_embeds.to(device) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + )['sample'] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = 
self.image_processor.postprocess(image.detach(), output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) + + +parser = argparse.ArgumentParser() +parser.add_argument('--model-id', default="stabilityai/stable-diffusion-xl-base-1.0", type=str) +parser.add_argument('--precision', default='fp32', type=str) +parser.add_argument('--base-output-dir', default="./output", type=str) +parser.add_argument('--quantized-unet', default="./saved_results", type=str) +parser.add_argument("--int8", action="store_true", help="Load quantized model.") +parser.add_argument("--load", action="store_true") +parser.add_argument('--iters', default=5000, type=int, help="Num of image generated.") +parser.add_argument('--output-dir-name', default=None, type=str) +parser.add_argument('--output-dir-name-postfix', default=None, type=str) +parser.add_argument('--captions-fname', default="captions_5k.tsv", type=str) +parser.add_argument('--guidance', default=8.0, type=float) +parser.add_argument('--scheduler', default="euler", type=str) +parser.add_argument('--steps', default=20, type=int) +parser.add_argument('--negative-prompt', default="normal quality, low quality, worst quality, low res, blurry, nsfw, nude", type=str) +parser.add_argument('--latent-path', default="latents.pt", type=str) +parser.add_argument('--generator-seed', default=None, type=int) +parser.add_argument("--refiner", dest='refiner', action="store_true", + help="Whether to add a refiner to the SDXL pipeline." + "Applicable only with --model-id=xl") +parser.add_argument("--no-refiner", dest='refiner', action="store_false", + help="Whether to add a refiner to the SDXL pipeline." + "Applicable only with --model-id=xl") + +args = parser.parse_args() + +# Init the logger +logging.basicConfig( + format='%(asctime)s %(levelname)-8s %(message)s', + level=logging.INFO, + datefmt='%Y-%m-%d %H:%M:%S' +) + +if args.latent_path and args.generator_seed: + raise ValueError( + "Cannot specify both --latent-path and --generator-seed" + ) + +if args.precision == "fp16": + dtype = torch.float16 +elif args.precision == "bf16": + dtype = torch.bfloat16 +else: + dtype = torch.float32 + +# Initialize defaults +device = torch.device('cpu') +world_size = 1 +rank = 0 + +# load frozen latent +latent_noise = None +if args.latent_path: + logging.info(f"[{rank}] loading latent from: {args.latent_path}") + latent_noise = torch.load(args.latent_path).to(dtype) + +logging.info(f"[{rank}] args: {args}") +logging.info(f"[{rank}] world_size: {world_size}") +logging.info(f"[{rank}] device: {device}") + +logging.info(f"[{rank}] using captions from: {args.captions_fname}") +df = pd.read_csv(args.captions_fname, sep='\t') +logging.info(f"[{rank}] {len(df)} captions loaded") + +# split captions among ranks +df = df[rank::world_size] +logging.info(f"[{rank}] {len(df)} captions assigned") + +# Build the pipeline +schedulers = { + "ddpm": DDPMScheduler.from_pretrained(args.model_id, subfolder="scheduler"), + "ddim": DDIMScheduler.from_pretrained(args.model_id, subfolder="scheduler"), + "euler_anc": EulerAncestralDiscreteScheduler.from_pretrained(args.model_id, subfolder="scheduler"), + "euler": EulerDiscreteScheduler.from_pretrained(args.model_id, subfolder="scheduler"), +} +pipe = StableDiffusionXLPipelineSQ.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=dtype, + use_safetensors=True, +) +pipe.scheduler = 
EulerDiscreteScheduler.from_config(pipe.scheduler.config) + +if args.refiner: + refiner_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(args.model_id, + scheduler=schedulers[args.scheduler], + safety_checker=None, + add_watermarker=False, + variant="fp16" if args.precision == 'fp16' else None, + torch_dtype=dtype) + +if args.int8 and args.load: + from neural_compressor.torch.quantization import load + example_inputs = {"sample": torch.randn((2, 4, 128, 128), dtype=dtype), + "timestep": torch.tensor(951.0), + "encoder_hidden_states": torch.randn((2, 77, 2048), dtype=dtype), + "added_cond_kwargs": {'text_embeds':torch.randn((2, 1280), dtype=dtype), + 'time_ids': torch.tensor([[1024., 1024., 0., 0., 1024., 1024.], + [1024., 1024., 0., 0., 1024., 1024.]], dtype=dtype)},} + q_unet = load(args.quantized_unet) + for _ in range(2): + q_unet(**example_inputs) + print("Loaded Quantized Model") + setattr(q_unet, "config", pipe.unet.config) + pipe.unet = q_unet + +pipe.set_progress_bar_config(disable=True) +logging.info(f"[{rank}] Pipeline initialized: {pipe}") + +if args.refiner: + refiner_pipe = refiner_pipe.to(device) + refiner_pipe.set_progress_bar_config(disable=True) + logging.info(f"[{rank}] Refiner pipeline initialized: {refiner_pipe}") + +# Output directory +output_dir = args.output_dir_name or f"{args.model_id.replace('/','--')}__{args.scheduler}__{args.steps}__{args.guidance}__{args.precision}" +if args.output_dir_name_postfix is not None: + output_dir = f"{output_dir}_{args.output_dir_name_postfix}" + +output_dir = os.path.join(args.base_output_dir, output_dir) + +# Ensure the output directory exists +if not os.path.exists(output_dir): + os.makedirs(output_dir) + +# Create a temporary directory to atomically move the images +tmp_dir = tempfile.mkdtemp() + +# Generate the images +for index, row in df.iterrows(): + image_id = row['image_id'] + caption_id = row['id'] + caption_text = row['caption'] + + destination_path = os.path.join(output_dir, f"{caption_id}.png") + + if index >= args.iters: + break + + # Check if the image already exists in the output directory + if not os.path.exists(destination_path): + # Generate the image + print(index, caption_text) + tic = time.time() + image = pipe(prompt=caption_text, + negative_prompt="normal quality, low quality, worst quality, low res, blurry, nsfw, nude", + guidance_scale=8.0, + generator=torch.Generator(device=device).manual_seed(args.generator_seed) if args.generator_seed else None, + latents=latent_noise, + num_inference_steps=20).images[0] + toc = time.time() + print("Time taken : ",toc-tic) + + if args.refiner: + image = refiner_pipe(caption_text, + image=image).images[0] + + # Save the image + image_path_tmp = os.path.join(tmp_dir, f"{caption_id}.png") + image.save(image_path_tmp) + shutil.move(image_path_tmp, destination_path) + + logging.info(f"[{rank}] Saved image {caption_id}: {caption_text}") diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/requirements.txt b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/requirements.txt new file mode 100644 index 00000000000..f1fe1f7e20f --- /dev/null +++ b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/requirements.txt @@ -0,0 +1,8 @@ +diffusers +accelerate +torch +transformers +tensorboard +intel_extension_for_pytorch +tqdm +open-clip-torch \ No newline at end of file diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/run_benchmark.sh 
b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/run_benchmark.sh
new file mode 100644
index 00000000000..54046faebb1
--- /dev/null
+++ b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/run_benchmark.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+set -x
+
+function main {
+
+    init_params "$@"
+    run_benchmark
+
+}
+
+# init params
+function init_params {
+    model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0"
+    latent="latents.pt"
+    tuned_checkpoint="./saved_results/"
+    iters=200
+    for var in "$@"
+    do
+        case $var in
+            --iters=*)
+                iters=$(echo $var | cut -f2 -d=)
+            ;;
+            --int8=*)
+                int8=$(echo $var | cut -f2 -d=)
+            ;;
+            --mode=*)
+                mode=$(echo $var | cut -f2 -d=)
+            ;;
+            *)
+                echo "Error: No such parameter: ${var}"
+            ;;
+        esac
+    done
+
+}
+
+
+# run_benchmark
+function run_benchmark {
+    extra_cmd="--load"
+    model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0"
+    precision="fp32"
+    latent="latents.pt"
+    base_output_dir="./output/"
+
+    if [[ ${int8} == "true" ]]; then
+        extra_cmd=$extra_cmd" --int8"
+    fi
+    echo $extra_cmd
+
+    if [[ ${mode} == "performance" ]]; then
+        extra_cmd=$extra_cmd" --performance"
+        if [[ ${int8} == "true" ]]; then
+            extra_cmd=$extra_cmd" --int8"
+        fi
+        echo $extra_cmd
+
+        python -u sdxl_smooth_quant.py \
+            --model_name_or_path ${model_name_or_path} \
+            --latent ${latent} \
+            ${extra_cmd}
+    else
+        if [[ ${int8} == "true" ]]; then
+            extra_cmd=$extra_cmd" --int8"
+        fi
+        echo $extra_cmd
+
+        python -u sdxl_smooth_quant.py \
+            --model_name_or_path ${model_name_or_path} \
+            --latent ${latent} \
+            ${extra_cmd}
+
+        REPO_URL="https://github.com/ahmadki/mlperf_sd_inference.git"
+        TARGET_DIR="mlperf_sd_inference"
+
+        if [ -d "$TARGET_DIR" ]; then
+            echo "Directory $TARGET_DIR already exists. Skipping git clone."
+        else
+            git clone "$REPO_URL" "$TARGET_DIR"
+        fi
+
+        cd mlperf_sd_inference
+        cp ../main.py ./
+        if [ -d "../saved_results/" ]; then
+            mv ../saved_results/ ./
+        fi
+
+        python -u main.py \
+            --model-id ${model_name_or_path} \
+            --quantized-unet ${tuned_checkpoint} \
+            --precision ${precision} \
+            --latent-path ${latent} \
+            --base-output-dir ${base_output_dir} \
+            --iters ${iters} \
+            ${extra_cmd}
+
+        mv ./output/stabilityai--stable-diffusion-xl-base-1.0__euler__20__8.0__fp32/* ./output/
+        rm -rf ./output/stabilityai--stable-diffusion-xl-base-1.0__euler__20__8.0__fp32/
+
+        python clip/clip_score.py \
+            --tsv-file captions_5k.tsv \
+            --image-folder ${base_output_dir} \
+            --device "cpu"
+
+        cd ..
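+        # Note (comment added for clarity): main.py writes images to
+        # <base-output-dir>/<model-id>__<scheduler>__<steps>__<guidance>__<precision>/,
+        # so the mv/rm above flatten that folder into ./output before clip_score.py reads it.
+        # Pass --iters=5000 to this script to evaluate the full 5k-caption set.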
+ fi + +} + +main "$@" diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/run_quant.sh b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/run_quant.sh new file mode 100644 index 00000000000..e24ff49c78b --- /dev/null +++ b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/run_quant.sh @@ -0,0 +1,56 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --alpha=*) + alpha=$(echo $var |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo $var | cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + ;; + esac + done + +} + +# run_tuning +function run_tuning { + extra_cmd="" + model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" + n_steps=20 + calib_size=10 + batch_size=1 + latent="latents.pt" + alpha=0.44 + + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8 --load" + else + extra_cmd=$extra_cmd" --quantize" + fi + echo $extra_cmd + + python -u sdxl_smooth_quant.py \ + --model_name_or_path ${model_name_or_path} \ + --n_steps ${n_steps} \ + --alpha ${alpha} \ + --latent ${latent} \ + ${extra_cmd} + +} + +main "$@" diff --git a/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/sdxl_smooth_quant.py b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/sdxl_smooth_quant.py new file mode 100644 index 00000000000..984a1696efd --- /dev/null +++ b/examples/3.x_api/pytorch/diffusion_model/diffusers/stable_diffusion/smooth_quant/sdxl_smooth_quant.py @@ -0,0 +1,436 @@ + +import os +import argparse +import torch +import intel_extension_for_pytorch as ipex +from diffusers import EulerDiscreteScheduler, StableDiffusionXLPipeline +from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import ( + deprecate, retrieve_timesteps, rescale_noise_cfg, + PipelineImageInput, StableDiffusionXLPipelineOutput +) +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +def prompts2images(pipeline, prompts, **kwargs): + images = pipeline( + prompt=prompts, + num_inference_steps=kwargs["n_steps"], + negative_prompt=[ + "normal quality, low quality, worst quality, low res, blurry, nsfw, nude" + ] + * len(prompts), + latents=kwargs["latent"], + guidance_scale=8.0, # MLPerf requirements + ).images + return images + +def save_images(prompts, images, save_dir, prefix='ref'): + for prompt, image in zip(prompts, images): + image_name = f"{prefix}_{'_'.join(prompt.replace('/', ' ').split(' '))}.jpg" + image.save(os.path.join(save_dir, image_name)) + +def do_calibration(pipeline, calibration_prompts, **kwargs): + for i_th, prompts in enumerate(calibration_prompts): + if i_th >= kwargs["calib_size"]: + return + prompts2images(pipeline, prompts, **kwargs) + +class StableDiffusionXLPipelineSQ(StableDiffusionXLPipeline): + def _get_add_time_ids( + self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None + ): + add_time_ids = list(original_size + crops_coords_top_left + target_size) + add_time_ids = torch.tensor([add_time_ids], dtype=dtype) + return add_time_ids + + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + timesteps: List[int] = None, + denoising_end: Optional[float] = None, + guidance_scale: float = 
5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + ip_adapter_image: Optional[PipelineImageInput] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + negative_original_size: Optional[Tuple[int, int]] = None, + negative_crops_coords_top_left: Tuple[int, int] = (0, 0), + negative_target_size: Optional[Tuple[int, int]] = None, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + **kwargs, + ): + + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", + ) + + # 0. Default height and width to unet + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + callback_steps, + negative_prompt, + negative_prompt_2, + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + callback_on_step_end_tensor_inputs, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._denoising_end = denoising_end + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = "cpu" + + # 3. 
Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=self.do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + ) + + # 4. Prepare timesteps + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + + # 5. Prepare latent variables + num_channels_latents = self.unet.config.in_channels + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeds = pooled_prompt_embeds + if self.text_encoder_2 is None: + text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) + else: + text_encoder_projection_dim = self.text_encoder_2.config.projection_dim + + add_time_ids = self._get_add_time_ids( + original_size, + crops_coords_top_left, + target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + if negative_original_size is not None and negative_target_size is not None: + negative_add_time_ids = self._get_add_time_ids( + negative_original_size, + negative_crops_coords_top_left, + negative_target_size, + dtype=prompt_embeds.dtype, + text_encoder_projection_dim=text_encoder_projection_dim, + ) + else: + negative_add_time_ids = add_time_ids + + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) + add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) + + prompt_embeds = prompt_embeds.to(device) + add_text_embeds = add_text_embeds.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + if ip_adapter_image is not None: + image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt) + if self.do_classifier_free_guidance: + image_embeds = torch.cat([negative_image_embeds, image_embeds]) + image_embeds = image_embeds.to(device) + + # 8. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if ( + self.denoising_end is not None + and isinstance(self.denoising_end, float) + and self.denoising_end > 0 + and self.denoising_end < 1 + ): + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (self.denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + # 9. 
Optionally get Guidance Scale Embedding + timestep_cond = None + if self.unet.config.time_cond_proj_dim is not None: + guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) + timestep_cond = self.get_guidance_scale_embedding( + guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim + ).to(device=device, dtype=latents.dtype) + + self._num_timesteps = len(timesteps) + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents + + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} + if ip_adapter_image is not None: + added_cond_kwargs["image_embeds"] = image_embeds + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + added_cond_kwargs=added_cond_kwargs, + )['sample'] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) + add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) + negative_pooled_prompt_embeds = callback_outputs.pop( + "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds + ) + add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) + negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + # make sure the VAE is in float32 mode, as it overflows in float16 + needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast + + if needs_upcasting: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + + # cast back to fp16 if needed + if needs_upcasting: + self.vae.to(dtype=torch.float16) + else: + image = latents + + if not output_type == "latent": + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = 
self.image_processor.postprocess(image.detach(), output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--model_name_or_path", type=str, default="stabilityai/stable-diffusion-xl-base-1.0" + ) + parser.add_argument("--quantize", action="store_true") + parser.add_argument("--load", action="store_true") + parser.add_argument("--int8", action="store_true", help="Load quantized model.") + parser.add_argument("--performance", action="store_true") + parser.add_argument("--n_steps", type=int, default=20) + parser.add_argument("--batch-size", type=int, default=1) + parser.add_argument("--calib-size", type=int, default=10) + parser.add_argument("--latent", type=str, default="latents.pt") + parser.add_argument("--alpha", type=float, default=0.5, help="SmoothQuant Alpha") + parser.add_argument("--output_dir", type=str, default="./saved_results", help="output directory") + parser.add_argument("--iters", default=10, type=int, help="For performance measurement only.") + + args = parser.parse_args() + os.makedirs(args.output_dir, exist_ok=True) + + args.calib_size = args.calib_size // args.batch_size + + dtype = torch.float32 + + pipeline = StableDiffusionXLPipelineSQ.from_pretrained( + args.model_name_or_path, + torch_dtype=dtype, + use_safetensors=True, + ) + pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) + + # This is a list of prompts + cali_prompts = [['A brown and white dog runs on some brown grass near a Frisbee that is just sailing above the ground.'], + ['The bus is traveling down a two way street.']] + + torch.random.manual_seed(42) + if args.latent is not None: + init_latent = torch.load(args.latent).to(dtype) + else: + init_latent = torch.randn((1,4,128,128), dtype=dtype) + + prompts = cali_prompts[0] + ref_images = prompts2images(pipeline, prompts, n_steps=args.n_steps, latent=init_latent) + save_images(prompts, ref_images, args.output_dir, prefix='ref') + + def forward_loop(model): + do_calibration( + pipeline=pipeline, + calibration_prompts=cali_prompts, + calib_size=args.calib_size, + n_steps=args.n_steps, + latent=init_latent, + ) + + if args.quantize: + excluded_precisions = ["bf16"] + example_inputs = {"sample": torch.randn((2, 4, 128, 128), dtype=dtype), + "timestep": torch.tensor(951.0), + "encoder_hidden_states": torch.randn((2, 77, 2048), dtype=dtype), + "added_cond_kwargs": {'text_embeds':torch.randn((2, 1280), dtype=dtype), + 'time_ids': torch.tensor([[1024., 1024., 0., 0., 1024., 1024.], + [1024., 1024., 0., 0., 1024., 1024.]], dtype=dtype)},} + + from neural_compressor.torch.quantization import SmoothQuantConfig, prepare, convert + quant_config = SmoothQuantConfig(alpha=args.alpha, excluded_precisions=excluded_precisions) + user_model = prepare(model=pipeline.unet, quant_config=quant_config, example_inputs=example_inputs) + forward_loop(user_model) + q_unet = convert(user_model) + q_unet.save(args.output_dir) + + if args.load: + if args.int8: + from neural_compressor.torch.quantization import load + q_unet = load(os.path.abspath(os.path.expanduser(args.output_dir))) + setattr(q_unet, "config", pipeline.unet.config) + else: + q_unet = pipeline.unet + + pipeline.unet = q_unet + quant_images = prompts2images(pipeline, prompts, n_steps=args.n_steps, latent=init_latent) + save_images(prompts, quant_images, args.output_dir, prefix='quant') + + if 
args.performance:
+        import time
+
+        total_iters = args.iters * args.batch_size
+        warmup_iters = 5
+        for i in range(total_iters):
+            if i == warmup_iters:
+                start = time.time()
+            prompts2images(pipeline, prompts, n_steps=args.n_steps, latent=init_latent)
+        end = time.time()
+
+        latency = (end - start) / ((total_iters - warmup_iters) * args.batch_size)
+        throughput = ((total_iters - warmup_iters) * args.batch_size) / (end - start)
+        print("Latency: {:.3f} ms".format(latency * 10**3))
+        print("Throughput: {:.3f} samples/sec".format(throughput))
+        print('Batch size = %d' % args.batch_size)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/README.md b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/README.md
new file mode 100644
index 00000000000..a17eb188d0a
--- /dev/null
+++ b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/README.md
@@ -0,0 +1,100 @@
+Step-by-Step
+============
+
+This document provides step-by-step instructions for reproducing PyTorch tuning results with Intel® Neural Compressor.
+
+# Prerequisite
+
+## 1. Environment
+
+The examples are verified with the IPEX backend on Python 3.10 (recommended).
+
+```shell
+pip install -r requirements.txt
+```
+
+## 2. Install Intel-Pytorch-Extension
+
+Please refer to [intel/intel-extension-for-pytorch(github.com)](https://github.com/intel/intel-extension-for-pytorch).
+
+### Install IPEX CPU
+
+  > Note: The GCC 9 compiler is recommended.
+
+  ```shell
+  python -m pip install intel_extension_for_pytorch -f https://software.intel.com/ipex-whl-stable
+  ```
+
+### Install IPEX XPU
+Please build an IPEX docker container according to the [official guide](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu&version=v2.1.30%2bxpu&os=linux%2fwsl2&package=docker).
+
+You can run a simple sanity test to confirm that the correct version is installed and that the software stack can detect the hardware on your system. The command should print the installed PyTorch and IPEX versions, as well as information about the detected GPU card(s).
+```bash
+source {DPCPPROOT}/env/vars.sh
+source {MKLROOT}/env/vars.sh
+source {CCLROOT}/env/vars.sh
+source {MPIROOT}/env/vars.sh
+python -c "import torch; import intel_extension_for_pytorch as ipex; print(torch.__version__); print(ipex.__version__); [print(f'[{i}]: {torch.xpu.get_device_properties(i)}') for i in range(torch.xpu.device_count())];"
+```
+Please also refer to this [tutorial](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu&version=v2.1.30%2bxpu&os=linux%2fwsl2&package=conda) to check system requirements and install dependencies.
+
+## 3. Prepare Dataset
+
+Download the raw [ImageNet](http://www.image-net.org/) images to a directory, e.g. /path/to/imagenet. The directory should contain the folders below:
+
+```bash
+ls /path/to/imagenet
+train val
+```
+
+# Run with CPU
+
+> Note: All torchvision model names can be passed as long as they are included in `torchvision.models`, below are some examples.
+
+### 1. ResNet18 With Intel PyTorch Extension
+
+```shell
+python main.py -t -a resnet18 --ipex --pretrained /path/to/imagenet
+```
+or
+```shell
+bash run_quant.sh --input_model=resnet18 --dataset_location=/path/to/imagenet
+bash run_benchmark.sh --input_model=resnet18 --dataset_location=/path/to/imagenet --mode=performance/accuracy --int8=true/false
+```
+
+### 2.
ResNet50 With Intel PyTorch Extension + +```shell +python main.py -t -a resnet50 --ipex --pretrained /path/to/imagenet +``` +or +```shell +bash run_quant.sh --input_model=resnet50 --dataset_location=/path/to/imagenet +bash run_benchmark.sh --input_model=resnet50 --dataset_location=/path/to/imagenet --mode=performance/accuracy --int8=true/false +``` + +### 3. ResNext101_32x16d With Intel PyTorch Extension + +```shell +python main.py -t -a resnext101_32x16d_wsl --hub --ipex --pretrained /path/to/imagenet +``` +or +```shell +bash run_quant.sh --input_model=resnext101_32x16d_wsl --dataset_location=/path/to/imagenet +bash run_benchmark.sh --input_model=resnext101_32x16d_wsl --dataset_location=/path/to/imagenet --mode=performance/accuracy --int8=true/false +``` + +# Run with XPU + +> Note: All torchvision model names can be passed as long as they are included in `torchvision.models`, below are some examples. + +### 1. ResNet18 With Intel PyTorch Extension + +```shell +python main.py -t -a resnet18 --ipex --pretrained /path/to/imagenet --xpu +``` +or +```shell +bash run_quant.sh --input_model=resnet18 --dataset_location=/path/to/imagenet +bash run_benchmark.sh --input_model=resnet18 --dataset_location=/path/to/imagenet --mode=performance/accuracy --int8=true/false --xpu=true/false +``` diff --git a/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/main.py b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/main.py new file mode 100644 index 00000000000..a308aacad35 --- /dev/null +++ b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/main.py @@ -0,0 +1,551 @@ +import argparse +import os +import random +import shutil +import time +import warnings +import sys + +import torch +import torch.nn as nn +import torch.nn.parallel +use_gpu = False +if use_gpu: + import torch.backends.cudnn as cudnn +#import torch.backends.cudnn as cudnn +import torch.distributed as dist +import torch.optim +import torch.multiprocessing as mp +import torch.utils.data +import torch.utils.data.distributed +import torchvision.transforms as transforms +import torchvision.datasets as datasets +import torchvision.models.quantization as quantize_models +import torchvision.models as models +from neural_compressor.adaptor.pytorch import get_torch_version +from packaging.version import Version +import intel_extension_for_pytorch as ipex + + +model_names = models.list_models(module=models) + +torch.hub._validate_not_a_forked_repo=lambda a,b,c: True +hub_model_names = torch.hub.list('facebookresearch/WSL-Images') +model_names += hub_model_names + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--hub', action='store_true', default=False, + help='use model with torch hub') +parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet18)') +parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', + help='number of data loading workers (default: 4)') +parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', + 
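+                    # Flow summary (comment added for clarity): `-t/--tune` calibrates on the
+                    # validation loader and runs INC prepare()/convert(), saving the INT8 model
+                    # under --tuned_checkpoint; `--accuracy` / `--performance` then either reload
+                    # that INT8 model (with --int8) or run the FP32 model after ipex.optimize().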
help='mini-batch size (default: 256), this is the total ' + 'batch size of all GPUs on the current node when ' + 'using Data Parallel or Distributed Data Parallel') +parser.add_argument('--steps', default=-1, type=int, + help='steps for validation') +parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, + metavar='LR', help='initial learning rate', dest='lr') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') +parser.add_argument('-p', '--print-freq', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', + help='evaluate model on validation set') +parser.add_argument('-t', '--tune', dest='tune', action='store_true', + help='tune best int8 model on calibration dataset') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--world-size', default=-1, type=int, + help='number of nodes for distributed training') +parser.add_argument('--rank', default=-1, type=int, + help='node rank for distributed training') +parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, + help='url used to set up distributed training') +parser.add_argument('--dist-backend', default='nccl', type=str, + help='distributed backend') +parser.add_argument('--seed', default=None, type=int, + help='seed for initializing training. ') +parser.add_argument('--gpu', default=None, type=int, + help='GPU id to use.') +parser.add_argument('--ppn', default=1, type=int, + help='number of processes on each node of distributed training') +parser.add_argument('--multiprocessing-distributed', action='store_true', + help='Use multi-processing distributed training to launch ' + 'N processes per node, which has N GPUs. This is the ' + 'fastest way to use PyTorch for either single node or ' + 'multi node data parallel training') +parser.add_argument('-i', "--iter", default=0, type=int, + help='For accuracy measurement only.') +parser.add_argument('-w', "--warmup_iter", default=5, type=int, + help='For benchmark measurement only.') +parser.add_argument('--performance', dest='performance', action='store_true', + help='run benchmark') +parser.add_argument('-r', "--accuracy", dest='accuracy', action='store_true', + help='For accuracy measurement only.') +parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', + help='path to checkpoint tuned by Neural Compressor (default: ./)') +parser.add_argument('--int8', dest='int8', action='store_true', + help='run benchmark') +parser.add_argument('--ipex', dest='ipex', action='store_true', + help='tuning or benchmark with Intel PyTorch Extension') +parser.add_argument("--calib_iters", default=512, type=int, + help="calibration iters.") +parser.add_argument('--xpu', action='store_true', + help='whether use xpu') + +best_acc1 = 0 + + +def main(): + args = parser.parse_args() + print(args) + + if args.seed is not None: + random.seed(args.seed) + torch.manual_seed(args.seed) + cudnn.deterministic = True + warnings.warn('You have chosen to seed training. 
' + 'This will turn on the CUDNN deterministic setting, ' + 'which can slow down your training considerably! ' + 'You may see unexpected behavior when restarting ' + 'from checkpoints.') + + if args.gpu is not None: + warnings.warn('You have chosen a specific GPU. This will completely ' + 'disable data parallelism.') + + if args.dist_url == "env://" and args.world_size == -1: + args.world_size = int(os.environ["WORLD_SIZE"]) + + args.distributed = args.world_size > 1 or args.ppn > 1 or args.multiprocessing_distributed + + if use_gpu: + ngpus_per_node = torch.cuda.device_count() + else: + ngpus_per_node = args.ppn + + #ngpus_per_node = torch.cuda.device_count() + if args.multiprocessing_distributed: + # Since we have ngpus_per_node processes per node, the total world_size + # needs to be adjusted accordingly + args.world_size = ngpus_per_node * args.world_size + # Use torch.multiprocessing.spawn to launch distributed processes: the + # main_worker process function + mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) + else: + # Simply call main_worker function + main_worker(args.gpu, ngpus_per_node, args) + + +def main_worker(gpu, ngpus_per_node, args): + global best_acc1 + pytorch_version = get_torch_version() + #args.gpu = gpu + #affinity = subprocess.check_output("lscpu | grep 'NUMA node[0-9]' | awk '{ print $4 }' | awk -F',' '{ print $1 }'", shell=True) + #os.environ['OMP_NUM_THREADS'] = '28' + #os.environ['KMP_AFFINITY'] = 'proclist=[{}],granularity=thread,explicit'.format(affinity.splitlines()[gpu].decode('utf-8')) + #print (os.environ['KMP_AFFINITY']) + + #if args.gpu is not None: + # print("Use GPU: {} for training".format(args.gpu)) + print("Use CPU: {} for training".format(gpu)) + + if args.distributed: + if args.dist_url == "env://" and args.rank == -1: + args.rank = int(os.environ["RANK"]) + if args.multiprocessing_distributed: + # For multiprocessing distributed training, rank needs to be the + # global rank among all the processes + args.rank = args.rank * ngpus_per_node + gpu + dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + + if args.hub: + torch.set_flush_denormal(True) + model = torch.hub.load('facebookresearch/WSL-Images', args.arch) + else: + # create model + if args.pretrained: + print("=> using pre-trained model '{}'".format(args.arch)) + if args.ipex or pytorch_version >= Version("1.7.0-rc1"): + model = models.__dict__[args.arch](pretrained=True) + else: + model = quantize_models.__dict__[args.arch](pretrained=True, quantize=False) + else: + print("=> creating model '{}'".format(args.arch)) + if args.ipex: + model = models.__dict__[args.arch]() + else: + model = quantize_models.__dict__[args.arch]() + + if args.ipex and not args.int8: + model = model.to(memory_format=torch.channels_last) + + if not torch.cuda.is_available(): + print('using CPU...') + elif args.distributed: + # For multiprocessing distributed, DistributedDataParallel constructor + # should always set the single device scope, otherwise, + # DistributedDataParallel will use all available devices. 
+ if args.gpu is not None: + torch.cuda.set_device(args.gpu) + model.cuda(args.gpu) + # When using a single GPU per process and per + # DistributedDataParallel, we need to divide the batch size + # ourselves based on the total number of GPUs we have + args.batch_size = int(args.batch_size / ngpus_per_node) + args.workers = int(args.workers / ngpus_per_node) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + else: + #model.cuda() + # DistributedDataParallel will divide and allocate batch_size to all + # available GPUs if device_ids are not set + model = torch.nn.parallel.DistributedDataParallelCPU(model) + elif args.gpu is not None: + torch.cuda.set_device(args.gpu) + model = model.cuda(args.gpu) + else: + # DataParallel will divide and allocate batch_size to all available GPUs + if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): + model.features = torch.nn.DataParallel(model.features) + model.cuda() + else: + model = torch.nn.DataParallel(model) + if args.xpu: + model = model.to("xpu") + # define loss function (criterion) and optimizer + criterion = nn.CrossEntropyLoss() + #criterion = nn.CrossEntropyLoss().cuda(args.gpu) + + optimizer = torch.optim.SGD(model.parameters(), args.lr, + momentum=args.momentum, + weight_decay=args.weight_decay) + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + checkpoint = torch.load(args.resume) + args.start_epoch = checkpoint['epoch'] + best_acc1 = checkpoint['best_acc1'] + if args.gpu is not None: + # best_acc1 may be from a checkpoint from a different GPU + best_acc1 = best_acc1.to(args.gpu) + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + #cudnn.benchmark = True + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_dataset = datasets.ImageFolder( + traindir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])) + + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) + else: + train_sampler = None + + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), + num_workers=args.workers, pin_memory=True, sampler=train_sampler) + + val_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])), + batch_size=args.batch_size, shuffle=False, + num_workers=args.workers, pin_memory=True) + + if args.evaluate: + validate(val_loader, model, criterion, args) + + def eval_func(model): + accu = validate(val_loader, model, criterion, args) + return float(accu) + + if args.tune: + from neural_compressor.torch.quantization import get_default_static_config + quant_config = get_default_static_config() + + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + from tqdm import tqdm + def run_fn(model): + calib_iter = 0 + for batch in tqdm(val_loader, total=args.calib_iters): + batch = 
move_input_to_device(batch, device=None) + if isinstance(batch, tuple) or isinstance(batch, list): + model(batch[0]) + elif isinstance(batch, dict): + model(**batch) + else: + model(batch) + + calib_iter += 1 + if calib_iter >= args.calib_iters: + break + return + + from utils import get_example_inputs + example_inputs = get_example_inputs(model, val_loader) + + from neural_compressor.torch.quantization import prepare, convert + model = prepare(model=model, quant_config=quant_config, example_inputs=example_inputs) + run_fn(model) + q_model = convert(model) + q_model.save(args.tuned_checkpoint) + return + + if args.performance or args.accuracy: + model.eval() + if args.int8: + print("load int8 model") + from neural_compressor.torch.quantization import load + model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint))) + else: + from utils import get_example_inputs + example_inputs = get_example_inputs(model, val_loader) + model = ipex.optimize(model) + with torch.no_grad(): + model = torch.jit.trace(model, example_inputs) + model = torch.jit.freeze(model) + + if args.performance: + from neural_compressor.config import BenchmarkConfig + from neural_compressor import benchmark + b_conf = BenchmarkConfig(warmup=5, + iteration=args.iter, + cores_per_instance=4, + num_of_instance=1) + benchmark.fit(model, b_conf, b_dataloader=val_loader) + if args.accuracy: + validate(val_loader, model, criterion, args) + return + + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + adjust_learning_rate(optimizer, epoch, args) + + # train for one epoch + train(train_loader, model, criterion, optimizer, epoch, args) + + # evaluate on validation set + acc1 = validate(val_loader, model, criterion, args) + + # remember best acc@1 and save checkpoint + is_best = acc1 > best_acc1 + best_acc1 = max(acc1, best_acc1) + + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank % ngpus_per_node == 0): + save_checkpoint({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + 'best_acc1': best_acc1, + 'optimizer' : optimizer.state_dict(), + }, is_best) + +def train(train_loader, model, criterion, optimizer, epoch, args): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1, + top5, prefix="Epoch: [{}]".format(epoch)) + + # switch to train mode + model.train() + + end = time.time() + for i, (input, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(input) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.print(i) + + +def validate(val_loader, model, criterion, args): + batch_time = AverageMeter('Time', ':6.3f') + losses = 
AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, + prefix='Test: ') + + # switch to evaluate mode + with torch.no_grad(): + for i, (input, target) in enumerate(val_loader): + input = input.contiguous(memory_format=torch.channels_last) + if i >= args.warmup_iter: + start = time.time() + if args.gpu is not None: + input = input.cuda(args.gpu, non_blocking=True) + target = target.cuda(args.gpu, non_blocking=True) + if args.xpu: + input = input.to("xpu") + target = target.to("xpu") + + # compute output + output = model(input) + + # measure elapsed time + if i >= args.warmup_iter: + batch_time.update(time.time() - start) + + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1[0], input.size(0)) + top5.update(acc5[0], input.size(0)) + + + if i % args.print_freq == 0: + progress.print(i) + + if args.iter > 0 and i >= (args.warmup_iter + args.iter - 1): + break + + print('Batch size = %d' % args.batch_size) + print('Accuracy: {top1:.5f} Accuracy@5 {top5:.5f}' + .format(top1=(top1.avg / 100), top5=(top5.avg / 100))) + + return top1.avg/100 + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 'model_best.pth.tar') + + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +class ProgressMeter(object): + def __init__(self, num_batches, *meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def print(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print('\t'.join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = '{:' + str(num_digits) + 'd}' + return '[' + fmt + '/' + fmt.format(num_batches) + ']' + + +def adjust_learning_rate(optimizer, epoch, args): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = args.lr * (0.1 ** (epoch // 30)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + main() diff --git a/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/requirements.txt b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/requirements.txt new 
file mode 100644 index 00000000000..94f1a7356fe --- /dev/null +++ b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/requirements.txt @@ -0,0 +1,3 @@ +neural-compressor +torch>=1.9.0 +torchvision>=0.10.0 diff --git a/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/run_benchmark.sh b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/run_benchmark.sh new file mode 100644 index 00000000000..f5a2e251554 --- /dev/null +++ b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/run_benchmark.sh @@ -0,0 +1,89 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=32 + tuned_checkpoint=saved_results + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --xpu=*) + xpu=$(echo ${var} |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy" + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --iter ${iters} --performance " + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + + extra_cmd="--ipex" + if [ "resnext101_32x16d_wsl_ipex" = "${topology}" ];then + extra_cmd=$extra_cmd" --hub" + fi + + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + + if [[ ${xpu} == "true" ]]; then + extra_cmd=$extra_cmd" --xpu" + fi + echo $extra_cmd + + + python main.py \ + --pretrained \ + --tuned_checkpoint ${tuned_checkpoint} \ + -b ${batch_size} \ + -a ${input_model} \ + ${mode_cmd} \ + ${extra_cmd} \ + ${dataset_location} +} + +main "$@" diff --git a/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/run_quant.sh b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/run_quant.sh new file mode 100644 index 00000000000..5595b069671 --- /dev/null +++ b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/run_quant.sh @@ -0,0 +1,58 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + output_model=saved_results + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + extra_cmd="--ipex" + if [ -n "$output_model" ];then + extra_cmd=$extra_cmd" --tuned_checkpoint ${output_model}" + fi + if [[ "${topology}" == "resnext101_32x16d_wsl"* ]];then + extra_cmd=$extra_cmd" --hub " + fi + extra_cmd=$extra_cmd" ${dataset_location}" + + python main.py \ + --pretrained \ + -t \ + -a $input_model \ + -b 30 \ + ${extra_cmd} + +} + +main "$@" diff --git 
a/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/utils.py b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/utils.py new file mode 100644 index 00000000000..76117f8b0b5 --- /dev/null +++ b/examples/3.x_api/pytorch/image_recognition/torchvision_models/quantization/static_quant/ipex/utils.py @@ -0,0 +1,47 @@ +import torch +from collections import UserDict +from packaging.version import Version +from neural_compressor.torch.utils import get_torch_version + +def get_example_inputs(model, dataloader): + version = get_torch_version() + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + + # The dataloader is expected to be set up like the calibration dataloader + if dataloader is None: + return None + device = next(model.parameters()).device + try: + for idx, (input, label) in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC supports IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, (list, tuple)): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + except Exception as e: # pragma: no cover + for idx, input in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC supports IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, list) or isinstance(input, tuple): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + if idx == 0: + assert False, "Please check the example_inputs format." diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/README.md deleted file mode 100644 index 1659ae41e75..00000000000 --- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/README.md +++ /dev/null @@ -1,168 +0,0 @@ -Step-by-Step -============ -This document describes the step-by-step instructions to run large language models (LLMs) on 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) with PyTorch and Intel® Extension for PyTorch. - -The script `run_clm_no_trainer.py` supports `GPTJ`, `OPT`, `LLaMA2`, `BLOOM` and `Falcon` quantization and validates last word prediction accuracy with [lm_eval](https://github.com/EleutherAI/lm-evaluation-harness.git) now, and we are adding more models. - -# Prerequisite -## 1. Create Environment -``` -# Installation -pip install -r requirements.txt -``` - -# Run - -Here is how to run the scripts: - -**Causal Language Modeling (CLM)** - -`run_clm_no_trainer.py` quantizes the large language models using the dataset [NeelNanda/pile-10k](https://huggingface.co/datasets/NeelNanda/pile-10k) calibration and validates `lambada_openai`, `piqa`, `winogrande`, `hellaswag` and other datasets accuracy provided by lm_eval, an example command is as follows.
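As an aside on the new `static_quant/ipex/utils.py` added above: the sketch below illustrates one way its `get_example_inputs` helper can feed the `prepare`/`convert` flow from `neural_compressor.torch.quantization` for IPEX static quantization. The `model`, `calib_dataloader`, and `calib_iters` names are placeholders introduced here for illustration, so read this as a hedged sketch under those assumptions rather than the repository's own example code.

```python
# Illustrative sketch only: `model` and `calib_dataloader` are assumed to be provided by the caller.
import torch
from neural_compressor.torch.quantization import get_default_static_config, prepare, convert
from utils import get_example_inputs  # the helper defined in the new utils.py

def ipex_static_quantize(model, calib_dataloader, calib_iters=10):
    quant_config = get_default_static_config()
    # Derive example inputs from the first calibration batch (see get_example_inputs above).
    example_inputs = get_example_inputs(model, calib_dataloader)
    prepared = prepare(model=model, quant_config=quant_config, example_inputs=example_inputs)
    # Run a small number of calibration batches through the prepared model.
    with torch.no_grad():
        for i, (images, _) in enumerate(calib_dataloader):
            prepared(images)
            if i + 1 >= calib_iters:
                break
    return convert(prepared)
```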
-### GPT-J-6b - -#### Quantization -```bash -# "--sq" is used to enable smooth quant -python run_clm_no_trainer.py \ - --model EleutherAI/gpt-j-6B \ - --quantize \ - --sq \ - --alpha 1.0 \ - --ipex \ - --output_dir "saved_results" -``` -**Notes**: Smooth quantization here is based on torch.jit. Without past key value in example_inputs, the quantized model cannot be used for text-generation. - -```bash -# "--approach weight_only" is used to enable weight only quantization. -# "--woq_algo GPTQ" is used to enable GPTQ algorithms -# "--double_quant_type BNB_NF4" is used to enable double quant algorithms -python run_clm_no_trainer.py \ - --model EleutherAI/gpt-j-6B \ - --dataset NeelNanda/pile-10k \ - --quantize \ - --approach weight_only \ - --woq_algo GPTQ \ - --woq_bits 4 \ - --woq_scheme asym \ - --woq_group_size 128 \ - --gptq_max_seq_length 2048 \ - --gptq_use_max_length \ - --accuracy \ - --tasks "lambada_openai" \ - --double_quant_type "BNB_NF4" - -# "--woq_algo RTN" is used to enable RTN algorithms -python run_clm_no_trainer.py \ - --model EleutherAI/gpt-j-6B \ - --dataset NeelNanda/pile-10k \ - --quantize \ - --approach weight_only \ - --woq_algo RTN \ - --woq_bits 4 \ - --woq_scheme asym \ - --woq_group_size 128 \ - --accuracy \ - --tasks "lambada_openai" \ - --double_quant_type "BNB_NF4" -``` -**Notes**: Weight-only quantization based on fake quantization is previewly supported and supports RTN, GPTQ[1], AWQ[2], TEQ algorithms. For more details, please refer to [link](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization_weight_only.md). Our GPTQ API support various CLMs including GPTJ, OPTs, Blooms, Llamas, Falcons, MPTs, ChatGLMs, etc. Simply replace the "--model" argument with other models to quantize different CLMs with GPTQ. - - -### OPT-125m - -#### Quantization - -```bash -# "--sq" is used to enable smooth quant -python run_clm_no_trainer.py \ - --model facebook/opt-125m \ - --quantize \ - --sq \ - --alpha 0.5 \ - --ipex \ - --output_dir "saved_results" - -# "--approach weight_only" is used to enable weight only quantization. -# "--woq_algo GPTQ" is used to enable GPTQ algorithms -# "--double_quant_type BNB_NF4" is used to enable double quant algorithms -python run_clm_no_trainer.py \ - --model facebook/opt-125m \ - --dataset NeelNanda/pile-10k \ - --quantize \ - --approach weight_only \ - --woq_algo GPTQ \ - --woq_bits 4 \ - --woq_scheme asym \ - --woq_group_size 128 \ - --gptq_max_seq_length 2048 \ - --gptq_use_max_length \ - --accuracy \ - --tasks "lambada_openai" \ - --double_quant_type "BNB_NF4" - -# "--woq_algo RTN" is used to enable RTN algorithms -python run_clm_no_trainer.py \ - --model facebook/opt-125m \ - --dataset NeelNanda/pile-10k \ - --quantize \ - --approach weight_only \ - --woq_algo RTN \ - --woq_bits 4 \ - --woq_scheme asym \ - --woq_group_size 128 \ - --accuracy \ - --tasks "lambada_openai" \ - --double_quant_type "BNB_NF4" -``` - -### LLAMA2-7b/13b/70b ->Note: LLAMA requires IPEX requirements >= 2.1 to get better accuracy. -#### Quantization - -```bash -# "--sq" is used to enable smooth quant -python run_clm_no_trainer.py \ - --model meta-llama/Llama-2-7b-hf \ - --quantize \ - --sq \ - --alpha 0.8 \ - --ipex \ - --output_dir "saved_results" - -# "--approach weight_only" is used to enable weight only quantization. 
-# "--double_quant_type BNB_NF4" is used to enable double quant algorithms -# "--woq_algo GPTQ" is used to enable GPTQ algorithms -python run_clm_no_trainer.py \ - --model meta-llama/Llama-2-7b-hf \ - --dataset NeelNanda/pile-10k \ - --quantize \ - --approach weight_only \ - --woq_algo GPTQ \ - --woq_bits 4 \ - --woq_scheme asym \ - --woq_group_size 128 \ - --gptq_max_seq_length 2048 \ - --gptq_use_max_length \ - --accuracy \ - --tasks "lambada_openai" \ - --double_quant_type "BNB_NF4" - -# "--woq_algo RTN" is used to enable RTN algorithms -python run_clm_no_trainer.py \ - --model meta-llama/Llama-2-7b-hf \ - --dataset NeelNanda/pile-10k \ - --quantize \ - --approach weight_only \ - --woq_algo RTN \ - --woq_bits 4 \ - --woq_scheme asym \ - --woq_group_size 128 \ - --accuracy \ - --tasks "lambada_openai" \ - --double_quant_type "BNB_NF4" -``` - - -[1]. Elias, Frantar, et al. "GPTQ: Accurate Post-training Compression for Generative Pretrained Transformers." arXiv preprint arXiv:2210.17323 (2023). -[2]. Lin, Ji, et al. "AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration." arXiv preprint arXiv:2306.00978 (2023). diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_benchmark.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_benchmark.sh deleted file mode 100644 index 8002b61ad10..00000000000 --- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_benchmark.sh +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - iters=100 - batch_size=16 - approach=static - tuned_checkpoint=saved_results - task=lambada_openai - echo ${max_eval_samples} - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --int8=*) - int8=$(echo ${var} |cut -f2 -d=) - ;; - --config=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - - -# run_benchmark -function run_benchmark { - extra_cmd='' - - if [[ ${mode} == "accuracy" ]]; then - mode_cmd=" --accuracy " - elif [[ ${mode} == "performance" ]]; then - mode_cmd=" --performance --iters "${iters} - else - echo "Error: No such mode: ${mode}" - exit 1 - fi - - if [[ ${int8} == "true" ]]; then - extra_cmd=$extra_cmd" --int8" - fi - echo $extra_cmd - - if [ "${topology}" = "opt_125m_woq_gptq_int4" ]; then - model_name_or_path="facebook/opt-125m" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_bnb" ]; then - model_name_or_path="facebook/opt-125m" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_ggml" ]; then - model_name_or_path="facebook/opt-125m" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 
--woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length --gptq_percdamp 0.1 --gptq_actorder" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "opt_125m_ipex" ]; then - model_name_or_path="facebook/opt-125m" - extra_cmd=$extra_cmd" --ipex" - elif [ "${topology}" = "opt_125m_ipex_sq" ]; then - model_name_or_path="facebook/opt-125m" - extra_cmd=$extra_cmd" --ipex --sq --alpha 0.5" - elif [ "${topology}" = "llama2_7b_gptq_int4" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - elif [ "${topology}" = "llama2_7b_gptq_int4_dq_bnb" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "llama2_7b_gptq_int4_dq_ggml" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "llama2_7b_ipex" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - extra_cmd=$extra_cmd" --ipex" - elif [ "${topology}" = "llama2_7b_ipex_sq" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - extra_cmd=$extra_cmd" --ipex --sq --alpha 0.8" - elif [ "${topology}" = "gpt_j_woq_rtn_int4" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" - elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_bnb" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_ggml" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "gpt_j_woq_gptq_int4" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_bnb" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_ggml" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "gpt_j_ipex" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - extra_cmd=$extra_cmd" --ipex" - elif [ "${topology}" = "gpt_j_ipex_sq" ]; then - 
model_name_or_path="EleutherAI/gpt-j-6b" - extra_cmd=$extra_cmd" --ipex --sq --alpha 1.0" - fi - - python -u run_clm_no_trainer.py \ - --model ${model_name_or_path} \ - --approach ${approach} \ - --output_dir ${tuned_checkpoint} \ - --task ${task} \ - --batch_size ${batch_size} \ - ${extra_cmd} ${mode_cmd} -} - -main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_clm_no_trainer.py deleted file mode 100644 index e8ee92cb7c0..00000000000 --- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_clm_no_trainer.py +++ /dev/null @@ -1,434 +0,0 @@ -import argparse -import os -import sys - -sys.path.append('./') -import time -import json -import re -import torch -from datasets import load_dataset -import datasets -from torch.nn.functional import pad -from torch.utils.data import DataLoader -from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer - -parser = argparse.ArgumentParser() -parser.add_argument( - "--model", nargs="?", default="EleutherAI/gpt-j-6b" -) -parser.add_argument( - "--trust_remote_code", default=True, - help="Transformers parameter: use the external repo") -parser.add_argument( - "--revision", default=None, - help="Transformers parameter: set the model hub commit number") -parser.add_argument("--dataset", nargs="?", default="NeelNanda/pile-10k", const="NeelNanda/pile-10k") -parser.add_argument("--output_dir", nargs="?", default="./saved_results") -parser.add_argument("--quantize", action="store_true") -parser.add_argument( - "--int8_bf16_mixed", - action="store_true", - help="By default it is int8-fp32 mixed, to enable int8 mixed amp bf16 (work on platforms like SPR)", -) -parser.add_argument( - '--seed', - type=int, default=42, help='Seed for sampling the calibration data.' 
-) -parser.add_argument("--approach", type=str, default='static', - help="Select from ['dynamic', 'static', 'weight-only']") -parser.add_argument("--int8", action="store_true") -parser.add_argument("--ipex", action="store_true", help="Use intel extension for pytorch.") -parser.add_argument("--accuracy", action="store_true") -parser.add_argument("--performance", action="store_true") -parser.add_argument("--iters", default=100, type=int, - help="For accuracy measurement only.") -parser.add_argument("--batch_size", default=1, type=int, - help="For accuracy measurement only.") -parser.add_argument("--save_accuracy_path", default=None, - help="Save accuracy results path.") -parser.add_argument("--pad_max_length", default=512, type=int, - help="Pad input ids to max length.") -parser.add_argument("--calib_iters", default=512, type=int, - help="calibration iters.") -parser.add_argument("--tasks", default="lambada_openai,hellaswag,winogrande,piqa,wikitext", - type=str, help="tasks for accuracy validation") -parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") -# ============SmoothQuant configs============== -parser.add_argument("--sq", action="store_true") -parser.add_argument("--alpha", default="auto", help="Smooth quant parameter.") -# ============WeightOnly configs=============== -parser.add_argument("--woq_algo", default="RTN", choices=['RTN', 'AWQ', 'TEQ', 'GPTQ'], - help="Weight-only parameter.") -parser.add_argument("--woq_bits", type=int, default=8) -parser.add_argument("--woq_dtype", type=str, default="int") -parser.add_argument("--woq_group_size", type=int, default=-1) -parser.add_argument("--woq_group_dim", type=int, default=1) -parser.add_argument("--woq_scheme", default="sym") -parser.add_argument("--woq_use_mse_search", action="store_true") -parser.add_argument("--woq_use_full_range", action="store_true") -# =============GPTQ configs==================== -parser.add_argument("--gptq_actorder", action="store_true", - help="Whether to apply the activation order GPTQ heuristic.") -parser.add_argument('--gptq_percdamp', type=float, default=.01, - help='Percent of the average Hessian diagonal to use for dampening.') -parser.add_argument('--gptq_block_size', type=int, default=128, help='Block size. sub weight matrix size to run GPTQ.') -parser.add_argument('--gptq_static_groups', action="store_true", - help="Whether to calculate group wise quantization parameters in advance. 
" - "This option mitigate actorder's extra computational requirements.") -parser.add_argument('--gptq_nsamples', type=int, default=128, help='Number of calibration data samples.') -parser.add_argument('--gptq_use_max_length', action="store_true", - help='Set all sequence length to be same length of args.gptq_max_seq_length') -parser.add_argument('--gptq_max_seq_length', type=int, default=2048, - help='Calibration dataset sequence max length, ' - 'this should align with your model config, ' - 'and your dataset builder args: args.pad_max_length') - -# =============DoubleQuant configs==================== -parser.add_argument("--double_quant_type", - type=str, - default=None, - choices=['GGML_TYPE_Q4_K', 'BNB_NF4'], - help="DoubleQuant parameter") -parser.add_argument("--double_quant_dtype", - type=str, - default="fp32", - help="Data type for double quant scale.") -parser.add_argument("--double_quant_bits", - type=int, - default=8, - help="Number of bits used to represent double_quant scale.") -parser.add_argument("--double_quant_use_sym", - type=bool, - default=True, - help="Indicates whether double quant scale are symmetric.") -parser.add_argument("--double_quant_group_size", - type=int, - default=256, - help="Size of double quant groups.") -# ======================================= - -args = parser.parse_args() -if args.ipex: - import intel_extension_for_pytorch as ipex -calib_size = 1 - - -class Evaluator: - def __init__(self, dataset, tokenizer, batch_size=8, pad_val=1, pad_max=196, is_calib=False): - self.dataset = dataset - self.tokenizer = tokenizer - self.batch_size = batch_size - self.pad_val = pad_val - self.pad_max = pad_max - self.is_calib = is_calib - - # tokenize the dataset - self.dataset = self.dataset.map(self.tokenize_function, batched=True) - self.dataset.set_format(type="torch", columns=["input_ids"]) - - @torch.no_grad() - def tokenize_function(self, examples): - if args.woq_algo in ['TEQ']: - if self.tokenizer.pad_token is None: - self.tokenizer.pad_token = self.tokenizer.eos_token - example = self.tokenizer(examples["text"], padding="max_length", max_length=self.pad_max) - else: - example = self.tokenizer(examples["text"]) - return example - - @torch.no_grad() - def collate_batch(self, batch): - - input_ids_padded = [] - last_ind = [] - - for text in batch: - input_ids = text["input_ids"] - pad_len = self.pad_max - input_ids.shape[0] - last_ind.append(input_ids.shape[0] - 1) - if self.is_calib: - if args.woq_algo != 'GPTQ': - input_ids = input_ids[:self.pad_max] if len(input_ids) > self.pad_max else input_ids - else: - input_ids = pad(input_ids, (0, pad_len), value=self.pad_val) - input_ids_padded.append(input_ids) - - return (torch.vstack(input_ids_padded), torch.tensor(last_ind)) - - @torch.no_grad() - def evaluate(self, model): - model.eval() - # The task is to predict the last word of the input. 
- total, hit = 0, 0 - latency = 0 - test_dataloader = DataLoader( - self.dataset, - batch_size=self.batch_size, - shuffle=False, - collate_fn=self.collate_batch, - ) - for i, (input_ids, last_ind) in enumerate(test_dataloader): - label = input_ids[torch.arange(len(last_ind)), last_ind] - input_ids[torch.arange(len(last_ind)), last_ind] = self.pad_val - pad_len = self.pad_max - last_ind - 1 - - start = time.time() - outputs = model(input_ids) - latency += time.time() - start - - last_token_logits = outputs[0][torch.arange(len(last_ind)), -2 - pad_len, :] - pred = last_token_logits.argmax(dim=-1) - total += label.size(0) - hit += (pred == label).sum().item() - if (i + 1) % 50 == 0: - print(hit / total) - print("Processed minibatch:", i) - - acc = hit / total - print("Accuracy: ", acc) - print("Latency: ", latency) - return acc - - -def get_user_model(): - torchscript = False - if args.sq or args.ipex or args.woq_algo in ['AWQ', 'TEQ']: - torchscript = True - user_model = AutoModelForCausalLM.from_pretrained( - args.model, - torchscript=torchscript, # torchscript will force `return_dict=False` to avoid jit errors - trust_remote_code=args.trust_remote_code, - revision=args.revision, - ) - tokenizer = AutoTokenizer.from_pretrained(args.model) - if args.approach == 'weight_only': - user_model = user_model.float() - - # Set model's seq_len when GPTQ calibration is enabled. - if args.woq_algo == 'GPTQ': - user_model.seqlen = args.gptq_max_seq_length - - if args.peft_model_id is not None: - from peft import PeftModel - user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) - - # to channels last - user_model = user_model.to(memory_format=torch.channels_last) - user_model.eval() - return user_model, tokenizer - - -if args.quantize: - # dataset - user_model, tokenizer = get_user_model() - calib_dataset = load_dataset(args.dataset, split="train") - # calib_dataset = datasets.load_from_disk('/your/local/dataset/pile-10k/') # use this if trouble with connecting to HF - calib_dataset = calib_dataset.shuffle(seed=args.seed) - calib_evaluator = Evaluator(calib_dataset, tokenizer, args.batch_size, pad_max=args.pad_max_length, is_calib=True) - calib_dataloader = DataLoader( - calib_evaluator.dataset, - batch_size=calib_size, - shuffle=False, - collate_fn=calib_evaluator.collate_batch, - ) - - # 3.x api - if args.approach == 'weight_only': - from neural_compressor.torch.quantization import RTNConfig, GPTQConfig, prepare, convert, quantize - from neural_compressor.torch.utils import get_double_quant_config - weight_sym = True if args.woq_scheme == "sym" else False - double_quant_config_dict = get_double_quant_config(args.double_quant_type) - - if args.woq_algo == "RTN": - if args.double_quant_type is not None: - double_quant_config_dict.update( - { - # TODO: add group_dim into double quant config? 
- "use_full_range": args.woq_use_full_range, - "use_mse_search": args.woq_use_mse_search, - } - ) - quant_config = RTNConfig.from_dict(double_quant_config_dict) - else: - quant_config = RTNConfig( - dtype=args.woq_dtype, - bits=args.woq_bits, - use_sym=weight_sym, - group_size=args.woq_group_size, - group_dim=args.woq_group_dim, - use_full_range=args.woq_use_full_range, - use_mse_search=args.woq_use_mse_search, - use_double_quant=False, - double_quant_bits=args.double_quant_bits, - double_quant_dtype=args.double_quant_dtype, - double_quant_use_sym=args.double_quant_use_sym, - double_quant_group_size=args.double_quant_group_size, - ) - quant_config.set_local("lm_head", RTNConfig(dtype="fp32")) - user_model = prepare(model=user_model, quant_config=quant_config) - user_model = convert(model=user_model) - elif args.woq_algo == "GPTQ": - from utils import DataloaderPreprocessor - dataloaderPreprocessor = DataloaderPreprocessor( - dataloader_original=calib_dataloader, - use_max_length=args.gptq_use_max_length, - max_seq_length=args.gptq_max_seq_length, - ) - dataloader_for_calibration = dataloaderPreprocessor.get_prepared_dataloader() - from neural_compressor.torch.algorithms.weight_only.utility import move_input_to_device - from tqdm import tqdm - def run_fn_for_gptq(model, dataloader_for_calibration, *args): - for batch in tqdm(dataloader_for_calibration): - batch = move_input_to_device(batch, device=None) - try: - if isinstance(batch, tuple) or isinstance(batch, list): - model(batch[0]) - elif isinstance(batch, dict): - model(**batch) - else: - model(batch) - except ValueError: - pass - return - if args.double_quant_type is not None: - double_quant_config_dict.update( - { - "use_mse_search": args.woq_use_mse_search, - "percdamp": args.gptq_percdamp, - "act_order": args.gptq_actorder, - "block_size": args.gptq_block_size, - "static_groups": args.gptq_static_groups, - } - ) - quant_config = GPTQConfig.from_dict(double_quant_config_dict) - else: - quant_config = GPTQConfig( - dtype=args.woq_dtype, - bits=args.woq_bits, - use_sym=weight_sym, - group_size=args.woq_group_size, - use_mse_search=args.woq_use_mse_search, - percdamp=args.gptq_percdamp, - act_order=args.gptq_actorder, - block_size=args.gptq_block_size, - static_groups=args.gptq_static_groups, - use_double_quant=False, - double_quant_bits=args.double_quant_bits, - double_quant_dtype=args.double_quant_dtype, - double_quant_use_sym=args.double_quant_use_sym, - double_quant_group_size=args.double_quant_group_size, - ) - quant_config.set_local("lm_head", GPTQConfig(dtype="fp32")) - user_model = prepare(model=user_model, quant_config=quant_config) - run_fn_for_gptq(user_model, dataloader_for_calibration) - user_model = convert(user_model) - else: - if args.sq: - from neural_compressor.torch.quantization import SmoothQuantConfig - - # alpha can be a float number of a list of float number. 
- args.alpha = args.alpha if args.alpha == "auto" else eval(args.alpha) - if re.search("falcon", user_model.config.model_type): - quant_config = SmoothQuantConfig(alpha=args.alpha, folding=False) - else: - quant_config = SmoothQuantConfig(alpha=args.alpha, folding=True) - - if re.search("gpt", user_model.config.model_type): - quant_config.set_local(torch.add, SmoothQuantConfig(w_dtype="fp32", act_dtype="fp32")) - else: - from neural_compressor.torch.quantization import get_default_static_config, StaticQuantConfig - - quant_config = get_default_static_config() - if re.search("gpt", user_model.config.model_type): - quant_config.set_local(torch.add, StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) - - from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device - from tqdm import tqdm - def run_fn(model): - for batch in tqdm(calib_dataloader): - batch = move_input_to_device(batch, device=None) - try: - if isinstance(batch, tuple) or isinstance(batch, list): - model(batch[0]) - elif isinstance(batch, dict): - model(**batch) - else: - model(batch) - except ValueError: - pass - return - - from utils import get_example_inputs - example_inputs = get_example_inputs(user_model, calib_dataloader) - - from neural_compressor.torch.quantization import prepare, convert - user_model = prepare(model=user_model, quant_config=quant_config, example_inputs=example_inputs) - run_fn(user_model) - user_model = convert(user_model) - - user_model.save(args.output_dir) - - -# TODO: we need run_benchmark.sh for loading and remove --accuracy in run_quant.sh, currently run_quant.sh will get fp32 result - -if args.int8 or args.int8_bf16_mixed: - print("load int8 model") - - from neural_compressor.torch.quantization import load - user_model, _ = get_user_model() - tokenizer = AutoTokenizer.from_pretrained(args.model) - config = AutoConfig.from_pretrained(args.model) - user_model = load(os.path.abspath(os.path.expanduser(args.output_dir)), user_model) - setattr(user_model, "config", config) -else: - user_model, tokenizer = get_user_model() - - -if args.accuracy: - user_model.eval() - from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser - eval_args = LMEvalParser( - model="hf", - user_model=user_model, - tokenizer=tokenizer, - batch_size=args.batch_size, - tasks=args.tasks, - device="cpu", - ) - results = evaluate(eval_args) - for task_name in args.tasks.split(","): - if task_name == "wikitext": - acc = results["results"][task_name]["word_perplexity,none"] - else: - acc = results["results"][task_name]["acc,none"] - print("Accuracy: %.5f" % acc) - print('Batch size = %d' % args.batch_size) - -if args.performance: - user_model.eval() - from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser - import time - - samples = args.iters * args.batch_size - eval_args = LMEvalParser( - model="hf", - user_model=user_model, - tokenizer=tokenizer, - batch_size=args.batch_size, - tasks=args.tasks, - limit=samples, - device="cpu", - ) - start = time.time() - results = evaluate(eval_args) - end = time.time() - for task_name in args.tasks.split(","): - if task_name == "wikitext": - acc = results["results"][task_name]["word_perplexity,none"] - else: - acc = results["results"][task_name]["acc,none"] - print("Accuracy: %.5f" % acc) - print('Throughput: %.3f samples/sec' % (samples / (end - start))) - print('Latency: %.3f ms' % ((end - start) * 1000 / samples)) - print('Batch size = %d' % args.batch_size) diff --git 
a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_quant.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_quant.sh deleted file mode 100644 index 3f95f44946e..00000000000 --- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/run_quant.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_tuning - -} - -# init params -function init_params { - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - tuned_checkpoint=$(echo $var |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# run_tuning -function run_tuning { - extra_cmd='' - batch_size=8 - approach='static' - DATASET_NAME="NeelNanda/pile-10k" - tuned_checkpoint="saved_results" - - if [ "${topology}" = "opt_125m_woq_gptq_int4" ]; then - model_name_or_path="facebook/opt-125m" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_bnb" ]; then - model_name_or_path="facebook/opt-125m" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_ggml" ]; then - model_name_or_path="facebook/opt-125m" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length --gptq_percdamp 0.1 --gptq_actorder" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "opt_125m_ipex" ]; then - model_name_or_path="facebook/opt-125m" - extra_cmd=$extra_cmd" --ipex" - elif [ "${topology}" = "opt_125m_ipex_sq" ]; then - model_name_or_path="facebook/opt-125m" - extra_cmd=$extra_cmd" --ipex --sq --alpha 0.5" - elif [ "${topology}" = "llama2_7b_gptq_int4" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - elif [ "${topology}" = "llama2_7b_gptq_int4_dq_bnb" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "llama2_7b_gptq_int4_dq_ggml" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "llama2_7b_ipex" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - extra_cmd=$extra_cmd" --ipex" - elif [ "${topology}" = "llama2_7b_ipex_sq" ]; then - model_name_or_path="meta-llama/Llama-2-7b-hf" - extra_cmd=$extra_cmd" --ipex --sq --alpha 0.8" - elif [ "${topology}" = "gpt_j_woq_rtn_int4" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" 
- approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" - elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_bnb" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_ggml" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "gpt_j_woq_gptq_int4" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_bnb" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" - elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_ggml" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - approach="weight_only" - extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" - extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" - elif [ "${topology}" = "gpt_j_ipex" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - extra_cmd=$extra_cmd" --ipex" - elif [ "${topology}" = "gpt_j_ipex_sq" ]; then - model_name_or_path="EleutherAI/gpt-j-6b" - extra_cmd=$extra_cmd" --ipex --sq --alpha 1.0" - fi - - python -u run_clm_no_trainer.py \ - --model ${model_name_or_path} \ - --dataset ${DATASET_NAME} \ - --quantize \ - --approach ${approach} \ - --output_dir ${tuned_checkpoint} \ - --tasks "lambada_openai" \ - --batch_size ${batch_size} \ - ${extra_cmd} -} - -main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/README.md deleted file mode 100644 index 6608cbcf726..00000000000 --- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Run - -## Run WOQ MX FP4 model -``` python -python run_clm_no_trainer.py --model [model_name_or_id] --quantize --accuracy --tasks lambada_openai --w_dtype fp4 --woq -``` \ No newline at end of file diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/run_clm_no_trainer.py deleted file mode 100644 index db5b08882e0..00000000000 --- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/run_clm_no_trainer.py +++ /dev/null @@ -1,110 +0,0 @@ -import argparse -import time -import json - -parser = argparse.ArgumentParser() -parser.add_argument( - "--model", nargs="?", default="EleutherAI/gpt-j-6b" -) -parser.add_argument( - "--trust_remote_code", default=True, - help="Transformers parameter: use the external repo") -parser.add_argument( - "--revision", default=None, - help="Transformers parameter: set the 
model hub commit number") -parser.add_argument("--quantize", action="store_true") -# dynamic only now -parser.add_argument("--w_dtype", type=str, default="int8", - choices=["int8", "int4", "int2", "fp8_e5m2", "fp8_e4m3", "fp6_e3m2", - "fp6_e2m3", "fp4", "float16", "bfloat12"], - help="weight data type") -parser.add_argument("--act_dtype", type=str, default="int8", - choices=["int8", "int4", "int2", "fp8_e5m2", "fp8_e4m3", "fp6_e3m2", - "fp6_e2m3", "fp4", "float16", "bfloat12"], - help="input activation data type") -parser.add_argument("--woq", action="store_true") -parser.add_argument("--accuracy", action="store_true") -parser.add_argument("--performance", action="store_true") -parser.add_argument("--iters", default=100, type=int, - help="For accuracy measurement only.") -parser.add_argument("--batch_size", default=1, type=int, - help="For accuracy measurement only.") -parser.add_argument("--save_accuracy_path", default=None, - help="Save accuracy results path.") -parser.add_argument("--tasks", type=str, default="lambada_openai", - help="tasks list for accuracy validation") -parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") - -args = parser.parse_args() - -def get_user_model(): - from transformers import AutoModelForCausalLM, AutoModel, AutoTokenizer - user_model = AutoModelForCausalLM.from_pretrained( - args.model, - trust_remote_code=args.trust_remote_code, - revision=args.revision, - ) - tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=args.trust_remote_code) - - if args.peft_model_id is not None: - from peft import PeftModel - user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) - - user_model.eval() - return user_model, tokenizer - -user_model, tokenizer = get_user_model() -if args.quantize: - from neural_compressor.torch.quantization import MXQuantConfig, quantize - quant_config = MXQuantConfig(w_dtype=args.w_dtype, act_dtype=args.act_dtype, weight_only=args.woq) - user_model = quantize(model=user_model, quant_config=quant_config) - - -if args.accuracy: - user_model.eval() - from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser - args = LMEvalParser( - model="hf", - user_model=user_model, - tokenizer=tokenizer, - batch_size=args.batch_size, - tasks=args.tasks, - device="cpu", - ) - results = evaluate(args) - dumped = json.dumps(results, indent=2) - if args.save_accuracy_path: - with open(args.save_accuracy_path, "w") as f: - f.write(dumped) - for task_name in args.tasks: - if task_name == "wikitext": - acc = results["results"][task_name]["word_perplexity"] - else: - acc = results["results"][task_name]["acc"] - print("Accuracy: %.5f" % acc) - print('Batch size = %d' % args.batch_size) - -if args.performance: - user_model.eval() - from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate - import time - samples = args.iters * args.batch_size - start = time.time() - results = evaluate( - model="hf", - tokenizer=tokenizer, - user_model=user_model, - batch_size=args.batch_size, - tasks=args.tasks, - limit=samples, - ) - end = time.time() - for task_name in args.tasks: - if task_name == "wikitext": - acc = results["results"][task_name]["word_perplexity"] - else: - acc = results["results"][task_name]["acc"] - print("Accuracy: %.5f" % acc) - print('Throughput: %.3f samples/sec' % (samples / (end - start))) - print('Latency: %.3f ms' % ((end - start)*1000 / samples)) - print('Batch size = %d' % args.batch_size) diff --git 
a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/README.md new file mode 100644 index 00000000000..e61d5a64ade --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/README.md @@ -0,0 +1,7 @@ +# Run + +## Run WOQ MX FP4 model + +``` python +python run_clm_no_trainer.py --model [model_name_or_id] --quantize --accuracy --tasks lambada_openai --w_dtype fp4 --woq +``` diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/requirements.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/requirements.txt similarity index 100% rename from examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx/requirements.txt rename to examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/requirements.txt diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/run_clm_no_trainer.py new file mode 100644 index 00000000000..40bf217c72e --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/mx_quant/run_clm_no_trainer.py @@ -0,0 +1,95 @@ +import argparse +import time +import json + +parser = argparse.ArgumentParser() +parser.add_argument( + "--model", nargs="?", default="EleutherAI/gpt-j-6b" +) +parser.add_argument( + "--trust_remote_code", default=True, + help="Transformers parameter: use the external repo") +parser.add_argument( + "--revision", default=None, + help="Transformers parameter: set the model hub commit number") +parser.add_argument("--quantize", action="store_true") +# dynamic only now +parser.add_argument("--w_dtype", type=str, default="int8", + choices=["int8", "int4", "int2", "fp8_e5m2", "fp8_e4m3", "fp6_e3m2", + "fp6_e2m3", "fp4", "float16", "bfloat12"], + help="weight data type") +parser.add_argument("--act_dtype", type=str, default="int8", + choices=["int8", "int4", "int2", "fp8_e5m2", "fp8_e4m3", "fp6_e3m2", + "fp6_e2m3", "fp4", "float16", "bfloat12"], + help="input activation data type") +parser.add_argument("--woq", action="store_true") +parser.add_argument("--accuracy", action="store_true") +parser.add_argument("--performance", action="store_true") +parser.add_argument("--iters", default=100, type=int, + help="For accuracy measurement only.") +parser.add_argument("--batch_size", default=1, type=int, + help="For accuracy measurement only.") +parser.add_argument("--save_accuracy_path", default=None, + help="Save accuracy results path.") +parser.add_argument("--tasks", nargs="+", default=["lambada_openai"], type=str, + help="tasks list for accuracy validation" +) +parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") + +args = parser.parse_args() + +def get_user_model(): + from transformers import AutoModelForCausalLM, AutoModel, AutoTokenizer + user_model = AutoModelForCausalLM.from_pretrained( + args.model, + trust_remote_code=args.trust_remote_code, + revision=args.revision, + ) + tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=args.trust_remote_code) + + if args.peft_model_id is not None: + from peft import PeftModel + user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) + + 
user_model.eval() + return user_model, tokenizer +user_model, tokenizer = get_user_model() + +from neural_compressor.torch.quantization import MXQuantConfig, prepare, convert +quant_config = MXQuantConfig(w_dtype=args.w_dtype, act_dtype=args.act_dtype, weight_only=args.woq) +user_model = prepare(model=user_model, quant_config=quant_config) +user_model = convert(model=user_model) +user_model.eval() + +from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser +eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=','.join(args.tasks), + device="cpu", +) + +results = evaluate(eval_args) +dumped = json.dumps(results, indent=2) +if args.save_accuracy_path: + with open(args.save_accuracy_path, "w") as f: + f.write(dumped) + +eval_acc = 0 +for task_name in args.tasks: + if task_name == "wikitext": + print("Accuracy for %s is: %s" % + (task_name, results["results"][task_name]["word_perplexity,none"])) + eval_acc += results["results"][task_name]["word_perplexity,none"] + else: + print("Accuracy for %s is: %s" % + (task_name, results["results"][task_name]["acc,none"])) + eval_acc += results["results"][task_name]["acc,none"] + +if len(args.tasks) != 0: + eval_acc /= len(args.tasks) +print("Accuracy: %.5f" % eval_acc) +print('Batch size = %d' % args.batch_size) \ No newline at end of file diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/README.md new file mode 100644 index 00000000000..8900ea9fd9b --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/README.md @@ -0,0 +1,64 @@ +Step-by-Step +============ +This document provides step-by-step instructions for running large language models (LLMs) with Smooth Quantization on 4th Gen Intel® Xeon® Scalable Processors (codenamed Sapphire Rapids) with PyTorch and Intel® Extension for PyTorch. + +The script `run_clm_no_trainer.py` currently supports quantization of `GPTJ`, `OPT`, `LLaMA2`, `BLOOM` and `Falcon`, and validates last-word-prediction accuracy with [lm_eval](https://github.com/EleutherAI/lm-evaluation-harness.git); more models are being added. + +# Prerequisite +## 1. Create Environment +``` +# Installation +pip install -r requirements.txt +``` + +# Run + +Here is how to run the scripts: + +**Causal Language Modeling (CLM)** + +`run_clm_no_trainer.py` quantizes large language models using the [NeelNanda/pile-10k](https://huggingface.co/datasets/NeelNanda/pile-10k) dataset for calibration and validates accuracy on `lambada_openai`, `piqa`, `winogrande`, `hellaswag` and other datasets provided by lm_eval; an example command is as follows. +### GPT-J-6b + +#### Quantization +```bash +# "--sq" is used to enable smooth quant +python run_clm_no_trainer.py \ + --model EleutherAI/gpt-j-6B \ + --quantize \ + --sq \ + --alpha 1.0 \ + --ipex \ + --output_dir "saved_results" +``` +**Note**: Smooth quantization here is based on torch.jit. Without past key values in `example_inputs`, the quantized model cannot be used for text generation.
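To make the `--sq --alpha 1.0 --ipex` recipe above concrete, the following is a minimal sketch of the SmoothQuant prepare/calibrate/convert flow that the script drives through the 3.x API. The `model`, `example_inputs`, and `calib_dataloader` arguments are placeholders for whatever the caller supplies, so treat this as an illustration under those assumptions rather than the script itself.

```python
# Hedged sketch of the 3.x SmoothQuant flow; model/example_inputs/calib_dataloader are placeholders.
from neural_compressor.torch.quantization import SmoothQuantConfig, prepare, convert

def smooth_quantize(model, example_inputs, calib_dataloader, alpha=1.0):
    quant_config = SmoothQuantConfig(alpha=alpha, folding=True)
    prepared = prepare(model=model, quant_config=quant_config, example_inputs=example_inputs)
    # Calibration pass: run representative batches through the prepared model.
    for batch in calib_dataloader:
        if isinstance(batch, (tuple, list)):
            prepared(batch[0])
        elif isinstance(batch, dict):
            prepared(**batch)
        else:
            prepared(batch)
    return convert(prepared)
```

In the commands above, `--alpha` corresponds to the `alpha` argument of `SmoothQuantConfig`.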
+ +### OPT-125m + +#### Quantization + +```bash +# "--sq" is used to enable smooth quant +python run_clm_no_trainer.py \ + --model facebook/opt-125m \ + --quantize \ + --sq \ + --alpha 0.5 \ + --ipex \ + --output_dir "saved_results" +``` + +### LLAMA2-7b/13b/70b +>Note: LLAMA requires IPEX requirements >= 2.1 to get better accuracy. +#### Quantization + +```bash +# "--sq" is used to enable smooth quant +python run_clm_no_trainer.py \ + --model meta-llama/Llama-2-7b-hf \ + --quantize \ + --sq \ + --alpha 0.8 \ + --ipex \ + --output_dir "saved_results" +``` \ No newline at end of file diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt new file mode 100644 index 00000000000..d4155dfbf75 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt @@ -0,0 +1,14 @@ +accelerate +protobuf +sentencepiece != 0.1.92 +datasets >= 1.1.3 +torch >= 1.10 +transformers +pytest +wandb +einops +neural-compressor +intel-extension-for-transformers +lm_eval==0.4.2 +peft +optimum-intel diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_benchmark.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_benchmark.sh new file mode 100644 index 00000000000..7b60727b047 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_benchmark.sh @@ -0,0 +1,108 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=16 + approach=static + tuned_checkpoint=saved_results + task=lambada_openai + echo ${max_eval_samples} + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + extra_cmd='' + + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy " + extra_cmd=$extra_cmd" --load" + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --performance --iters "${iters} + extra_cmd=$extra_cmd" --load" + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + echo $extra_cmd + + if [ "${topology}" = "opt_125m_ipex_sq" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --ipex" + elif [ "${topology}" = "llama2_7b_ipex_sq" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --ipex" + elif [ "${topology}" = "gpt_j_ipex_sq" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --ipex" + fi + + if [[ ${mode} == "accuracy" ]]; then + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --approach ${approach} \ + --output_dir ${tuned_checkpoint} \ + --task ${task} \ + --batch_size ${batch_size} \ + 
${extra_cmd} ${mode_cmd} + elif [[ ${mode} == "performance" ]]; then + incbench --num_cores_per_instance 4 run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --approach ${approach} \ + --batch_size ${batch_size} \ + --output_dir ${tuned_checkpoint} \ + ${extra_cmd} ${mode_cmd} + else + echo "Error: No such mode: ${mode}" + exit 1 + fi +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_clm_no_trainer.py new file mode 100644 index 00000000000..694c0505ea4 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_clm_no_trainer.py @@ -0,0 +1,272 @@ +import argparse +import os +import sys + +sys.path.append("./") +import time +import re +import torch +from datasets import load_dataset +from torch.nn.functional import pad +from torch.utils.data import DataLoader +from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer + +parser = argparse.ArgumentParser() +parser.add_argument("--model", nargs="?", default="EleutherAI/gpt-j-6b") +parser.add_argument("--trust_remote_code", default=True, help="Transformers parameter: use the external repo") +parser.add_argument( + "--revision", default=None, help="Transformers parameter: set the model hub commit number" +) +parser.add_argument("--dataset", nargs="?", default="NeelNanda/pile-10k", const="NeelNanda/pile-10k") +parser.add_argument("--output_dir", nargs="?", default="./saved_results") +parser.add_argument("--quantize", action="store_true") +parser.add_argument( + "--int8_bf16_mixed", + action="store_true", + help="By default it is int8-fp32 mixed, to enable int8 mixed amp bf16 (work on platforms like SPR)", +) +parser.add_argument("--seed", type=int, default=42, help="Seed for sampling the calibration data.") +parser.add_argument( + "--approach", type=str, default="static", help="Select from ['dynamic', 'static', 'weight-only']" +) +parser.add_argument("--int8", action="store_true") +parser.add_argument("--ipex", action="store_true", help="Use intel extension for pytorch.") +parser.add_argument("--load", action="store_true", help="Load quantized model.") +parser.add_argument("--accuracy", action="store_true") +parser.add_argument("--performance", action="store_true") +parser.add_argument("--iters", default=100, type=int, help="For accuracy measurement only.") +parser.add_argument("--batch_size", default=1, type=int, help="For accuracy measurement only.") +parser.add_argument("--save_accuracy_path", default=None, help="Save accuracy results path.") +parser.add_argument("--pad_max_length", default=512, type=int, help="Pad input ids to max length.") +parser.add_argument("--calib_iters", default=512, type=int, help="calibration iters.") +parser.add_argument( + "--tasks", + default="lambada_openai,hellaswag,winogrande,piqa,wikitext", + type=str, + help="tasks for accuracy validation", +) +parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") +# ============SmoothQuant configs============== +parser.add_argument("--sq", action="store_true") +parser.add_argument("--alpha", default="auto", help="Smooth quant parameter.") + +args = parser.parse_args() +if args.ipex: + import intel_extension_for_pytorch as ipex +calib_size = 1 + + +class Evaluator: + def __init__(self, dataset, tokenizer, batch_size=8, pad_val=1, pad_max=196, is_calib=False): + 
self.dataset = dataset + self.tokenizer = tokenizer + self.batch_size = batch_size + self.pad_val = pad_val + self.pad_max = pad_max + self.is_calib = is_calib + + # tokenize the dataset + self.dataset = self.dataset.map(self.tokenize_function, batched=True) + self.dataset.set_format(type="torch", columns=["input_ids"]) + + @torch.no_grad() + def tokenize_function(self, examples): + return self.tokenizer(examples["text"]) + + @torch.no_grad() + def collate_batch(self, batch): + + input_ids_padded = [] + last_ind = [] + + for text in batch: + input_ids = text["input_ids"] + pad_len = self.pad_max - input_ids.shape[0] + last_ind.append(input_ids.shape[0] - 1) + if self.is_calib: + input_ids = input_ids[: self.pad_max] if len(input_ids) > self.pad_max else input_ids + else: + input_ids = pad(input_ids, (0, pad_len), value=self.pad_val) + input_ids_padded.append(input_ids) + + return (torch.vstack(input_ids_padded), torch.tensor(last_ind)) + + @torch.no_grad() + def evaluate(self, model): + model.eval() + # The task is to predict the last word of the input. + total, hit = 0, 0 + latency = 0 + test_dataloader = DataLoader( + self.dataset, + batch_size=self.batch_size, + shuffle=False, + collate_fn=self.collate_batch, + ) + for i, (input_ids, last_ind) in enumerate(test_dataloader): + label = input_ids[torch.arange(len(last_ind)), last_ind] + input_ids[torch.arange(len(last_ind)), last_ind] = self.pad_val + pad_len = self.pad_max - last_ind - 1 + + start = time.time() + outputs = model(input_ids) + latency += time.time() - start + + last_token_logits = outputs[0][torch.arange(len(last_ind)), -2 - pad_len, :] + pred = last_token_logits.argmax(dim=-1) + total += label.size(0) + hit += (pred == label).sum().item() + if (i + 1) % 50 == 0: + print(hit / total) + print("Processed minibatch:", i) + + acc = hit / total + print("Accuracy: ", acc) + print("Latency: ", latency) + return acc + + +def get_user_model(): + user_model = AutoModelForCausalLM.from_pretrained( + args.model, + torchscript=True, # torchscript will force `return_dict=False` to avoid jit errors + trust_remote_code=args.trust_remote_code, + revision=args.revision, + ) + tokenizer = AutoTokenizer.from_pretrained(args.model) + + if args.peft_model_id is not None: + from peft import PeftModel + + user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) + + # to channels last + user_model = user_model.to(memory_format=torch.channels_last) + user_model.eval() + return user_model, tokenizer + + +if args.quantize: + # dataset + user_model, tokenizer = get_user_model() + calib_dataset = load_dataset(args.dataset, split="train") + # calib_dataset = datasets.load_from_disk('/your/local/dataset/pile-10k/') # use this if trouble with connecting to HF + calib_dataset = calib_dataset.shuffle(seed=args.seed) + calib_evaluator = Evaluator( + calib_dataset, tokenizer, args.batch_size, pad_max=args.pad_max_length, is_calib=True + ) + calib_dataloader = DataLoader( + calib_evaluator.dataset, + batch_size=calib_size, + shuffle=False, + collate_fn=calib_evaluator.collate_batch, + ) + + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + from tqdm import tqdm + + def run_fn(model): + calib_iter = 0 + for batch in tqdm(calib_dataloader, total=args.calib_iters): + batch = move_input_to_device(batch, device=None) + if isinstance(batch, tuple) or isinstance(batch, list): + model(batch[0]) + elif isinstance(batch, dict): + model(**batch) + else: + model(batch) + + calib_iter += 1 + if calib_iter >= args.calib_iters: + 
break + return + + def eval_func(model): + config = AutoConfig.from_pretrained(args.model) + setattr(model, "config", config) + + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + eval_args = LMEvalParser( + model="hf", + user_model=model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=args.tasks, + device="cpu", + ) + results = evaluate(eval_args) + if args.tasks == "wikitext": + return results["results"][args.tasks]["word_perplexity,none"] + else: + return results["results"][args.tasks]["acc,none"] + + from utils import get_example_inputs + + example_inputs = get_example_inputs(user_model, calib_dataloader) + + from neural_compressor.torch.quantization import SmoothQuantConfig, autotune, TuningConfig + tune_config = TuningConfig(config_set=SmoothQuantConfig.get_config_set_for_tuning()) + user_model = autotune( + user_model, + tune_config=tune_config, + eval_fn=eval_func, + run_fn=run_fn, + example_inputs=example_inputs, + ) + user_model.save(args.output_dir) + + +if args.load: + # TODO: we need run_benchmark.sh for loading and remove --accuracy in run_quant.sh, currently run_quant.sh will get fp32 result + if args.int8 or args.int8_bf16_mixed: + print("load int8 model") + from neural_compressor.torch.quantization import load + + tokenizer = AutoTokenizer.from_pretrained(args.model) + config = AutoConfig.from_pretrained(args.model) + user_model = load(os.path.abspath(os.path.expanduser(args.output_dir))) + setattr(user_model, "config", config) + else: + user_model, tokenizer = get_user_model() + + +if args.accuracy: + user_model.eval() + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + + eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=args.tasks, + device="cpu", + ) + results = evaluate(eval_args) + for task_name in args.tasks.split(","): + if task_name == "wikitext": + print("Accuracy for %s is: %s" % (task_name, results["results"][task_name]["word_perplexity,none"])) + else: + print("Accuracy for %s is: %s" % (task_name, results["results"][task_name]["acc,none"])) + + +if args.performance: + user_model.eval() + batch_size, input_leng = args.batch_size, 512 + example_inputs = torch.ones((batch_size, input_leng), dtype=torch.long) + print("Batch size = {:d}".format(batch_size)) + print("The length of input tokens = {:d}".format(input_leng)) + import time + + total_iters = args.iters + warmup_iters = 5 + with torch.no_grad(): + for i in range(total_iters): + if i == warmup_iters: + start = time.time() + user_model(example_inputs) + end = time.time() + latency = (end - start) / ((total_iters - warmup_iters) * args.batch_size) + throughput = ((total_iters - warmup_iters) * args.batch_size) / (end - start) + print("Latency: {:.3f} ms".format(latency * 10**3)) + print("Throughput: {:.3f} samples/sec".format(throughput)) diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_quant.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_quant.sh new file mode 100644 index 00000000000..774bb73b6f1 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/run_quant.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + 
--topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + extra_cmd='' + batch_size=8 + approach='static' + DATASET_NAME="NeelNanda/pile-10k" + tuned_checkpoint="saved_results" + + if [ "${topology}" = "opt_125m_ipex_sq" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --ipex --sq --alpha 0.5" + elif [ "${topology}" = "llama2_7b_ipex_sq" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --ipex --sq --alpha 0.8" + elif [ "${topology}" = "gpt_j_ipex_sq" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --ipex --sq --alpha 1.0" + fi + + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --dataset ${DATASET_NAME} \ + --quantize \ + --approach ${approach} \ + --output_dir ${tuned_checkpoint} \ + --tasks "lambada_openai" \ + --batch_size ${batch_size} \ + ${extra_cmd} +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/utils.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/utils.py new file mode 100644 index 00000000000..76117f8b0b5 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/utils.py @@ -0,0 +1,47 @@ +import torch +from collections import UserDict +from packaging.version import Version +from neural_compressor.torch.utils import get_torch_version + +def get_example_inputs(model, dataloader): + version = get_torch_version() + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + + # Suggest set dataloader like calib_dataloader + if dataloader is None: + return None + device = next(model.parameters()).device + try: + for idx, (input, label) in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC support IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, (list, tuple)): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + except Exception as e: # pragma: no cover + for idx, input in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC support IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, list) or isinstance(input, tuple): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + if idx == 0: + assert False, "Please checkout the example_inputs format." 
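For orientation, the SmoothQuant example above boils down to a small autotuning loop: build a calibration dataloader, extract example inputs with the `get_example_inputs` helper, and hand everything to `autotune` with a SmoothQuant config set. The following is a minimal sketch of that flow, assuming `user_model`, `calib_dataloader`, and `eval_func` are constructed as in `run_clm_no_trainer.py`; it illustrates the API shape rather than replacing the script.

```python
# Minimal sketch of the SmoothQuant autotuning flow used by the example above.
# Assumes user_model, calib_dataloader, and eval_func are built as in
# run_clm_no_trainer.py; the calibration loop here is deliberately simplified.
from neural_compressor.torch.quantization import SmoothQuantConfig, TuningConfig, autotune
from utils import get_example_inputs  # helper shipped with this example

def run_fn(model):
    # Calibration pass: feed a handful of batches through the model.
    for i, (input_ids, _) in enumerate(calib_dataloader):
        model(input_ids)
        if i >= 8:  # small calibration budget, for illustration only
            break

example_inputs = get_example_inputs(user_model, calib_dataloader)
tune_config = TuningConfig(config_set=SmoothQuantConfig.get_config_set_for_tuning())
q_model = autotune(
    user_model,
    tune_config=tune_config,
    eval_fn=eval_func,             # metric used to rank candidate configs
    run_fn=run_fn,                 # calibration function
    example_inputs=example_inputs,
)
q_model.save("saved_results")
```

The saved model can then be reloaded with `neural_compressor.torch.quantization.load`, as the `--load` path of the script does.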
diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/README.md new file mode 100644 index 00000000000..8ecdc6c5110 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/README.md @@ -0,0 +1,57 @@ +Step-by-Step +============ +This document provides step-by-step instructions for running large language models (LLMs) with static quantization on the 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) using PyTorch and Intel® Extension for PyTorch. + +The script `run_clm_no_trainer.py` currently supports quantization of `GPTJ`, `OPT`, `LLaMA2`, `BLOOM` and `Falcon` models and validates last-word prediction accuracy with [lm_eval](https://github.com/EleutherAI/lm-evaluation-harness.git); more models are being added. + +# Prerequisite +## 1. Create Environment +``` +# Installation +pip install -r requirements.txt +``` + +# Run + +Here is how to run the scripts: + +**Causal Language Modeling (CLM)** + +`run_clm_no_trainer.py` quantizes large language models using the [NeelNanda/pile-10k](https://huggingface.co/datasets/NeelNanda/pile-10k) dataset for calibration and validates accuracy on `lambada_openai`, `piqa`, `winogrande`, `hellaswag` and other datasets supported by lm_eval. An example command is as follows. +### GPT-J-6b + +#### Quantization +```bash +python run_clm_no_trainer.py \ + --model EleutherAI/gpt-j-6B \ + --quantize \ + --alpha 1.0 \ + --ipex \ + --output_dir "saved_results" +``` + +### OPT-125m + +#### Quantization + +```bash +python run_clm_no_trainer.py \ + --model facebook/opt-125m \ + --quantize \ + --alpha 0.5 \ + --ipex \ + --output_dir "saved_results" +``` + +### LLAMA2-7b/13b/70b +>Note: LLaMA models require Intel® Extension for PyTorch >= 2.1 for better accuracy.
+#### Quantization + +```bash +python run_clm_no_trainer.py \ + --model meta-llama/Llama-2-7b-hf \ + --quantize \ + --alpha 0.8 \ + --ipex \ + --output_dir "saved_results" +``` \ No newline at end of file diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/requirements.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt similarity index 100% rename from examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/requirements.txt rename to examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_benchmark.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_benchmark.sh new file mode 100644 index 00000000000..b62a6381b20 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_benchmark.sh @@ -0,0 +1,96 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=16 + approach=static + tuned_checkpoint=saved_results + task=lambada_openai + echo ${max_eval_samples} + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + extra_cmd='' + + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy " + extra_cmd=$extra_cmd" --load" + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --performance --iters "${iters} + extra_cmd=$extra_cmd" --load" + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + echo $extra_cmd + + if [ "${topology}" = "opt_125m_ipex" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --ipex" + elif [ "${topology}" = "llama2_7b_ipex" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --ipex" + elif [ "${topology}" = "gpt_j_ipex" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --ipex" + fi + + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --approach ${approach} \ + --output_dir ${tuned_checkpoint} \ + --task ${task} \ + --batch_size ${batch_size} \ + ${extra_cmd} ${mode_cmd} +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_clm_no_trainer.py new file mode 100644 index 00000000000..b56c01f20f5 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_clm_no_trainer.py @@ -0,0 +1,259 @@ +import argparse +import os +import sys + +sys.path.append('./') 
+import time +import re +import torch +from datasets import load_dataset +from torch.nn.functional import pad +from torch.utils.data import DataLoader +from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer + +parser = argparse.ArgumentParser() +parser.add_argument( + "--model", nargs="?", default="EleutherAI/gpt-j-6b" +) +parser.add_argument( + "--trust_remote_code", default=True, + help="Transformers parameter: use the external repo") +parser.add_argument( + "--revision", default=None, + help="Transformers parameter: set the model hub commit number") +parser.add_argument("--dataset", nargs="?", default="NeelNanda/pile-10k", const="NeelNanda/pile-10k") +parser.add_argument("--output_dir", nargs="?", default="./saved_results") +parser.add_argument("--quantize", action="store_true") +parser.add_argument( + "--int8_bf16_mixed", + action="store_true", + help="By default it is int8-fp32 mixed, to enable int8 mixed amp bf16 (work on platforms like SPR)", +) +parser.add_argument( + '--seed', + type=int, default=42, help='Seed for sampling the calibration data.' +) +parser.add_argument("--approach", type=str, default='static', + help="Select from ['dynamic', 'static', 'weight-only']") +parser.add_argument("--int8", action="store_true") +parser.add_argument("--ipex", action="store_true", help="Use intel extension for pytorch.") +parser.add_argument("--load", action="store_true", help="Load quantized model.") +parser.add_argument("--accuracy", action="store_true") +parser.add_argument("--performance", action="store_true") +parser.add_argument("--iters", default=100, type=int, + help="For accuracy measurement only.") +parser.add_argument("--batch_size", default=1, type=int, + help="For accuracy measurement only.") +parser.add_argument("--save_accuracy_path", default=None, + help="Save accuracy results path.") +parser.add_argument("--pad_max_length", default=512, type=int, + help="Pad input ids to max length.") +parser.add_argument("--calib_iters", default=512, type=int, + help="calibration iters.") +parser.add_argument("--tasks", default="lambada_openai,hellaswag,winogrande,piqa,wikitext", + type=str, help="tasks for accuracy validation") +parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") + +args = parser.parse_args() +if args.ipex: + import intel_extension_for_pytorch as ipex +calib_size = 1 + + +class Evaluator: + def __init__(self, dataset, tokenizer, batch_size=8, pad_val=1, pad_max=196, is_calib=False): + self.dataset = dataset + self.tokenizer = tokenizer + self.batch_size = batch_size + self.pad_val = pad_val + self.pad_max = pad_max + self.is_calib = is_calib + + # tokenize the dataset + self.dataset = self.dataset.map(self.tokenize_function, batched=True) + self.dataset.set_format(type="torch", columns=["input_ids"]) + + @torch.no_grad() + def tokenize_function(self, examples): + return self.tokenizer(examples["text"]) + + @torch.no_grad() + def collate_batch(self, batch): + + input_ids_padded = [] + last_ind = [] + + for text in batch: + input_ids = text["input_ids"] + pad_len = self.pad_max - input_ids.shape[0] + last_ind.append(input_ids.shape[0] - 1) + if self.is_calib: + input_ids = input_ids[:self.pad_max] if len(input_ids) > self.pad_max else input_ids + else: + input_ids = pad(input_ids, (0, pad_len), value=self.pad_val) + input_ids_padded.append(input_ids) + + return (torch.vstack(input_ids_padded), torch.tensor(last_ind)) + + @torch.no_grad() + def evaluate(self, model): + model.eval() + # The task is to 
predict the last word of the input. + total, hit = 0, 0 + latency = 0 + test_dataloader = DataLoader( + self.dataset, + batch_size=self.batch_size, + shuffle=False, + collate_fn=self.collate_batch, + ) + for i, (input_ids, last_ind) in enumerate(test_dataloader): + label = input_ids[torch.arange(len(last_ind)), last_ind] + input_ids[torch.arange(len(last_ind)), last_ind] = self.pad_val + pad_len = self.pad_max - last_ind - 1 + + start = time.time() + outputs = model(input_ids) + latency += time.time() - start + + last_token_logits = outputs[0][torch.arange(len(last_ind)), -2 - pad_len, :] + pred = last_token_logits.argmax(dim=-1) + total += label.size(0) + hit += (pred == label).sum().item() + if (i + 1) % 50 == 0: + print(hit / total) + print("Processed minibatch:", i) + + acc = hit / total + print("Accuracy: ", acc) + print("Latency: ", latency) + return acc + + +def get_user_model(): + user_model = AutoModelForCausalLM.from_pretrained( + args.model, + torchscript=True, # torchscript will force `return_dict=False` to avoid jit errors + trust_remote_code=args.trust_remote_code, + revision=args.revision, + ) + tokenizer = AutoTokenizer.from_pretrained(args.model) + + if args.peft_model_id is not None: + from peft import PeftModel + user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) + + # to channels last + user_model = user_model.to(memory_format=torch.channels_last) + user_model.eval() + return user_model, tokenizer + + +if args.quantize: + # dataset + user_model, tokenizer = get_user_model() + calib_dataset = load_dataset(args.dataset, split="train") + # calib_dataset = datasets.load_from_disk('/your/local/dataset/pile-10k/') # use this if trouble with connecting to HF + calib_dataset = calib_dataset.shuffle(seed=args.seed) + calib_evaluator = Evaluator(calib_dataset, tokenizer, args.batch_size, pad_max=args.pad_max_length, is_calib=True) + calib_dataloader = DataLoader( + calib_evaluator.dataset, + batch_size=calib_size, + shuffle=False, + collate_fn=calib_evaluator.collate_batch, + ) + + + from neural_compressor.torch.quantization import StaticQuantConfig + excluded_precisions = [] if args.int8_bf16_mixed else ["bf16"] + quant_config = StaticQuantConfig(excluded_precisions=excluded_precisions) + if re.search("gpt", user_model.config.model_type): + quant_config.set_local("add", StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) + + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + from tqdm import tqdm + def run_fn(model): + calib_iter = 0 + for batch in tqdm(calib_dataloader, total=args.calib_iters): + batch = move_input_to_device(batch, device=None) + if isinstance(batch, tuple) or isinstance(batch, list): + model(batch[0]) + elif isinstance(batch, dict): + model(**batch) + else: + model(batch) + + calib_iter += 1 + if calib_iter >= args.calib_iters: + break + return + + from utils import get_example_inputs + example_inputs = get_example_inputs(user_model, calib_dataloader) + + from neural_compressor.torch.quantization import prepare, convert + user_model = prepare(model=user_model, quant_config=quant_config, example_inputs=example_inputs) + run_fn(user_model) + user_model = convert(user_model) + user_model.save(args.output_dir) + +if args.load: + # TODO: we need run_benchmark.sh for loading and remove --accuracy in run_quant.sh, currently run_quant.sh will get fp32 result + if args.int8 or args.int8_bf16_mixed: + print("load int8 model") + from neural_compressor.torch.quantization import load + tokenizer = 
AutoTokenizer.from_pretrained(args.model) + config = AutoConfig.from_pretrained(args.model) + user_model = load(os.path.abspath(os.path.expanduser(args.output_dir))) + setattr(user_model, "config", config) + else: + user_model, tokenizer = get_user_model() + + +if args.accuracy: + user_model.eval() + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=args.tasks, + device="cpu", + ) + results = evaluate(eval_args) + for task_name in args.tasks.split(","): + if task_name == "wikitext": + acc = results["results"][task_name]["word_perplexity,none"] + else: + acc = results["results"][task_name]["acc,none"] + print("Accuracy: %.5f" % acc) + print('Batch size = %d' % args.batch_size) + +if args.performance: + user_model.eval() + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + import time + + samples = args.iters * args.batch_size + eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=args.tasks, + limit=samples, + device="cpu", + ) + start = time.time() + results = evaluate(eval_args) + end = time.time() + for task_name in args.tasks.split(","): + if task_name == "wikitext": + acc = results["results"][task_name]["word_perplexity,none"] + else: + acc = results["results"][task_name]["acc,none"] + print("Accuracy: %.5f" % acc) + print('Throughput: %.3f samples/sec' % (samples / (end - start))) + print('Latency: %.3f ms' % ((end - start) * 1000 / samples)) + print('Batch size = %d' % args.batch_size) diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_quant.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_quant.sh new file mode 100644 index 00000000000..a93d8220d64 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/run_quant.sh @@ -0,0 +1,67 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + extra_cmd='' + batch_size=8 + approach='static' + DATASET_NAME="NeelNanda/pile-10k" + tuned_checkpoint="saved_results" + + if [ "${topology}" = "opt_125m_ipex" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --ipex" + elif [ "${topology}" = "llama2_7b_ipex" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --ipex" + elif [ "${topology}" = "gpt_j_ipex" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --ipex" + fi + + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --dataset ${DATASET_NAME} \ + --quantize \ + --approach ${approach} \ + --output_dir ${tuned_checkpoint} \ + --tasks "lambada_openai" \ + --batch_size ${batch_size} \ + ${extra_cmd} +} + +main "$@" diff --git 
a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/utils.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/utils.py new file mode 100644 index 00000000000..76117f8b0b5 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/utils.py @@ -0,0 +1,47 @@ +import torch +from collections import UserDict +from packaging.version import Version +from neural_compressor.torch.utils import get_torch_version + +def get_example_inputs(model, dataloader): + version = get_torch_version() + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + + # Suggest set dataloader like calib_dataloader + if dataloader is None: + return None + device = next(model.parameters()).device + try: + for idx, (input, label) in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC support IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, (list, tuple)): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + except Exception as e: # pragma: no cover + for idx, input in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC support IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, list) or isinstance(input, tuple): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + if idx == 0: + assert False, "Please checkout the example_inputs format." diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/README.md new file mode 100644 index 00000000000..7ad8b76bd1e --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/README.md @@ -0,0 +1,27 @@ +Step-by-Step +============ +This document describes the step-by-step instructions to run large language models (LLMs) on 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) with PyTorch 2 Export Quantization. + +Currently, users can use `run_clm_no_trainer.py` to quantize the `OPT` series models and validate the last word prediction accuracy with [lm_eval](https://github.com/EleutherAI/lm-evaluation-harness.git). We will add more models in the near future. + +# Prerequisite +## 1. Create Environment +``` +# Installation +pip install -r requirements.txt +``` + +# Run + +Here is how to run the scripts: + +**Causal Language Modeling (CLM)** + +`run_clm_no_trainer.py` quantizes the large language models using the dataset [NeelNanda/pile-10k](https://huggingface.co/datasets/NeelNanda/pile-10k) validates `lambada_openai`, `piqa`, `winogrande`, `hellaswag` and other datasets accuracy provided by lm_eval, an example command is as follows. 
+### OPT-125m + +#### Quantization + +```bash +python run_clm_no_trainer.py --model facebook/opt-125m --quantize --accuracy +``` \ No newline at end of file diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/requirements.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/requirements.txt new file mode 100644 index 00000000000..b6d9b6c55de --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/requirements.txt @@ -0,0 +1,7 @@ +transformers +torch +sentencepiece +neural-compressor +intel-extension-for-transformers >= 1.4.1 +lm-eval==0.4.2 +peft \ No newline at end of file diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_benchmark.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_benchmark.sh new file mode 100644 index 00000000000..169142cddb8 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_benchmark.sh @@ -0,0 +1,99 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=16 + tuned_checkpoint=saved_results + task=lambada_openai + echo ${max_eval_samples} + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + extra_cmd='' + + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy " + extra_cmd=$extra_cmd + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --performance --iters "${iters} + extra_cmd=$extra_cmd + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + echo $extra_cmd + + echo $extra_cmd + + if [ "${topology}" = "opt_125m_pt2e_static" ]; then + model_name_or_path="facebook/opt-125m" + fi + if [[ ${mode} == "accuracy" ]]; then + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --output_dir ${tuned_checkpoint} \ + --task ${task} \ + --batch_size ${batch_size} \ + ${extra_cmd} ${mode_cmd} + elif [[ ${mode} == "performance" ]]; then + incbench --num_cores_per_instance 4 run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --batch_size ${batch_size} \ + --output_dir ${tuned_checkpoint} \ + ${extra_cmd} ${mode_cmd} + else + echo "Error: No such mode: ${mode}" + exit 1 + fi +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_clm_no_trainer.py new file mode 100644 index 00000000000..395bc6f9b57 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_clm_no_trainer.py @@ -0,0 +1,155 
@@ +import argparse +import time +import torch +from transformers import AutoModelForCausalLM, AutoTokenizer + +parser = argparse.ArgumentParser() +parser.add_argument( + "--model", nargs="?", default="facebook/opt-125m" +) +parser.add_argument( + "--trust_remote_code", default=True, + help="Transformers parameter: use the external repo") +parser.add_argument( + "--revision", default=None, + help="Transformers parameter: set the model hub commit number") +parser.add_argument("--dataset", nargs="?", default="NeelNanda/pile-10k", const="NeelNanda/pile-10k") +parser.add_argument("--output_dir", nargs="?", default="") +parser.add_argument("--quantize", action="store_true") +parser.add_argument("--approach", type=str, default='static', + help="Select from ['dynamic', 'static', 'weight-only']") +parser.add_argument("--int8", action="store_true") +parser.add_argument("--accuracy", action="store_true") +parser.add_argument("--performance", action="store_true") +parser.add_argument("--calib_iters", default=2, type=int, + help="For calibration only.") +parser.add_argument("--iters", default=100, type=int, + help="For accuracy measurement only.") +parser.add_argument("--batch_size", default=1, type=int, + help="For accuracy measurement only.") +parser.add_argument("--tasks", default="lambada_openai,hellaswag,winogrande,piqa,wikitext", + type=str, help="tasks for accuracy validation") +parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") +# ======================================= + +args = parser.parse_args() + + +def get_user_model(): + torchscript = False + user_model = AutoModelForCausalLM.from_pretrained( + args.model, + torchscript=torchscript, # torchscript will force `return_dict=False` to avoid jit errors + trust_remote_code=args.trust_remote_code, + revision=args.revision, + ) + tokenizer = AutoTokenizer.from_pretrained(args.model) + + if args.peft_model_id is not None: + from peft import PeftModel + user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) + + # to channels last + user_model = user_model.to(memory_format=torch.channels_last) + user_model.eval() + return user_model, tokenizer + +user_model, tokenizer = get_user_model() +if args.quantize: + + from neural_compressor.torch.quantization import ( + convert, + get_default_static_config, + prepare, + ) + from neural_compressor.torch.export import export + from torch.export import Dim + def get_example_inputs(tokenizer): + text = "Hello, welcome to LLM world." 
+ encoded_input = tokenizer(text, return_tensors="pt") + + example_inputs = encoded_input + input_ids = example_inputs["input_ids"] + input_ids_batch = torch.cat((input_ids, input_ids), dim=0) + print(f"input_ids_batch shape: {input_ids_batch.shape}") + tuple_inputs = (input_ids_batch,) + return tuple_inputs + # torch._dynamo.config.cache_size_limit = 4 # set limitation if out of memory + batch = Dim(name="batch_size") + seq_len = Dim(name="seq_len") + dynamic_shapes = {"input_ids": (batch, seq_len)} + example_inputs = get_example_inputs(tokenizer) + exported_model = export(user_model, example_inputs=example_inputs, dynamic_shapes=dynamic_shapes) + + quant_config = get_default_static_config() + # prepare + prepare_model = prepare(exported_model, quant_config) + + # calibrate + for i in range(args.calib_iters): + prepare_model(*example_inputs) + # convert + converted_model = convert(prepare_model) + + # save + if args.output_dir: + converted_model.save(example_inputs=example_inputs, output_dir = args.output_dir) + + + +if args.int8: + if args.output_dir: + print("Load int8 model.") + from neural_compressor.torch.quantization import load + model = load(args.output_dir) + + model.config = user_model.config # for lm eval + + # Compile the quantized model and replace the Q/DQ pattern with Q-operator + from torch._inductor import config + + config.freezing = True + opt_model = torch.compile(model) + + opt_model.config = user_model.config # for lm eval + user_model = opt_model + +if args.accuracy: + + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=args.tasks, + device="cpu", + ) + results = evaluate(eval_args) + for task_name in args.tasks.split(","): + if task_name == "wikitext": + acc = results["results"][task_name]["word_perplexity,none"] + else: + acc = results["results"][task_name]["acc,none"] + print("Accuracy: %.5f" % acc) + print('Batch size = %d' % args.batch_size) + +if args.performance: + batch_size, input_leng = args.batch_size, 512 + example_inputs = torch.ones((batch_size, input_leng), dtype=torch.long) + print("Batch size = {:d}".format(batch_size)) + print("The length of input tokens = {:d}".format(input_leng)) + import time + + total_iters = args.iters + warmup_iters = 5 + with torch.no_grad(): + for i in range(total_iters): + if i == warmup_iters: + start = time.time() + user_model(example_inputs) + end = time.time() + latency = (end - start) / ((total_iters - warmup_iters) * args.batch_size) + throughput = ((total_iters - warmup_iters) * args.batch_size) / (end - start) + print("Latency: {:.3f} ms".format(latency * 10**3)) + print("Throughput: {:.3f} samples/sec".format(throughput)) diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_quant.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_quant.sh new file mode 100644 index 00000000000..9e995ec8869 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/pt2e/run_quant.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + 
--input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + + if [ "${topology}" = "opt_125m_pt2e_static" ]; then + model_name_or_path="facebook/opt-125m" + output_dir="saved_results" + fi + python run_clm_no_trainer.py --model ${model_name_or_path} --quantize --output_dir ${output_dir} --tasks "lambada_openai" +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/README.md new file mode 100644 index 00000000000..889d7b42682 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/README.md @@ -0,0 +1,130 @@ +Step-by-Step +============ +This document provides step-by-step instructions for running large language models (LLMs) on the 4th Gen Intel® Xeon® Scalable Processor (codenamed Sapphire Rapids) with PyTorch and Intel® Extension for PyTorch. + +The script `run_clm_no_trainer.py` currently supports quantization of `GPTJ`, `OPT`, `LLaMA2`, `BLOOM` and `Falcon` models and validates last-word prediction accuracy with [lm_eval](https://github.com/EleutherAI/lm-evaluation-harness.git); more models are being added. + +# Prerequisite +## 1. Create Environment +``` +# Installation +pip install -r requirements.txt +``` + +# Run + +Here is how to run the scripts: + +**Causal Language Modeling (CLM)** + +`run_clm_no_trainer.py` quantizes large language models using the [NeelNanda/pile-10k](https://huggingface.co/datasets/NeelNanda/pile-10k) dataset for calibration and validates accuracy on `lambada_openai`, `piqa`, `winogrande`, `hellaswag` and other datasets supported by lm_eval. Example commands are as follows. +### GPT-J-6b + +#### Quantization + +```bash +# "--woq_algo GPTQ" is used to enable GPTQ algorithms +# "--double_quant_type BNB_NF4" is used to enable double quant algorithms +python run_clm_no_trainer.py \ + --model EleutherAI/gpt-j-6B \ + --dataset NeelNanda/pile-10k \ + --quantize \ + --woq_algo GPTQ \ + --woq_bits 4 \ + --woq_scheme asym \ + --woq_group_size 128 \ + --gptq_max_seq_length 2048 \ + --gptq_use_max_length \ + --accuracy \ + --tasks "lambada_openai" \ + --double_quant_type "BNB_NF4" + +# "--woq_algo RTN" is used to enable RTN algorithms +python run_clm_no_trainer.py \ + --model EleutherAI/gpt-j-6B \ + --dataset NeelNanda/pile-10k \ + --quantize \ + --woq_algo RTN \ + --woq_bits 4 \ + --woq_scheme asym \ + --woq_group_size 128 \ + --accuracy \ + --tasks "lambada_openai" \ + --double_quant_type "BNB_NF4" +``` +**Notes**: Weight-only quantization based on fake quantization is currently supported as a preview feature and covers the RTN, GPTQ[1], AWQ[2] and TEQ algorithms. For more details, please refer to [link](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization_weight_only.md). Our GPTQ API supports various CLMs, including GPTJ, OPTs, Blooms, Llamas, Falcons, MPTs, ChatGLMs, etc. Simply replace the "--model" argument with other models to quantize different CLMs with GPTQ.
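For readers who prefer the Python API over the command line, the `--woq_algo GPTQ` flags above roughly map onto the `GPTQConfig` / `prepare` / `convert` flow implemented in `run_clm_no_trainer.py`. The sketch below shows that shape in isolation; `user_model` and `calib_dataloader` are assumed to be prepared as in the script, and the calibration loop is simplified.

```python
# Rough sketch of the GPTQ weight-only flow behind "--woq_algo GPTQ".
# Assumes user_model and calib_dataloader exist as in run_clm_no_trainer.py.
from neural_compressor.torch.quantization import GPTQConfig, prepare, convert

quant_config = GPTQConfig(
    dtype="int",
    bits=4,              # --woq_bits 4
    use_sym=False,       # --woq_scheme asym
    group_size=128,      # --woq_group_size 128
)
# The example script keeps the LM head in fp32.
quant_config.set_local("lm_head", GPTQConfig(dtype="fp32"))

user_model = prepare(model=user_model, quant_config=quant_config)
for input_ids, _ in calib_dataloader:  # calibration pass (simplified)
    user_model(input_ids)
user_model = convert(user_model)
user_model.save("saved_results")
```

In the full script, the calibration dataloader is additionally preprocessed (see `DataloaderPreprocessor` in `utils.py`) to honor `--gptq_use_max_length` and `--gptq_max_seq_length`.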
+ + +### OPT-125m + +#### Quantization + +```bash +# "--woq_algo GPTQ" is used to enable GPTQ algorithms +# "--double_quant_type BNB_NF4" is used to enable double quant algorithms +python run_clm_no_trainer.py \ + --model facebook/opt-125m \ + --dataset NeelNanda/pile-10k \ + --quantize \ + --woq_algo GPTQ \ + --woq_bits 4 \ + --woq_scheme asym \ + --woq_group_size 128 \ + --gptq_max_seq_length 2048 \ + --gptq_use_max_length \ + --accuracy \ + --tasks "lambada_openai" \ + --double_quant_type "BNB_NF4" + +# "--woq_algo RTN" is used to enable RTN algorithms +python run_clm_no_trainer.py \ + --model facebook/opt-125m \ + --dataset NeelNanda/pile-10k \ + --quantize \ + --woq_algo RTN \ + --woq_bits 4 \ + --woq_scheme asym \ + --woq_group_size 128 \ + --accuracy \ + --tasks "lambada_openai" \ + --double_quant_type "BNB_NF4" +``` + +### LLAMA2-7b/13b/70b +>Note: LLaMA models require Intel® Extension for PyTorch >= 2.1 for better accuracy. +#### Quantization + +```bash +# "--double_quant_type BNB_NF4" is used to enable double quant algorithms +# "--woq_algo GPTQ" is used to enable GPTQ algorithms +python run_clm_no_trainer.py \ + --model meta-llama/Llama-2-7b-hf \ + --dataset NeelNanda/pile-10k \ + --quantize \ + --woq_algo GPTQ \ + --woq_bits 4 \ + --woq_scheme asym \ + --woq_group_size 128 \ + --gptq_max_seq_length 2048 \ + --gptq_use_max_length \ + --accuracy \ + --tasks "lambada_openai" \ + --double_quant_type "BNB_NF4" + +# "--woq_algo RTN" is used to enable RTN algorithms +python run_clm_no_trainer.py \ + --model meta-llama/Llama-2-7b-hf \ + --dataset NeelNanda/pile-10k \ + --quantize \ + --woq_algo RTN \ + --woq_bits 4 \ + --woq_scheme asym \ + --woq_group_size 128 \ + --accuracy \ + --tasks "lambada_openai" \ + --double_quant_type "BNB_NF4" +``` + + +[1]. Frantar, Elias, et al. "GPTQ: Accurate Post-Training Quantization for Generative Pre-trained Transformers." arXiv preprint arXiv:2210.17323 (2023). +[2]. Lin, Ji, et al. "AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration." arXiv preprint arXiv:2306.00978 (2023).
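As a counterpart to the GPTQ sketch above, the `--woq_algo RTN` commands correspond to the `RTNConfig` path in `run_clm_no_trainer.py`. RTN is a data-free rounding scheme, so no calibration loop is required; the sketch below is a simplified illustration that assumes `user_model` is already loaded as in the script.

```python
# Rough sketch of the RTN weight-only flow behind "--woq_algo RTN".
# RTN rounds weights directly, so no calibration dataloader is needed.
from neural_compressor.torch.quantization import RTNConfig, prepare, convert

quant_config = RTNConfig(
    dtype="int",
    bits=4,              # --woq_bits 4
    use_sym=False,       # --woq_scheme asym
    group_size=128,      # --woq_group_size 128
)
quant_config.set_local("lm_head", RTNConfig(dtype="fp32"))  # keep the LM head in fp32

user_model = prepare(model=user_model, quant_config=quant_config)
user_model = convert(model=user_model)
user_model.save("saved_results")
```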
diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/requirements.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/requirements.txt new file mode 100644 index 00000000000..9688a4f6cb3 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/requirements.txt @@ -0,0 +1,15 @@ +accelerate +protobuf +sentencepiece != 0.1.92 +datasets >= 1.1.3 +torch >= 1.10 +transformers +pytest +wandb +einops +neural-compressor +intel-extension-for-transformers +lm_eval==0.4.2 +peft +auto_round +intel_extension_for_pytorch diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_benchmark.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_benchmark.sh new file mode 100644 index 00000000000..9e1d766128e --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_benchmark.sh @@ -0,0 +1,127 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=16 + tuned_checkpoint=saved_results + task=lambada_openai + echo ${max_eval_samples} + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + extra_cmd='' + + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy " + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --performance --iters "${iters} + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + echo $extra_cmd + + if [ "${topology}" = "opt_125m_woq_gptq_int4" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_bnb" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_ggml" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length --gptq_percdamp 0.1 --gptq_actorder" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + elif [ "${topology}" = "llama2_7b_gptq_int4" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + elif [ "${topology}" = "llama2_7b_gptq_int4_dq_bnb" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --woq_algo GPTQ 
--woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "llama2_7b_gptq_int4_dq_ggml" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + elif [ "${topology}" = "gpt_j_woq_rtn_int4" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" + elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_bnb" ]; then + model_name_or_path="EleutherAI/gpt-j-6b"\ + extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_ggml" ]; then + model_name_or_path="EleutherAI/gpt-j-6b"\ + extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + elif [ "${topology}" = "gpt_j_woq_gptq_int4" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_bnb" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_ggml" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + fi + + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --output_dir ${tuned_checkpoint} \ + --task ${task} \ + --batch_size ${batch_size} \ + ${extra_cmd} ${mode_cmd} +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_clm_no_trainer.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_clm_no_trainer.py new file mode 100644 index 00000000000..abd8228354e --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_clm_no_trainer.py @@ -0,0 +1,379 @@ +import argparse +import os +import sys + +sys.path.append('./') +import time +import json +import re +import torch +from datasets import load_dataset +import datasets +from torch.nn.functional import pad +from torch.utils.data import DataLoader +from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer + +parser = argparse.ArgumentParser() +parser.add_argument( + "--model", nargs="?", default="EleutherAI/gpt-j-6b" +) +parser.add_argument( + "--trust_remote_code", default=True, + help="Transformers parameter: use the external repo") +parser.add_argument( + "--revision", default=None, + help="Transformers parameter: set the model hub commit number") +parser.add_argument("--dataset", nargs="?", default="NeelNanda/pile-10k", const="NeelNanda/pile-10k") +parser.add_argument("--output_dir", nargs="?", default="./saved_results") 
+parser.add_argument("--quantize", action="store_true") +parser.add_argument( + "--int8_bf16_mixed", + action="store_true", + help="By default it is int8-fp32 mixed, to enable int8 mixed amp bf16 (work on platforms like SPR)", +) +parser.add_argument( + '--seed', + type=int, default=42, help='Seed for sampling the calibration data.' +) +parser.add_argument("--int8", action="store_true") +parser.add_argument("--accuracy", action="store_true") +parser.add_argument("--performance", action="store_true") +parser.add_argument("--iters", default=100, type=int, + help="For accuracy measurement only.") +parser.add_argument("--batch_size", default=1, type=int, + help="For accuracy measurement only.") +parser.add_argument("--save_accuracy_path", default=None, + help="Save accuracy results path.") +parser.add_argument("--pad_max_length", default=512, type=int, + help="Pad input ids to max length.") +parser.add_argument("--calib_iters", default=512, type=int, + help="calibration iters.") +parser.add_argument("--tasks", default="lambada_openai,hellaswag,winogrande,piqa,wikitext", + type=str, help="tasks for accuracy validation") +parser.add_argument("--peft_model_id", type=str, default=None, help="model_name_or_path of peft model") +# ============WeightOnly configs=============== +parser.add_argument("--woq_algo", default="RTN", choices=['RTN', 'AWQ', 'TEQ', 'GPTQ'], + help="Weight-only parameter.") +parser.add_argument("--woq_bits", type=int, default=8) +parser.add_argument("--woq_dtype", type=str, default="int") +parser.add_argument("--woq_group_size", type=int, default=-1) +parser.add_argument("--woq_group_dim", type=int, default=1) +parser.add_argument("--woq_scheme", default="sym") +parser.add_argument("--woq_use_mse_search", action="store_true") +parser.add_argument("--woq_use_full_range", action="store_true") +# =============GPTQ configs==================== +parser.add_argument("--gptq_actorder", action="store_true", + help="Whether to apply the activation order GPTQ heuristic.") +parser.add_argument('--gptq_percdamp', type=float, default=.01, + help='Percent of the average Hessian diagonal to use for dampening.') +parser.add_argument('--gptq_block_size', type=int, default=128, help='Block size. sub weight matrix size to run GPTQ.') +parser.add_argument('--gptq_static_groups', action="store_true", + help="Whether to calculate group wise quantization parameters in advance. 
" + "This option mitigate actorder's extra computational requirements.") +parser.add_argument('--gptq_nsamples', type=int, default=128, help='Number of calibration data samples.') +parser.add_argument('--gptq_use_max_length', action="store_true", + help='Set all sequence length to be same length of args.gptq_max_seq_length') +parser.add_argument('--gptq_max_seq_length', type=int, default=2048, + help='Calibration dataset sequence max length, ' + 'this should align with your model config, ' + 'and your dataset builder args: args.pad_max_length') + +# =============DoubleQuant configs==================== +parser.add_argument("--double_quant_type", + type=str, + default=None, + choices=['GGML_TYPE_Q4_K', 'BNB_NF4'], + help="DoubleQuant parameter") +parser.add_argument("--double_quant_dtype", + type=str, + default="fp32", + help="Data type for double quant scale.") +parser.add_argument("--double_quant_bits", + type=int, + default=8, + help="Number of bits used to represent double_quant scale.") +parser.add_argument("--double_quant_use_sym", + type=bool, + default=True, + help="Indicates whether double quant scale are symmetric.") +parser.add_argument("--double_quant_group_size", + type=int, + default=256, + help="Size of double quant groups.") +# ======================================= + +args = parser.parse_args() +calib_size = 1 + + +class Evaluator: + def __init__(self, dataset, tokenizer, batch_size=8, pad_val=1, pad_max=196, is_calib=False): + self.dataset = dataset + self.tokenizer = tokenizer + self.batch_size = batch_size + self.pad_val = pad_val + self.pad_max = pad_max + self.is_calib = is_calib + + # tokenize the dataset + self.dataset = self.dataset.map(self.tokenize_function, batched=True) + self.dataset.set_format(type="torch", columns=["input_ids"]) + + @torch.no_grad() + def tokenize_function(self, examples): + if args.woq_algo in ['TEQ']: + if self.tokenizer.pad_token is None: + self.tokenizer.pad_token = self.tokenizer.eos_token + example = self.tokenizer(examples["text"], padding="max_length", max_length=self.pad_max) + else: + example = self.tokenizer(examples["text"]) + return example + + @torch.no_grad() + def collate_batch(self, batch): + + input_ids_padded = [] + last_ind = [] + + for text in batch: + input_ids = text["input_ids"] + pad_len = self.pad_max - input_ids.shape[0] + last_ind.append(input_ids.shape[0] - 1) + if self.is_calib: + if args.woq_algo != 'GPTQ': + input_ids = input_ids[:self.pad_max] if len(input_ids) > self.pad_max else input_ids + else: + input_ids = pad(input_ids, (0, pad_len), value=self.pad_val) + input_ids_padded.append(input_ids) + + return (torch.vstack(input_ids_padded), torch.tensor(last_ind)) + + @torch.no_grad() + def evaluate(self, model): + model.eval() + # The task is to predict the last word of the input. 
+ total, hit = 0, 0 + latency = 0 + test_dataloader = DataLoader( + self.dataset, + batch_size=self.batch_size, + shuffle=False, + collate_fn=self.collate_batch, + ) + for i, (input_ids, last_ind) in enumerate(test_dataloader): + label = input_ids[torch.arange(len(last_ind)), last_ind] + input_ids[torch.arange(len(last_ind)), last_ind] = self.pad_val + pad_len = self.pad_max - last_ind - 1 + + start = time.time() + outputs = model(input_ids) + latency += time.time() - start + + last_token_logits = outputs[0][torch.arange(len(last_ind)), -2 - pad_len, :] + pred = last_token_logits.argmax(dim=-1) + total += label.size(0) + hit += (pred == label).sum().item() + if (i + 1) % 50 == 0: + print(hit / total) + print("Processed minibatch:", i) + + acc = hit / total + print("Accuracy: ", acc) + print("Latency: ", latency) + return acc + + +def get_user_model(): + torchscript = False + if args.woq_algo in ['AWQ', 'TEQ']: + torchscript = True + user_model = AutoModelForCausalLM.from_pretrained( + args.model, + torchscript=torchscript, # torchscript will force `return_dict=False` to avoid jit errors + trust_remote_code=args.trust_remote_code, + revision=args.revision, + ) + tokenizer = AutoTokenizer.from_pretrained(args.model) + user_model = user_model.float() + + # Set model's seq_len when GPTQ calibration is enabled. + if args.woq_algo == 'GPTQ': + user_model.seqlen = args.gptq_max_seq_length + + if args.peft_model_id is not None: + from peft import PeftModel + user_model = PeftModel.from_pretrained(user_model, args.peft_model_id) + + # to channels last + user_model = user_model.to(memory_format=torch.channels_last) + user_model.eval() + return user_model, tokenizer + + +if args.quantize: + # dataset + user_model, tokenizer = get_user_model() + calib_dataset = load_dataset(args.dataset, split="train") + # calib_dataset = datasets.load_from_disk('/your/local/dataset/pile-10k/') # use this if trouble with connecting to HF + calib_dataset = calib_dataset.shuffle(seed=args.seed) + calib_evaluator = Evaluator(calib_dataset, tokenizer, args.batch_size, pad_max=args.pad_max_length, is_calib=True) + calib_dataloader = DataLoader( + calib_evaluator.dataset, + batch_size=calib_size, + shuffle=False, + collate_fn=calib_evaluator.collate_batch, + ) + + # 3.x api + from neural_compressor.torch.quantization import RTNConfig, GPTQConfig, prepare, convert, quantize + from neural_compressor.torch.utils import get_double_quant_config_dict + weight_sym = True if args.woq_scheme == "sym" else False + if args.double_quant_type is not None: + double_quant_config_dict = get_double_quant_config_dict(args.double_quant_type) + + if args.woq_algo == "RTN": + if args.double_quant_type is not None: + double_quant_config_dict.update( + { + # TODO: add group_dim into double quant config? 
+ "use_full_range": args.woq_use_full_range, + "use_mse_search": args.woq_use_mse_search, + } + ) + quant_config = RTNConfig.from_dict(double_quant_config_dict) + else: + quant_config = RTNConfig( + dtype=args.woq_dtype, + bits=args.woq_bits, + use_sym=weight_sym, + group_size=args.woq_group_size, + group_dim=args.woq_group_dim, + use_full_range=args.woq_use_full_range, + use_mse_search=args.woq_use_mse_search, + use_double_quant=False, + double_quant_bits=args.double_quant_bits, + double_quant_dtype=args.double_quant_dtype, + double_quant_use_sym=args.double_quant_use_sym, + double_quant_group_size=args.double_quant_group_size, + ) + quant_config.set_local("lm_head", RTNConfig(dtype="fp32")) + user_model = prepare(model=user_model, quant_config=quant_config) + user_model = convert(model=user_model) + elif args.woq_algo == "GPTQ": + from utils import DataloaderPreprocessor + dataloaderPreprocessor = DataloaderPreprocessor( + dataloader_original=calib_dataloader, + use_max_length=args.gptq_use_max_length, + max_seq_length=args.gptq_max_seq_length, + ) + dataloader_for_calibration = dataloaderPreprocessor.get_prepared_dataloader() + from neural_compressor.torch.algorithms.weight_only.utility import move_input_to_device + from tqdm import tqdm + def run_fn_for_gptq(model, dataloader_for_calibration, *args): + for batch in tqdm(dataloader_for_calibration): + batch = move_input_to_device(batch, device=None) + if isinstance(batch, tuple) or isinstance(batch, list): + model(batch[0]) + elif isinstance(batch, dict): + model(**batch) + else: + model(batch) + return + if args.double_quant_type is not None: + double_quant_config_dict.update( + { + "use_mse_search": args.woq_use_mse_search, + "percdamp": args.gptq_percdamp, + "act_order": args.gptq_actorder, + "block_size": args.gptq_block_size, + "static_groups": args.gptq_static_groups, + } + ) + quant_config = GPTQConfig.from_dict(double_quant_config_dict) + else: + quant_config = GPTQConfig( + dtype=args.woq_dtype, + bits=args.woq_bits, + use_sym=weight_sym, + group_size=args.woq_group_size, + use_mse_search=args.woq_use_mse_search, + percdamp=args.gptq_percdamp, + act_order=args.gptq_actorder, + block_size=args.gptq_block_size, + static_groups=args.gptq_static_groups, + use_double_quant=False, + double_quant_bits=args.double_quant_bits, + double_quant_dtype=args.double_quant_dtype, + double_quant_use_sym=args.double_quant_use_sym, + double_quant_group_size=args.double_quant_group_size, + ) + quant_config.set_local("lm_head", GPTQConfig(dtype="fp32")) + user_model = prepare(model=user_model, quant_config=quant_config) + run_fn_for_gptq(user_model, dataloader_for_calibration) + user_model = convert(user_model) + + user_model.save(args.output_dir) + + +# TODO: we need run_benchmark.sh for loading and remove --accuracy in run_quant.sh, currently run_quant.sh will get fp32 result + +if args.int8 or args.int8_bf16_mixed: + print("load int8 model") + + from neural_compressor.torch.quantization import load + user_model, _ = get_user_model() + tokenizer = AutoTokenizer.from_pretrained(args.model) + config = AutoConfig.from_pretrained(args.model) + user_model = load(os.path.abspath(os.path.expanduser(args.output_dir)), user_model) + setattr(user_model, "config", config) +else: + user_model, tokenizer = get_user_model() + + +if args.accuracy: + user_model.eval() + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + 
batch_size=args.batch_size, + tasks=args.tasks, + device="cpu", + ) + results = evaluate(eval_args) + for task_name in args.tasks.split(","): + if task_name == "wikitext": + acc = results["results"][task_name]["word_perplexity,none"] + else: + acc = results["results"][task_name]["acc,none"] + print("Accuracy: %.5f" % acc) + print('Batch size = %d' % args.batch_size) + +if args.performance: + user_model.eval() + from intel_extension_for_transformers.transformers.llm.evaluation.lm_eval import evaluate, LMEvalParser + import time + + samples = args.iters * args.batch_size + eval_args = LMEvalParser( + model="hf", + user_model=user_model, + tokenizer=tokenizer, + batch_size=args.batch_size, + tasks=args.tasks, + limit=samples, + device="cpu", + ) + start = time.time() + results = evaluate(eval_args) + end = time.time() + for task_name in args.tasks.split(","): + if task_name == "wikitext": + acc = results["results"][task_name]["word_perplexity,none"] + else: + acc = results["results"][task_name]["acc,none"] + print("Accuracy: %.5f" % acc) + print('Throughput: %.3f samples/sec' % (samples / (end - start))) + print('Latency: %.3f ms' % ((end - start) * 1000 / samples)) + print('Batch size = %d' % args.batch_size) diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_quant.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_quant.sh new file mode 100644 index 00000000000..a860712b697 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/run_quant.sh @@ -0,0 +1,100 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + extra_cmd='' + batch_size=8 + DATASET_NAME="NeelNanda/pile-10k" + tuned_checkpoint="saved_results" + + if [ "${topology}" = "opt_125m_woq_gptq_int4" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_bnb" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "opt_125m_woq_gptq_int4_dq_ggml" ]; then + model_name_or_path="facebook/opt-125m" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length --gptq_percdamp 0.8 --gptq_actorder" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + elif [ "${topology}" = "llama2_7b_gptq_int4" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + elif [ "${topology}" = "llama2_7b_gptq_int4_dq_bnb" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --woq_algo GPTQ 
--woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "llama2_7b_gptq_int4_dq_ggml" ]; then + model_name_or_path="meta-llama/Llama-2-7b-hf" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + elif [ "${topology}" = "gpt_j_woq_rtn_int4" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" + elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_bnb" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "gpt_j_woq_rtn_int4_dq_ggml" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo RTN --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + elif [ "${topology}" = "gpt_j_woq_gptq_int4" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_bnb" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type BNB_NF4" + elif [ "${topology}" = "gpt_j_woq_gptq_int4_dq_ggml" ]; then + model_name_or_path="EleutherAI/gpt-j-6b" + extra_cmd=$extra_cmd" --woq_algo GPTQ --woq_bits 4 --woq_group_size 128 --woq_scheme asym --woq_use_mse_search --gptq_use_max_length" + extra_cmd=$extra_cmd" --double_quant_type GGML_TYPE_Q4_K" + fi + + python -u run_clm_no_trainer.py \ + --model ${model_name_or_path} \ + --dataset ${DATASET_NAME} \ + --quantize \ + --output_dir ${tuned_checkpoint} \ + --tasks "lambada_openai" \ + --batch_size ${batch_size} \ + ${extra_cmd} +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/utils.py b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/utils.py similarity index 100% rename from examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/llm/utils.py rename to examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only/utils.py diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/README.md b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/README.md new file mode 100644 index 00000000000..b035249baac --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/README.md @@ -0,0 +1,57 @@ +Step-by-Step +============ +This document describes the step-by-step instructions for reproducing Huggingface models with IPEX backend tuning results with Intel® Neural Compressor. +> Note: IPEX version >= 1.10 + +# Prerequisite + +## 1. Environment +Recommend python 3.6 or higher version. 
+```shell +pip install -r requirements.txt +pip install torch +pip install intel_extension_for_pytorch +``` + +# Quantization + +## 1. Quantization with CPU +If IPEX version is equal or higher than 1.12, please install transformers 4.19.0. +```shell +python run_qa.py \ + --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad \ + --dataset_name squad \ + --do_eval \ + --max_seq_length 384 \ + --doc_stride 128 \ + --no_cuda \ + --tune \ + --output_dir ./savedresult +``` + +## 2. Quantization with XPU +### 2.1 Environment Setting +Please build an IPEX docker container according to the [official guide](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu&version=v2.1.30%2bxpu&os=linux%2fwsl2&package=docker). + +You can run a simple sanity test to double confirm if the correct version is installed, and if the software stack can get correct hardware information onboard your system. The command should return PyTorch and IPEX versions installed, as well as GPU card(s) information detected. +```bash +source {DPCPPROOT}/env/vars.sh +source {MKLROOT}/env/vars.sh +source {CCLROOT}/env/vars.sh +source {MPIROOT}/env/vars.sh +python -c "import torch; import intel_extension_for_pytorch as ipex; print(torch.__version__); print(ipex.__version__); [print(f'[{i}]: {torch.xpu.get_device_properties(i)}') for i in range(torch.xpu.device_count())];" +``` +Please also refer to this [tutorial](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu&version=v2.1.30%2bxpu&os=linux%2fwsl2&package=conda) to check system requirements and install dependencies. + +#### 2.2 Quantization Command +```shell +python run_qa.py \ + --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad \ + --dataset_name squad \ + --do_eval \ + --max_seq_length 384 \ + --doc_stride 128 \ + --xpu \ + --tune \ + --output_dir ./savedresult +``` diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/requirements.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/requirements.txt new file mode 100644 index 00000000000..2bb000d2deb --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/requirements.txt @@ -0,0 +1,5 @@ +accelerate +datasets>=1.8.0 +transformers>=4.34.1 +tensorboard +tqdm diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_benchmark.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_benchmark.sh new file mode 100644 index 00000000000..2f646afacdb --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_benchmark.sh @@ -0,0 +1,104 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + tuned_checkpoint=saved_results + tokenizer_name=bert-large-uncased-whole-word-masking-finetuned-squad + iters=100 + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + 
int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + --xpu=*) + xpu=$(echo ${var} |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy_only" + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --benchmark --iters "${iters} + else + echo "Error: No such mode: ${mode}" + exit 1 + fi + + extra_cmd="" + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + if [[ ${xpu} == "true" ]]; then + extra_cmd=$extra_cmd" --xpu" + fi + echo $extra_cmd + if [[ "${topology}" == "bert_large_ipex" ]]; then + model_name_or_path="bert-large-uncased-whole-word-masking-finetuned-squad" + python run_qa.py \ + --model_name_or_path $model_name_or_path \ + --dataset_name squad \ + --do_eval \ + --max_seq_length 384 \ + --no_cuda \ + --output_dir $tuned_checkpoint \ + --per_gpu_eval_batch_size $batch_size \ + $mode_cmd \ + ${extra_cmd} + fi + if [[ "${topology}" == "distilbert_base_ipex" ]]; then + model_name_or_path="distilbert-base-uncased-distilled-squad" + python run_qa.py \ + --model_name_or_path $model_name_or_path \ + --dataset_name squad \ + --do_eval \ + --max_seq_length 384 \ + --no_cuda \ + --output_dir $tuned_checkpoint \ + --per_gpu_eval_batch_size $batch_size \ + $mode_cmd \ + ${extra_cmd} + fi +} + + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_qa.py b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_qa.py new file mode 100644 index 00000000000..079c0749994 --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_qa.py @@ -0,0 +1,738 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2020 The HuggingFace Team All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for question answering using a slightly adapted version of the 🤗 Trainer. +""" +# You can also adapt this script on your own question answering task. Pointers for this are left as comments. 
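+#
+# Flow of this example:
+#   --tune          : calibrate on the SQuAD validation dataloader, run INC 3.x
+#                     static quantization (IPEX backend) and save to --output_dir
+#   --int8          : reload the saved quantized model before evaluation
+#   --accuracy_only : report eval_f1 through the QuestionAnsweringTrainer loop
+#   --benchmark     : measure latency/throughput via the neural_compressor benchmark API
+#
+# Illustrative invocation (see README.md and run_quant.sh for the full commands):
+#   python run_qa.py --model_name_or_path bert-large-uncased-whole-word-masking-finetuned-squad \
+#       --dataset_name squad --do_eval --max_seq_length 384 --no_cuda --tune --output_dir ./savedresult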
+ +import datasets +import logging +import os +import sys +import timeit +import transformers +from dataclasses import dataclass, field +from datasets import load_dataset, load_metric +from trainer_qa import QuestionAnsweringTrainer +from transformers import ( + AutoConfig, + AutoModelForQuestionAnswering, + AutoTokenizer, + DataCollatorWithPadding, + EvalPrediction, + HfArgumentParser, + PreTrainedTokenizerFast, + TrainingArguments, + default_data_collator, + set_seed, +) +from transformers.trainer_utils import get_last_checkpoint +from transformers.utils import check_min_version +from transformers.utils.versions import require_version +from typing import Optional +from utils_qa import postprocess_qa_predictions +from neural_compressor.utils.utility import LazyImport +try: + import intel_extension_for_pytorch as ipex + from intel_extension_for_pytorch.quantization import prepare, convert + from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig +except: + assert False, "transformers 4.19.0 requests IPEX version higher or equal to 1.12" +torch = LazyImport("torch") + + +# Will error if the minimal version of Transformers is not installed. Remove at your own risks. +check_min_version("4.12.0") + +require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") + +logger = logging.getLogger(__name__) + +os.environ["WANDB_DISABLED"] = "true" + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. + """ + + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"}, + ) + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + ) + use_auth_token: bool = field( + default=False, + metadata={ + "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "with private models)." + }, + ) + tune: bool = field( + default=False, + metadata={"help": "Whether or not to apply quantization."}, + ) + int8: bool = field( + default=False, metadata={"help": "use int8 model to get accuracy or benchmark"} + ) + benchmark: bool = field( + default=False, metadata={"help": "get benchmark instead of accuracy"} + ) + accuracy_only: bool = field( + default=False, metadata={"help": "get accuracy"} + ) + iters: int = field( + default=100, + metadata={ + "help": "The inference iterations to run for benchmark." + }, + ) + xpu: bool = field( + default=False, metadata={"help": "whether to use xpu"} + ) + calib_iters: int = field( + default=512, + metadata={ + "help": "The inference iterations to calibration." + }, + ) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. 
+ """ + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) + validation_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, + ) + test_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + max_seq_length: int = field( + default=384, + metadata={ + "help": "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + }, + ) + pad_to_max_length: bool = field( + default=True, + metadata={ + "help": "Whether to pad all samples to `max_seq_length`. " + "If False, will pad the samples dynamically when batching to the maximum length in the batch (which can " + "be faster on GPU but will be slower on TPU)." + }, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + }, + ) + max_predict_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this " + "value if set." + }, + ) + version_2_with_negative: bool = field( + default=False, metadata={"help": "If true, some of the examples do not have an answer."} + ) + null_score_diff_threshold: float = field( + default=0.0, + metadata={ + "help": "The threshold used to select the null answer: if the best answer has a score that is less than " + "the score of the null answer minus this threshold, the null answer is selected for this example. " + "Only useful when `version_2_with_negative=True`." + }, + ) + doc_stride: int = field( + default=128, + metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, + ) + n_best_size: int = field( + default=20, + metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, + ) + max_answer_length: int = field( + default=30, + metadata={ + "help": "The maximum length of an answer that can be generated. This is needed because the start " + "and end predictions are not conditioned on one another." 
+ }, + ) + + def __post_init__(self): + if ( + self.dataset_name is None + and self.train_file is None + and self.validation_file is None + and self.test_file is None + ): + raise ValueError("Need either a dataset name or a training/validation file/test_file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." + if self.test_file is not None: + extension = self.test_file.split(".")[-1] + assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + + log_level = training_args.get_process_log_level() + logger.setLevel(log_level) + datasets.utils.logging.set_verbosity(log_level) + transformers.utils.logging.set_verbosity(log_level) + transformers.utils.logging.enable_default_handler() + transformers.utils.logging.enable_explicit_format() + + # Log on each process the small summary: + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + logger.info(f"Training/evaluation parameters {training_args}") + + # Detecting last checkpoint. + last_checkpoint = None + if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: + last_checkpoint = get_last_checkpoint(training_args.output_dir) + if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty. " + "Use --overwrite_output_dir to overcome." + ) + elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: + logger.info( + f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " + "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." + ) + + # Set seed before initializing model. + set_seed(training_args.seed) + + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called + # 'text' is found. 
You can easily tweak this behavior (see below). + # + # In distributed training, the load_dataset function guarantee that only one local process can concurrently + # download the dataset. + if data_args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + raw_datasets = load_dataset( + data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir + ) + else: + data_files = {} + if data_args.train_file is not None: + data_files["train"] = data_args.train_file + extension = data_args.train_file.split(".")[-1] + + if data_args.validation_file is not None: + data_files["validation"] = data_args.validation_file + extension = data_args.validation_file.split(".")[-1] + if data_args.test_file is not None: + data_files["test"] = data_args.test_file + extension = data_args.test_file.split(".")[-1] + raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir) + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: + # The .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab. + config = AutoConfig.from_pretrained( + model_args.config_name if model_args.config_name else model_args.model_name_or_path, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + ) + tokenizer = AutoTokenizer.from_pretrained( + model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, + cache_dir=model_args.cache_dir, + use_fast=True, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + ) + + model = AutoModelForQuestionAnswering.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + ) + + # Tokenizer check: this script requires a fast tokenizer. + if not isinstance(tokenizer, PreTrainedTokenizerFast): + raise ValueError( + "This example script only works for models that have a fast tokenizer. Checkout the big table of models " + "at https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet this " + "requirement" + ) + + # Preprocessing the datasets. + # Preprocessing is slightly different for training and evaluation. + if training_args.do_train: + column_names = raw_datasets["train"].column_names + elif training_args.do_eval: + column_names = raw_datasets["validation"].column_names + else: + column_names = raw_datasets["test"].column_names + question_column_name = "question" if "question" in column_names else column_names[0] + context_column_name = "context" if "context" in column_names else column_names[1] + answer_column_name = "answers" if "answers" in column_names else column_names[2] + + # Padding side determines if we do (question|context) or (context|question). + pad_on_right = tokenizer.padding_side == "right" + + if data_args.max_seq_length > tokenizer.model_max_length: + logger.warning( + f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" + f"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}." + ) + max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) + + # Training preprocessing + def prepare_train_features(examples): + # Some of the questions have lots of whitespace on the left, which is not useful and will make the + # truncation of the context fail (the tokenized question will take a lots of space). So we remove that + # left whitespace + examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] + + # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results + # in one example possible giving several features when a context is long, each of those features having a + # context that overlaps a bit the context of the previous feature. + tokenized_examples = tokenizer( + examples[question_column_name if pad_on_right else context_column_name], + examples[context_column_name if pad_on_right else question_column_name], + truncation="only_second" if pad_on_right else "only_first", + max_length=max_seq_length, + stride=data_args.doc_stride, + return_overflowing_tokens=True, + return_offsets_mapping=True, + padding="max_length" if data_args.pad_to_max_length else False, + ) + + # Since one example might give us several features if it has a long context, we need a map from a feature to + # its corresponding example. This key gives us just that. + sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") + # The offset mappings will give us a map from token to character position in the original context. This will + # help us compute the start_positions and end_positions. + offset_mapping = tokenized_examples.pop("offset_mapping") + + # Let's label those examples! + tokenized_examples["start_positions"] = [] + tokenized_examples["end_positions"] = [] + + for i, offsets in enumerate(offset_mapping): + # We will label impossible answers with the index of the CLS token. + input_ids = tokenized_examples["input_ids"][i] + cls_index = input_ids.index(tokenizer.cls_token_id) + + # Grab the sequence corresponding to that example (to know what is the context and what is the question). + sequence_ids = tokenized_examples.sequence_ids(i) + + # One example can give several spans, this is the index of the example containing this span of text. + sample_index = sample_mapping[i] + answers = examples[answer_column_name][sample_index] + # If no answers are given, set the cls_index as answer. + if len(answers["answer_start"]) == 0: + tokenized_examples["start_positions"].append(cls_index) + tokenized_examples["end_positions"].append(cls_index) + else: + # Start/end character index of the answer in the text. + start_char = answers["answer_start"][0] + end_char = start_char + len(answers["text"][0]) + + # Start token index of the current span in the text. + token_start_index = 0 + while sequence_ids[token_start_index] != (1 if pad_on_right else 0): + token_start_index += 1 + + # End token index of the current span in the text. + token_end_index = len(input_ids) - 1 + while sequence_ids[token_end_index] != (1 if pad_on_right else 0): + token_end_index -= 1 + + # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). 
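+                # (i.e. the answer's character span is not fully covered by the
+                # context characters that this feature's offsets map to)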
+ if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): + tokenized_examples["start_positions"].append(cls_index) + tokenized_examples["end_positions"].append(cls_index) + else: + # Otherwise move the token_start_index and token_end_index to the two ends of the answer. + # Note: we could go after the last offset if the answer is the last word (edge case). + while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: + token_start_index += 1 + tokenized_examples["start_positions"].append(token_start_index - 1) + while offsets[token_end_index][1] >= end_char: + token_end_index -= 1 + tokenized_examples["end_positions"].append(token_end_index + 1) + + return tokenized_examples + + if training_args.do_train: + if "train" not in raw_datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = raw_datasets["train"] + if data_args.max_train_samples is not None: + # We will select sample from whole data if argument is specified + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + # Create train feature from dataset + with training_args.main_process_first(desc="train dataset map pre-processing"): + train_dataset = train_dataset.map( + prepare_train_features, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on train dataset", + ) + if data_args.max_train_samples is not None: + # Number of samples might increase during Feature Creation, We select only specified max samples + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + + # Validation preprocessing + def prepare_validation_features(examples): + # Some of the questions have lots of whitespace on the left, which is not useful and will make the + # truncation of the context fail (the tokenized question will take a lots of space). So we remove that + # left whitespace + examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] + + # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results + # in one example possible giving several features when a context is long, each of those features having a + # context that overlaps a bit the context of the previous feature. + tokenized_examples = tokenizer( + examples[question_column_name if pad_on_right else context_column_name], + examples[context_column_name if pad_on_right else question_column_name], + truncation="only_second" if pad_on_right else "only_first", + max_length=max_seq_length, + stride=data_args.doc_stride, + return_overflowing_tokens=True, + return_offsets_mapping=True, + padding="max_length" if data_args.pad_to_max_length else False, + ) + + # Since one example might give us several features if it has a long context, we need a map from a feature to + # its corresponding example. This key gives us just that. + sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") + + # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the + # corresponding example_id and we will store the offset mappings. 
+ tokenized_examples["example_id"] = [] + + for i in range(len(tokenized_examples["input_ids"])): + # Grab the sequence corresponding to that example (to know what is the context and what is the question). + sequence_ids = tokenized_examples.sequence_ids(i) + context_index = 1 if pad_on_right else 0 + + # One example can give several spans, this is the index of the example containing this span of text. + sample_index = sample_mapping[i] + tokenized_examples["example_id"].append(examples["id"][sample_index]) + + # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token + # position is part of the context or not. + tokenized_examples["offset_mapping"][i] = [ + (o if sequence_ids[k] == context_index else None) + for k, o in enumerate(tokenized_examples["offset_mapping"][i]) + ] + + return tokenized_examples + + if training_args.do_eval: + if "validation" not in raw_datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_examples = raw_datasets["validation"] + if data_args.max_eval_samples is not None: + # We will select sample from whole data + max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) + eval_examples = eval_examples.select(range(max_eval_samples)) + # Validation Feature Creation + with training_args.main_process_first(desc="validation dataset map pre-processing"): + eval_dataset = eval_examples.map( + prepare_validation_features, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on validation dataset", + ) + if data_args.max_eval_samples is not None: + # During Feature creation dataset samples might increase, we will select required samples again + max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) + eval_dataset = eval_dataset.select(range(max_eval_samples)) + + if training_args.do_predict: + if "test" not in raw_datasets: + raise ValueError("--do_predict requires a test dataset") + predict_examples = raw_datasets["test"] + if data_args.max_predict_samples is not None: + # We will select sample from whole data + predict_examples = predict_examples.select(range(data_args.max_predict_samples)) + # Predict Feature Creation + with training_args.main_process_first(desc="prediction dataset map pre-processing"): + predict_dataset = predict_examples.map( + prepare_validation_features, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on prediction dataset", + ) + if data_args.max_predict_samples is not None: + # During Feature creation dataset samples might increase, we will select required samples again + max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) + predict_dataset = predict_dataset.select(range(max_predict_samples)) + + # Data collator + # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data + # collator. + data_collator = ( + default_data_collator + if data_args.pad_to_max_length + else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) + ) + + # Post-processing: + def post_processing_function(examples, features, predictions, stage="eval"): + # Post-processing: we match the start logits and end logits to answers in the original context. 
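+        # postprocess_qa_predictions (defined in utils_qa.py) converts the
+        # per-feature start/end logits into answer substrings of the original context.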
+ predictions = postprocess_qa_predictions( + examples=examples, + features=features, + predictions=predictions, + version_2_with_negative=data_args.version_2_with_negative, + n_best_size=data_args.n_best_size, + max_answer_length=data_args.max_answer_length, + null_score_diff_threshold=data_args.null_score_diff_threshold, + output_dir=training_args.output_dir, + log_level=log_level, + prefix=stage, + ) + # Format the result to the format the metric expects. + if data_args.version_2_with_negative: + formatted_predictions = [ + {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() + ] + else: + formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] + + references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] + return EvalPrediction(predictions=formatted_predictions, label_ids=references) + + metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad", trust_remote_code=True) + + def compute_metrics(p: EvalPrediction): + return metric.compute(predictions=p.predictions, references=p.label_ids) + + # Initialize our Trainer + trainer = QuestionAnsweringTrainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + eval_examples=eval_examples if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + post_process_function=post_processing_function, + compute_metrics=compute_metrics, + ) + + eval_dataloader = trainer.get_eval_dataloader() + # transformer issue #1 + # for transformers 4.31.0: accelerate dataloader + # *** ValueError: batch_size attribute should not be set + # after DataLoaderShard is initialized + if eval_dataloader.batch_size is None: + def _build_inc_dataloader(dataloader): + class INCDataLoader: + __iter__ = dataloader.__iter__ + def __init__(self) -> None: + self.dataloader = dataloader + self.batch_size = dataloader.total_batch_size + return INCDataLoader() + eval_dataloader = _build_inc_dataloader(eval_dataloader) + batch_size = eval_dataloader.batch_size + metric_name = "eval_f1" + + def take_eval_steps(model, trainer, metric_name, save_metrics=False): + trainer.model = model + start_time = timeit.default_timer() + metrics = trainer.evaluate() + evalTime = timeit.default_timer() - start_time + max_eval_samples = data_args.max_eval_samples \ + if data_args.max_eval_samples is not None else len(eval_dataset) + eval_samples = min(max_eval_samples, len(eval_dataset)) + samples = eval_samples - (eval_samples % batch_size) \ + if training_args.dataloader_drop_last else eval_samples + if save_metrics: + trainer.save_metrics("eval", metrics) + logger.info("metrics keys: {}".format(metrics.keys())) + print('Batch size = %d' % batch_size) + print("Finally Eval {} Accuracy: {}".format(metric_name, metrics.get(metric_name))) + print("Latency: %.3f ms" % (evalTime / samples * 1000)) + print("Throughput: {} samples/sec".format(samples / evalTime)) + return metrics.get(metric_name) + + def eval_func(model): + return take_eval_steps(model, trainer, metric_name) + + if model_args.tune: + ipex.nn.utils._model_convert.replace_dropout_with_identity(model) + from neural_compressor.torch.quantization import get_default_static_config + quant_config = get_default_static_config() + dummy_input_ids = torch.ones((training_args.per_device_eval_batch_size, data_args.max_seq_length), dtype=torch.long) + dummy_token_type_ids = 
torch.ones((training_args.per_device_eval_batch_size, data_args.max_seq_length), dtype=torch.long) + dummy_attention_mask = torch.ones((training_args.per_device_eval_batch_size, data_args.max_seq_length), dtype=torch.long) + if model.config.model_type == "distilbert": + example_inputs = (dummy_input_ids, dummy_attention_mask) + elif model.config.model_type == "bert": + example_inputs = (dummy_input_ids, dummy_attention_mask, dummy_token_type_ids) + else: + example_inputs = None # please provide correct example_inputs if necessary. + + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + from tqdm import tqdm + def run_fn(model): + calib_iter = 0 + for batch in tqdm(eval_dataloader, total=model_args.calib_iters): + batch = move_input_to_device(batch, device=None) + if isinstance(batch, tuple) or isinstance(batch, list): + model(batch[0]) + elif isinstance(batch, dict): + model(**batch) + else: + model(batch) + + calib_iter += 1 + if calib_iter >= model_args.calib_iters: + break + return + + from neural_compressor.torch.quantization import prepare, convert + model = prepare(model=model, quant_config=quant_config, example_inputs=example_inputs) + run_fn(model) + q_model = convert(model) + q_model.save(training_args.output_dir) + return + + model.eval() + if model_args.int8: + print("load int8 model") + from neural_compressor.torch.quantization import load + model = load(os.path.abspath(os.path.expanduser(training_args.output_dir))) + else: + from utils_qa import get_example_inputs + example_inputs = get_example_inputs(model, eval_dataloader) + model = ipex.optimize(model) + with torch.no_grad(): + if isinstance(example_inputs, dict): + model = torch.jit.trace(model, example_kwarg_inputs=example_inputs, strict=False) + else: + model = torch.jit.trace(model, example_inputs, strict=False) + model = torch.jit.freeze(model) + + if model_args.benchmark or model_args.accuracy_only: + if model_args.benchmark: + from neural_compressor.config import BenchmarkConfig + from neural_compressor import benchmark + b_conf = BenchmarkConfig(backend="ipex", + warmup=5, + iteration=model_args.iters, + cores_per_instance=4, + num_of_instance=1) + if model_args.xpu: + b_conf.device = "xpu" + benchmark.fit(model, b_conf, b_dataloader=eval_dataloader) + else: + eval_func(model) + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_quant.sh b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_quant.sh new file mode 100644 index 00000000000..ae49ed79f5f --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/run_quant.sh @@ -0,0 +1,64 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} +# init params +function init_params { + tuned_checkpoint=saved_results + tokenizer_name=bert-large-uncased-whole-word-masking-finetuned-squad + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done +} + + +# run_tuning +function run_tuning { + if [[ "${topology}" == "bert_large_ipex" ]]; then + 
model_name_or_path="bert-large-uncased-whole-word-masking-finetuned-squad" + python run_qa.py \ + --model_name_or_path $model_name_or_path \ + --dataset_name squad \ + --do_eval \ + --max_seq_length 384 \ + --no_cuda \ + --tune \ + --output_dir $tuned_checkpoint + fi + if [[ "${topology}" == "distilbert_base_ipex" ]]; then + model_name_or_path="distilbert-base-uncased-distilled-squad" + python run_qa.py \ + --model_name_or_path $model_name_or_path \ + --dataset_name squad \ + --do_eval \ + --max_seq_length 384 \ + --no_cuda \ + --tune \ + --output_dir $tuned_checkpoint + fi +} + +main "$@" diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/trainer_qa.py b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/trainer_qa.py new file mode 100644 index 00000000000..7f98eba236c --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/trainer_qa.py @@ -0,0 +1,105 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Team All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +A subclass of `Trainer` specific to Question-Answering tasks +""" + +from transformers import Trainer, is_torch_tpu_available +from transformers.trainer_utils import PredictionOutput + + +if is_torch_tpu_available(): + import torch_xla.core.xla_model as xm + import torch_xla.debug.metrics as met + + +class QuestionAnsweringTrainer(Trainer): + def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs): + super().__init__(*args, **kwargs) + self.eval_examples = eval_examples + self.post_process_function = post_process_function + + def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"): + eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset + eval_dataloader = self.get_eval_dataloader(eval_dataset) + eval_examples = self.eval_examples if eval_examples is None else eval_examples + + # Temporarily disable metric computation, we will do it in the loop here. 
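+        # (Feature-level predictions must first be mapped back to example-level
+        # answers by post_process_function, so metrics are computed after the loop.)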
+ compute_metrics = self.compute_metrics + self.compute_metrics = None + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + try: + output = eval_loop( + eval_dataloader, + description="Evaluation", + # No point gathering the predictions if there are no metrics, otherwise we defer to + # self.args.prediction_loss_only + prediction_loss_only=True if compute_metrics is None else None, + ignore_keys=ignore_keys, + ) + finally: + self.compute_metrics = compute_metrics + + if self.post_process_function is not None and self.compute_metrics is not None: + eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions) + metrics = self.compute_metrics(eval_preds) + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + self.log(metrics) + else: + metrics = {} + + if self.args.tpu_metrics_debug or self.args.debug: + # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) + xm.master_print(met.metrics_report()) + + self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) + return metrics + + def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"): + predict_dataloader = self.get_test_dataloader(predict_dataset) + + # Temporarily disable metric computation, we will do it in the loop here. + compute_metrics = self.compute_metrics + self.compute_metrics = None + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + try: + output = eval_loop( + predict_dataloader, + description="Prediction", + # No point gathering the predictions if there are no metrics, otherwise we defer to + # self.args.prediction_loss_only + prediction_loss_only=True if compute_metrics is None else None, + ignore_keys=ignore_keys, + ) + finally: + self.compute_metrics = compute_metrics + + if self.post_process_function is None or self.compute_metrics is None: + return output + + predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict") + metrics = self.compute_metrics(predictions) + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics) diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/utils_qa.py b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/utils_qa.py new file mode 100644 index 00000000000..6514e6ba7ad --- /dev/null +++ b/examples/3.x_api/pytorch/nlp/huggingface_models/question-answering/quantization/static_quant/ipex/utils_qa.py @@ -0,0 +1,481 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Post-processing utilities for question answering. +""" +import collections +import json +import logging +import os +import torch +from typing import Optional, Tuple +from collections import UserDict +from packaging.version import Version +from neural_compressor.torch.utils import get_torch_version + +import numpy as np +from tqdm.auto import tqdm + + +logger = logging.getLogger(__name__) + + +def get_example_inputs(model, dataloader): + version = get_torch_version() + from neural_compressor.torch.algorithms.smooth_quant import move_input_to_device + + # Suggest set dataloader like calib_dataloader + if dataloader is None: + return None + device = next(model.parameters()).device + try: + for idx, (input, label) in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC support IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, (list, tuple)): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + except Exception as e: # pragma: no cover + for idx, input in enumerate(dataloader): + input = move_input_to_device(input, device) + if isinstance(input, (dict, UserDict)): # pragma: no cover + assert version.release >= Version("1.12.0").release, "INC support IPEX version >= 1.12.0" + if "label" in input.keys(): + input.pop("label") + if version.release <= Version("2.0.1").release: + return tuple(input.values()) + else: + return dict(input) + if isinstance(input, list) or isinstance(input, tuple): + return tuple(input) + if isinstance(input, torch.Tensor): + return input + break + if idx == 0: + assert False, "Please checkout the example_inputs format." + + +def postprocess_qa_predictions( + examples, + features, + predictions: Tuple[np.ndarray, np.ndarray], + version_2_with_negative: bool = False, + n_best_size: int = 20, + max_answer_length: int = 30, + null_score_diff_threshold: float = 0.0, + output_dir: Optional[str] = None, + prefix: Optional[str] = None, + log_level: Optional[int] = logging.WARNING, +): + """ + Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the + original contexts. This is the base postprocessing functions for models that only return start and end logits. + + Args: + examples: The non-preprocessed dataset (see the main script for more information). + features: The processed dataset (see the main script for more information). + predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): + The predictions of the model: two arrays containing the start logits and the end logits respectively. Its + first dimension must match the number of elements of :obj:`features`. + version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether or not the underlying dataset contains examples with no answers. 
+ n_best_size (:obj:`int`, `optional`, defaults to 20): + The total number of n-best predictions to generate when looking for an answer. + max_answer_length (:obj:`int`, `optional`, defaults to 30): + The maximum length of an answer that can be generated. This is needed because the start and end predictions + are not conditioned on one another. + null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): + The threshold used to select the null answer: if the best answer has a score that is less than the score of + the null answer minus this threshold, the null answer is selected for this example (note that the score of + the null answer for an example giving several features is the minimum of the scores for the null answer on + each feature: all features must be aligned on the fact they `want` to predict a null answer). + + Only useful when :obj:`version_2_with_negative` is :obj:`True`. + output_dir (:obj:`str`, `optional`): + If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if + :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null + answers, are saved in `output_dir`. + prefix (:obj:`str`, `optional`): + If provided, the dictionaries mentioned above are saved with `prefix` added to their names. + log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): + ``logging`` log level (e.g., ``logging.WARNING``) + """ + if len(predictions) != 2: + raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).") + all_start_logits, all_end_logits = predictions + + if len(predictions[0]) != len(features): + raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") + + # Build a map example to its corresponding features. + example_id_to_index = {k: i for i, k in enumerate(examples["id"])} + features_per_example = collections.defaultdict(list) + for i, feature in enumerate(features): + features_per_example[example_id_to_index[feature["example_id"]]].append(i) + + # The dictionaries we have to fill. + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + if version_2_with_negative: + scores_diff_json = collections.OrderedDict() + + # Logging. + logger.setLevel(log_level) + logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") + + # Let's loop over all the examples! + for example_index, example in enumerate(tqdm(examples)): + # Those are the indices of the features associated to the current example. + feature_indices = features_per_example[example_index] + + min_null_prediction = None + prelim_predictions = [] + + # Looping through all the features associated to the current example. + for feature_index in feature_indices: + # We grab the predictions of the model for this feature. + start_logits = all_start_logits[feature_index] + end_logits = all_end_logits[feature_index] + # This is what will allow us to map some the positions in our logits to span of texts in the original + # context. + offset_mapping = features[feature_index]["offset_mapping"] + # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context + # available in the current feature. + token_is_max_context = features[feature_index].get("token_is_max_context", None) + + # Update minimum null prediction. 
+ feature_null_score = start_logits[0] + end_logits[0] + if min_null_prediction is None or min_null_prediction["score"] > feature_null_score: + min_null_prediction = { + "offsets": (0, 0), + "score": feature_null_score, + "start_logit": start_logits[0], + "end_logit": end_logits[0], + } + + # Go through all possibilities for the `n_best_size` greater start and end logits. + start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() + end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() + for start_index in start_indexes: + for end_index in end_indexes: + # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond + # to part of the input_ids that are not in the context. + if ( + start_index >= len(offset_mapping) + or end_index >= len(offset_mapping) + or offset_mapping[start_index] is None + or len(offset_mapping[start_index]) < 2 + or offset_mapping[end_index] is None + or len(offset_mapping[end_index]) < 2 + ): + continue + # Don't consider answers with a length that is either < 0 or > max_answer_length. + if end_index < start_index or end_index - start_index + 1 > max_answer_length: + continue + # Don't consider answer that don't have the maximum context available (if such information is + # provided). + if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): + continue + prelim_predictions.append( + { + "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), + "score": start_logits[start_index] + end_logits[end_index], + "start_logit": start_logits[start_index], + "end_logit": end_logits[end_index], + } + ) + if version_2_with_negative: + # Add the minimum null prediction + prelim_predictions.append(min_null_prediction) + null_score = min_null_prediction["score"] + + # Only keep the best `n_best_size` predictions. + predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] + + # Add back the minimum null prediction if it was removed because of its low score. + if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions): + predictions.append(min_null_prediction) + + # Use the offsets to gather the answer text in the original context. + context = example["context"] + for pred in predictions: + offsets = pred.pop("offsets") + pred["text"] = context[offsets[0] : offsets[1]] + + # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid + # failure. + if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""): + predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}) + + # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using + # the LogSumExp trick). + scores = np.array([pred.pop("score") for pred in predictions]) + exp_scores = np.exp(scores - np.max(scores)) + probs = exp_scores / exp_scores.sum() + + # Include the probabilities in our predictions. + for prob, pred in zip(probs, predictions): + pred["probability"] = prob + + # Pick the best prediction. If the null answer is not possible, this is easy. + if not version_2_with_negative: + all_predictions[example["id"]] = predictions[0]["text"] + else: + # Otherwise we first need to find the best non-empty prediction. + i = 0 + while predictions[i]["text"] == "": + i += 1 + best_non_null_pred = predictions[i] + + # Then we compare to the null prediction using the threshold. 
+ score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"] + scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable. + if score_diff > null_score_diff_threshold: + all_predictions[example["id"]] = "" + else: + all_predictions[example["id"]] = best_non_null_pred["text"] + + # Make `predictions` JSON-serializable by casting np.float32 back to float. + all_nbest_json[example["id"]] = [ + {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} + for pred in predictions + ] + + # If we have an output_dir, let's save all those dicts. + if output_dir is not None: + if not os.path.isdir(output_dir): + raise EnvironmentError(f"{output_dir} is not a directory.") + + prediction_file = os.path.join( + output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" + ) + nbest_file = os.path.join( + output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" + ) + if version_2_with_negative: + null_odds_file = os.path.join( + output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" + ) + + logger.info(f"Saving predictions to {prediction_file}.") + with open(prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + logger.info(f"Saving nbest_preds to {nbest_file}.") + with open(nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + if version_2_with_negative: + logger.info(f"Saving null_odds to {null_odds_file}.") + with open(null_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions + + +def postprocess_qa_predictions_with_beam_search( + examples, + features, + predictions: Tuple[np.ndarray, np.ndarray], + version_2_with_negative: bool = False, + n_best_size: int = 20, + max_answer_length: int = 30, + start_n_top: int = 5, + end_n_top: int = 5, + output_dir: Optional[str] = None, + prefix: Optional[str] = None, + log_level: Optional[int] = logging.WARNING, +): + """ + Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the + original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as + cls token predictions. + + Args: + examples: The non-preprocessed dataset (see the main script for more information). + features: The processed dataset (see the main script for more information). + predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): + The predictions of the model: two arrays containing the start logits and the end logits respectively. Its + first dimension must match the number of elements of :obj:`features`. + version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): + Whether or not the underlying dataset contains examples with no answers. + n_best_size (:obj:`int`, `optional`, defaults to 20): + The total number of n-best predictions to generate when looking for an answer. + max_answer_length (:obj:`int`, `optional`, defaults to 30): + The maximum length of an answer that can be generated. This is needed because the start and end predictions + are not conditioned on one another. + start_n_top (:obj:`int`, `optional`, defaults to 5): + The number of top start logits too keep when searching for the :obj:`n_best_size` predictions. 
+ end_n_top (:obj:`int`, `optional`, defaults to 5): + The number of top end logits too keep when searching for the :obj:`n_best_size` predictions. + output_dir (:obj:`str`, `optional`): + If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if + :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null + answers, are saved in `output_dir`. + prefix (:obj:`str`, `optional`): + If provided, the dictionaries mentioned above are saved with `prefix` added to their names. + log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): + ``logging`` log level (e.g., ``logging.WARNING``) + """ + if len(predictions) != 5: + raise ValueError("`predictions` should be a tuple with five elements.") + start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions + + if len(predictions[0]) != len(features): + raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") + + # Build a map example to its corresponding features. + example_id_to_index = {k: i for i, k in enumerate(examples["id"])} + features_per_example = collections.defaultdict(list) + for i, feature in enumerate(features): + features_per_example[example_id_to_index[feature["example_id"]]].append(i) + + # The dictionaries we have to fill. + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() if version_2_with_negative else None + + # Logging. + logger.setLevel(log_level) + logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") + + # Let's loop over all the examples! + for example_index, example in enumerate(tqdm(examples)): + # Those are the indices of the features associated to the current example. + feature_indices = features_per_example[example_index] + + min_null_score = None + prelim_predictions = [] + + # Looping through all the features associated to the current example. + for feature_index in feature_indices: + # We grab the predictions of the model for this feature. + start_log_prob = start_top_log_probs[feature_index] + start_indexes = start_top_index[feature_index] + end_log_prob = end_top_log_probs[feature_index] + end_indexes = end_top_index[feature_index] + feature_null_score = cls_logits[feature_index] + # This is what will allow us to map some the positions in our logits to span of texts in the original + # context. + offset_mapping = features[feature_index]["offset_mapping"] + # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context + # available in the current feature. + token_is_max_context = features[feature_index].get("token_is_max_context", None) + + # Update minimum null prediction + if min_null_score is None or feature_null_score < min_null_score: + min_null_score = feature_null_score + + # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits. 
+ for i in range(start_n_top): + for j in range(end_n_top): + start_index = int(start_indexes[i]) + j_index = i * end_n_top + j + end_index = int(end_indexes[j_index]) + # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the + # p_mask but let's not take any risk) + if ( + start_index >= len(offset_mapping) + or end_index >= len(offset_mapping) + or offset_mapping[start_index] is None + or offset_mapping[end_index] is None + ): + continue + # Don't consider answers with a length negative or > max_answer_length. + if end_index < start_index or end_index - start_index + 1 > max_answer_length: + continue + # Don't consider answer that don't have the maximum context available (if such information is + # provided). + if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): + continue + prelim_predictions.append( + { + "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), + "score": start_log_prob[i] + end_log_prob[j_index], + "start_log_prob": start_log_prob[i], + "end_log_prob": end_log_prob[j_index], + } + ) + + # Only keep the best `n_best_size` predictions. + predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] + + # Use the offsets to gather the answer text in the original context. + context = example["context"] + for pred in predictions: + offsets = pred.pop("offsets") + pred["text"] = context[offsets[0] : offsets[1]] + + # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid + # failure. + if len(predictions) == 0: + predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6}) + + # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using + # the LogSumExp trick). + scores = np.array([pred.pop("score") for pred in predictions]) + exp_scores = np.exp(scores - np.max(scores)) + probs = exp_scores / exp_scores.sum() + + # Include the probabilities in our predictions. + for prob, pred in zip(probs, predictions): + pred["probability"] = prob + + # Pick the best prediction and set the probability for the null answer. + all_predictions[example["id"]] = predictions[0]["text"] + if version_2_with_negative: + scores_diff_json[example["id"]] = float(min_null_score) + + # Make `predictions` JSON-serializable by casting np.float32 back to float. + all_nbest_json[example["id"]] = [ + {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} + for pred in predictions + ] + + # If we have an output_dir, let's save all those dicts. 
+ if output_dir is not None: + if not os.path.isdir(output_dir): + raise EnvironmentError(f"{output_dir} is not a directory.") + + prediction_file = os.path.join( + output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" + ) + nbest_file = os.path.join( + output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" + ) + if version_2_with_negative: + null_odds_file = os.path.join( + output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" + ) + + logger.info(f"Saving predictions to {prediction_file}.") + with open(prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + logger.info(f"Saving nbest_preds to {nbest_file}.") + with open(nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + if version_2_with_negative: + logger.info(f"Saving null_odds to {null_odds_file}.") + with open(null_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions, scores_diff_json diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/CODE_OF_CONDUCT.md b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..0f7ad8bfc17 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +Facebook has adopted a Code of Conduct that we expect project participants to adhere to. +Please read the [full text](https://code.fb.com/codeofconduct/) +so that you can understand what actions will and will not be tolerated. diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/CONTRIBUTING.md b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/CONTRIBUTING.md new file mode 100644 index 00000000000..cc013a17ec8 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/CONTRIBUTING.md @@ -0,0 +1,36 @@ +# Contributing to DLRM +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `master`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## Coding Style +* 4 spaces for indentation rather than tabs +* 80 character line length +* in general, please maintain a consistent style with the rest of the code + +## License +By contributing to DLRM, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. 
diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/LICENSE b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/LICENSE
new file mode 100644
index 00000000000..b96dcb0480a
--- /dev/null
+++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Facebook, Inc. and its affiliates.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/README.md b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/README.md
new file mode 100644
index 00000000000..918cc1edc23
--- /dev/null
+++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/README.md
@@ -0,0 +1,90 @@
+Step-by-Step
+============
+
+This document lists the steps to reproduce the PyTorch DLRM tuning result. The original DLRM README is available at [DLRM README](https://github.com/facebookresearch/dlrm/blob/master/README.md).
+
+> **Note**
+>
+> Please ensure your machine has more than 370 GB of memory to run DLRM.
+> IPEX version >= 1.11 is required.
+
+# Prerequisite
+
+### 1. Environment
+
+PyTorch 1.11 or a higher version is needed, with the pytorch_fx backend.
+
+  ```shell
+  # Install dependency
+  cd examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex
+  pip install -r requirements.txt
+  ```
+> Note: Validated PyTorch [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+### 2. Prepare Dataset
+
+  The code supports interfacing with the [Criteo Terabyte Dataset](https://labs.criteo.com/2013/12/download-terabyte-click-logs/)
+
+  1. Download the raw data files day_0.gz, ..., day_23.gz and unzip them.
+  2. Specify the location of the unzipped text files day_0, ..., day_23 using --raw-data-file= (the day number will be appended automatically); please refer to the "Run" command.
+
+### 3. Prepare pretrained model
+
+  Download the DLRM PyTorch weights (`tb00_40M.pt`, 90GB) from the
+[MLPerf repo](https://github.com/mlcommons/inference/tree/master/recommendation/dlrm/pytorch#more-information-about-the-model-weights).
+
+# Run
+### Tune with INC
+  ```shell
+  cd examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex
+  bash run_quant.sh --input_model="/path/of/pretrained/model" --dataset_location="/path/of/dataset"
+  ```
+
+### Benchmark
+```shell
+bash run_benchmark.sh --input_model="/path/of/pretrained/model" --dataset_location="/path/of/dataset" --mode=accuracy --int8=true
+```
+
+
+Examples of enabling Intel® Neural Compressor
+=========================
+
+This is a tutorial on how to enable the DLRM model with Intel® Neural Compressor.
+
+
+### Code update
+
+We need to update dlrm_s_pytorch.py as below:
+
+```python
+# evaluation
+def eval_func(model):
+    args.int8 = model.is_quantized
+    with torch.no_grad():
+        return inference(
+            args,
+            model,
+            best_acc_test,
+            best_auc_test,
+            test_ld,
+            trace=args.int8
+        )
+
+# calibration
+def calib_fn(model):
+    calib_number = 0
+    for X_test, lS_o_test, lS_i_test, T in train_ld:
+        if calib_number < 102400:
+            model(X_test, lS_o_test, lS_i_test)
+            calib_number += 1
+
+from neural_compressor.torch.quantization import SmoothQuantConfig, autotune, TuningConfig
+tune_config = TuningConfig(config_set=SmoothQuantConfig.get_config_set_for_tuning())
+dlrm = autotune(
+    dlrm,
+    tune_config=tune_config,
+    eval_fn=eval_func,
+    run_fn=calib_fn,
+)
+dlrm.save("saved_results")
+```
diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/data_loader_terabyte.py b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/data_loader_terabyte.py
new file mode 100644
index 00000000000..5bc0c4d3aab
--- /dev/null
+++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/data_loader_terabyte.py
@@ -0,0 +1,388 @@
+# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
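+
+# Module overview (descriptive comment): this file provides two ways to feed
+# the Criteo Terabyte data to DLRM:
+#   * DataLoader / _batch_generator: stream batches day by day from the
+#     per-day "*_reordered.npz" files produced by preprocessing.
+#   * CriteoBinDataset: read whole batches from a single binary file created
+#     with numpy_to_binary() / _preprocess() below.
+# _test() and _test_bin() at the bottom are small self-checks for both paths.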
+ + +from __future__ import absolute_import, division, print_function, unicode_literals + +import os +import numpy as np +from torch.utils.data import Dataset +import torch +import time +import math +from tqdm import tqdm +import argparse +import extend_distributed as ext_dist + + +class DataLoader: + """ + DataLoader dedicated for the Criteo Terabyte Click Logs dataset + """ + + def __init__( + self, + data_filename, + data_directory, + days, + batch_size, + max_ind_range=-1, + split="train", + drop_last_batch=False + ): + self.data_filename = data_filename + self.data_directory = data_directory + self.days = days + self.batch_size = batch_size + self.max_ind_range = max_ind_range + + total_file = os.path.join( + data_directory, + data_filename + "_day_count.npz" + ) + with np.load(total_file) as data: + total_per_file = data["total_per_file"][np.array(days)] + + self.length = sum(total_per_file) + if split == "test" or split == "val": + self.length = int(np.ceil(self.length / 2.)) + self.split = split + self.drop_last_batch = drop_last_batch + + def __iter__(self): + return iter( + _batch_generator( + self.data_filename, self.data_directory, self.days, + self.batch_size, self.split, self.drop_last_batch, self.max_ind_range + ) + ) + + def __len__(self): + if self.drop_last_batch: + return self.length // self.batch_size + else: + return math.ceil(self.length / self.batch_size) + + +def _transform_features( + x_int_batch, x_cat_batch, y_batch, max_ind_range, flag_input_torch_tensor=False +): + if max_ind_range > 0: + x_cat_batch = x_cat_batch % max_ind_range + + if flag_input_torch_tensor: + x_int_batch = torch.log(x_int_batch.clone().detach().type(torch.float) + 1) + x_cat_batch = x_cat_batch.clone().detach().type(torch.long) + y_batch = y_batch.clone().detach().type(torch.float32).view(-1, 1) + else: + x_int_batch = torch.log(torch.tensor(x_int_batch, dtype=torch.float) + 1) + x_cat_batch = torch.tensor(x_cat_batch, dtype=torch.long) + y_batch = torch.tensor(y_batch, dtype=torch.float32).view(-1, 1) + + batch_size = x_cat_batch.shape[0] + feature_count = x_cat_batch.shape[1] + lS_o = torch.arange(batch_size).reshape(1, -1).repeat(feature_count, 1) + + return x_int_batch, lS_o, x_cat_batch.t(), y_batch.view(-1, 1) + + +def _batch_generator( + data_filename, data_directory, days, batch_size, split, drop_last, max_ind_range +): + previous_file = None + for day in days: + filepath = os.path.join( + data_directory, + data_filename + "_{}_reordered.npz".format(day) + ) + + # print('Loading file: ', filepath) + with np.load(filepath) as data: + x_int = data["X_int"] + x_cat = data["X_cat"] + y = data["y"] + + samples_in_file = y.shape[0] + batch_start_idx = 0 + if split == "test" or split == "val": + length = int(np.ceil(samples_in_file / 2.)) + if split == "test": + samples_in_file = length + elif split == "val": + batch_start_idx = samples_in_file - length + + while batch_start_idx < samples_in_file - batch_size: + + missing_samples = batch_size + if previous_file is not None: + missing_samples -= previous_file['y'].shape[0] + + current_slice = slice(batch_start_idx, batch_start_idx + missing_samples) + + x_int_batch = x_int[current_slice] + x_cat_batch = x_cat[current_slice] + y_batch = y[current_slice] + + if previous_file is not None: + x_int_batch = np.concatenate( + [previous_file['x_int'], x_int_batch], + axis=0 + ) + x_cat_batch = np.concatenate( + [previous_file['x_cat'], x_cat_batch], + axis=0 + ) + y_batch = np.concatenate([previous_file['y'], y_batch], axis=0) + previous_file = 
None + + if x_int_batch.shape[0] != batch_size: + raise ValueError('should not happen') + + yield _transform_features(x_int_batch, x_cat_batch, y_batch, max_ind_range) + + batch_start_idx += missing_samples + if batch_start_idx != samples_in_file: + current_slice = slice(batch_start_idx, samples_in_file) + if previous_file is not None: + previous_file = { + 'x_int' : np.concatenate( + [previous_file['x_int'], x_int[current_slice]], + axis=0 + ), + 'x_cat' : np.concatenate( + [previous_file['x_cat'], x_cat[current_slice]], + axis=0 + ), + 'y' : np.concatenate([previous_file['y'], y[current_slice]], axis=0) + } + else: + previous_file = { + 'x_int' : x_int[current_slice], + 'x_cat' : x_cat[current_slice], + 'y' : y[current_slice] + } + + if not drop_last: + yield _transform_features( + previous_file['x_int'], + previous_file['x_cat'], + previous_file['y'], + max_ind_range + ) + + +def _test(): + generator = _batch_generator( + data_filename='day', + data_directory='./input', + days=range(23), + split="train", + batch_size=2048, + drop_last=True, + max_ind_range=-1 + ) + t1 = time.time() + for x_int, lS_o, x_cat, y in generator: + t2 = time.time() + time_diff = t2 - t1 + t1 = t2 + print( + "time {} x_int.shape: {} lS_o.shape: {} x_cat.shape: {} y.shape: {}".format( + time_diff, x_int.shape, lS_o.shape, x_cat.shape, y.shape + ) + ) + + +class CriteoBinDataset(Dataset): + """Binary version of criteo dataset.""" + + def __init__(self, data_file, counts_file, + batch_size=1, max_ind_range=-1, bytes_per_feature=4): + # dataset + self.tar_fea = 1 # single target + self.den_fea = 13 # 13 dense features + self.spa_fea = 26 # 26 sparse features + self.tad_fea = self.tar_fea + self.den_fea + self.tot_fea = self.tad_fea + self.spa_fea + + self.batch_size = batch_size + self.max_ind_range = max_ind_range + self.bytes_per_entry = (bytes_per_feature * self.tot_fea * batch_size) + + self.num_entries = math.ceil(os.path.getsize(data_file) / self.bytes_per_entry) + + data_file_size = os.path.getsize(data_file) + bytes_per_sample = bytes_per_feature * self.tot_fea + if ext_dist.my_size > 1: + self.bytes_per_rank = self.bytes_per_entry // ext_dist.my_size + else: + self.bytes_per_rank = self.bytes_per_entry + + if ext_dist.my_size > 1 and self.num_entries * self.bytes_per_entry > data_file_size: + last_batch = (data_file_size % self.bytes_per_entry) // bytes_per_sample + self.bytes_last_batch = last_batch // ext_dist.my_size * bytes_per_sample + else: + self.bytes_last_batch = self.bytes_per_rank + + if self.bytes_last_batch == 0: + self.num_entries = self.num_entries - 1 + self.bytes_last_batch = self.bytes_per_rank + + print('data file:', data_file, 'number of batches:', self.num_entries) + self.file = open(data_file, 'rb') + + with np.load(counts_file) as data: + self.counts = data["counts"] + + # hardcoded for now + self.m_den = 13 + + def __len__(self): + return self.num_entries + + def __getitem__(self, idx): + my_rank = ext_dist.dist.get_rank() if ext_dist.my_size > 1 else 0 + rank_size = self.bytes_last_batch if idx == (self.num_entries - 1) else self.bytes_per_rank + self.file.seek(idx * self.bytes_per_entry + rank_size * my_rank, 0) + raw_data = self.file.read(rank_size) + array = np.frombuffer(raw_data, dtype=np.int32) + tensor = torch.from_numpy(array).view((-1, self.tot_fea)) + + return _transform_features(x_int_batch=tensor[:, 1:14], + x_cat_batch=tensor[:, 14:], + y_batch=tensor[:, 0], + max_ind_range=self.max_ind_range, + flag_input_torch_tensor=True) + + def __del__(self): + self.file.close() + 
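+
+# Usage sketch (illustrative only; _test_bin() below exercises the same flow,
+# and the file names are those produced by _preprocess()):
+#   dataset = CriteoBinDataset(data_file="train_data.bin",
+#                              counts_file="day_fea_count.npz",
+#                              batch_size=2048)
+#   loader = torch.utils.data.DataLoader(dataset, batch_size=None,
+#                                        shuffle=False, num_workers=0)
+#   for x_int, lS_o, x_cat, y in loader:
+#       ...  # each item is already a full, transformed batch of tensors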
+ +def numpy_to_binary(input_files, output_file_path, split='train'): + """Convert the data to a binary format to be read with CriteoBinDataset.""" + + # WARNING - both categorical and numerical data must fit into int32 for + # the following code to work correctly + + with open(output_file_path, 'wb') as output_file: + if split == 'train': + for input_file in input_files: + print('Processing file: ', input_file) + + np_data = np.load(input_file) + np_data = np.concatenate([np_data['y'].reshape(-1, 1), + np_data['X_int'], + np_data['X_cat']], axis=1) + np_data = np_data.astype(np.int32) + + output_file.write(np_data.tobytes()) + else: + assert len(input_files) == 1 + np_data = np.load(input_files[0]) + np_data = np.concatenate([np_data['y'].reshape(-1, 1), + np_data['X_int'], + np_data['X_cat']], axis=1) + np_data = np_data.astype(np.int32) + + samples_in_file = np_data.shape[0] + midpoint = int(np.ceil(samples_in_file / 2.)) + if split == "test": + begin = 0 + end = midpoint + elif split == "val": + begin = midpoint + end = samples_in_file + else: + raise ValueError('Unknown split value: ', split) + + output_file.write(np_data[begin:end].tobytes()) + + +def _preprocess(args): + train_files = ['{}_{}_reordered.npz'.format(args.input_data_prefix, day) for + day in range(0, 23)] + + test_valid_file = args.input_data_prefix + '_23_reordered.npz' + + os.makedirs(args.output_directory, exist_ok=True) + for split in ['train', 'val', 'test']: + print('Running preprocessing for split =', split) + + output_file = os.path.join(args.output_directory, + '{}_data.bin'.format(split)) + + input_files = train_files if split == 'train' else [test_valid_file] + numpy_to_binary(input_files=input_files, + output_file_path=output_file, + split=split) + + +def _test_bin(): + parser = argparse.ArgumentParser() + parser.add_argument('--output_directory', required=True) + parser.add_argument('--input_data_prefix', required=True) + parser.add_argument('--split', choices=['train', 'test', 'val'], + required=True) + args = parser.parse_args() + + _preprocess(args) + + binary_data_file = os.path.join(args.output_directory, + '{}_data.bin'.format(args.split)) + + counts_file = os.path.join(args.output_directory, 'day_fea_count.npz') + dataset_binary = CriteoBinDataset(data_file=binary_data_file, + counts_file=counts_file, + batch_size=2048,) + from dlrm_data_pytorch import CriteoDataset + from dlrm_data_pytorch import collate_wrapper_criteo_offset as collate_wrapper_criteo + + binary_loader = torch.utils.data.DataLoader( + dataset_binary, + batch_size=None, + shuffle=False, + num_workers=0, + collate_fn=None, + pin_memory=False, + drop_last=False, + ) + + original_dataset = CriteoDataset( + dataset='terabyte', + max_ind_range=10 * 1000 * 1000, + sub_sample_rate=1, + randomize=True, + split=args.split, + raw_path=args.input_data_prefix, + pro_data='dummy_string', + memory_map=True + ) + + original_loader = torch.utils.data.DataLoader( + original_dataset, + batch_size=2048, + shuffle=False, + num_workers=0, + collate_fn=collate_wrapper_criteo, + pin_memory=False, + drop_last=False, + ) + + assert len(dataset_binary) == len(original_loader) + for i, (old_batch, new_batch) in tqdm(enumerate(zip(original_loader, + binary_loader)), + total=len(dataset_binary)): + + for j in range(len(new_batch)): + if not np.array_equal(old_batch[j], new_batch[j]): + raise ValueError('FAILED: Datasets not equal') + if i > len(dataset_binary): + break + print('PASSED') + + +if __name__ == '__main__': + _test() + _test_bin() diff --git 
a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/data_utils.py b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/data_utils.py new file mode 100644 index 00000000000..6ceef9517df --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/data_utils.py @@ -0,0 +1,1292 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: generate inputs and targets for the DLRM benchmark +# +# Utility function(s) to download and pre-process public data sets +# - Criteo Kaggle Display Advertising Challenge Dataset +# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset +# - Criteo Terabyte Dataset +# https://labs.criteo.com/2013/12/download-terabyte-click-logs +# +# After downloading dataset, run: +# getCriteoAdData( +# datafile="", +# o_filename=kaggleAdDisplayChallenge_processed.npz, +# max_ind_range=-1, +# sub_sample_rate=0.0, +# days=7, +# data_split='train', +# randomize='total', +# criteo_kaggle=True, +# memory_map=False +# ) +# getCriteoAdData( +# datafile="", +# o_filename=terabyte_processed.npz, +# max_ind_range=-1, +# sub_sample_rate=0.0, +# days=24, +# data_split='train', +# randomize='total', +# criteo_kaggle=False, +# memory_map=False +# ) + +from __future__ import absolute_import, division, print_function, unicode_literals + +import sys +# import os +from os import path +from multiprocessing import Process, Manager +# import io +# from io import StringIO +# import collections as coll + +import numpy as np + + +def convertUStringToDistinctIntsDict(mat, convertDicts, counts): + # Converts matrix of unicode strings into distinct integers. 
+ # + # Inputs: + # mat (np.array): array of unicode strings to convert + # convertDicts (list): dictionary for each column + # counts (list): number of different categories in each column + # + # Outputs: + # out (np.array): array of output integers + # convertDicts (list): dictionary for each column + # counts (list): number of different categories in each column + + # check if convertDicts and counts match correct length of mat + if len(convertDicts) != mat.shape[1] or len(counts) != mat.shape[1]: + print("Length of convertDicts or counts does not match input shape") + print("Generating convertDicts and counts...") + + convertDicts = [{} for _ in range(mat.shape[1])] + counts = [0 for _ in range(mat.shape[1])] + + # initialize output + out = np.zeros(mat.shape) + + for j in range(mat.shape[1]): + for i in range(mat.shape[0]): + # add to convertDict and increment count + if mat[i, j] not in convertDicts[j]: + convertDicts[j][mat[i, j]] = counts[j] + counts[j] += 1 + out[i, j] = convertDicts[j][mat[i, j]] + + return out, convertDicts, counts + + +def convertUStringToDistinctIntsUnique(mat, mat_uni, counts): + # mat is an array of 0,...,# samples, with each being 26 categorical features + + # check if mat_unique and counts match correct length of mat + if len(mat_uni) != mat.shape[1] or len(counts) != mat.shape[1]: + print("Length of mat_unique or counts does not match input shape") + print("Generating mat_unique and counts...") + + mat_uni = [np.array([]) for _ in range(mat.shape[1])] + counts = [0 for _ in range(mat.shape[1])] + + # initialize output + out = np.zeros(mat.shape) + ind_map = [np.array([]) for _ in range(mat.shape[1])] + + # find out and assign unique ids to features + for j in range(mat.shape[1]): + m = mat_uni[j].size + mat_concat = np.concatenate((mat_uni[j], mat[:, j])) + mat_uni[j], ind_map[j] = np.unique(mat_concat, return_inverse=True) + out[:, j] = ind_map[j][m:] + counts[j] = mat_uni[j].size + + return out, mat_uni, counts + + +def processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, pre_comp_counts): + # Process Kaggle Display Advertising Challenge or Terabyte Dataset + # by converting unicode strings in X_cat to integers and + # converting negative integer values in X_int. + # + # Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day. 
+ # + # Inputs: + # d_path (str): path for {kaggle|terabyte}_day_i.npz files + # i (int): splits in the dataset (typically 0 to 7 or 0 to 24) + + # process data if not all files exist + filename_i = npzfile + "_{0}_processed.npz".format(i) + + if path.exists(filename_i): + print("Using existing " + filename_i, end="\n") + else: + print("Not existing " + filename_i) + with np.load(npzfile + "_{0}.npz".format(i)) as data: + # categorical features + ''' + # Approach 1a: using empty dictionaries + X_cat, convertDicts, counts = convertUStringToDistinctIntsDict( + data["X_cat"], convertDicts, counts + ) + ''' + ''' + # Approach 1b: using empty np.unique + X_cat, convertDicts, counts = convertUStringToDistinctIntsUnique( + data["X_cat"], convertDicts, counts + ) + ''' + # Approach 2a: using pre-computed dictionaries + X_cat_t = np.zeros(data["X_cat_t"].shape) + for j in range(26): + for k, x in enumerate(data["X_cat_t"][j, :]): + X_cat_t[j, k] = convertDicts[j][x] + # continuous features + X_int = data["X_int"] + X_int[X_int < 0] = 0 + # targets + y = data["y"] + + np.savez_compressed( + filename_i, + # X_cat = X_cat, + X_cat=np.transpose(X_cat_t), # transpose of the data + X_int=X_int, + y=y, + ) + print("Processed " + filename_i, end="\n") + # sanity check (applicable only if counts have been pre-computed & are re-computed) + # for j in range(26): + # if pre_comp_counts[j] != counts[j]: + # sys.exit("ERROR: Sanity check on counts has failed") + # print("\nSanity check on counts passed") + + return + + +def concatCriteoAdData( + d_path, + d_file, + npzfile, + trafile, + days, + data_split, + randomize, + total_per_file, + total_count, + memory_map, + o_filename +): + # Concatenates different days and saves the result. + # + # Inputs: + # days (int): total number of days in the dataset (typically 7 or 24) + # d_path (str): path for {kaggle|terabyte}_day_i.npz files + # o_filename (str): output file name + # + # Output: + # o_file (str): output file path + + if memory_map: + # dataset break up per fea + # tar_fea = 1 # single target + den_fea = 13 # 13 dense features + spa_fea = 26 # 26 sparse features + # tad_fea = tar_fea + den_fea + # tot_fea = tad_fea + spa_fea + # create offset per file + offset_per_file = np.array([0] + [x for x in total_per_file]) + for i in range(days): + offset_per_file[i + 1] += offset_per_file[i] + + ''' + # Approach 1, 2 and 3 use indices, while Approach 4 does not use them + # create indices + indices = np.arange(total_count) + if data_split == "none": + if randomize == "total": + indices = np.random.permutation(indices) + else: + indices = np.array_split(indices, offset_per_file[1:-1]) + + # randomize train data (per day) + if randomize == "day": # or randomize == "total": + for i in range(len(indices) - 1): + indices[i] = np.random.permutation(indices[i]) + print("Randomized indices per day ...") + + train_indices = np.concatenate(indices[:-1]) + test_indices = indices[-1] + + # randomize train data (across days) + if randomize == "total": + train_indices = np.random.permutation(train_indices) + print("Randomized indices across days ...") + + indices = np.concatenate((train_indices, test_indices)) + # no reordering + # indices = np.arange(total_count) + ''' + ''' + # Approach 1: simple and slow (no grouping is used) + # check if data already exists + recreate_flag = False + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # load, 
reorder and concatenate data (memmap all reordered files per feature) + if recreate_flag: + # init reordered files (.npy appended automatically) + z = np.zeros((total_count)) + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered".format(j) + np.save(filename_j, z) + print("Creating " + filename_j) + + for i in range(days): + filename_i = d_path + npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + X_cat_t = np.transpose(data["X_cat"]) + X_int_t = np.transpose(data["X_int"]) + y = data["y"] + size = len(y) + # sanity check + if total_per_file[i] != size: + sys.exit("ERROR: sanity check on number of samples failed") + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + # print(filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r+') + if j < tar_fea: + fj[indices[start:end]] = y + elif tar_fea <= j and j < tad_fea: + fj[indices[start:end]] = X_int_t[j - tar_fea, :] + else: + fj[indices[start:end]] = X_cat_t[j - tad_fea, :] + del fj + else: + print("Reordered fea files already exist, skipping ...") + + # check if data already exists + recreate_flag = False + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + if path.exists(filename_i): + print("Using existing " + filename_i) + else: + recreate_flag = True + # split reordered data by files (memmap all reordered files per feature) + # on the day boundary del the file object and memmap again + if recreate_flag: + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + size = total_per_file[i] + X_int_t = np.zeros((den_fea, size)) + X_cat_t = np.zeros((spa_fea, size)) + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + print("Creating " + filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r') + if j < tar_fea: + y = fj[start:end] + elif tar_fea <= j and j < tad_fea: + X_int_t[j - tar_fea, :] = fj[start:end] + else: + X_cat_t[j - tad_fea, :] = fj[start:end] + del fj + + np.savez_compressed( + filename_i, + X_cat=np.transpose(X_cat_t), # transpose of the data + X_int=np.transpose(X_int_t), # transpose of the data + y=y, + ) + else: + print("Reordered day files already exist, skipping ...") + ''' + ''' + # Approach 2: group days + # check if data already exists + recreate_flag = False + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # load, reorder and concatenate data (memmap all reordered files per feature) + if recreate_flag: + # init reordered files (.npy appended automatically) + z = np.zeros((total_count)) + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered".format(j) + np.save(filename_j, z) + print("Creating " + filename_j) + + group_day = 3 # e.g. 
8, 4 or 3 + group_num = days // group_day + file_group = [i*group_day for i in range(group_num)] + [days] + for ii in range(group_num): + # for last may be group_size != group_num, therefore reset it below + group_size = file_group[ii + 1] - file_group[ii] + X_cat_t = [0]*group_size + X_int_t = [0]*group_size + y = [0]*group_size + start = [0]*group_size + end = [0]*group_size + for ig in range(group_size): + i = file_group[ii] + ig + filename_i = d_path + npzfile + "_{0}_processed.npz".format(i) + # setup start and end ranges + start[ig] = offset_per_file[i] + end[ig] = offset_per_file[i + 1] + # print(filename_i) + # load a group of files + with np.load(filename_i) as data: + X_cat_t[ig] = np.transpose(data["X_cat"]) + X_int_t[ig] = np.transpose(data["X_int"]) + y[ig] = data["y"] + # sanity check + if total_per_file[i] != len(y[ig]): + sys.exit("ERROR: sanity check on number of samples failed") + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end[ig]-start[ig]) + "=" + str(total_per_file[i])) + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r+') + for ig in range(group_size): + if j < tar_fea: + fj[indices[start[ig]:end[ig]]] = y[ig] + elif tar_fea <= j and j < tad_fea: + fj[indices[start[ig]:end[ig]]] = X_int_t[ig][j - tar_fea, :] + else: + fj[indices[start[ig]:end[ig]]] = X_cat_t[ig][j - tad_fea, :] + del fj + else: + print("Reordered fea files already exist, skipping ...") + + # check if data already exists + recreate_flag = False + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + if path.exists(filename_i): + print("Using existing " + filename_i) + else: + recreate_flag = True + # split reordered data by files (memmap all reordered files per feature) + # on the day boundary del the file object and memmap again + if recreate_flag: + for ii in range(group_num): + # for last may be group_size != group_num, therefore reset it below + group_size = file_group[ii + 1] - file_group[ii] + X_cat_t= []; X_int_t = [] + for ig in range(group_size): + i = file_group[ii] + ig + X_int_t.append(np.zeros((den_fea, total_per_file[i]))) + X_cat_t.append(np.zeros((spa_fea, total_per_file[i]))) + y = [0]*group_size + start = [0]*group_size + end = [0]*group_size + + for j in range(tot_fea): + filename_j = trafile + "_{0}_reordered.npy".format(j) + fj = np.load(filename_j, mmap_mode='r') + # load a group of files + for ig in range(group_size): + i = file_group[ii] + ig + # setup start and end ranges + start[ig] = offset_per_file[i] + end[ig] = offset_per_file[i + 1] + # load data for the group of files + if j < tar_fea: + y[ig] = fj[start[ig]:end[ig]] + elif tar_fea <= j and j < tad_fea: + X_int_t[ig][j - tar_fea, :] = fj[start[ig]:end[ig]] + else: + X_cat_t[ig][j - tad_fea, :] = fj[start[ig]:end[ig]] + del fj + + for ig in range(group_size): + i = file_group[ii] + ig + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + print("Creating " + filename_i) + np.savez_compressed( + filename_i, + X_cat=np.transpose(X_cat_t[ig]), # transpose of the data + X_int=np.transpose(X_int_t[ig]), # transpose of the data + y=y[ig], + ) + else: + print("Reordered day files already exist, skipping ...") + ''' + ''' + # Approach 3: group features + # check if data already exists + group_fea = 5 # e.g. 
8, 5 or 4 + group_num = tot_fea // group_fea + if tot_fea % group_fea != 0: # sanity check + sys.exit("ERROR: the group_fea must divided tot_fea evenly.") + recreate_flag = False + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}.npy".format( + jn, group_fea + ) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # load, reorder and concatenate data (memmap all reordered files per feature) + if recreate_flag: + # init reordered files (.npy appended automatically) + z = np.zeros((group_fea, total_count)) + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}".format( + jn, group_fea + ) + np.save(filename_j, z) + print("Creating " + filename_j) + + for i in range(days): + filename_i = d_path + npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + X_cat_t = np.transpose(data["X_cat"]) + X_int_t = np.transpose(data["X_int"]) + y = data["y"] + size = len(y) + # sanity check + if total_per_file[i] != size: + sys.exit("ERROR: sanity check on number of samples failed") + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + # print(filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}.npy".format( + jn, group_fea + ) + fj = np.load(filename_j, mmap_mode='r+') + for jg in range(group_fea): + j = jn * group_fea + jg + # print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg)) + if j < tar_fea: + fj[jg, indices[start:end]] = y + elif tar_fea <= j and j < tad_fea: + fj[jg, indices[start:end]] = X_int_t[j - tar_fea, :] + else: + fj[jg, indices[start:end]] = X_cat_t[j - tad_fea, :] + del fj + else: + print("Reordered fea files already exist, skipping ...") + + # check if data already exists + recreate_flag = False + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + if path.exists(filename_i): + print("Using existing" + filename_i) + else: + recreate_flag = True + # split reordered data by files (memmap all reordered files per feature) + # on the day boundary del the file object and memmap again + if recreate_flag: + for i in range(days): + filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i) + size = total_per_file[i] + X_int_t = np.zeros((den_fea, size)) + X_cat_t = np.zeros((spa_fea, size)) + # setup start and end ranges + start = offset_per_file[i] + end = offset_per_file[i + 1] + print("Creating " + filename_i) + # print("start=" + str(start) + " end=" + str(end) + # + " diff=" + str(end - start) + "=" + str(total_per_file[i])) + + for jn in range(group_num): + filename_j = trafile + "_{0}_reordered{1}.npy".format( + jn, group_fea + ) + fj = np.load(filename_j, mmap_mode='r') + for jg in range(group_fea): + j = jn * group_fea + jg + # print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg)) + if j < tar_fea: + y = fj[jg, start:end] + elif tar_fea <= j and j < tad_fea: + X_int_t[j - tar_fea, :] = fj[jg, start:end] + else: + X_cat_t[j - tad_fea, :] = fj[jg, start:end] + del fj + + np.savez_compressed( + filename_i, + X_cat=np.transpose(X_cat_t), # transpose of the data + X_int=np.transpose(X_int_t), # transpose of the data + y=y, + ) + + else: + print("Reordered day files already exist, skipping ...") + ''' + + # Approach 4: Fisher-Yates-Rao (FYR) shuffle algorithm + # 1st pass of FYR shuffle + # check if data already exists + recreate_flag = 
False + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j) + if ( + path.exists(filename_j_y) + and path.exists(filename_j_d) + and path.exists(filename_j_s) + ): + print( + "Using existing\n" + + filename_j_y + "\n" + + filename_j_d + "\n" + + filename_j_s + ) + else: + recreate_flag = True + # reorder across buckets using sampling + if recreate_flag: + # init intermediate files (.npy appended automatically) + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s".format(j) + np.save(filename_j_y, np.zeros((total_per_file[j]))) + np.save(filename_j_d, np.zeros((total_per_file[j], den_fea))) + np.save(filename_j_s, np.zeros((total_per_file[j], spa_fea))) + # start processing files + total_counter = [0] * days + for i in range(days): + filename_i = npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + X_cat = data["X_cat"] + X_int = data["X_int"] + y = data["y"] + size = len(y) + # sanity check + if total_per_file[i] != size: + sys.exit("ERROR: sanity check on number of samples failed") + # debug prints + print("Reordering (1st pass) " + filename_i) + + # create buckets using sampling of random ints + # from (discrete) uniform distribution + buckets = [] + for _j in range(days): + buckets.append([]) + counter = [0] * days + days_to_sample = days if data_split == "none" else days - 1 + if randomize == "total": + rand_u = np.random.randint(low=0, high=days_to_sample, size=size) + for k in range(size): + # sample and make sure elements per buckets do not overflow + if data_split == "none" or i < days - 1: + # choose bucket + p = rand_u[k] + # retry of the bucket is full + while total_counter[p] + counter[p] >= total_per_file[p]: + p = np.random.randint(low=0, high=days_to_sample) + else: # preserve the last day/bucket if needed + p = i + buckets[p].append(k) + counter[p] += 1 + else: # randomize is day or none + for k in range(size): + # do not sample, preserve the data in this bucket + p = i + buckets[p].append(k) + counter[p] += 1 + + # sanity check + if np.sum(counter) != size: + sys.exit("ERROR: sanity check on number of samples failed") + # debug prints + # print(counter) + # print(str(np.sum(counter)) + " = " + str(size)) + # print([len(x) for x in buckets]) + # print(total_counter) + + # partially feel the buckets + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j) + start = total_counter[j] + end = total_counter[j] + counter[j] + # target buckets + fj_y = np.load(filename_j_y, mmap_mode='r+') + # print("start=" + str(start) + " end=" + str(end) + # + " end - start=" + str(end - start) + " " + # + str(fj_y[start:end].shape) + " " + # + str(len(buckets[j]))) + fj_y[start:end] = y[buckets[j]] + del fj_y + # dense buckets + fj_d = np.load(filename_j_d, mmap_mode='r+') + # print("start=" + str(start) + " end=" + str(end) + # + " end - start=" + str(end - start) + " " + # + str(fj_d[start:end, :].shape) + " " + # + str(len(buckets[j]))) + fj_d[start:end, :] = X_int[buckets[j], :] + del fj_d + # sparse buckets + fj_s = np.load(filename_j_s, mmap_mode='r+') + # print("start=" + str(start) + " end=" + str(end) + # + " end - 
start=" + str(end - start) + " " + # + str(fj_s[start:end, :].shape) + " " + # + str(len(buckets[j]))) + fj_s[start:end, :] = X_cat[buckets[j], :] + del fj_s + # update counters for next step + total_counter[j] += counter[j] + + # 2nd pass of FYR shuffle + # check if data already exists + for j in range(days): + filename_j = npzfile + "_{0}_reordered.npz".format(j) + if path.exists(filename_j): + print("Using existing " + filename_j) + else: + recreate_flag = True + # reorder within buckets + if recreate_flag: + for j in range(days): + filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j) + filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j) + filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j) + fj_y = np.load(filename_j_y) + fj_d = np.load(filename_j_d) + fj_s = np.load(filename_j_s) + + indices = range(total_per_file[j]) + if randomize == "day" or randomize == "total": + if data_split == "none" or j < days - 1: + indices = np.random.permutation(range(total_per_file[j])) + + filename_r = npzfile + "_{0}_reordered.npz".format(j) + print("Reordering (2nd pass) " + filename_r) + np.savez_compressed( + filename_r, + X_cat=fj_s[indices, :], + X_int=fj_d[indices, :], + y=fj_y[indices], + ) + + ''' + # sanity check (under no reordering norms should be zero) + for i in range(days): + filename_i_o = npzfile + "_{0}_processed.npz".format(i) + print(filename_i_o) + with np.load(filename_i_o) as data_original: + X_cat_o = data_original["X_cat"] + X_int_o = data_original["X_int"] + y_o = data_original["y"] + filename_i_r = npzfile + "_{0}_reordered.npz".format(i) + print(filename_i_r) + with np.load(filename_i_r) as data_reordered: + X_cat_r = data_reordered["X_cat"] + X_int_r = data_reordered["X_int"] + y_r = data_reordered["y"] + print(np.linalg.norm(y_o - y_r)) + print(np.linalg.norm(X_int_o - X_int_r)) + print(np.linalg.norm(X_cat_o - X_cat_r)) + ''' + + else: + print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename)) + + # load and concatenate data + for i in range(days): + filename_i = npzfile + "_{0}_processed.npz".format(i) + with np.load(filename_i) as data: + if i == 0: + X_cat = data["X_cat"] + X_int = data["X_int"] + y = data["y"] + else: + X_cat = np.concatenate((X_cat, data["X_cat"])) + X_int = np.concatenate((X_int, data["X_int"])) + y = np.concatenate((y, data["y"])) + print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0])) + + with np.load(d_path + d_file + "_fea_count.npz") as data: + counts = data["counts"] + print("Loaded counts!") + + np.savez_compressed( + d_path + o_filename + ".npz", + X_cat=X_cat, + X_int=X_int, + y=y, + counts=counts, + ) + + return d_path + o_filename + ".npz" + + +def transformCriteoAdData(X_cat, X_int, y, days, data_split, randomize, total_per_file): + # Transforms Criteo Kaggle or terabyte data by applying log transformation + # on dense features and converting everything to appropriate tensors. 
+ # + # Inputs: + # X_cat (ndarray): array of integers corresponding to preprocessed + # categorical features + # X_int (ndarray): array of integers corresponding to dense features + # y (ndarray): array of bool corresponding to labels + # data_split(str): flag for splitting dataset into training/validation/test + # sets + # randomize (str): determines randomization scheme + # "none": no randomization + # "day": randomizes each day"s data (only works if split = True) + # "total": randomizes total dataset + # + # Outputs: + # if split: + # X_cat_train (tensor): sparse features for training set + # X_int_train (tensor): dense features for training set + # y_train (tensor): labels for training set + # X_cat_val (tensor): sparse features for validation set + # X_int_val (tensor): dense features for validation set + # y_val (tensor): labels for validation set + # X_cat_test (tensor): sparse features for test set + # X_int_test (tensor): dense features for test set + # y_test (tensor): labels for test set + # else: + # X_cat (tensor): sparse features + # X_int (tensor): dense features + # y (tensor): label + + # define initial set of indices + indices = np.arange(len(y)) + + # create offset per file + offset_per_file = np.array([0] + [x for x in total_per_file]) + for i in range(days): + offset_per_file[i + 1] += offset_per_file[i] + + # split dataset + if data_split == 'train': + indices = np.array_split(indices, offset_per_file[1:-1]) + + # randomize train data (per day) + if randomize == "day": # or randomize == "total": + for i in range(len(indices) - 1): + indices[i] = np.random.permutation(indices[i]) + print("Randomized indices per day ...") + + train_indices = np.concatenate(indices[:-1]) + test_indices = indices[-1] + test_indices, val_indices = np.array_split(test_indices, 2) + + print("Defined training and testing indices...") + + # randomize train data (across days) + if randomize == "total": + train_indices = np.random.permutation(train_indices) + print("Randomized indices across days ...") + + # indices = np.concatenate((train_indices, test_indices)) + + # create training, validation, and test sets + X_cat_train = X_cat[train_indices] + X_int_train = X_int[train_indices] + y_train = y[train_indices] + + X_cat_val = X_cat[val_indices] + X_int_val = X_int[val_indices] + y_val = y[val_indices] + + X_cat_test = X_cat[test_indices] + X_int_test = X_int[test_indices] + y_test = y[test_indices] + + print("Split data according to indices...") + + X_cat_train = X_cat_train.astype(np.long) + X_int_train = np.log(X_int_train.astype(np.float32) + 1) + y_train = y_train.astype(np.float32) + + X_cat_val = X_cat_val.astype(np.long) + X_int_val = np.log(X_int_val.astype(np.float32) + 1) + y_val = y_val.astype(np.float32) + + X_cat_test = X_cat_test.astype(np.long) + X_int_test = np.log(X_int_test.astype(np.float32) + 1) + y_test = y_test.astype(np.float32) + + print("Converted to tensors...done!") + + return ( + X_cat_train, + X_int_train, + y_train, + X_cat_val, + X_int_val, + y_val, + X_cat_test, + X_int_test, + y_test, + ) + + else: + + # randomize data + if randomize == "total": + indices = np.random.permutation(indices) + print("Randomized indices...") + + X_cat = X_cat[indices].astype(np.long) + X_int = np.log(X_int[indices].astype(np.float32) + 1) + y = y[indices].astype(np.float32) + + print("Converted to tensors...done!") + + return (X_cat, X_int, y, [], [], [], [], [], []) + + +def getCriteoAdData( + datafile, + o_filename, + max_ind_range=-1, + sub_sample_rate=0.0, + days=7, + 
data_split='train', + randomize='total', + criteo_kaggle=True, + memory_map=False, + dataset_multiprocessing=False, +): + # Passes through entire dataset and defines dictionaries for categorical + # features and determines the number of total categories. + # + # Inputs: + # datafile : path to downloaded raw data file + # o_filename (str): saves results under o_filename if filename is not "" + # + # Output: + # o_file (str): output file path + + #split the datafile into path and filename + lstr = datafile.split("/") + d_path = "/".join(lstr[0:-1]) + "/" + d_file = lstr[-1].split(".")[0] if criteo_kaggle else lstr[-1] + npzfile = d_path + ((d_file + "_day") if criteo_kaggle else d_file) + trafile = d_path + ((d_file + "_fea") if criteo_kaggle else "fea") + + # count number of datapoints in training set + total_file = d_path + d_file + "_day_count.npz" + if path.exists(total_file): + with np.load(total_file) as data: + total_per_file = list(data["total_per_file"]) + total_count = np.sum(total_per_file) + print("Skipping counts per file (already exist)") + else: + total_count = 0 + total_per_file = [] + if criteo_kaggle: + # WARNING: The raw data consists of a single train.txt file + # Each line in the file is a sample, consisting of 13 continuous and + # 26 categorical features (an extra space indicates that feature is + # missing and will be interpreted as 0). + if path.exists(datafile): + print("Reading data from path=%s" % (datafile)) + with open(str(datafile)) as f: + for _ in f: + total_count += 1 + total_per_file.append(total_count) + # reset total per file due to split + num_data_per_split, extras = divmod(total_count, days) + total_per_file = [num_data_per_split] * days + for j in range(extras): + total_per_file[j] += 1 + # split into days (simplifies code later on) + file_id = 0 + boundary = total_per_file[file_id] + nf = open(npzfile + "_" + str(file_id), "w") + with open(str(datafile)) as f: + for j, line in enumerate(f): + if j == boundary: + nf.close() + file_id += 1 + nf = open(npzfile + "_" + str(file_id), "w") + boundary += total_per_file[file_id] + nf.write(line) + nf.close() + else: + sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset") + else: + # WARNING: The raw data consist of day_0.gz,... ,day_23.gz text files + # Each line in the file is a sample, consisting of 13 continuous and + # 26 categorical features (an extra space indicates that feature is + # missing and will be interpreted as 0). 
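+            # As parsed in process_one_file() below, each raw line is tab-separated:
+            # <label> \t <13 integer dense features> \t <26 hexadecimal categorical ids>;
+            # empty fields become 0, hex ids are decoded with int(x, 16) and, when
+            # max_ind_range > 0, reduced modulo max_ind_range. In this loop the lines
+            # are only counted per day, not parsed yet.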
+ for i in range(days): + datafile_i = datafile + "_" + str(i) # + ".gz" + if path.exists(str(datafile_i)): + print("Reading data from path=%s" % (str(datafile_i))) + # file day_ + total_per_file_count = 0 + with open(str(datafile_i)) as f: + for _ in f: + total_per_file_count += 1 + total_per_file.append(total_per_file_count) + total_count += total_per_file_count + else: + sys.exit("ERROR: Criteo Terabyte Dataset path is invalid; please download from https://labs.criteo.com/2013/12/download-terabyte-click-logs") + + # process a file worth of data and reinitialize data + # note that a file main contain a single or multiple splits + def process_one_file( + datfile, + npzfile, + split, + num_data_in_split, + dataset_multiprocessing, + convertDictsDay=None, + resultDay=None + ): + if dataset_multiprocessing: + convertDicts_day = [{} for _ in range(26)] + + with open(str(datfile)) as f: + y = np.zeros(num_data_in_split, dtype="i4") # 4 byte int + X_int = np.zeros((num_data_in_split, 13), dtype="i4") # 4 byte int + X_cat = np.zeros((num_data_in_split, 26), dtype="i4") # 4 byte int + if sub_sample_rate == 0.0: + rand_u = 1.0 + else: + rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split) + + i = 0 + percent = 0 + for k, line in enumerate(f): + # process a line (data point) + line = line.split('\t') + # set missing values to zero + for j in range(len(line)): + if (line[j] == '') or (line[j] == '\n'): + line[j] = '0' + # sub-sample data by dropping zero targets, if needed + target = np.int32(line[0]) + if target == 0 and \ + (rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate: + continue + + y[i] = target + X_int[i] = np.array(line[1:14], dtype=np.int32) + if max_ind_range > 0: + X_cat[i] = np.array( + list(map(lambda x: int(x, 16) % max_ind_range, line[14:])), + dtype=np.int32 + ) + else: + X_cat[i] = np.array( + list(map(lambda x: int(x, 16), line[14:])), + dtype=np.int32 + ) + + # count uniques + if dataset_multiprocessing: + for j in range(26): + convertDicts_day[j][X_cat[i][j]] = 1 + # debug prints + if float(i)/num_data_in_split*100 > percent+1: + percent = int(float(i)/num_data_in_split*100) + print( + "Load %d/%d (%d%%) Split: %d Label True: %d Stored: %d" + % ( + i, + num_data_in_split, + percent, + split, + target, + y[i], + ), + end="\n", + ) + else: + for j in range(26): + convertDicts[j][X_cat[i][j]] = 1 + # debug prints + print( + "Load %d/%d Split: %d Label True: %d Stored: %d" + % ( + i, + num_data_in_split, + split, + target, + y[i], + ), + end="\r", + ) + i += 1 + + # store num_data_in_split samples or extras at the end of file + # count uniques + # X_cat_t = np.transpose(X_cat) + # for j in range(26): + # for x in X_cat_t[j,:]: + # convertDicts[j][x] = 1 + # store parsed + filename_s = npzfile + "_{0}.npz".format(split) + if path.exists(filename_s): + print("\nSkip existing " + filename_s) + else: + np.savez_compressed( + filename_s, + X_int=X_int[0:i, :], + # X_cat=X_cat[0:i, :], + X_cat_t=np.transpose(X_cat[0:i, :]), # transpose of the data + y=y[0:i], + ) + print("\nSaved " + npzfile + "_{0}.npz!".format(split)) + + if dataset_multiprocessing: + resultDay[split] = i + convertDictsDay[split] = convertDicts_day + return + else: + return i + + # create all splits (reuse existing files if possible) + recreate_flag = False + convertDicts = [{} for _ in range(26)] + # WARNING: to get reproducible sub-sampling results you must reset the seed below + # np.random.seed(123) + # in this case there is a single split in each day + for i in range(days): + 
npzfile_i = npzfile + "_{0}.npz".format(i) + npzfile_p = npzfile + "_{0}_processed.npz".format(i) + if path.exists(npzfile_i): + print("Skip existing " + npzfile_i) + elif path.exists(npzfile_p): + print("Skip existing " + npzfile_p) + else: + recreate_flag = True + + if recreate_flag: + if dataset_multiprocessing: + resultDay = Manager().dict() + convertDictsDay = Manager().dict() + processes = [Process(target=process_one_file, + name="process_one_file:%i" % i, + args=(npzfile + "_{0}".format(i), + npzfile, + i, + total_per_file[i], + dataset_multiprocessing, + convertDictsDay, + resultDay, + ) + ) for i in range(0, days)] + for process in processes: + process.start() + for process in processes: + process.join() + for day in range(days): + total_per_file[day] = resultDay[day] + print("Constructing convertDicts Split: {}".format(day)) + convertDicts_tmp = convertDictsDay[day] + for i in range(26): + for j in convertDicts_tmp[i]: + convertDicts[i][j] = 1 + else: + for i in range(days): + total_per_file[i] = process_one_file( + npzfile + "_{0}".format(i), + npzfile, + i, + total_per_file[i], + dataset_multiprocessing, + ) + + # report and save total into a file + total_count = np.sum(total_per_file) + if not path.exists(total_file): + np.savez_compressed(total_file, total_per_file=total_per_file) + print("Total number of samples:", total_count) + print("Divided into days/splits:\n", total_per_file) + + # dictionary files + counts = np.zeros(26, dtype=np.int32) + if recreate_flag: + # create dictionaries + for j in range(26): + for i, x in enumerate(convertDicts[j]): + convertDicts[j][x] = i + dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j) + if not path.exists(dict_file_j): + np.savez_compressed( + dict_file_j, + unique=np.array(list(convertDicts[j]), dtype=np.int32) + ) + counts[j] = len(convertDicts[j]) + # store (uniques and) counts + count_file = d_path + d_file + "_fea_count.npz" + if not path.exists(count_file): + np.savez_compressed(count_file, counts=counts) + else: + # create dictionaries (from existing files) + for j in range(26): + with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data: + unique = data["unique"] + for i, x in enumerate(unique): + convertDicts[j][x] = i + # load (uniques and) counts + with np.load(d_path + d_file + "_fea_count.npz") as data: + counts = data["counts"] + + # process all splits + if dataset_multiprocessing: + processes = [Process(target=processCriteoAdData, + name="processCriteoAdData:%i" % i, + args=(d_path, + d_file, + npzfile, + i, + convertDicts, + counts, + ) + ) for i in range(0, days)] + for process in processes: + process.start() + for process in processes: + process.join() + + else: + for i in range(days): + processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, counts) + + o_file = concatCriteoAdData( + d_path, + d_file, + npzfile, + trafile, + days, + data_split, + randomize, + total_per_file, + total_count, + memory_map, + o_filename + ) + + return o_file + + +def loadDataset( + dataset, + max_ind_range, + sub_sample_rate, + randomize, + data_split, + raw_path="", + pro_data="", + memory_map=False +): + # dataset + if dataset == "kaggle": + days = 7 + o_filename = "kaggleAdDisplayChallenge_processed" + elif dataset == "terabyte": + days = 24 + o_filename = "terabyte_processed" + else: + raise(ValueError("Data set option is not supported")) + + # split the datafile into path and filename + lstr = raw_path.split("/") + d_path = "/".join(lstr[0:-1]) + "/" + d_file = lstr[-1].split(".")[0] if dataset == 
"kaggle" else lstr[-1] + npzfile = (d_file + "_day") if dataset == "kaggle" else d_file + # trafile = d_path + ((d_file + "_fea") if dataset == "kaggle" else "fea") + + # check if pre-processed data is available + data_ready = True + if memory_map: + for i in range(days): + reo_data = d_path + npzfile + "_{0}_reordered.npz".format(i) + if not path.exists(str(reo_data)): + data_ready = False + else: + if not path.exists(str(pro_data)): + data_ready = False + + # pre-process data if needed + # WARNNING: when memory mapping is used we get a collection of files + if data_ready: + print("Reading pre-processed data=%s" % (str(pro_data))) + file = str(pro_data) + else: + print("Reading raw data=%s" % (str(raw_path))) + file = getCriteoAdData( + raw_path, + o_filename, + max_ind_range, + sub_sample_rate, + days, + data_split, + randomize, + dataset == "kaggle", + memory_map + ) + + return file, days + + +if __name__ == "__main__": + ### import packages ### + import argparse + + ### parse arguments ### + parser = argparse.ArgumentParser( + description="Preprocess Criteo dataset" + ) + # model related parameters + parser.add_argument("--max-ind-range", type=int, default=-1) + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--data-randomize", type=str, default="total") # or day or none + parser.add_argument("--memory-map", action="store_true", default=False) + parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + args = parser.parse_args() + + loadDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map + ) diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dlrm_data_pytorch.py b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dlrm_data_pytorch.py new file mode 100644 index 00000000000..f6f30f8e663 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dlrm_data_pytorch.py @@ -0,0 +1,575 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: generate inputs and targets for the dlrm benchmark +# The inpts and outputs are generated according to the following three option(s) +# 1) random distribution +# 2) synthetic distribution, based on unique accesses and distances between them +# i) R. Hassan, A. Harris, N. Topham and A. 
Efthymiou "Synthetic Trace-Driven +# Simulation of Cache Memory", IEEE AINAM'07 +# 3) public data set +# i) Criteo Kaggle Display Advertising Challenge Dataset +# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset +# ii) Criteo Terabyte Dataset +# https://labs.criteo.com/2013/12/download-terabyte-click-logs + + +from __future__ import absolute_import, division, print_function, unicode_literals + +# others +from os import path +import sys + +import data_utils + +# numpy +import numpy as np +from numpy import random as ra + + +# pytorch +import torch +from torch.utils.data import Dataset, RandomSampler + +import data_loader_terabyte + + +# Kaggle Display Advertising Challenge Dataset +# dataset (str): name of dataset (Kaggle or Terabyte) +# randomize (str): determines randomization scheme +# "none": no randomization +# "day": randomizes each day"s data (only works if split = True) +# "total": randomizes total dataset +# split (bool) : to split into train, test, validation data-sets +class CriteoDataset(Dataset): + + def __init__( + self, + dataset, + max_ind_range, + sub_sample_rate, + randomize, + split="train", + raw_path="", + pro_data="", + memory_map=False, + dataset_multiprocessing=False, + ): + # dataset + # tar_fea = 1 # single target + den_fea = 13 # 13 dense features + # spa_fea = 26 # 26 sparse features + # tad_fea = tar_fea + den_fea + # tot_fea = tad_fea + spa_fea + if dataset == "kaggle": + days = 7 + out_file = "kaggleAdDisplayChallenge_processed" + elif dataset == "terabyte": + days = 24 + out_file = "terabyte_processed" + else: + raise(ValueError("Data set option is not supported")) + self.max_ind_range = max_ind_range + self.memory_map = memory_map + + # split the datafile into path and filename + lstr = raw_path.split("/") + self.d_path = "/".join(lstr[0:-1]) + "/" + self.d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1] + self.npzfile = self.d_path + ( + (self.d_file + "_day") if dataset == "kaggle" else self.d_file + ) + self.trafile = self.d_path + ( + (self.d_file + "_fea") if dataset == "kaggle" else "fea" + ) + + # check if pre-processed data is available + data_ready = True + if memory_map: + for i in range(days): + reo_data = self.npzfile + "_{0}_reordered.npz".format(i) + if not path.exists(str(reo_data)): + data_ready = False + else: + if not path.exists(str(pro_data)): + data_ready = False + + # pre-process data if needed + # WARNNING: when memory mapping is used we get a collection of files + if data_ready: + print("Reading pre-processed data=%s" % (str(pro_data))) + file = str(pro_data) + else: + print("Reading raw data=%s" % (str(raw_path))) + file = data_utils.getCriteoAdData( + raw_path, + out_file, + max_ind_range, + sub_sample_rate, + days, + split, + randomize, + dataset == "kaggle", + memory_map, + dataset_multiprocessing, + ) + + # get a number of samples per day + total_file = self.d_path + self.d_file + "_day_count.npz" + with np.load(total_file) as data: + total_per_file = data["total_per_file"] + # compute offsets per file + self.offset_per_file = np.array([0] + [x for x in total_per_file]) + for i in range(days): + self.offset_per_file[i + 1] += self.offset_per_file[i] + # print(self.offset_per_file) + + # setup data + if memory_map: + # setup the training/testing split + self.split = split + if split == 'none' or split == 'train': + self.day = 0 + self.max_day_range = days if split == 'none' else days - 1 + elif split == 'test' or split == 'val': + self.day = days - 1 + num_samples = 
self.offset_per_file[days] - \ + self.offset_per_file[days - 1] + self.test_size = int(np.ceil(num_samples / 2.)) + self.val_size = num_samples - self.test_size + else: + sys.exit("ERROR: dataset split is neither none, nor train or test.") + + ''' + # text + print("text") + for i in range(days): + fi = self.npzfile + "_{0}".format(i) + with open(fi) as data: + ttt = 0; nnn = 0 + for _j, line in enumerate(data): + ttt +=1 + if np.int32(line[0]) > 0: + nnn +=1 + print("day=" + str(i) + " total=" + str(ttt) + " non-zeros=" + + str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%") + # processed + print("processed") + for i in range(days): + fi = self.npzfile + "_{0}_processed.npz".format(i) + with np.load(fi) as data: + yyy = data["y"] + ttt = len(yyy) + nnn = np.count_nonzero(yyy) + print("day=" + str(i) + " total=" + str(ttt) + " non-zeros=" + + str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%") + # reordered + print("reordered") + for i in range(days): + fi = self.npzfile + "_{0}_reordered.npz".format(i) + with np.load(fi) as data: + yyy = data["y"] + ttt = len(yyy) + nnn = np.count_nonzero(yyy) + print("day=" + str(i) + " total=" + str(ttt) + " non-zeros=" + + str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%") + ''' + + # load unique counts + with np.load(self.d_path + self.d_file + "_fea_count.npz") as data: + self.counts = data["counts"] + self.m_den = den_fea # X_int.shape[1] + self.n_emb = len(self.counts) + print("Sparse features= %d, Dense features= %d" % (self.n_emb, self.m_den)) + + # Load the test data + # Only a single day is used for testing + if self.split == 'test' or self.split == 'val': + # only a single day is used for testing + fi = self.npzfile + "_{0}_reordered.npz".format( + self.day + ) + with np.load(fi) as data: + self.X_int = data["X_int"] # continuous feature + self.X_cat = data["X_cat"] # categorical feature + self.y = data["y"] # target + + else: + # load and preprocess data + with np.load(file) as data: + X_int = data["X_int"] # continuous feature + X_cat = data["X_cat"] # categorical feature + y = data["y"] # target + self.counts = data["counts"] + self.m_den = X_int.shape[1] # den_fea + self.n_emb = len(self.counts) + print("Sparse fea = %d, Dense fea = %d" % (self.n_emb, self.m_den)) + + # create reordering + indices = np.arange(len(y)) + + if split == "none": + # randomize all data + if randomize == "total": + indices = np.random.permutation(indices) + print("Randomized indices...") + + X_int[indices] = X_int + X_cat[indices] = X_cat + y[indices] = y + + else: + indices = np.array_split(indices, self.offset_per_file[1:-1]) + + # randomize train data (per day) + if randomize == "day": # or randomize == "total": + for i in range(len(indices) - 1): + indices[i] = np.random.permutation(indices[i]) + print("Randomized indices per day ...") + + train_indices = np.concatenate(indices[:-1]) + test_indices = indices[-1] + test_indices, val_indices = np.array_split(test_indices, 2) + + print("Defined %s indices..." 
% (split)) + + # randomize train data (across days) + if randomize == "total": + train_indices = np.random.permutation(train_indices) + print("Randomized indices across days ...") + + # create training, validation, and test sets + if split == 'train': + self.X_int = [X_int[i] for i in train_indices] + self.X_cat = [X_cat[i] for i in train_indices] + self.y = [y[i] for i in train_indices] + elif split == 'val': + self.X_int = [X_int[i] for i in val_indices] + self.X_cat = [X_cat[i] for i in val_indices] + self.y = [y[i] for i in val_indices] + elif split == 'test': + self.X_int = [X_int[i] for i in test_indices] + self.X_cat = [X_cat[i] for i in test_indices] + self.y = [y[i] for i in test_indices] + + print("Split data according to indices...") + + def __getitem__(self, index): + + if isinstance(index, slice): + return [ + self[idx] for idx in range( + index.start or 0, index.stop or len(self), index.step or 1 + ) + ] + + if self.memory_map: + if self.split == 'none' or self.split == 'train': + # check if need to switch to next day and load data + if index == self.offset_per_file[self.day]: + # print("day_boundary switch", index) + self.day_boundary = self.offset_per_file[self.day] + fi = self.npzfile + "_{0}_reordered.npz".format( + self.day + ) + # print('Loading file: ', fi) + with np.load(fi) as data: + self.X_int = data["X_int"] # continuous feature + self.X_cat = data["X_cat"] # categorical feature + self.y = data["y"] # target + self.day = (self.day + 1) % self.max_day_range + + i = index - self.day_boundary + elif self.split == 'test' or self.split == 'val': + # only a single day is used for testing + i = index + (0 if self.split == 'test' else self.test_size) + else: + sys.exit("ERROR: dataset split is neither none, nor train or test.") + else: + i = index + + if self.max_ind_range > 0: + return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i] + else: + return self.X_int[i], self.X_cat[i], self.y[i] + + def _default_preprocess(self, X_int, X_cat, y): + X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1) + if self.max_ind_range > 0: + X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long) + else: + X_cat = torch.tensor(X_cat, dtype=torch.long) + y = torch.tensor(y.astype(np.float32)) + + return X_int, X_cat, y + + def __len__(self): + if self.memory_map: + if self.split == 'none': + return self.offset_per_file[-1] + elif self.split == 'train': + return self.offset_per_file[-2] + elif self.split == 'test': + return self.test_size + elif self.split == 'val': + return self.val_size + else: + sys.exit("ERROR: dataset split is neither none, nor train nor test.") + else: + return len(self.y) + + +def collate_wrapper_criteo_offset(list_of_tuples): + # where each tuple is (X_int, X_cat, y) + transposed_data = list(zip(*list_of_tuples)) + X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1) + X_cat = torch.tensor(transposed_data[1], dtype=torch.long) + T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1) + + batchSize = X_cat.shape[0] + featureCnt = X_cat.shape[1] + + lS_i = [X_cat[:, i] for i in range(featureCnt)] + lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)] + + return X_int, torch.stack(lS_o), torch.stack(lS_i), T + + +def ensure_dataset_preprocessed(args, d_path): + _ = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing + ) + + _ 
= CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "test", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing + ) + + for split in ['train', 'val', 'test']: + print('Running preprocessing for split =', split) + + train_files = ['{}_{}_reordered.npz'.format(args.raw_data_file, day) + for + day in range(0, 23)] + + test_valid_file = args.raw_data_file + '_23_reordered.npz' + + output_file = d_path + '_{}.bin'.format(split) + + input_files = train_files if split == 'train' else [test_valid_file] + data_loader_terabyte.numpy_to_binary(input_files=input_files, + output_file_path=output_file, + split=split) + + +# Conversion from offset to length +def offset_to_length_converter(lS_o, lS_i): + def diff(tensor): + return tensor[1:] - tensor[:-1] + + return torch.stack( + [ + diff(torch.cat((S_o, torch.tensor(lS_i[ind].shape))).int()) + for ind, S_o in enumerate(lS_o) + ] + ) + + +def collate_wrapper_criteo_length(list_of_tuples): + # where each tuple is (X_int, X_cat, y) + transposed_data = list(zip(*list_of_tuples)) + X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1) + X_cat = torch.tensor(transposed_data[1], dtype=torch.long) + T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1) + + batchSize = X_cat.shape[0] + featureCnt = X_cat.shape[1] + + lS_i = torch.stack([X_cat[:, i] for i in range(featureCnt)]) + lS_o = torch.stack( + [torch.tensor(range(batchSize)) for _ in range(featureCnt)] + ) + + lS_l = offset_to_length_converter(lS_o, lS_i) + + return X_int, lS_l, lS_i, T + + +def make_criteo_data_and_loaders(args, offset_to_length_converter=False): + if args.memory_map and args.data_set == "terabyte": + # more efficient for larger batches + data_directory = path.dirname(args.raw_data_file) + + if args.mlperf_bin_loader: + lstr = args.processed_data_file.split("/") + d_path = "/".join(lstr[0:-1]) + "/" + lstr[-1].split(".")[0] + train_file = d_path + "_train.bin" + test_file = d_path + "_test.bin" + # val_file = d_path + "_val.bin" + counts_file = args.raw_data_file + '_fea_count.npz' + if any(not path.exists(p) for p in [train_file, + test_file, + counts_file]): + ensure_dataset_preprocessed(args, d_path) + + train_data = data_loader_terabyte.CriteoBinDataset( + data_file=train_file, + counts_file=counts_file, + batch_size=args.mini_batch_size, + max_ind_range=args.max_ind_range + ) + + train_loader = torch.utils.data.DataLoader( + train_data, + batch_size=None, + batch_sampler=None, + shuffle=False, + num_workers=0, + collate_fn=None, + pin_memory=False, + drop_last=False, + sampler=RandomSampler(train_data) if args.mlperf_bin_shuffle else None + ) + + test_data = data_loader_terabyte.CriteoBinDataset( + data_file=test_file, + counts_file=counts_file, + batch_size=args.test_mini_batch_size, + max_ind_range=args.max_ind_range + ) + + test_loader = torch.utils.data.DataLoader( + test_data, + batch_size=None, + batch_sampler=None, + shuffle=False, + num_workers=0, + collate_fn=None, + pin_memory=False, + drop_last=False, + ) + else: + data_filename = args.raw_data_file.split("/")[-1] + + train_data = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing + ) + + test_data = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "test", + 
args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing + ) + + train_loader = data_loader_terabyte.DataLoader( + data_directory=data_directory, + data_filename=data_filename, + days=list(range(23)), + batch_size=args.mini_batch_size, + max_ind_range=args.max_ind_range, + split="train" + ) + + test_loader = data_loader_terabyte.DataLoader( + data_directory=data_directory, + data_filename=data_filename, + days=[23], + batch_size=args.test_mini_batch_size, + max_ind_range=args.max_ind_range, + split="test" + ) + else: + train_data = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "train", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing, + ) + + test_data = CriteoDataset( + args.data_set, + args.max_ind_range, + args.data_sub_sample_rate, + args.data_randomize, + "test", + args.raw_data_file, + args.processed_data_file, + args.memory_map, + args.dataset_multiprocessing, + ) + + collate_wrapper_criteo = collate_wrapper_criteo_offset + if offset_to_length_converter: + collate_wrapper_criteo = collate_wrapper_criteo_length + + train_loader = torch.utils.data.DataLoader( + train_data, + batch_size=args.mini_batch_size, + shuffle=False, + num_workers=args.num_workers, + collate_fn=collate_wrapper_criteo, + pin_memory=False, + drop_last=False, # True + ) + + test_loader = torch.utils.data.DataLoader( + test_data, + batch_size=args.test_mini_batch_size, + shuffle=False, + num_workers=args.test_num_workers, + collate_fn=collate_wrapper_criteo, + pin_memory=False, + drop_last=False, # True + ) + + return train_data, train_loader, test_data, test_loader \ No newline at end of file diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dlrm_s_pytorch.py b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dlrm_s_pytorch.py new file mode 100644 index 00000000000..2af63ea4b98 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dlrm_s_pytorch.py @@ -0,0 +1,1154 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +# +# Description: an implementation of a deep learning recommendation model (DLRM) +# The model input consists of dense and sparse features. The former is a vector +# of floating point values. The latter is a list of sparse indices into +# embedding tables, which consist of vectors of floating point values. +# The selected vectors are passed to mlp networks denoted by triangles, +# in some cases the vectors are interacted through operators (Ops). +# +# output: +# vector of values +# model: | +# /\ +# /__\ +# | +# _____________________> Op <___________________ +# / | \ +# /\ /\ /\ +# /__\ /__\ ... 
/__\ +# | | | +# | Op Op +# | ____/__\_____ ____/__\____ +# | |_Emb_|____|__| ... |_Emb_|__|___| +# input: +# [ dense features ] [sparse indices] , ..., [sparse indices] +# +# More precise definition of model layers: +# 1) fully connected layers of an mlp +# z = f(y) +# y = Wx + b +# +# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk]) +# z = Op(e1,...,ek) +# obtain vectors e1=E[:,p1], ..., ek=E[:,pk] +# +# 3) Operator Op can be one of the following +# Sum(e1,...,ek) = e1 + ... + ek +# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek] +# Cat(e1,...,ek) = [e1', ..., ek']' +# where ' denotes transpose operation +# +# References: +# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang, +# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu, +# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii, +# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko, +# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong, +# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and +# Recommendation Systems", CoRR, arXiv:1906.00091, 2019 + +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse + +# miscellaneous +import builtins +import datetime +import sys +import time + + +# data generation +import dlrm_data_pytorch as dp + +# numpy +import numpy as np +import sklearn.metrics + +# pytorch +import torch +import torch.nn as nn +from torch._ops import ops +from torch.autograd.profiler import record_function +from torch.nn.parallel.parallel_apply import parallel_apply +from torch.nn.parallel.replicate import replicate +from torch.nn.parallel.scatter_gather import gather, scatter +from torch.nn.parameter import Parameter +from torch.optim.lr_scheduler import _LRScheduler +from torch.utils import ThroughputBenchmark +# For distributed run +import extend_distributed as ext_dist + + +try: + import intel_extension_for_pytorch as ipex +except: + assert False, "please install intel-extension-for-pytorch, support version higher than 1.10" + + +exc = getattr(builtins, "IOError", "FileNotFoundError") + +def freeze(model): + return torch.jit._recursive.wrap_cpp_module(torch._C._freeze_module(model._c, preserveParameters=True)) + + +def time_wrap(): + return time.time() + + +def dlrm_wrap(X, *emb_args): + with record_function("DLRM forward"): + return dlrm(X, *emb_args) + + +def loss_fn_wrap(Z, T): + with record_function("DLRM loss compute"): + return dlrm.loss_fn(Z, T) + +# The following function is a wrapper to avoid checking this multiple times in th +# loop below. 
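+# For a batch b coming out of the Criteo data loaders it returns, in order: dense
+# features X, embedding offsets lS_o, embedding indices lS_i, targets T, per-sample
+# weights W (all ones, i.e. unweighted samples) and CBPP (unused, always None).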
+def unpack_batch(b): + # Experiment with unweighted samples + return b[0], b[1], b[2], b[3], torch.ones(b[3].size()), None + + +class LRPolicyScheduler(_LRScheduler): + def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps): + self.num_warmup_steps = num_warmup_steps + self.decay_start_step = decay_start_step + self.decay_end_step = decay_start_step + num_decay_steps + self.num_decay_steps = num_decay_steps + + if self.decay_start_step < self.num_warmup_steps: + sys.exit("Learning rate warmup must finish before the decay starts") + + super(LRPolicyScheduler, self).__init__(optimizer) + + def get_lr(self): + step_count = self._step_count + if step_count < self.num_warmup_steps: + # warmup + scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps + lr = [base_lr * scale for base_lr in self.base_lrs] + self.last_lr = lr + elif self.decay_start_step <= step_count and step_count < self.decay_end_step: + # decay + decayed_steps = step_count - self.decay_start_step + scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2 + min_lr = 0.0000001 + lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs] + self.last_lr = lr + else: + if self.num_decay_steps > 0: + # freeze at last, either because we're after decay + # or because we're between warmup and decay + lr = self.last_lr + else: + # do not adjust + lr = self.base_lrs + return lr + + +### define dlrm in PyTorch ### +class DLRM_Net(nn.Module): + def create_mlp(self, ln, sigmoid_layer): + # build MLP layer by layer + layers = nn.ModuleList() + for i in range(0, ln.size - 1): + n = ln[i] + m = ln[i + 1] + + # construct fully connected operator + LL = nn.Linear(int(n), int(m), bias=True) + + # initialize the weights + # with torch.no_grad(): + # custom Xavier input, output or two-sided fill + mean = 0.0 # std_dev = np.sqrt(variance) + std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n) + W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32) + std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1)) + bt = np.random.normal(mean, std_dev, size=m).astype(np.float32) + # approach 1 + LL.weight.data = torch.tensor(W, requires_grad=True) + LL.bias.data = torch.tensor(bt, requires_grad=True) + # approach 2 + # LL.weight.data.copy_(torch.tensor(W)) + # LL.bias.data.copy_(torch.tensor(bt)) + # approach 3 + # LL.weight = Parameter(torch.tensor(W),requires_grad=True) + # LL.bias = Parameter(torch.tensor(bt),requires_grad=True) + layers.append(LL) + + # construct sigmoid or relu operator + if i == sigmoid_layer: + layers.append(nn.Sigmoid()) + else: + layers.append(nn.ReLU()) + + # approach 1: use ModuleList + # return layers + # approach 2: use Sequential container to wrap all layers + return torch.nn.Sequential(*layers) + + def create_emb(self, m, ln, local_ln_emb=None): + emb_l = nn.ModuleList() + n_embs = ln.size if local_ln_emb is None else len(local_ln_emb) + for i in range(n_embs): + if local_ln_emb is None: + n = ln[i] + else: + n = ln[local_ln_emb[i]] + EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True) + # initialize embeddings + if not args.inference_only: + nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n)) + emb_l.append(EE) + return emb_l + + def __init__( + self, + m_spa=None, + ln_emb=None, + ln_bot=None, + ln_top=None, + sigmoid_bot=-1, + sigmoid_top=-1, + weighted_pooling=None, + loss_threshold=0.0, + ): + super(DLRM_Net, self).__init__() + self.loss_threshold = loss_threshold + #If running distributed, get local slice of embedding 
tables + if ext_dist.my_size > 1: + n_emb = len(ln_emb) + self.n_global_emb = n_emb + self.rank = ext_dist.dist.get_rank() + self.ln_emb = [i for i in range(n_emb)] + self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths(n_emb) + self.local_ln_emb_slice = ext_dist.get_my_slice(n_emb) + self.local_ln_emb = self.ln_emb[self.local_ln_emb_slice] + else: + self.local_ln_emb = None + self.emb_l = self.create_emb(m_spa, ln_emb, self.local_ln_emb) + self.bot_l = self.create_mlp(ln_bot, sigmoid_bot) + self.top_l = self.create_mlp(ln_top, sigmoid_top) + self.loss_fn = torch.nn.BCELoss(reduction="mean") + + + def apply_mlp(self, x, layers): + # approach 1: use ModuleList + # for layer in layers: + # x = layer(x) + # return x + # approach 2: use Sequential container to wrap all layers + return layers(x) + + def apply_emb(self, emb_l, *emb_args): + # WARNING: notice that we are processing the batch at once. We implicitly + # assume that the data is laid out such that: + # 1. each embedding is indexed with a group of sparse indices, + # corresponding to a single lookup + # 2. for each embedding the lookups are further organized into a batch + # 3. for a list of embedding tables there is a list of batched lookups + if isinstance(emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + return emb_l(emb_args, self.need_linearize_indices_and_offsets) + lS_o, lS_i = emb_args + ly = [] + for k, sparse_index_group_batch in enumerate(lS_i): + sparse_offset_group_batch = lS_o[k] + + # embedding lookup + # We are using EmbeddingBag, which implicitly uses sum operator. + # The embeddings are represented as tall matrices, with sum + # happening vertically across 0 axis, resulting in a row vector + E = emb_l[k] + V = E( + sparse_index_group_batch, + sparse_offset_group_batch, + ) + + ly.append(V) + + return ly + + def interact_features(self, x, ly): + if args.ipex_interaction: + T = [x] + list(ly) + R = ipex.nn.functional.interaction(*T) + else: + # concatenate dense and sparse features + (batch_size, d) = x.shape + T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d)) + # perform a dot product + Z = torch.bmm(T, torch.transpose(T, 1, 2)) + # append dense feature with the interactions (into a row vector) + # approach 1: all + # Zflat = Z.view((batch_size, -1)) + # approach 2: unique + _, ni, nj = Z.shape + # approach 1: tril_indices + # offset = -1 + # li, lj = torch.tril_indices(ni, nj, offset=offset) + # approach 2: custom + offset = 0 + li = torch.tensor([i for i in range(ni) for j in range(i + offset)]) + lj = torch.tensor([j for i in range(nj) for j in range(i + offset)]) + Zflat = Z[:, li, lj] + # concatenate dense features and interactions + R = torch.cat([x] + [Zflat], dim=1) + return R + + def forward(self, dense_x, *emb_args): + if ext_dist.my_size > 1: + return self.distributed_forward(dense_x, *emb_args) + else: + return self.sequential_forward(dense_x, *emb_args) + + def distributed_forward(self, dense_x, *emb_args): + batch_size = dense_x.size()[0] + vector_lenght = self.emb_l.weights[0].size()[1] + # WARNING: # of ranks must be <= batch size in distributed_forward call + if batch_size < ext_dist.my_size: + sys.exit("ERROR: batch_size (%d) must be larger than number of ranks (%d)" % (batch_size, ext_dist.my_size)) + + # embeddings + ly = self.apply_emb(self.emb_l, *emb_args) + a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank) + # bottom mlp + x = self.apply_mlp(dense_x, self.bot_l) + ly = a2a_req.wait() + _ly = [] + for item in ly: + _ly += [item[:, emb_id * vector_lenght: (emb_id + 1) * 
vector_lenght] for emb_id in range(self.emb_l.n_tables)] + # interactions + z = self.interact_features(x, _ly) + # top mlp + p = self.apply_mlp(z, self.top_l) + # clamp output if needed + if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: + z = torch.clamp( + p, min=self.loss_threshold, max=(1.0 - self.loss_threshold) + ) + else: + z = p + return z + + + def sequential_forward(self, dense_x, *emb_args): + # process dense features (using bottom mlp), resulting in a row vector + x = self.apply_mlp(dense_x, self.bot_l) + # debug prints + # print("intermediate") + # print(x.detach().cpu().numpy()) + + # process sparse features(using embeddings), resulting in a list of row vectors + ly = self.apply_emb(self.emb_l, *emb_args) + # for y in ly: + # print(y.detach().cpu().numpy()) + + # interact features (dense and sparse) + z = self.interact_features(x, ly) + # print(z.detach().cpu().numpy()) + + # obtain probability of a click (using top mlp) + p = self.apply_mlp(z, self.top_l) + + # clamp output if needed + if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: + z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold)) + else: + z = p + + return z + + +def dash_separated_ints(value): + vals = value.split("-") + for val in vals: + try: + int(val) + except ValueError: + raise argparse.ArgumentTypeError( + "%s is not a valid dash separated list of ints" % value + ) + + return value + + +def trace_or_load_model(args, dlrm, test_ld, inplace=True): + dlrm.eval() + for j, inputBatch in enumerate(test_ld): + X, lS_o, lS_i, _, _, _ = unpack_batch(inputBatch) + if args.bf16: + # at::GradMode::is_enabled() will query a threadlocal flag + # but new thread generate from throughputbench mark will + # init this flag to true, so we temporal cast embedding's + # weight to bfloat16 for now + if args.inference_only: + dlrm.emb_l.bfloat16() + dlrm = ipex.optimize(dlrm, dtype=torch.bfloat16, inplace=inplace) + elif args.int8 and not args.tune: + if args.num_cpu_cores != 0: + torch.set_num_threads(args.num_cpu_cores) + from neural_compressor.torch.quantization import load + dlrm = load(args.save_model) + elif args.int8 and args.tune: + dlrm = dlrm + else: + dlrm = ipex.optimize(dlrm, dtype=torch.float, inplace=True, auto_kernel_selection=True) + with torch.cpu.amp.autocast(enabled=args.bf16): + dlrm = torch.jit.trace(dlrm, (X, lS_o, lS_i), check_trace=True) + dlrm = torch.jit.freeze(dlrm) + dlrm(X, lS_o, lS_i) + dlrm(X, lS_o, lS_i) + return dlrm + + +def run_throughput_benchmark(args, dlrm, test_ld): + bench = ThroughputBenchmark(dlrm) + for j, inputBatch in enumerate(test_ld): + X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch) + bench.add_input(X, lS_o, lS_i) + if args.num_batches > 0 and j == args.num_batches: + break + args.num_batches = args.num_batches if args.num_batches > 0 else j + stats = bench.benchmark( + num_calling_threads=args.share_weight_instance, + num_warmup_iters=100, + num_iters=args.num_batches * args.share_weight_instance, + ) + print(stats) + latency = stats.latency_avg_ms + throughput = (1 / latency) * 1000 * test_ld.dataset.batch_size * args.share_weight_instance + print("throughput: {:.3f} fps".format(throughput)) + print("latency: {:.5f} ms".format(1/throughput * 1000)) + exit(0) + + +def inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld, + trace=True +): + test_accu = 0 + test_samp = 0 + + if args.print_auc: + scores = [] + targets = [] + + total_time = 0 + total_iter = 0 + if args.inference_only and trace: + dlrm = 
trace_or_load_model(args, dlrm, test_ld) + if args.share_weight_instance != 0: + run_throughput_benchmark(args, dlrm, test_ld) + with torch.cpu.amp.autocast(enabled=args.bf16): + for i, testBatch in enumerate(test_ld): + should_print = ((i + 1) % args.print_freq == 0 or i + 1 == len(test_ld)) and args.inference_only + if should_print: + gT = 1000.0 * total_time / total_iter + print( + "Finished {} it {}/{}, {:.2f} ms/it,".format( + "inference", i + 1, len(test_ld), gT + ), + flush=True, + ) + total_time = 0 + total_iter = 0 + # early exit if nbatches was set by the user and was exceeded + if args.inference_only and nbatches > 0 and i >= nbatches: + break + + X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch( + testBatch + ) + + # forward pass + + if not args.inference_only and isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + n_tables = lS_i_test.shape[0] + idx = [lS_i_test[i] for i in range(n_tables)] + offset = [lS_o_test[i] for i in range(n_tables)] + include_last = [False for i in range(n_tables)] + indices, offsets, indices_with_row_offsets = dlrm.emb_l.linearize_indices_and_offsets(idx, offset, include_last) + + start = time_wrap() + if not args.inference_only and isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + Z_test = dlrm(X_test, indices, offsets, indices_with_row_offsets) + else: + Z_test = dlrm(X_test, lS_o_test, lS_i_test) + + + total_time += (time_wrap() - start) + total_iter += 1 + + if args.print_auc: + S_test = Z_test.detach().cpu().float().numpy() # numpy array + T_test = T_test.detach().cpu().float().numpy() # numpy array + scores.append(S_test) + targets.append(T_test) + elif not args.inference_only: + with record_function("DLRM accuracy compute"): + # compute loss and accuracy + S_test = Z_test.detach().cpu().float().numpy() # numpy array + T_test = T_test.detach().cpu().float().numpy() # numpy array + + mbs_test = T_test.shape[0] # = mini_batch_size except last + A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8)) + + test_accu += A_test + test_samp += mbs_test + else: + # do nothing to save time + pass + + if args.print_auc: + with record_function("DLRM mlperf sklearn metrics compute"): + scores = np.concatenate(scores, axis=0) + targets = np.concatenate(targets, axis=0) + + metrics = { + "recall": lambda y_true, y_score: sklearn.metrics.recall_score( + y_true=y_true, y_pred=np.round(y_score) + ), + "precision": lambda y_true, y_score: sklearn.metrics.precision_score( + y_true=y_true, y_pred=np.round(y_score) + ), + "f1": lambda y_true, y_score: sklearn.metrics.f1_score( + y_true=y_true, y_pred=np.round(y_score) + ), + "ap": sklearn.metrics.average_precision_score, + "roc_auc": sklearn.metrics.roc_auc_score, + "accuracy": lambda y_true, y_score: sklearn.metrics.accuracy_score( + y_true=y_true, y_pred=np.round(y_score) + ), + } + + validation_results = {} + for metric_name, metric_function in metrics.items(): + validation_results[metric_name] = metric_function(targets, scores) + acc_test = validation_results["accuracy"] + elif not args.inference_only: + acc_test = test_accu / test_samp + else: + pass + + model_metrics_dict = { + "nepochs": args.nepochs, + "nbatches": nbatches, + "nbatches_test": nbatches_test, + } + if not args.inference_only: + model_metrics_dict["test_acc"] = acc_test + + if args.print_auc: + is_best = validation_results["roc_auc"] > best_auc_test + if is_best: + best_auc_test = validation_results["roc_auc"] + model_metrics_dict["test_auc"] = best_auc_test + print( + "recall 
{:.4f}, precision {:.4f},".format( + validation_results["recall"], + validation_results["precision"], + ) + + " f1 {:.4f}, ap {:.4f},".format( + validation_results["f1"], validation_results["ap"] + ) + + " auc {:.4f}, best auc {:.4f},".format( + validation_results["roc_auc"], best_auc_test + ) + + " accuracy {:3.3f} %, best accuracy {:3.3f} %".format( + validation_results["accuracy"] * 100, best_acc_test * 100 + ), + flush=True, + ) + print("Accuracy: {:.34} ".format(validation_results["roc_auc"])) + elif not args.inference_only: + is_best = acc_test > best_acc_test + if is_best: + best_acc_test = acc_test + print( + " accuracy {:3.3f} %, best {:3.3f} %".format( + acc_test * 100, best_acc_test * 100 + ), + flush=True, + ) + else: + pass + if not args.inference_only: + return model_metrics_dict, is_best + else: + return validation_results["roc_auc"] + + +def run(): + ### parse arguments ### + parser = argparse.ArgumentParser( + description="Train Deep Learning Recommendation Model (DLRM)" + ) + # model related parameters + parser.add_argument("--arch-sparse-feature-size", type=int, default=2) + parser.add_argument( + "--arch-embedding-size", type=dash_separated_ints, default="4-3-2" + ) + # j will be replaced with the table number + parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2") + parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1") + # activations and loss + parser.add_argument("--activation-function", type=str, default="relu") + parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7 + parser.add_argument("--round-targets", type=bool, default=False) + # data + parser.add_argument("--num-batches", type=int, default=0) + parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte + parser.add_argument("--raw-data-file", type=str, default="") + parser.add_argument("--processed-data-file", type=str, default="") + parser.add_argument("--max-ind-range", type=int, default=-1) + parser.add_argument("--memory-map", action="store_true", default=False) + parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] + parser.add_argument("--data-randomize", type=str, default="total") # or day or none + parser.add_argument( + "--dataset-multiprocessing", + action="store_true", + default=False, + help="The Kaggle dataset can be multiprocessed in an environment \ + with more than 7 CPU cores and more than 20 GB of memory. 
\n \ + The Terabyte dataset can be multiprocessed in an environment \ + with more than 24 CPU cores and at least 1 TB of memory.", + ) + # training + parser.add_argument("--mini-batch-size", type=int, default=1) + parser.add_argument("--nepochs", type=int, default=1) + parser.add_argument("--learning-rate", type=float, default=0.01) + parser.add_argument("--print-precision", type=int, default=5) + parser.add_argument("--numpy-rand-seed", type=int, default=123) + # inference + parser.add_argument("--inference-only", action="store_true", default=False) + # store/load model + parser.add_argument("--save-model", type=str, default="") + parser.add_argument("--load-model", type=str, default="") + # debugging and profiling + parser.add_argument("--print-freq", type=int, default=1) + parser.add_argument("--test-freq", type=int, default=-1) + parser.add_argument("--test-mini-batch-size", type=int, default=-1) + parser.add_argument("--print-time", action="store_true", default=False) + parser.add_argument("--print-wall-time", action="store_true", default=False) + parser.add_argument("--enable-profiling", action="store_true", default=False) + # stop at target AUC Terabyte (no subsampling) 0.8025 + parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0) + parser.add_argument("--mlperf-bin-loader", action="store_true", default=False) + parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False) + # LR policy + parser.add_argument("--lr-num-warmup-steps", type=int, default=0) + parser.add_argument("--lr-decay-start-step", type=int, default=0) + parser.add_argument("--lr-num-decay-steps", type=int, default=0) + # intel + parser.add_argument("--print-auc", action="store_true", default=False) + parser.add_argument("--should-test", action="store_true", default=False) + parser.add_argument("--bf16", action="store_true", default=False) + parser.add_argument("--share-weight-instance", type=int, default=0) + parser.add_argument("--num-cpu-cores", type=int, default=0) + parser.add_argument("--ipex-interaction", action="store_true", default=False) + parser.add_argument("--ipex-merged-emb", action="store_true", default=False) + parser.add_argument("--num-warmup-iters", type=int, default=1000) + parser.add_argument("--int8", action="store_true", default=False) + parser.add_argument("--dist-backend", type=str, default="ccl") + parser.add_argument("--tune", action="store_true", default=False) + parser.add_argument("--benchmark", action="store_true", default=False) + parser.add_argument("--accuracy_only", action="store_true", default=False) + + global args + global nbatches + global nbatches_test + args = parser.parse_args() + ext_dist.init_distributed(backend=args.dist_backend) + + + ### some basic setup ### + np.random.seed(args.numpy_rand_seed) + np.set_printoptions(precision=args.print_precision) + torch.set_printoptions(precision=args.print_precision) + torch.manual_seed(args.numpy_rand_seed) + + if args.test_mini_batch_size < 0: + # if the parameter is not set, use the training batch size + args.test_mini_batch_size = args.mini_batch_size + + device = torch.device("cpu") + print("Using CPU...") + + ### prepare training data ### + ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-") + # input data + train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args) + nbatches = args.num_batches if args.num_batches > 0 else len(train_ld) + nbatches_test = len(test_ld) + + ln_emb = train_data.counts + # enforce maximum limit on number of vectors per embedding 
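+    # e.g. with --max-ind-range=10000000 (illustrative value) a table with more than
+    # 10M unique ids is capped at 10M rows; the matching index reduction
+    # (X_cat % max_ind_range) is applied by the dataset code (CriteoDataset.__getitem__),
+    # so lookups stay within the smaller tables.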
+ if args.max_ind_range > 0: + ln_emb = np.array( + list( + map( + lambda x: x if x < args.max_ind_range else args.max_ind_range, + ln_emb, + ) + ) + ) + else: + ln_emb = np.array(ln_emb) + m_den = train_data.m_den + ln_bot[0] = m_den + + args.ln_emb = ln_emb.tolist() + + ### parse command line arguments ### + m_spa = args.arch_sparse_feature_size + ln_emb = np.asarray(ln_emb) + num_fea = ln_emb.size + 1 # num sparse + num dense features + + m_den_out = ln_bot[ln_bot.size - 1] + # approach 1: all + # num_int = num_fea * num_fea + m_den_out + # approach 2: unique + num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out + + arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top + ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-") + + ### construct the neural network specified above ### + # WARNING: to obtain exactly the same initialization for + # the weights we need to start from the same random seed. + # np.random.seed(args.numpy_rand_seed) + global dlrm + dlrm = DLRM_Net( + m_spa, + ln_emb, + ln_bot, + ln_top, + sigmoid_bot=-1, + sigmoid_top=ln_top.size - 2, + loss_threshold=args.loss_threshold, + ) + if args.ipex_merged_emb: + dlrm.emb_l = ipex.nn.modules.MergedEmbeddingBagWithSGD.from_embeddingbag_list(dlrm.emb_l, lr=args.learning_rate) + dlrm.need_linearize_indices_and_offsets = torch.BoolTensor([False]) + + if not args.inference_only: + optimizer = torch.optim.SGD(dlrm.parameters(), lr=args.learning_rate) + lr_scheduler = LRPolicyScheduler( + optimizer, + args.lr_num_warmup_steps, + args.lr_decay_start_step, + args.lr_num_decay_steps, + ) + + ### main loop ### + + # training or inference + best_acc_test = 0 + best_auc_test = 0 + skip_upto_epoch = 0 + skip_upto_batch = 0 + total_time = 0 + total_loss = 0 + total_iter = 0 + total_samp = 0 + + # Load model is specified + if not (args.load_model == ""): + print("Loading saved model {}".format(args.load_model)) + ld_model = torch.load(args.load_model, map_location=torch.device("cpu")) + dlrm.load_state_dict(ld_model["state_dict"]) + ld_j = ld_model["iter"] + ld_k = ld_model["epoch"] + ld_nepochs = ld_model["nepochs"] + ld_nbatches = ld_model["nbatches"] + ld_nbatches_test = ld_model["nbatches_test"] + ld_train_loss = ld_model["train_loss"] + ld_total_loss = ld_model["total_loss"] + ld_acc_test = ld_model["test_acc"] + if not args.inference_only: + optimizer.load_state_dict(ld_model["opt_state_dict"]) + best_acc_test = ld_acc_test + total_loss = ld_total_loss + skip_upto_epoch = ld_k # epochs + skip_upto_batch = ld_j # batches + else: + args.print_freq = ld_nbatches + args.test_freq = 0 + + print( + "Saved at: epoch = {:d}/{:d}, batch = {:d}/{:d}, ntbatch = {:d}".format( + ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test + ) + ) + print( + "Training state: loss = {:.6f}".format( + ld_train_loss, + ) + ) + print("Testing state: accuracy = {:3.3f} %".format(ld_acc_test * 100)) + del(ld_model) + + ext_dist.barrier() + print("time/loss/accuracy (if enabled):") + + if args.tune: + # evaluation + def eval_func(model): + args.int8 = getattr(model, "is_quantized", False) + with torch.no_grad(): + return inference( + args, + model, + best_acc_test, + best_auc_test, + test_ld, + trace=args.int8 + ) + + # calibration + def calib_fn(model): + calib_iter = 0 + for X_test, lS_o_test, lS_i_test, T in train_ld: + if calib_iter < 100: + model(X_test, lS_o_test, lS_i_test) + calib_iter += 1 + else: + break + + X_test, lS_o_test, lS_i_test, T = next(iter(train_ld)) + example_inputs = (X_test, lS_o_test, lS_i_test) + assert 
args.inference_only, "Please set inference_only in arguments" + from neural_compressor.torch.quantization import StaticQuantConfig, autotune, TuningConfig + tune_config = TuningConfig(config_set=StaticQuantConfig.get_config_set_for_tuning()) + + dlrm = autotune( + dlrm, + tune_config=tune_config, + eval_fn=eval_func, + run_fn=calib_fn, + example_inputs=example_inputs, + ) + dlrm.save(args.save_model) + exit(0) + if args.benchmark: + dlrm = trace_or_load_model(args, dlrm, test_ld, inplace=True) + import time + X_test, lS_o_test, lS_i_test, T = next(iter(test_ld)) + total_iters = 100 + warmup_iters = 5 + with torch.no_grad(): + for i in range(total_iters): + if i == warmup_iters: + start = time.time() + dlrm(X_test, lS_o_test, lS_i_test) + end = time.time() + latency = (end - start) / ((total_iters - warmup_iters) * args.mini_batch_size) + throughput = ((total_iters - warmup_iters) * args.mini_batch_size) / (end - start) + print('Batch size = {:d}'.format(args.mini_batch_size)) + print('Latency: {:.3f} ms'.format(latency * 10**3)) + print('Throughput: {:.3f} samples/sec'.format(throughput)) + exit(0) + + if args.accuracy_only: + with torch.no_grad(): + inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld + ) + exit(0) + + + if args.bf16 and not args.inference_only: + for j, inputBatch in enumerate(train_ld): + X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch) + if ext_dist.my_size > 1: + local_bs = X.size()[0] // ext_dist.my_size + rank_id = dlrm.rank + X = X[rank_id * local_bs: (rank_id + 1) * local_bs] + T = T[rank_id * local_bs: (rank_id + 1) * local_bs] + global_bs = local_bs * ext_dist.my_size + lS_o = lS_o[:, :global_bs] + lS_i = lS_i[:, :global_bs] + + if isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + if ext_dist.my_size > 1: + batch_size = X.size()[0] + g_i = lS_i[dlrm.local_ln_emb] + g_o = lS_o[dlrm.local_ln_emb] + n_tables = g_i.shape[0] + idx = [g_i[i] for i in range(n_tables)] + offset = [g_o[i] for i in range(n_tables)] + include_last = [False for i in range(n_tables)] + indices, offsets, indices_with_row_offsets = dlrm.emb_l.linearize_indices_and_offsets(idx, offset, include_last) + else: + n_tables = lS_i.shape[0] + idx = [lS_i[i] for i in range(n_tables)] + offset = [lS_o[i] for i in range(n_tables)] + include_last = [False for i in range(n_tables)] + indices, offsets, indices_with_row_offsets = dlrm.emb_l.linearize_indices_and_offsets(idx, offset, include_last) + if isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + sample_input = (X, indices, offsets, indices_with_row_offsets) + else: + sample_input = (X, lS_o, lS_i) + break + dlrm, optimizer = ipex.optimize(dlrm, dtype=torch.bfloat16, optimizer=optimizer, inplace=True, sample_input=sample_input) + + if args.ipex_merged_emb: + dlrm.emb_l.to_bfloat16_train() + for i in range(len(dlrm.top_l)): + if isinstance(dlrm.top_l[i], ipex.nn.utils._weight_prepack._IPEXLinear): + if isinstance(dlrm.top_l[i+1], torch.nn.ReLU): + dlrm.top_l[i] = ipex.nn.modules.IPEXLinearEltwise(dlrm.top_l[i], 'relu') + else: + dlrm.top_l[i] = ipex.nn.modules.IPEXLinearEltwise(dlrm.top_l[i], 'sigmoid') + dlrm.top_l[i + 1] = torch.nn.Identity() + for i in range(len(dlrm.bot_l)): + if isinstance(dlrm.bot_l[i], ipex.nn.utils._weight_prepack._IPEXLinear): + if isinstance(dlrm.bot_l[i+1], torch.nn.ReLU): + dlrm.bot_l[i] = ipex.nn.modules.IPEXLinearEltwise(dlrm.bot_l[i], 'relu') + else: + dlrm.bot_l[i] = ipex.nn.modules.IPEXLinearEltwise(dlrm.bot_l[i], 'sigmoid') + dlrm.bot_l[i + 1] = 
torch.nn.Identity() + + if ext_dist.my_size > 1: + dlrm.bot_l = ext_dist.DDP(dlrm.bot_l) + dlrm.top_l = ext_dist.DDP(dlrm.top_l) + training_record = [0, 0] + def update_training_performance(time, iters, training_record=training_record): + if iters > args.num_warmup_iters: + training_record[0] += time + training_record[1] += 1 + + def print_training_performance(training_record=training_record): + if training_record[0] == 0: + print("num-batches larger than warm up iters, please increase num-batches or decrease warmup iters") + exit() + total_samples = training_record[1] * args.mini_batch_size + throughput = total_samples / training_record[0] * 1000 + print("throughput: {:.3f} fps".format(throughput)) + + test_freq = args.test_freq if args.test_freq != -1 else nbatches // 20 + with torch.autograd.profiler.profile( + enabled=args.enable_profiling, use_cuda=False, record_shapes=False + ) as prof: + if not args.inference_only: + k = 0 + while k < args.nepochs: + + if k < skip_upto_epoch: + continue + + for j, inputBatch in enumerate(train_ld): + + if j < skip_upto_batch: + continue + + X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch) + if ext_dist.my_size > 1: + local_bs = X.size()[0] // ext_dist.my_size + rank_id = dlrm.rank + X = X[rank_id * local_bs: (rank_id + 1) * local_bs] + T = T[rank_id * local_bs: (rank_id + 1) * local_bs] + global_bs = local_bs * ext_dist.my_size + lS_o = lS_o[:, :global_bs] + lS_i = lS_i[:, :global_bs] + + if isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + if ext_dist.my_size > 1: + batch_size = X.size()[0] + g_i = lS_i[dlrm.local_ln_emb] + g_o = lS_o[dlrm.local_ln_emb] + n_tables = g_i.shape[0] + idx = [g_i[i] for i in range(n_tables)] + offset = [g_o[i] for i in range(n_tables)] + include_last = [False for i in range(n_tables)] + indices, offsets, indices_with_row_offsets = dlrm.emb_l.linearize_indices_and_offsets(idx, offset, include_last) + else: + n_tables = lS_i.shape[0] + idx = [lS_i[i] for i in range(n_tables)] + offset = [lS_o[i] for i in range(n_tables)] + include_last = [False for i in range(n_tables)] + indices, offsets, indices_with_row_offsets = dlrm.emb_l.linearize_indices_and_offsets(idx, offset, include_last) + + t1 = time_wrap() + + # early exit if nbatches was set by the user and has been exceeded + if nbatches > 0 and j >= nbatches: + break + + mbs = T.shape[0] # = args.mini_batch_size except maybe for last + + # forward pass + with torch.cpu.amp.autocast(enabled=args.bf16): + if isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + Z = dlrm_wrap( + X, + indices, + offsets, + indices_with_row_offsets + ).float() + else: + Z = dlrm_wrap( + X, + lS_o, + lS_i, + ).float() + + # loss + E = loss_fn_wrap(Z, T) + + # compute loss and accuracy + L = E.detach().cpu().numpy() # numpy array + + with record_function("DLRM backward"): + # scaled error gradient propagation + # (where we do not accumulate gradients across mini-batches) + optimizer.zero_grad(set_to_none=True) + # backward pass + E.backward() + + with record_function("DLRM update"): + # optimizer + optimizer.step() + lr_scheduler.step() + if isinstance(dlrm.emb_l, ipex.nn.modules.MergedEmbeddingBagWithSGD): + dlrm.emb_l.sgd_args = dlrm.emb_l.sgd_args._replace(lr=lr_scheduler.get_last_lr()[0]) + + t2 = time_wrap() + total_time += t2 - t1 + + total_loss += L * mbs + total_iter += 1 + total_samp += mbs + + should_print = ((j + 1) % args.print_freq == 0) or ( + j + 1 == nbatches + ) + should_test = ( + (args.should_test) + and (((j + 1) % test_freq == 0) or (j + 
1 == nbatches)) + ) + + # print time, loss and accuracy + if should_print or should_test: + gT = 1000.0 * total_time / total_iter if args.print_time else -1 + total_time = 0 + + train_loss = total_loss / total_samp + total_loss = 0 + + str_run_type = ( + "inference" if args.inference_only else "training" + ) + + wall_time = "" + if args.print_wall_time: + wall_time = " ({})".format(time.strftime("%H:%M")) + + print( + "Finished {} it {}/{} of epoch {}, {:.2f} ms/it,".format( + str_run_type, j + 1, nbatches, k, gT + ) + + " loss {:.6f}".format(train_loss) + + wall_time, + flush=True, + ) + update_training_performance(gT, j) + + total_iter = 0 + total_samp = 0 + + # testing + if should_test: + model_metrics_dict, is_best = inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld, + ) + + if ( + is_best + and not (args.save_model == "") + and not args.inference_only + ): + model_metrics_dict["epoch"] = k + model_metrics_dict["iter"] = j + 1 + model_metrics_dict["train_loss"] = train_loss + model_metrics_dict["total_loss"] = total_loss + model_metrics_dict[ + "opt_state_dict" + ] = optimizer.state_dict() + print("Saving model to {}".format(args.save_model)) + torch.save(model_metrics_dict, args.save_model) + + if ( + (args.mlperf_auc_threshold > 0) + and (best_auc_test > args.mlperf_auc_threshold) + ): + print( + "MLPerf testing auc threshold " + + str(args.mlperf_auc_threshold) + + " reached, stop training" + ) + k += 1 # nepochs + else: + print("Testing for inference only") + with torch.no_grad(): + inference( + args, + dlrm, + best_acc_test, + best_auc_test, + test_ld + ) + + # profiling + if not args.inference_only: + print_training_performance() + + if args.enable_profiling: + time_stamp = str(datetime.datetime.now()).replace(" ", "_") + with open("dlrm_s_pytorch" + time_stamp + "_shape.prof", "w") as prof_f: + prof_f.write( + prof.key_averages(group_by_input_shape=True).table( + sort_by="self_cpu_time_total" + ) + ) + with open("dlrm_s_pytorch" + time_stamp + "_total.prof", "w") as prof_f: + prof_f.write(prof.key_averages().table(sort_by="self_cpu_time_total")) + prof.export_chrome_trace("dlrm_s_pytorch" + time_stamp + ".json") + exit(0) + +if __name__ == "__main__": + run() diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/extend_distributed.py b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/extend_distributed.py new file mode 100644 index 00000000000..0b117975b25 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/extend_distributed.py @@ -0,0 +1,424 @@ +import os +import builtins +import numpy as np +import torch +from torch.autograd import Function +from torch.nn.parallel import DistributedDataParallel as DDP +import torch.distributed as dist +try: + import torch_ccl +except ImportError as e: + #print(e) + torch_ccl = False + +my_rank = -1 +my_size = -1 +my_local_rank = -1 +my_local_size = -1 +alltoall_supported = False +allgatherv_supported = False +a2a_impl = os.environ.get('DLRM_ALLTOALL_IMPL', '') + +myreq = None + +def env2int(env_list, default = -1): + for e in env_list: + val = int(os.environ.get(e, -1)) + if val >= 0: return val + return default + +def get_my_slice(n): + my_size = dist.get_world_size() + my_rank = dist.get_rank() + k, m = divmod(n, my_size) + return slice(my_rank * k + min(my_rank, m), (my_rank+1) * k + min(my_rank+1, m), 1) + +def get_split_lengths(n): + my_size = dist.get_world_size() + k, m = divmod(n, my_size) + if m == 0: + splits = None + my_len = k + else: + my_rank 
= dist.get_rank() + splits = [(k+1) if i < m else k for i in range(my_size)] + my_len = splits[my_rank] + return (my_len, splits) + +def init_distributed(rank = -1, size = -1, backend=''): + global myreq + #global my_rank + global my_size + global my_local_rank + global my_local_size + global a2a_impl + global alltoall_supported + global allgatherv_supported + # guess MPI ranks from env (works for IMPI, OMPI and MVAPICH2) + num_mpi_ranks = env2int(['PMI_SIZE', 'OMPI_COMM_WORLD_SIZE', 'MV2_COMM_WORLD_SIZE', 'WORLD_SIZE']) + if backend == '' and num_mpi_ranks > 1: + if torch_ccl and env2int(['CCL_WORKER_COUNT']) > 0: + backend = 'ccl' + elif dist.is_mpi_available(): + backend = 'mpi' + else: + print("WARNING: MPI multi-process launch detected but PyTorch MPI backend not available.") + backend = 'gloo' + if backend != '': + #guess Rank and size + if rank == -1: + rank = env2int(['PMI_RANK', 'OMPI_COMM_WORLD_RANK', 'MV2_COMM_WORLD_RANK', 'RANK'], 0) + if size == -1: + size = env2int(['PMI_SIZE', 'OMPI_COMM_WORLD_SIZE', 'MV2_COMM_WORLD_SIZE', 'WORLD_SIZE'], 1) + if not os.environ.get('RANK', None) and rank != -1: os.environ['RANK'] = str(rank) + if not os.environ.get('WORLD_SIZE', None) and size != -1: os.environ['WORLD_SIZE'] = str(size) + if not os.environ.get('MASTER_PORT', None): os.environ['MASTER_PORT'] = '29500' + if not os.environ.get('MASTER_ADDR', None): + local_size = env2int(['MPI_LOCALNRANKS', 'OMPI_COMM_WORLD_LOCAL_SIZE', 'MV2_COMM_WORLD_LOCAL_SIZE'], 1) + if local_size != size and backend != 'mpi': + print("Warning: Looks like distributed multinode run but MASTER_ADDR env not set, using '127.0.0.1' as default") + print("If this run hangs, try exporting rank 0's hostname as MASTER_ADDR") + os.environ['MASTER_ADDR'] = '127.0.0.1' + if size > 1: + dist.init_process_group(backend, rank=rank, world_size=size) + my_rank = dist.get_rank() + my_size = dist.get_world_size() + my_local_rank = env2int(['MPI_LOCALRANKID', 'OMPI_COMM_WORLD_LOCAL_RANK', 'MV2_COMM_WORLD_LOCAL_RANK'], 0) + my_local_size = env2int(['MPI_LOCALNRANKS', 'OMPI_COMM_WORLD_LOCAL_SIZE', 'MV2_COMM_WORLD_LOCAL_SIZE'], 1) + if my_rank == 0: print("Running on %d ranks using %s backend" % (my_size, backend)) + if backend == 'ccl': + print("Using CCL_ATL_TRANSPORT=%s" % os.environ.get('CCL_ATL_TRANSPORT', '(default)')) + print("Using CCL_ATL_SHM=%s" % os.environ.get('CCL_ATL_SHM', '(default)')) + if hasattr(dist, 'all_to_all_single'): + try: + # dist.all_to_all_single(torch.empty([0]), torch.empty([0])) + alltoall_supported = True + except RuntimeError: + pass + if a2a_impl == 'alltoall' and alltoall_supported == False: + print("Requested DLRM_ALLTOALL_IMPL=%s but backend %s does not support it, use scatter/gather based alltoall" % (a2a_impl, backend)) + a2a_impl = 'scatter' + if a2a_impl != '': print("Using DLRM_ALLTOALL_IMPL=%s" % a2a_impl) + try: + x = torch.ones([my_rank]) + y = torch.zeros([(my_size*(my_size-1))//2]) + y = list(y.split([r for r in range(my_size)])) + dist.all_gather(y, x) + allgatherv_supported = True + except RuntimeError: + pass + else: + my_rank = 0 + my_size = 1 + my_local_rank = 0 + my_local_size = 1 + myreq = Request() + +class Request(object): + def __init__(self): + self.req = None + self.tensor = None + self.WaitFunction = All2All_Scatter_Wait + + def wait(self): + ret = self.WaitFunction.apply(*self.tensor) + self.req = None + self.tensor = None + return ret + +class All2All_ScatterList_Req(Function): + @staticmethod + def forward(ctx, a2ai, *inputs): + global myreq + my_rank = dist.get_rank() 
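+        # Scatter-list all-to-all: each source rank scatters every one of its embedding-table outputs, split along the batch dimension, so each rank gathers its own minibatch rows for all tables; the async requests are completed in All2All_ScatterList_Wait.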
+ #print("All2All_ScatterList_Req:forward") + mb_split_lengths = a2ai.gNS if a2ai.gNS else a2ai.lN + emb_split_lengths = a2ai.gSS if a2ai.gSS else [a2ai.lS] * my_size + gather_list = [] + req_list = [] + for i in range(my_size): + for j in range(emb_split_lengths[i]): + out_tensor = inputs[0].new_empty([a2ai.lN, a2ai.E]) + scatter_list = list(inputs[j].split(mb_split_lengths, dim = 0)) if i == my_rank else [] + req = dist.scatter(out_tensor, scatter_list, src=i, async_op=True) + gather_list.append(out_tensor) + req_list.append(req) + myreq.req = req_list + myreq.tensor = tuple(gather_list) + myreq.a2ai = a2ai + return myreq.tensor + + @staticmethod + def backward(ctx, *grad_output): + global myreq + #print("All2All_ScatterList_Req:backward") + for r in myreq.req: + r.wait() + myreq.req = None + grad_inputs = myreq.tensor + myreq.tensor = None + return (None, *grad_inputs) + + +class All2All_ScatterList_Wait(Function): + @staticmethod + def forward(ctx, *output): + global myreq + #print("All2All_Scatter_Wait:forward") + ctx.a2ai = myreq.a2ai + for r in myreq.req: + r.wait() + myreq.req = None + myreq.tensor = None + return output + + @staticmethod + def backward(ctx, *grad_output): + global myreq + my_rank = dist.get_rank() + a2ai = ctx.a2ai + grad_output = [t.contiguous() for t in grad_output] + mb_split_lengths = a2ai.gNS if a2ai.gNS else [a2ai.lN] * my_size + per_rank_split_lengths = a2ai.gSS if a2ai.gSS else [a2ai.lS] * my_size + grad_inputs = [grad_output[0].new_empty([ctx.a2ai.N, ctx.a2ai.E]) for _ in range(a2ai.lS)] + req_list = [] + ind = 0 + for i in range(my_size): + for j in range(per_rank_split_lengths[i]): + gather_list = list(grad_inputs[j].split(mb_split_lengths, dim = 0)) if i == my_rank else None + req = dist.gather(grad_output[ind], gather_list, dst = i, async_op=True) + req_list.append(req) + ind += 1 + myreq.req = req_list + myreq.tensor = grad_inputs + return tuple(grad_output) + + + +class All2All_Scatter_Req(Function): + @staticmethod + def forward(ctx, a2ai, *inputs): + global myreq + #print("All2All_Scatter_Req:forward") + my_rank = dist.get_rank() + mb_split_lengths = a2ai.gNS if a2ai.gNS else a2ai.lN + emb_split_lengths = a2ai.gSS if a2ai.gSS else [a2ai.lS] * my_size + input = torch.cat(inputs, dim=1) + scatter_list = list(input.split(mb_split_lengths, dim=0)) + gather_list = [] + req_list = [] + for i in range(my_size): + out_tensor = input.new_empty([a2ai.lN, emb_split_lengths[i] * a2ai.E]) + req = dist.scatter(out_tensor, scatter_list if i == my_rank else [], src=i, async_op=True) + gather_list.append(out_tensor) + req_list.append(req) + myreq.req = req_list + myreq.tensor = tuple(gather_list) + myreq.a2ai = a2ai + ctx.a2ai = a2ai + return myreq.tensor + + @staticmethod + def backward(ctx, *grad_output): + global myreq + #print("All2All_Scatter_Req:backward") + for r in myreq.req: + r.wait() + myreq.req = None + grad_input = myreq.tensor + grad_inputs = grad_input.split(ctx.a2ai.E, dim=1) + myreq.tensor = None + return (None, *grad_inputs) + + +class All2All_Scatter_Wait(Function): + @staticmethod + def forward(ctx, *output): + global myreq + #print("All2All_Scatter_Wait:forward") + ctx.a2ai = myreq.a2ai + for r in myreq.req: + r.wait() + myreq.req = None + myreq.tensor = None + return output + + @staticmethod + def backward(ctx, *grad_output): + global myreq + my_rank = dist.get_rank() + #print("All2All_Scatter_Wait:backward") + assert len(grad_output) == my_size + scatter_list = [t.contiguous() for t in grad_output] + a2ai = ctx.a2ai + mb_split_lengths = 
a2ai.gNS if a2ai.gNS else a2ai.lN + emb_split_lengths = a2ai.gSS if a2ai.gSS else [a2ai.lS] * my_size + grad_input = grad_output[0].new_empty([a2ai.N, a2ai.E*a2ai.lS]) + gather_list = list(grad_input.split(mb_split_lengths, dim=0)) + req_list = [] + for i in range(my_size): + #req = dist.scatter(gather_list[i], scatter_list if i == my_rank else [], src=i, async_op=True) + req = dist.gather(scatter_list[i], gather_list if i == my_rank else [], dst=i, async_op=True) + req_list.append(req) + myreq.req = req_list + myreq.tensor = grad_input + return grad_output + + +class All2All_Req(Function): + @staticmethod + def forward(ctx, a2ai, *inputs): + global myreq + #print("All2All_Req:forward") + mb_split_lengths = a2ai.gNS + if mb_split_lengths: mb_split_lengths = [m * a2ai.lS * a2ai.E for m in mb_split_lengths] + emb_split_lengths = a2ai.gSS + if emb_split_lengths: emb_split_lengths = [a2ai.lN * e * a2ai.E for e in emb_split_lengths] + input = torch.cat(inputs, dim=1).view([-1]) + output = input.new_empty([a2ai.S*a2ai.lN*a2ai.E]) + req = dist.all_to_all_single(output, input, emb_split_lengths, mb_split_lengths, async_op=True) + myreq.req = req + myreq.tensor = [] + myreq.tensor.append(output) + myreq.tensor = tuple(myreq.tensor) + a2ai.mb_split_lengths = mb_split_lengths + a2ai.emb_split_lengths = emb_split_lengths + myreq.a2ai = a2ai + ctx.a2ai = a2ai + return myreq.tensor + + @staticmethod + def backward(ctx, *grad_output): + global myreq + #print("All2All_Req:backward") + a2ai = ctx.a2ai + myreq.req.wait() + myreq.req = None + grad_input = myreq.tensor + grad_inputs = grad_input.view([a2ai.N, -1]).split(a2ai.E, dim=1) + grad_inputs = [gin.contiguous() for gin in grad_inputs] + myreq.tensor = None + return (None, *grad_inputs) + + +class All2All_Wait(Function): + @staticmethod + def forward(ctx, *output): + global myreq + #print("All2All_Wait:forward") + a2ai = myreq.a2ai + ctx.a2ai = a2ai + myreq.req.wait() + myreq.req = None + myreq.tensor = None + emb_split_lengths = a2ai.emb_split_lengths if a2ai.emb_split_lengths else a2ai.lS * a2ai.lN * a2ai.E + outputs = output[0].split(emb_split_lengths) + outputs = tuple([out.view([a2ai.lN, -1]) for out in outputs]) + return outputs + + @staticmethod + def backward(ctx, *grad_outputs): + global myreq + #print("All2All_Wait:backward") + a2ai = ctx.a2ai + grad_outputs = [gout.contiguous().view([-1]) for gout in grad_outputs] + grad_output = torch.cat(grad_outputs) + grad_input = grad_output.new_empty([a2ai.N * a2ai.lS * a2ai.E]) + req = dist.all_to_all_single(grad_input, grad_output, a2ai.mb_split_lengths, a2ai.emb_split_lengths, async_op=True) + myreq.req = req + myreq.tensor = grad_input + return (grad_output,) + +class AllGather(Function): + + @staticmethod + def forward(ctx, input, global_lengths, dim=0): + if not isinstance(global_lengths, (list, tuple)): + global_lengths = [global_lengths] * my_size + my_rank = dist.get_rank() + assert(len(global_lengths) == my_size) + assert(global_lengths[my_rank] == input.size(dim)) + local_start = sum(global_lengths[:my_rank]) + + output_size = list(input.size()) + + ctx.dim = dim + ctx.local_start = local_start + ctx.local_length = global_lengths[my_rank] + + input = input.contiguous() + if dim == 0: + out_len = sum(global_lengths) + output_size[dim] = out_len + output = input.new_empty(output_size) + gather_list = list(output.split(global_lengths, dim=0)) + else: + gather_list = [torch.empty_like(input) for _ in range(my_size)] + gather_list = [] + for l in global_lengths: + output_size[dim] = l + 
gather_list.append(input.new_empty(output_size)) + + dist.all_gather(gather_list, input) + + if dim != 0: + output = torch.cat(gather_list, dim=dim) + + return output + + @staticmethod + def backward(ctx, grad_output): + # print("Inside All2AllBackward") + dim = ctx.dim + start = ctx.local_start + length = ctx.local_length + + grad_input = grad_output.narrow(dim, start, length) + + return (grad_input, None, None) + +class All2AllInfo(object): + pass + +def alltoall(inputs, per_rank_split_lengths): + global myreq + N, E = inputs[0].size() + a2ai = All2AllInfo() + a2ai.lS = len(inputs) + a2ai.gSS = per_rank_split_lengths + a2ai.lN, a2ai.gNS = get_split_lengths(N) + a2ai.E = E + a2ai.N = N + a2ai.S = sum(per_rank_split_lengths) if per_rank_split_lengths else a2ai.lS * my_size + if a2a_impl == '' and alltoall_supported or a2a_impl == 'alltoall': + output = All2All_Req.apply(a2ai, *inputs) + myreq.WaitFunction = All2All_Wait + elif a2a_impl == '' or a2a_impl == 'scatter': + #print("Using All2All_Scatter_Req") + output = All2All_Scatter_Req.apply(a2ai, *inputs) + myreq.WaitFunction = All2All_Scatter_Wait + elif a2a_impl == 'scatter_list': + #print("Using All2All_ScatterList_Req") + output = All2All_ScatterList_Req.apply(a2ai, *inputs) + myreq.WaitFunction = All2All_ScatterList_Wait + else: + print("Unknown value set for DLRM_ALLTOALL_IMPL (%s), please use one of [alltoall, scatter, scatter_list]" % a2a_impl) + return myreq + +def shuffle_data(inputs): + input = torch.cat(inputs) + output = input.new_empty(input.size()) + req = dist.all_to_all_single(output, input) + output = output.reshape(my_size, -1) + return output + + +def all_gather(input, lengths, dim=0): + #print("lengths: ", lengths) + if not lengths: lengths = [input.size(0)] * my_size + return AllGather.apply(input, lengths, dim) + +def barrier(): + if my_size > 1: + dist.barrier() + diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/requirements.txt b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/requirements.txt new file mode 100644 index 00000000000..859bbfc346b --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/requirements.txt @@ -0,0 +1,8 @@ +future +numpy +pydot +neural-compressor +scikit-learn +tqdm +torch>=1.11.0 +intel_extension_for_pytorch>=1.11.0 \ No newline at end of file diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/run_benchmark.sh b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/run_benchmark.sh new file mode 100755 index 00000000000..dc593308678 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/run_benchmark.sh @@ -0,0 +1,98 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + tuned_checkpoint=saved_results + batch_size=16384 + iters=100 + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo ${var} |cut -f2 -d=) + ;; + --config=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + MODEL_SCRIPT=dlrm_s_pytorch.py + + 
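+  # --mode=accuracy runs a full accuracy pass (--accuracy_only); --mode=performance runs incbench with --benchmark.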
# Create the output directory in case it doesn't already exist + mkdir -p ${tuned_checkpoint}/dlrm_inference_accuracy_log + + LOG=${tuned_checkpoint}/dlrm_inference_accuracy_log + + CORES=`lscpu | grep Core | awk '{print $4}'` + + ARGS="" + if [[ ${int8} == "true" ]]; then + echo "running int8 path" + ARGS="$ARGS --int8" + else + echo "running fp32 path" + fi + + if [[ ${mode} == "accuracy" ]]; then + python -u $MODEL_SCRIPT \ + --raw-data-file=${dataset_location}/day --processed-data-file=${dataset_location}/terabyte_processed.npz \ + --data-set=terabyte \ + --memory-map --mlperf-bin-loader --round-targets=True --learning-rate=1.0 \ + --arch-mlp-bot=13-512-256-128 --arch-mlp-top=1024-1024-512-256-1 \ + --arch-sparse-feature-size=128 --max-ind-range=40000000 \ + --numpy-rand-seed=727 --inference-only --ipex-interaction \ + --print-freq=100 --print-time --mini-batch-size=2048 --test-mini-batch-size=16384 \ + --save-model ${tuned_checkpoint} --test-freq=2048 --print-auc $ARGS \ + --load-model=${input_model} --accuracy_only + elif [[ ${mode} == "performance" ]]; then + incbench --num_cores_per_instance 4 -u $MODEL_SCRIPT \ + --raw-data-file=${dataset_location}/day --processed-data-file=${dataset_location}/terabyte_processed.npz \ + --data-set=terabyte --benchmark \ + --memory-map --mlperf-bin-loader --round-targets=True --learning-rate=1.0 \ + --arch-mlp-bot=13-512-256-128 --arch-mlp-top=1024-1024-512-256-1 \ + --arch-sparse-feature-size=128 --max-ind-range=40000000 --ipex-interaction \ + --numpy-rand-seed=727 --inference-only --num-batches=1000 \ + --print-freq=10 --print-time --mini-batch-size=128 --test-mini-batch-size=${batch_size} \ + --save-model ${tuned_checkpoint} + else + echo "Error: No such mode: ${mode}" + exit 1 + fi +} + +main "$@" diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/run_quant.sh b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/run_quant.sh new file mode 100755 index 00000000000..58d8b1fe491 --- /dev/null +++ b/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/run_quant.sh @@ -0,0 +1,68 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + tuned_checkpoint=saved_results + for var in "$@" + do + case $var in + --topology=*) + topology=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + tuned_checkpoint=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +CORES=`lscpu | grep Core | awk '{print $4}'` +# use first socket +numa_cmd="numactl -C 0-$((CORES-1)) " +echo "will run on core 0-$((CORES-1)) on socket 0" + +export OMP_NUM_THREADS=$CORES + +# run_tuning +function run_tuning { + MODEL_SCRIPT=dlrm_s_pytorch.py + + # Create the output directory in case it doesn't already exist + mkdir -p ${tuned_checkpoint}/dlrm_inference_accuracy_log + + LOG=${tuned_checkpoint}/dlrm_inference_accuracy_log + CORES=`lscpu | grep Core | awk '{print $4}'` + ARGS="" + + $numa_cmd python -u $MODEL_SCRIPT \ + --raw-data-file=${dataset_location}/day --processed-data-file=${dataset_location}/terabyte_processed.npz \ + --data-set=terabyte \ + --memory-map --mlperf-bin-loader --round-targets=True --learning-rate=1.0 \ + --arch-mlp-bot=13-512-256-128 --arch-mlp-top=1024-1024-512-256-1 \ + --arch-sparse-feature-size=128 --max-ind-range=40000000 \ + --numpy-rand-seed=727 
--inference-only --ipex-interaction \ + --print-freq=100 --print-time --mini-batch-size=2048 --test-mini-batch-size=16384 \ + --test-freq=2048 --print-auc --tune --save-model=${tuned_checkpoint} $ARGS \ + --load-model=${input_model} --num-cpu-cores=${CORES} | tee $LOG +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/README.md b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/README.md new file mode 100644 index 00000000000..057b3559756 --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/README.md @@ -0,0 +1,109 @@ +Step-by-Step +============ + +This document lists the steps to reproduce the GraphSAGE model tuning results. This example can run on Intel CPUs and GPUs. + +# Prerequisite + + +## 1. Environment +Python 3.6 or a higher version is recommended. + +### Install Intel® Neural Compressor +```shell +pip install neural-compressor +``` + +### Install Intel Tensorflow +```shell +pip install intel-tensorflow +``` +> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). + +### Install Dependency Packages +```shell +cd examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow + +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is currently experimental. It's not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` + +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare Model +Download Frozen graph: +```shell +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/2_12_0/graphsage_frozen_model.pb +``` + +## 3. Prepare Dataset + +```shell +wget https://snap.stanford.edu/graphsage/ppi.zip +unzip ppi.zip +``` + +# Run + +## 1. Quantization + + ```shell + # The command to run quantization for graphsage + bash run_quant.sh --input_model=./graphsage_frozen_model.pb --output_model=./nc_graphsage_int8_model.pb --dataset_location=./ppi + ``` + +## 2. Benchmark + ```shell + bash run_benchmark.sh --input_model=./nc_graphsage_int8_model.pb --dataset_location=./ppi --mode=performance + ``` + +Details of enabling Intel® Neural Compressor on graphsage for Tensorflow. +========================= + +This is a tutorial of how to enable the graphsage model with Intel® Neural Compressor. +## User Code Analysis +The user specifies the fp32 *model*, a calibration dataset *calib_dataloader* and a custom *eval_func* which encapsulates the evaluation dataset and metric by itself. + +For graphsage, we applied this approach because our philosophy is to enable the model with minimal changes. Hence we need to make two changes to the original code.
The first is to implement the *calib_dataloader* for calibration and the second is to make the necessary changes to the *eval_func*. + +### Code update + +After the prepare step is done, we just need to update main.py as below. +```python + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + from neural_compressor.tensorflow.utils import BaseDataLoader + + dataset = CustomDataset() + calib_dataloader = BaseDataLoader(dataset=dataset, batch_size=1, collate_fn=collate_function) + quant_config = StaticQuantConfig() + q_model = quantize_model(args.input_graph, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_graph) + elif args.mode == 'accuracy': + acc_result = evaluate(args.input_graph) + print("Batch size = %d" % args.batch_size) + print("Accuracy: %.5f" % acc_result) + +``` + +The `quantize_model()` API returns the quantized model, which is then saved to the specified output path. diff --git a/examples/3.x_api/pytorch/recommendation/dlrm/static_quant/ipex/dataloader.py b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/dataloader.py new file mode 100644 index 00000000000..e2a1d28d7d7 --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/dataloader.py @@ -0,0 +1,80 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import numpy as np +import random +import json +import sys +import os + +import networkx as nx +from networkx.readwrite import json_graph + + +def load_data(prefix, normalize=True, load_walks=False): + G_data = json.load(open(prefix + "-G.json")) + G = json_graph.node_link_graph(G_data) + if isinstance(list(G.nodes())[0], int): + conversion = lambda n : int(n) + else: + conversion = lambda n : n + + if os.path.exists(prefix + "-feats.npy"): + feats = np.load(prefix + "-feats.npy") + else: + print("No features present.. Only identity features will be used.") + feats = None + id_map = json.load(open(prefix + "-id_map.json")) + id_map = {conversion(k):int(v) for k,v in id_map.items()} + walks = [] + class_map = json.load(open(prefix + "-class_map.json")) + if isinstance(list(class_map.values())[0], list): + lab_conversion = lambda n : n + else: + lab_conversion = lambda n : int(n) + + class_map = {conversion(k):lab_conversion(v) for k,v in class_map.items()} + + ## Remove all nodes that do not have val/test annotations + ## (necessary because of networkx weirdness with the Reddit data) + broken_count = 0 + for node in G.nodes(): + if not 'val' in G.nodes[node] or not 'test' in G.nodes[node]: + G.remove_node(node) + broken_count += 1 + print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count)) + + ## Make sure the graph has edge train_removed annotations + ## (some datasets might already have this..) + print("Loaded data..
now preprocessing..") + for edge in G.edges(): + if (G.nodes[edge[0]]['val'] or G.nodes[edge[1]]['val'] or + G.nodes[edge[0]]['test'] or G.nodes[edge[1]]['test']): + G[edge[0]][edge[1]]['train_removed'] = True + else: + G[edge[0]][edge[1]]['train_removed'] = False + + if normalize and not feats is None: + from sklearn.preprocessing import StandardScaler + train_ids = np.array([id_map[n] for n in G.nodes() if not G.nodes[n]['val'] and not G.nodes[n]['test']]) + train_feats = feats[train_ids] + scaler = StandardScaler() + scaler.fit(train_feats) + feats = scaler.transform(feats) + + return G, feats, id_map, walks, class_map diff --git a/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/main.py b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/main.py new file mode 100644 index 00000000000..87837510d3d --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/main.py @@ -0,0 +1,189 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import time +import utils +import dataloader +import numpy as np +import tensorflow as tf + +from tensorflow.python.platform import tf_logging +from tensorflow.core.protobuf import rewriter_config_pb2 + +from argparse import ArgumentParser + +np.random.seed(123) + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +arg_parser = ArgumentParser(description='Parse args') +arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') +arg_parser.add_argument("--output-graph", + help='Specify tune result model save dir', + dest='output_graph') +arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') +arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') +arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') +arg_parser.add_argument('--dataset_location', dest='dataset_location', + help='location of calibration dataset and evaluate dataset') +arg_parser.add_argument('-e', "--num-inter-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + +arg_parser.add_argument('-a', "--num-intra-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) +arg_parser.add_argument('--batch_size', type=int, default=1000, dest='batch_size', help='batch_size of benchmark') +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='interations') +args = arg_parser.parse_args() + +def prepare_Dataset(): + data_location = args.dataset_location + pretrained_model = args.input_graph + data = dataloader.load_data(prefix=data_location+'/ppi') + G = data[0] + features = data[1] + id_map = data[2] + class_map = data[4] + if isinstance(list(class_map.values())[0], list): + num_classes = len(list(class_map.values())[0]) + else: + 
num_classes = len(set(class_map.values())) + + context_pairs = data[3] + placeholders = utils.construct_placeholders(num_classes) + minibatch = utils.NodeMinibatchIterator(G, + id_map, + placeholders, + class_map, + num_classes, + batch_size=args.batch_size, + context_pairs = context_pairs) + return minibatch + +class CustomDataset(object): + def __init__(self): + self.batch1 = [] + self.batch_labels = [] + minibatch = prepare_Dataset() + self.parse_minibatch(minibatch) + + def parse_minibatch(self, minibatch): + iter_num = 0 + finished = False + while not finished: + feed_dict_val, batch_labels, finished, _ = minibatch.incremental_node_val_feed_dict(args.batch_size, iter_num, test=True) + self.batch1.append(feed_dict_val['batch1:0']) + self.batch_labels.append(batch_labels) + iter_num += 1 + + def __getitem__(self, index): + return (self.batch1[index], len(self.batch1[index])), self.batch_labels[index] + + def __len__(self): + return len(self.batch1) + +def evaluate(model): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow import Model + model = Model(model) + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + minibatch = prepare_Dataset() + if args.benchmark and args.mode == 'performance': + iteration = args.iters + + #output_tensor = model.sess.graph.get_tensor_by_name('Sigmoid:0') + def eval_func(size, output_tensor, minibatch, test): + t_test = time.time() + val_losses = [] + val_preds = [] + labels = [] + iter_num = 0 + finished = False + total_time = 0 + while not finished: + feed_dict_val, batch_labels, finished, _ = minibatch.incremental_node_val_feed_dict(size, iter_num, test=True) + tf_logging.warn('\n---> Start iteration {0}'.format(str(iter_num))) + start_time = time.time() + node_outs_val = model.sess.run([output_tensor],feed_dict=feed_dict_val) + time_consume = time.time() - start_time + val_preds.append(node_outs_val[0].astype(float)) + labels.append(batch_labels) + iter_num += 1 + total_time += time_consume + if iteration != -1 and iter_num >= iteration: + break + tf_logging.warn('\n---> Stop iteration {0}'.format(str(iter_num))) + val_preds = np.vstack(val_preds) + labels = np.vstack(labels) + f1_scores = utils.calc_f1(labels, val_preds) + time_average = total_time / iter_num + return f1_scores, (time.time() - t_test)/iter_num, time_average + + test_f1_micro, duration, time_average = eval_func(args.batch_size, output_tensor, minibatch, test=True) + if args.benchmark and args.mode == 'performance': + latency = time_average / args.batch_size + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + return test_f1_micro + +def collate_function(batch): + return (batch[0][0][0], batch[0][0][1]), batch[0][1] + +class eval_graphsage_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph.""" + + def run(self): + """This is neural_compressor function include tuning, export and benchmark option.""" + from neural_compressor.common import set_random_seed + set_random_seed(9527) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + from neural_compressor.tensorflow.utils import BaseDataLoader + + dataset = CustomDataset() + calib_dataloader = BaseDataLoader(dataset=dataset, batch_size=1, collate_fn=collate_function) + quant_config = StaticQuantConfig() + q_model = quantize_model(args.input_graph, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_graph) + elif args.mode == 'accuracy': + acc_result = evaluate(args.input_graph) + print("Batch size = %d" % args.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + evaluate_opt_graph = eval_graphsage_optimized_graph() + evaluate_opt_graph.run() diff --git a/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..a6c2afe448c --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +networkx +scikit-learn \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..89c7cc19b6e --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=1000 + iters=100 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location "${dataset_location}" \ + --batch_size ${batch_size} \ + --iters ${iters} \ + --benchmark +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..f7046cc3df7 --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/run_quant.sh @@ -0,0 +1,41 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + 
--input-graph "${input_model}" \ + --output-graph "${output_model}" \ + --dataset_location "${dataset_location}" \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/utils.py b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/utils.py new file mode 100644 index 00000000000..babe7146f5c --- /dev/null +++ b/examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/utils.py @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import numpy as np +import random +import json +import sys +import os +import json +import networkx as nx +from networkx.readwrite import json_graph +import tensorflow as tf +from sklearn import metrics + +def calc_f1(y_true, y_pred): + y_pred[y_pred > 0.5] = 1 + y_pred[y_pred <= 0.5] = 0 + return metrics.f1_score(y_true, y_pred, average="micro") + +def construct_placeholders(num_classes): + # Define placeholders + tf.compat.v1.disable_eager_execution() + placeholders = { + 'labels' : tf.compat.v1.placeholder(tf.float32, shape=(None, num_classes), name='labels'), + 'batch' : tf.compat.v1.placeholder(tf.int32, shape=(None), name='batch1'), + 'batch_size' : tf.compat.v1.placeholder(tf.int32, name='batch_size'), + } + return placeholders + + +class NodeMinibatchIterator(object): + + """ + This minibatch iterator iterates over nodes for supervised learning. 
+ + G -- networkx graph + id2idx -- dict mapping node ids to integer values indexing feature tensor + placeholders -- standard tensorflow placeholders object for feeding + label_map -- map from node ids to class values (integer or list) + num_classes -- number of output classes + batch_size -- size of the minibatches + max_degree -- maximum size of the downsampled adjacency lists + """ + # (G, + # id_map, + # placeholders, + # class_map, + # num_classes, + # batch_size=FLAGS.batch_size, + # max_degree=FLAGS.max_degree, + # context_pairs = context_pairs) + def __init__(self, G, id2idx, + placeholders, label_map, num_classes, + batch_size=100, max_degree=25, + **kwargs): + + self.G = G + self.nodes = G.nodes() + self.id2idx = id2idx + self.placeholders = placeholders + self.batch_size = batch_size + self.max_degree = max_degree + self.batch_num = 0 + self.label_map = label_map + self.num_classes = num_classes + self.test_nodes = [n for n in self.G.nodes() if self.G.nodes[n]['test']] + + def _make_label_vec(self, node): + label = self.label_map[node] + if isinstance(label, list): + label_vec = np.array(label) + else: + label_vec = np.zeros((self.num_classes)) + class_ind = self.label_map[node] + label_vec[class_ind] = 1 + return label_vec + def batch_feed_dict(self, batch_nodes, val=False): + batch1id = batch_nodes + batch1 = [self.id2idx[n] for n in batch1id] + + labels = np.vstack([self._make_label_vec(node) for node in batch1id]) + feed_dict = dict() + feed_dict.update({'batch1:0': batch1}) + feed_dict.update({'batch_size:0' : len(batch1)}) + return feed_dict, labels + + + def incremental_node_val_feed_dict(self, size, iter_num, test=False): + if test: + val_nodes = self.test_nodes + else: + val_nodes = self.val_nodes + val_node_subset = val_nodes[iter_num*size:min((iter_num+1)*size, + len(val_nodes))] + + # add a dummy neighbor + ret_val = self.batch_feed_dict(val_node_subset) + return ret_val[0], ret_val[1], (iter_num+1)*size >= len(val_nodes), val_node_subset diff --git a/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/README.md b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/README.md new file mode 100644 index 00000000000..34eb64fcf74 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/README.md @@ -0,0 +1,75 @@ +Step-by-Step +============ + +This document list steps of reproducing inception_v3 model tuning and benchmark results via Neural Compressor. +This example can run on Intel CPUs and GPUs. + +> **Note**: +> The models is supported in validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). +# Prerequisite + +## 1. Environment + +### Installation +Recommend python 3.9 or higher version. +```shell +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. 
+ +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare pre-trained model + + Download the pre-trained PB: + ```shell + wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/inceptionv3_fp32_pretrained_model.pb + ``` + +## 3. Prepare Dataset + + The TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format. + We also prepared related scripts in the `examples/3.x_api/tensorflow/cv` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of the image, the commands below can be used to convert it to the TF records format. + + ```shell + cd examples/3.x_api/tensorflow/cv + # convert validation subset + bash prepare_dataset.sh --output_dir=./inception_v3/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation + # convert train subset + bash prepare_dataset.sh --output_dir=./inception_v3/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train + ``` +> **Note**: +> The raw ImageNet dataset, which resides in JPEG files, should be in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run + +## 1 Quantization + + ```shell + bash run_quant.sh --input_model=/PATH/TO/inceptionv3_fp32_pretrained_model.pb \ + --output_model=./nc_inception_v3.pb --dataset_location=/path/to/ImageNet/ + ``` + +## 2. Benchmark + ```shell + bash run_benchmark.sh --input_model=./nc_inception_v3.pb --mode=accuracy --dataset_location=/path/to/ImageNet/ --batch_size=32 + bash run_benchmark.sh --input_model=./nc_inception_v3.pb --mode=performance --dataset_location=/path/to/ImageNet/ --batch_size=1 + ``` diff --git a/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/data_process.py new file mode 100644 index 00000000000..ecfca2348cd --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/data_process.py @@ -0,0 +1,511 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class BilinearImagenetTransform(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
+ + Args: + height: Height of the result + width:Width of the result + central_fraction(float, default=0.875):fraction of size to crop + mean_value(list, default=[0.0,0.0,0.0]):means for each channel + scale(float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__(self, height, width, central_fraction=0.875, mean_value=[0.0, 0.0, 0.0], scale=1.0): + """Initialize `BilinearImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.central_fraction = central_fraction + + # sample is (images, labels) + def __call__(self, sample): + """Convert `BilinearImagenetTransform` feature.""" + image, label = sample + if image.dtype is not tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image containing 87.5% area of the original image. + if self.central_fraction: + image = tf.image.central_crop(image, central_fraction=self.central_fraction) + + if self.height and self.width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [self.height, self.width], method=tf.image.ResizeMethod.BILINEAR) + image = tf.squeeze(image, [0]) + + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ShiftRescale(object): + """Label shift by 1 and rescale. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + label -= 1 + image = (image - 127.5) / 127.5 + return (image, label) + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. 
+ """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. 
+ """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. + """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/main.py b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/main.py new file 
mode 100644 index 00000000000..9b0f737b619 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/main.py @@ -0,0 +1,144 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import time + +import tensorflow as tf +import numpy as np + +from argparse import ArgumentParser +from data_process import ( + ImageRecordDataset, + ComposeTransform, + BilinearImagenetTransform, + TFDataLoader, + TopKMetric, +) + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +arg_parser = ArgumentParser(description='Parse args') +arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') +arg_parser.add_argument("--output-graph", + help='Specify tune result model save dir', + dest='output_graph') +arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') +arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') +arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') +arg_parser.add_argument('--dataset_location', dest='dataset_location', + help='location of calibration dataset and evaluate dataset') +arg_parser.add_argument('--batch_size', type=int, default=32, dest='batch_size', help='batch_size of benchmark') +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='interations') +args = arg_parser.parse_args() + +def evaluate(model, eval_dataloader, metric, postprocess=None): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow import Model + model = Model(model) + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list).mean() / args.batch_size + return latency + + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + acc = metric.result() + return acc + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph.""" + + def run(self): + """This is neural_compressor function include tuning, export and benchmark option.""" + from neural_compressor import set_random_seed + set_random_seed(9527) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + BilinearImagenetTransform(height=299, width=299), + ] + ) + ) + calib_dataloader = TFDataLoader(dataset=dataset) + + # maybe we need to exclud bf16 + quant_config = StaticQuantConfig() + conv_config = StaticQuantConfig(weight_dtype="fp32", act_dtype="fp32") + quant_config.set_local("v0/cg/conv0/conv2d/Conv2D", conv_config) + q_model = quantize_model(args.input_graph, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + BilinearImagenetTransform(height=299, width=299), + ] + ) + ) + dataloader = TFDataLoader(dataset=dataset, batch_size=args.batch_size) + def eval(model): + top1 = TopKMetric(k=1) + return evaluate(model, dataloader, top1) + + if args.mode == 'performance': + eval(args.input_graph) + elif args.mode == 'accuracy': + acc_result = eval(args.input_graph) + print("Batch size = %d" % dataloader.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() diff --git a/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..2755e1a41ac --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow +neural-compressor diff --git a/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..8ecac837cf7 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location ${dataset_location} \ + --batch_size ${batch_size} \ + --benchmark \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..6a9e1b859c9 --- /dev/null +++ 
b/examples/3.x_api/tensorflow/image_recognition/inception_v3/quantization/ptq/run_quant.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph ${input_model} \ + --output-graph ${output_model} \ + --dataset_location ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/README.md b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/README.md new file mode 100644 index 00000000000..25755074a06 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/README.md @@ -0,0 +1,108 @@ +Step-by-Step +============ + +This document list steps of reproducing mobilenet_v2 model tuning and benchmark results via Neural Compressor. +This example can run on Intel CPUs and GPUs. + +> **Note**: +> The model is supported in validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). +# Prerequisite + +## 1. Environment + +### Installation +Recommend python 3.9 or higher version. +```shell +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare pre-trained model + +The mobilenet_v2 checkpoint file comes from [models](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models). +We can get the pb file by convert the checkpoint file. + + 1. Download the checkpoint file from [here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) + ```shell + wget https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz + tar -xvf mobilenet_v2_1.4_224.tgz + ``` + + 2. Exporting the Inference Graph + ```shell + git clone https://github.com/tensorflow/models + cd models/research/slim + python export_inference_graph.py \ + --alsologtostderr \ + --model_name=mobilenet_v2 \ + --output_file=/tmp/mobilenet_v2_inf_graph.pb + ``` + Make sure to use intel-tensorflow v1.15, and pip install tf_slim. 
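+  For example, assuming a Python 3.7 environment, the prerequisites for this export step could be installed as follows (pick the wheel matching your Python version from the links in the next section):
+  ```shell
+  pip install tf_slim
+  pip install https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl
+  ```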
+  #### Install Intel Tensorflow 1.15 up2
+  Check your Python version and install the 1.15.0 up2 wheel from the links below:
+  https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp36-cp36m-manylinux2010_x86_64.whl
+  https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl
+  https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp35-cp35m-manylinux2010_x86_64.whl
+  > Please note: The ImageNet dataset has 1001 classes, while the **VGG** and **ResNet V1** final layers have only 1000 outputs rather than 1001. So you need to add the `--labels_offset=1` flag in the inference graph exporting command for those models.
+
+ 3. Use [Netron](https://lutzroeder.github.io/netron/) to get the input/output layer names of the inference graph pb; for mobilenet_v2 the output layer name is `MobilenetV2/Predictions/Reshape_1`.
+
+ 4. To freeze the exported graph, please use the tool `freeze_graph.py` from the [tensorflow v1.15.2](https://github.com/tensorflow/tensorflow/blob/v1.15.2/tensorflow/python/tools/freeze_graph.py) repo
+  ```shell
+  python freeze_graph.py \
+  --input_graph=/tmp/mobilenet_v2_inf_graph.pb \
+  --input_checkpoint=./mobilenet_v2.ckpt \
+  --input_binary=true \
+  --output_graph=./frozen_mobilenet_v2.pb \
+  --output_node_names=MobilenetV2/Predictions/Reshape_1
+  ```
+
+## 3. Prepare Dataset
+
+  TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format.
+  We also prepared related scripts in the `examples/3.x_api/tensorflow/cv` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of the image, you can use the command below to convert it to the TF records format.
+
+  ```shell
+  cd examples/3.x_api/tensorflow/cv
+  # convert validation subset
+  bash prepare_dataset.sh --output_dir=./mobilenet_v2/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation
+  # convert train subset
+  bash prepare_dataset.sh --output_dir=./mobilenet_v2/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train
+  ```
+> **Note**:
+> The raw ImageNet dataset, which resides in JPEG files, should be organized in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run + +## 1 Quantization + + ```shell + bash run_quant.sh --input_model=/PATH/TO/frozen_mobilenet_v2.pb \ + --output_model=./nc_mobilenet_v2.pb --dataset_location=/path/to/ImageNet/ + ``` + +## 2. Benchmark + ```shell + bash run_benchmark.sh --input_model=./nc_mobilenet_v2.pb --mode=accuracy --dataset_location=/path/to/ImageNet/ --batch_size=32 + bash run_benchmark.sh --input_model=./nc_mobilenet_v2.pb --mode=performance --dataset_location=/path/to/ImageNet/ --batch_size=1 + ``` diff --git a/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/data_process.py new file mode 100644 index 00000000000..ecfca2348cd --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/data_process.py @@ -0,0 +1,511 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class BilinearImagenetTransform(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
+ + Args: + height: Height of the result + width:Width of the result + central_fraction(float, default=0.875):fraction of size to crop + mean_value(list, default=[0.0,0.0,0.0]):means for each channel + scale(float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__(self, height, width, central_fraction=0.875, mean_value=[0.0, 0.0, 0.0], scale=1.0): + """Initialize `BilinearImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.central_fraction = central_fraction + + # sample is (images, labels) + def __call__(self, sample): + """Convert `BilinearImagenetTransform` feature.""" + image, label = sample + if image.dtype is not tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image containing 87.5% area of the original image. + if self.central_fraction: + image = tf.image.central_crop(image, central_fraction=self.central_fraction) + + if self.height and self.width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [self.height, self.width], method=tf.image.ResizeMethod.BILINEAR) + image = tf.squeeze(image, [0]) + + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ShiftRescale(object): + """Label shift by 1 and rescale. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + label -= 1 + image = (image - 127.5) / 127.5 + return (image, label) + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. 
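+
+    Example (an illustrative sketch matching the calibration setup in main.py;
+    the TFRecord root path is a placeholder):
+        dataset = ImageRecordDataset(
+            root="/path/to/ImageNet/",
+            transform=ComposeTransform(transform_list=[
+                BilinearImagenetTransform(height=224, width=224),
+            ]),
+        )
+        calib_dataloader = TFDataLoader(dataset=dataset, batch_size=10)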
+ """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. 
+ """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. + """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/main.py b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/main.py new file 
mode 100644 index 00000000000..fd3a07937de --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/main.py @@ -0,0 +1,142 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import time + +import tensorflow as tf +import numpy as np + +from argparse import ArgumentParser +from data_process import ( + ImageRecordDataset, + ComposeTransform, + BilinearImagenetTransform, + TFDataLoader, + TopKMetric, +) + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +arg_parser = ArgumentParser(description='Parse args') +arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') +arg_parser.add_argument("--output-graph", + help='Specify tune result model save dir', + dest='output_graph') +arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') +arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') +arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') +arg_parser.add_argument('--dataset_location', dest='dataset_location', + help='location of calibration dataset and evaluate dataset') +arg_parser.add_argument('--batch_size', type=int, default=32, dest='batch_size', help='batch_size of benchmark') +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='interations') +args = arg_parser.parse_args() + +def evaluate(model, eval_dataloader, metric, postprocess=None): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow import Model + model = Model(model) + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list).mean() / args.batch_size + return latency + + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + acc = metric.result() + return acc + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph.""" + + def run(self): + """This is neural_compressor function include tuning, export and benchmark option.""" + from neural_compressor.common import set_random_seed + set_random_seed(9527) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + BilinearImagenetTransform(height=224, width=224), + ] + ) + ) + calib_dataloader = TFDataLoader(dataset=dataset, batch_size=10) + + quant_config = StaticQuantConfig(weight_granularity="per_channel") + q_model = quantize_model(args.input_graph, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + BilinearImagenetTransform(height=224, width=224), + ] + ) + ) + dataloader = TFDataLoader(dataset=dataset, batch_size=args.batch_size) + + def eval(model): + top1 = TopKMetric(k=1) + return evaluate(model, dataloader, top1) + + if args.mode == 'performance': + eval(args.input_graph) + elif args.mode == 'accuracy': + acc_result = eval(args.input_graph) + print("Batch size = %d" % dataloader.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() diff --git a/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..2755e1a41ac --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow +neural-compressor diff --git a/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..8ecac837cf7 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location ${dataset_location} \ + --batch_size ${batch_size} \ + --benchmark \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..6a9e1b859c9 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/mobilenet_v2/quantization/ptq/run_quant.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# 
init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph ${input_model} \ + --output-graph ${output_model} \ + --dataset_location ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/prepare_dataset.sh b/examples/3.x_api/tensorflow/image_recognition/prepare_dataset.sh new file mode 100644 index 00000000000..4aad5d69a3f --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/prepare_dataset.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# set -x + +OUTPUT_DIR="./data" +SUBSET="validation" +SHARDS=1 + +help() +{ + cat <<- EOF + Desc: Convert prepared raw imagnet dataset to tfrecord + -h --help help info + --output_dir Output data directory + default: './data' + --raw_dir Raw data directory + --shards Number of shards in TFRecord files. + default: '1' + --subset Subset of imagenet, can be validation/train. + default: 'validation' +EOF + exit 0 +} + +function main { + init_params "$@" + convert_dataset +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --output_dir=*) + OUTPUT_DIR=$(echo $var |cut -f2 -d=) + ;; + --raw_dir=*) + RAW_DIR=$(echo $var |cut -f2 -d=) + ;; + --shards=*) + SHARDS=$(echo $var |cut -f2 -d=) + ;; + --subset=*) + SUBSET=$(echo $var |cut -f2 -d=) + ;; + -h|--help) help + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done +} + +# convert dataset +function convert_dataset { + if [ ! -d ${OUTPUT_DIR} ]; then + mkdir ${OUTPUT_DIR} + fi + python imagenet_prepare/build_imagenet_data.py \ + --imagenet_metadata_file "imagenet_prepare/imagenet_metadata.txt" \ + --labels_file "imagenet_prepare/imagenet_lsvrc_2015_synsets.txt" \ + --output_directory ${OUTPUT_DIR} \ + --subset ${SUBSET} \ + --raw_directory ${RAW_DIR} \ + --shards ${SHARDS} +} + +main "$@" + diff --git a/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/README.md b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/README.md new file mode 100644 index 00000000000..bc07e651f96 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/README.md @@ -0,0 +1,107 @@ +Step-by-Step +============ + +This document list steps of reproducing resnet_v2_50 model tuning and benchmark results via Neural Compressor. +This example can run on Intel CPUs and GPUs. + +> **Note**: +> The models is supported in validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). +# Prerequisite + +## 1. Environment + +### Installation +Recommend python 3.9 or higher version. +```shell +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. 
+ +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare pre-trained model +The resnet_v2_50 checkpoint file comes from [models](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models). +We can get the pb file by convert the checkpoint file. + + 1. Download the checkpoint file from [here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) + ```shell + wget http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz + tar -xvf resnet_v2_50_2017_04_14.tar.gz + ``` + + 2. Exporting the Inference Graph + ```shell + git clone https://github.com/tensorflow/models + cd models/research/slim + python export_inference_graph.py \ + --alsologtostderr \ + --model_name=resnet_v2_50 \ + --output_file=/tmp/resnet_v2_50_inf_graph.pb + ``` + Make sure to use intel-tensorflow v1.15, and pip install tf_slim. + #### Install Intel Tensorflow 1.15 up2 + Check your python version and use pip install 1.15.0 up2 from links below: + https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp36-cp36m-manylinux2010_x86_64.whl + https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl + https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp35-cp35m-manylinux2010_x86_64.whl + > Please note: The ImageNet dataset has 1001, the **VGG** and **ResNet V1** final layers have only 1000 outputs rather than 1001. So we need add the `--labels_offset=1` flag in the inference graph exporting command. + + 3. Use [Netron](https://lutzroeder.github.io/netron/) to get the input/output layer name of inference graph pb, for vgg_16 the output layer name is `resnet_v2_50/predictions/Reshape_1` + + 4. Freezing the exported Graph, please use the tool `freeze_graph.py` in [tensorflow v1.15.2](https://github.com/tensorflow/tensorflow/blob/v1.15.2/tensorflow/python/tools/freeze_graph.py) repo + ```shell + python freeze_graph.py \ + --input_graph=/tmp/resnet_v2_50_inf_graph.pb \ + --input_checkpoint=./resnet_v2_50.ckpt \ + --input_binary=true \ + --output_graph=./frozen_resnet_v2_50.pb \ + --output_node_names=resnet_v2_50/predictions/Reshape_1 + ``` + +## 3. Prepare Dataset + + TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format. + We also prepared related scripts in ` examples/3.x_api/tensorflow/cv` directory. To download the raw images, the user must create an account with image-net.org. 
If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of the image, you can use the command below to convert it to the TF records format.
+
+  ```shell
+  cd examples/3.x_api/tensorflow/cv
+  # convert validation subset
+  bash prepare_dataset.sh --output_dir=./resnet_v2_50/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation
+  # convert train subset
+  bash prepare_dataset.sh --output_dir=./resnet_v2_50/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train
+  ```
+> **Note**:
+> The raw ImageNet dataset, which resides in JPEG files, should be organized in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run + +## 1 Quantization + + ```shell + bash run_quant.sh --input_model=/PATH/TO/frozen_resnet_v2_50.pb \ + --output_model=./nc_resnet_v2_50.pb --dataset_location=/path/to/ImageNet/ + ``` + +## 2. Benchmark + ```shell + bash run_benchmark.sh --input_model=./nc_resnet_v2_50.pb --mode=accuracy --dataset_location=/path/to/ImageNet/ --batch_size=32 + bash run_benchmark.sh --input_model=./nc_resnet_v2_50.pb --mode=performance --dataset_location=/path/to/ImageNet/ --batch_size=1 + ``` diff --git a/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/data_process.py new file mode 100644 index 00000000000..ecfca2348cd --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/data_process.py @@ -0,0 +1,511 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class BilinearImagenetTransform(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
+ + Args: + height: Height of the result + width:Width of the result + central_fraction(float, default=0.875):fraction of size to crop + mean_value(list, default=[0.0,0.0,0.0]):means for each channel + scale(float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__(self, height, width, central_fraction=0.875, mean_value=[0.0, 0.0, 0.0], scale=1.0): + """Initialize `BilinearImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.central_fraction = central_fraction + + # sample is (images, labels) + def __call__(self, sample): + """Convert `BilinearImagenetTransform` feature.""" + image, label = sample + if image.dtype is not tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image containing 87.5% area of the original image. + if self.central_fraction: + image = tf.image.central_crop(image, central_fraction=self.central_fraction) + + if self.height and self.width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [self.height, self.width], method=tf.image.ResizeMethod.BILINEAR) + image = tf.squeeze(image, [0]) + + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ShiftRescale(object): + """Label shift by 1 and rescale. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + label -= 1 + image = (image - 127.5) / 127.5 + return (image, label) + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. 
+ """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. 
+ """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. + """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/main.py b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/main.py new file 
mode 100644 index 00000000000..bb82476fced --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/main.py @@ -0,0 +1,143 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import time + +import tensorflow as tf +import numpy as np + +from argparse import ArgumentParser +from data_process import ( + ImageRecordDataset, + ComposeTransform, + BilinearImagenetTransform, + TFDataLoader, + TopKMetric, +) + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +arg_parser = ArgumentParser(description='Parse args') +arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') +arg_parser.add_argument("--output-graph", + help='Specify tune result model save dir', + dest='output_graph') +arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') +arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') +arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') +arg_parser.add_argument('--diagnose', dest='diagnose', action='store_true', help='use Neural Insights to diagnose tuning and benchmark.') +arg_parser.add_argument('--dataset_location', dest='dataset_location', + help='location of calibration dataset and evaluate dataset') +arg_parser.add_argument('--batch_size', type=int, default=32, dest='batch_size', help='batch_size of benchmark') +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='interations') +args = arg_parser.parse_args() + +def evaluate(model, eval_dataloader, metric, postprocess=None): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. 
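+ + Note: + eval_dataloader yields (inputs, labels) batches and the metric is updated in place; when running with --benchmark --mode=performance, only args.iters batches are measured.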
+ """ + from neural_compressor.tensorflow import Model + model = Model(model) + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list).mean() / args.batch_size + return latency + + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. / latency)) + acc = metric.result() + return acc + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph.""" + + def run(self): + """This is neural_compressor function include tuning, export and benchmark option.""" + from neural_compressor.common import set_random_seed + set_random_seed(9527) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + BilinearImagenetTransform(height=224, width=224), + ] + ) + ) + calib_dataloader = TFDataLoader(dataset=dataset, batch_size=10) + + quant_config = StaticQuantConfig() + q_model = quantize_model(args.input_graph, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + BilinearImagenetTransform(height=224, width=224), + ] + ) + ) + dataloader = TFDataLoader(dataset=dataset, batch_size=args.batch_size) + + def eval(model): + top1 = TopKMetric(k=1) + return evaluate(model, dataloader, top1) + + if args.mode == 'performance': + eval(args.input_graph) + elif args.mode == 'accuracy': + acc_result = eval(args.input_graph) + print("Batch size = %d" % dataloader.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() diff --git a/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..2755e1a41ac --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow +neural-compressor diff --git a/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..8ecac837cf7 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + 
batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location ${dataset_location} \ + --batch_size ${batch_size} \ + --benchmark \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..6a9e1b859c9 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/resnet_v2_50/quantization/ptq/run_quant.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph ${input_model} \ + --output-graph ${output_model} \ + --dataset_location ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/README.md b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/README.md new file mode 100644 index 00000000000..00e00c7846d --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/README.md @@ -0,0 +1,108 @@ +Step-by-Step +============ + +This document list steps of reproducing vgg16 model tuning and benchmark results via Neural Compressor. +This example can run on Intel CPUs and GPUs. + +> **Note**: +> The model is supported in validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). +# Prerequisite + +## 1. Environment + +### Installation +Recommend python 3.9 or higher version. +```shell +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare pre-trained model + +The vgg16 checkpoint file comes from [models](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models). 
+We can get the pb file by converting the checkpoint file. + + 1. Download the checkpoint file from [here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) + ```shell + wget http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz + tar -xvf vgg_16_2016_08_28.tar.gz + ``` + + 2. Export the Inference Graph + ```shell + git clone https://github.com/tensorflow/models + cd models/research/slim + python export_inference_graph.py \ + --alsologtostderr \ + --model_name=vgg_16 \ + --output_file=/tmp/vgg_16_inf_graph.pb + ``` + Make sure to use intel-tensorflow v1.15, and `pip install tf_slim`. + #### Install Intel Tensorflow 1.15 up2 + Check your Python version and use pip to install 1.15.0 up2 from the links below: + https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp36-cp36m-manylinux2010_x86_64.whl + https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl + https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp35-cp35m-manylinux2010_x86_64.whl + > Please note: The ImageNet dataset has 1001 classes, while the **VGG** and **ResNet V1** final layers have only 1000 outputs rather than 1001. So we need to add the `--labels_offset=1` flag to the inference graph exporting command. + + 3. Use [Netron](https://lutzroeder.github.io/netron/) to get the input/output layer name of the inference graph pb; for vgg_16 the output layer name is `vgg_16/fc8/squeezed` + + 4. To freeze the exported graph, please use the tool `freeze_graph.py` from the [tensorflow v1.15.2](https://github.com/tensorflow/tensorflow/blob/v1.15.2/tensorflow/python/tools/freeze_graph.py) repo + ```shell + python freeze_graph.py \ + --input_graph=/tmp/vgg_16_inf_graph.pb \ + --input_checkpoint=./vgg_16.ckpt \ + --input_binary=true \ + --output_graph=./frozen_vgg16.pb \ + --output_node_names=vgg_16/fc8/squeezed + ``` + +## 3. Prepare Dataset + + TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format. + We also prepared related scripts in `examples/3.x_api/tensorflow/cv` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of the image, you can use the command below to convert it to the TF records format. + + ```shell + cd examples/3.x_api/tensorflow/cv + # convert validation subset + bash prepare_dataset.sh --output_dir=./vgg16/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation + # convert train subset + bash prepare_dataset.sh --output_dir=./vgg16/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train + ``` +> **Note**: +> The raw ImageNet dataset, which resides in JPEG files, should be arranged in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run + +## 1 Quantization + + ```shell + bash run_quant.sh --input_model=/PATH/TO/frozen_vgg16.pb \ + --output_model=./nc_vgg16.pb --dataset_location=/path/to/ImageNet/ + ``` + +## 2. Benchmark + ```shell + bash run_benchmark.sh --input_model=./nc_vgg16.pb --mode=accuracy --dataset_location=/path/to/ImageNet/ --batch_size=32 + bash run_benchmark.sh --input_model=./nc_vgg16.pb --mode=performance --dataset_location=/path/to/ImageNet/ --batch_size=1 + ``` diff --git a/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/data_process.py new file mode 100644 index 00000000000..17b4d9cec5e --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/data_process.py @@ -0,0 +1,581 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class ResizeCropImagenet(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
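+ + Each sample is resized so that its shorter side equals resize_side, center- or random-cropped to (height, width), optionally flipped horizontally, then mean-subtracted and multiplied by scale.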
+ + Args: + height (int): Height of the result + width (int): Width of the result + random_crop (bool, default=False): whether to random crop + resize_side (int, default=256):desired shape after resize operation + random_flip_left_right (bool, default=False): whether to random flip left and right + mean_value (list, default=[0.0,0.0,0.0]):means for each channel + scale (float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__( + self, + height, + width, + random_crop=False, + resize_side=256, + resize_method="bilinear", + random_flip_left_right=False, + mean_value=[0.0, 0.0, 0.0], + scale=1.0, + data_format="channels_last", + subpixels="RGB", + ): + """Initialize `TensorflowResizeCropImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.random_crop = random_crop + self.random_flip_left_right = random_flip_left_right + self.resize_side = resize_side + self.resize_method = resize_method + self.data_format = data_format + self.subpixels = subpixels + + # sample is (images, labels) + def __call__(self, sample): + """Convert `TensorflowResizeCropImagenetTransform` feature.""" + image, label = sample + shape = tf.shape(input=image) + + height = ( + tf.cast(shape[0], dtype=tf.float32) + if self.data_format == "channels_last" + else tf.cast(shape[1], dtype=tf.float32) + ) + width = ( + tf.cast(shape[1], dtype=tf.float32) + if self.data_format == "channels_last" + else tf.cast(shape[2], dtype=tf.float32) + ) + scale = tf.cond( + pred=tf.greater(height, width), + true_fn=lambda: self.resize_side / width, + false_fn=lambda: self.resize_side / height, + ) + + scale = tf.cast(scale, dtype=tf.float32) + new_height = tf.cast(tf.math.rint(height * scale), dtype=tf.int32) + new_width = tf.cast(tf.math.rint(width * scale), dtype=tf.int32) + + if self.subpixels == "BGR" and self.data_format == "channels_first": + # 'RGB'->'BGR' + image = tf.cond( + tf.equal(tf.rank(image), 3), + lambda: tf.experimental.numpy.moveaxis(image[::-1, ...], 0, -1), + lambda: tf.experimental.numpy.moveaxis(image[:, ::-1, ...], 1, -1), + ) + elif self.subpixels == "BGR": + # 'RGB'->'BGR' + image = image[..., ::-1] + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [new_height, new_width], method=self.resize_method) + image = tf.squeeze(image) + shape = tf.shape(input=image) + if self.random_crop: + y0 = tf.random.uniform(shape=[], minval=0, maxval=(shape[0] - self.height + 1), dtype=tf.dtypes.int32) + x0 = tf.random.uniform(shape=[], minval=0, maxval=(shape[1] - self.width + 1), dtype=tf.dtypes.int32) + else: + y0 = (shape[0] - self.height) // 2 + x0 = (shape[1] - self.width) // 2 + + image = tf.image.crop_to_bounding_box(image, y0, x0, self.height, self.width) + image.set_shape([self.height, self.width, 3]) + if self.random_flip_left_right: + image = tf.image.random_flip_left_right(image) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. 
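+ + Each transform in transform_list is applied in order to the (image, label) sample.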
+ + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class LabelShift(object): + """Convert label to label - label_shift. + + Args: + label_shift(int, default=0): number of label shift + + Returns: + tuple of processed image and label + """ + + def __init__(self, label_shift=0): + """Initialize `LabelShift` class.""" + self.label_shift = label_shift + + def __call__(self, sample): + """Convert label to label_shift.""" + images, labels = sample + if isinstance(labels, np.ndarray): + labels = labels - self.label_shift + elif isinstance(labels, list): + if isinstance(labels[0], tuple): + labels = [tuple(np.array(label) - self.label_shift) for label in labels] + elif isinstance(labels[0], np.ndarray): + labels = [label - self.label_shift for label in labels] + else: + labels = np.array(labels) - self.label_shift + labels = labels.tolist() + else: + labels = np.array(labels) - self.label_shift + return images, labels + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. + """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. 
+ """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. + """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. 
+ """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/main.py b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/main.py new file mode 100644 
index 00000000000..ffe960e1b1e --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/main.py @@ -0,0 +1,146 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import time + +import tensorflow as tf +import numpy as np + +from argparse import ArgumentParser +from data_process import ( + ImageRecordDataset, + ComposeTransform, + ResizeCropImagenet, + LabelShift, + TFDataLoader, + TopKMetric +) + + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +arg_parser = ArgumentParser(description='Parse args') +arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') +arg_parser.add_argument("--output-graph", + help='Specify tune result model save dir', + dest='output_graph') +arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') +arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') +arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') +arg_parser.add_argument('--dataset_location', dest='dataset_location', + help='location of calibration dataset and evaluate dataset') +arg_parser.add_argument('--batch_size', type=int, default=32, dest='batch_size', help='batch_size of benchmark') +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='interations') +args = arg_parser.parse_args() + +def evaluate(model, eval_dataloader, metric, postprocess=None): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow import Model + model = Model(model) + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + if postprocess: + predictions, labels = postprocess((predictions, labels)) + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list).mean() / args.batch_size + return latency + + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + acc = metric.result() + return acc + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph.""" + + def run(self): + """This is neural_compressor function include tuning, export and benchmark option.""" + from neural_compressor.common import set_random_seed + set_random_seed(9527) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + ResizeCropImagenet(height=224, width=224, mean_value=[123.68, 116.78, 103.94]), + ] + ) + ) + calib_dataloader = TFDataLoader(dataset=dataset, batch_size=10) + + quant_config = StaticQuantConfig() + q_model = quantize_model(args.input_graph, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + ResizeCropImagenet(height=224, width=224, mean_value=[123.68, 116.78, 103.94]), + ] + ) + ) + dataloader = TFDataLoader(dataset=dataset, batch_size=args.batch_size) + + def eval(model): + top1 = TopKMetric(k=1) + postprocess = LabelShift(label_shift=1) + return evaluate(model, dataloader, top1, postprocess) + + if args.mode == 'performance': + eval(args.input_graph) + elif args.mode == 'accuracy': + acc_result = eval(args.input_graph) + print("Batch size = %d" % dataloader.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() diff --git a/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..2755e1a41ac --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow +neural-compressor diff --git a/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..8ecac837cf7 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location ${dataset_location} \ + --batch_size ${batch_size} \ + --benchmark \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..6a9e1b859c9 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vgg16/quantization/ptq/run_quant.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + 
run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph ${input_model} \ + --output-graph ${output_model} \ + --dataset_location ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/README.md b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/README.md new file mode 100644 index 00000000000..0d4fa041690 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/README.md @@ -0,0 +1,79 @@ +Step-by-Step +============ + +This document lists the steps to reproduce Vision Transformer model tuning results via Neural Compressor. + +# Prerequisite + +## 1. Environment + +### Install Dependency Package + +``` +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare Pretrained model + +``` +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/2_11_0/HF-ViT-Base16-Img224-frozen.pb +``` + +## 3. Prepare Dataset + + TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format. + We also prepared related scripts in `examples/3.x_api/tensorflow/cv` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of the image, you can use the command below to convert it to the TF records format. + + ```shell + cd examples/3.x_api/tensorflow/cv + # convert validation subset + bash prepare_dataset.sh --output_dir=./vision_transformer/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation + # convert train subset + bash prepare_dataset.sh --output_dir=./vision_transformer/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train + ``` +> **Note**: +> The raw ImageNet dataset, which resides in JPEG files, should be arranged in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run + +## 1. Quantization + +```shell +bash run_quant.sh --input_model=./HF-ViT-Base16-Img224-frozen.pb --output_model=./output --dataset_location= +``` + + +## 2. Benchmark + +### Benchmark the fp32 model + +```shell +bash run_benchmark.sh --input_model=./HF-ViT-Base16-Img224-frozen.pb --mode=accuracy --dataset_location= --batch_size=32 +``` + +### Benchmark the int8 model + +```shell +bash run_benchmark.sh --input_model=./output.pb --mode=accuracy --dataset_location= --batch_size=32 --int8=true +``` \ No newline at end of file diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/model/.gitkeep b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/__init__.py similarity index 100% rename from neural_solution/examples/custom_models_optimized/tf_example1/model/.gitkeep rename to examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/__init__.py diff --git a/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/data_process.py new file mode 100644 index 00000000000..8d28e4a3e17 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/data_process.py @@ -0,0 +1,576 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class ResizeCropImagenet(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
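+ + Each sample is resized so that its shorter side equals resize_side, center- or random-cropped to (height, width), optionally flipped horizontally, then mean-subtracted and multiplied by scale.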
+ + Args: + height (int): Height of the result + width (int): Width of the result + random_crop (bool, default=False): whether to random crop + resize_side (int, default=256):desired shape after resize operation + random_flip_left_right (bool, default=False): whether to random flip left and right + mean_value (list, default=[0.0,0.0,0.0]):means for each channel + scale (float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__( + self, + height, + width, + random_crop=False, + resize_side=256, + resize_method="bilinear", + random_flip_left_right=False, + mean_value=[0.0, 0.0, 0.0], + scale=1.0, + data_format="channels_last", + subpixels="RGB", + ): + """Initialize `TensorflowResizeCropImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.random_crop = random_crop + self.random_flip_left_right = random_flip_left_right + self.resize_side = resize_side + self.resize_method = resize_method + self.data_format = data_format + self.subpixels = subpixels + + # sample is (images, labels) + def __call__(self, sample): + """Convert `TensorflowResizeCropImagenetTransform` feature.""" + image, label = sample + shape = tf.shape(input=image) + + height = ( + tf.cast(shape[0], dtype=tf.float32) + if self.data_format == "channels_last" + else tf.cast(shape[1], dtype=tf.float32) + ) + width = ( + tf.cast(shape[1], dtype=tf.float32) + if self.data_format == "channels_last" + else tf.cast(shape[2], dtype=tf.float32) + ) + scale = tf.cond( + pred=tf.greater(height, width), + true_fn=lambda: self.resize_side / width, + false_fn=lambda: self.resize_side / height, + ) + + scale = tf.cast(scale, dtype=tf.float32) + new_height = tf.cast(tf.math.rint(height * scale), dtype=tf.int32) + new_width = tf.cast(tf.math.rint(width * scale), dtype=tf.int32) + + if self.subpixels == "BGR" and self.data_format == "channels_first": + # 'RGB'->'BGR' + image = tf.cond( + tf.equal(tf.rank(image), 3), + lambda: tf.experimental.numpy.moveaxis(image[::-1, ...], 0, -1), + lambda: tf.experimental.numpy.moveaxis(image[:, ::-1, ...], 1, -1), + ) + elif self.subpixels == "BGR": + # 'RGB'->'BGR' + image = image[..., ::-1] + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [new_height, new_width], method=self.resize_method) + image = tf.squeeze(image) + shape = tf.shape(input=image) + if self.random_crop: + y0 = tf.random.uniform(shape=[], minval=0, maxval=(shape[0] - self.height + 1), dtype=tf.dtypes.int32) + x0 = tf.random.uniform(shape=[], minval=0, maxval=(shape[1] - self.width + 1), dtype=tf.dtypes.int32) + else: + y0 = (shape[0] - self.height) // 2 + x0 = (shape[1] - self.width) // 2 + + image = tf.image.crop_to_bounding_box(image, y0, x0, self.height, self.width) + image.set_shape([self.height, self.width, 3]) + if self.random_flip_left_right: + image = tf.image.random_flip_left_right(image) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class TransposeLastChannel(object): + """Transpose NHWC to NCHW. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + image = tf.transpose(image, perm=[2, 0, 1]) + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. 
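To make the preprocessing concrete, here is a minimal sketch (dummy data, not part of the scripts) that chains the two transforms above the way `main.py` later does, producing the 224x224 channels-first input:

```python
import tensorflow as tf

from data_process import ResizeCropImagenet, TransposeLastChannel

# Dummy stand-ins for a decoded (image, label) sample.
image = tf.zeros([300, 400, 3], dtype=tf.float32)
label = tf.constant([1], dtype=tf.int32)

# Scale the shorter side to ~256, center-crop to 224x224, then NHWC -> NCHW.
sample = ResizeCropImagenet(height=224, width=224)((image, label))
sample = TransposeLastChannel()(sample)

image, label = sample
print(image.shape)  # (3, 224, 224)
```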
+ + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ShiftRescale(object): + """Label shift by 1 and rescale. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + label -= 1 + image = (image - 127.5) / 127.5 + return (image, label) + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. + """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. 
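The dataset class above globs `*-*-of-*` shards under `root` and automatically prepends the TFRecord parser to whatever transform is supplied. A small usage sketch, with a placeholder data directory:

```python
from data_process import (
    ComposeTransform,
    ImageRecordDataset,
    ResizeCropImagenet,
    TransposeLastChannel,
)

DATA_DIR = "/path/to/imagenet_tfrecords"  # placeholder: holds validation-*-of-* files

dataset = ImageRecordDataset(
    root=DATA_DIR,
    transform=ComposeTransform(transform_list=[
        ResizeCropImagenet(height=224, width=224),
        TransposeLastChannel(),
    ]),
)

# ImageRecordDataset.__new__ returns a plain tf.data.Dataset, so it can be iterated directly.
for image, label in dataset.take(2):
    print(image.shape, label.numpy())
```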
+ + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. + """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. 
+ """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/main.py 
b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/main.py new file mode 100644 index 00000000000..92b2ea0fb2a --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/main.py @@ -0,0 +1,186 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import time + +import numpy as np +import tensorflow as tf + +from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference +from tensorflow.python.framework import dtypes +from tensorflow.core.protobuf import saved_model_pb2 + +from argparse import ArgumentParser +from data_process import ( + ImageRecordDataset, + ComposeTransform, + ResizeCropImagenet, + TransposeLastChannel, + ShiftRescale, + TFDataLoader, +) + + +INPUTS = 'inputs' +OUTPUTS = 'Identity' + +RESNET_IMAGE_SIZE = 224 +IMAGENET_VALIDATION_IMAGES = 50000 + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +arg_parser = ArgumentParser(description='Parse args') +arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') +arg_parser.add_argument("--output-graph", + help='Specify tune result model save dir', + dest='output_graph') +arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') +arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') +arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') +arg_parser.add_argument('--diagnose', dest='diagnose', action='store_true', help='use Neural Insights to diagnose tuning and benchmark.') +arg_parser.add_argument('--dataset_location', dest='dataset_location', + help='location of calibration dataset and evaluate dataset') +arg_parser.add_argument('--batch_size', type=int, default=32, dest='batch_size', help='batch_size of benchmark') +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='interations') +arg_parser.add_argument('--int8', dest='int8', action='store_true', help='whether to use int8 model for benchmark') +args = arg_parser.parse_args() + +def evaluate(model, eval_dataloader, preprocess=None): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. 
+ """ + from data_process import TopKMetric + from neural_compressor.tensorflow import Model + model = Model(model) + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + metric = TopKMetric() + if args.benchmark and args.mode == 'performance': + iteration = args.iters + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # shift the label and rescale the inputs + inputs, labels = preprocess((inputs, labels)) + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + if isinstance(predictions, list): + if len(model.output_tensor_names) == 1: + predictions = predictions[0] + elif len(model.output_tensor_names) > 1: + predictions = predictions[1] + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list).mean() / args.batch_size + return latency + + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. / latency)) + acc = metric.result() + return acc + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph.""" + + def run(self): + """This is neural_compressor function include tuning, export and benchmark option.""" + from neural_compressor.common import set_random_seed + set_random_seed(9527) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + ResizeCropImagenet(height=224, width=224), + TransposeLastChannel(), + ] + ) + ) + calib_dataloader = TFDataLoader(dataset=dataset, batch_size=10) + + quant_config = StaticQuantConfig() + matmul_config = StaticQuantConfig(weight_dtype="fp32", act_dtype="fp32") + conv_config = StaticQuantConfig(weight_dtype="fp32", act_dtype="fp32") + quant_config.set_local("StatefulPartitionedCall/vit/encoder/layer_._9/output/dense/Tensordot/MatMul", matmul_config) + quant_config.set_local("Conv2D", conv_config) + + sm = saved_model_pb2.SavedModel() + with tf.io.gfile.GFile(args.input_graph, "rb") as f: + sm.ParseFromString(f.read()) + graph_def = sm.meta_graphs[0].graph_def + + q_model = quantize_model(graph_def, quant_config, calib_dataloader) + q_model.save(args.output_graph) + + if args.benchmark: + dataset = ImageRecordDataset( + root=args.dataset_location, + transform=ComposeTransform(transform_list= [ + ResizeCropImagenet(height=224, width=224), + TransposeLastChannel(), + ] + ) + ) + dataloader = TFDataLoader(dataset=dataset, batch_size=args.batch_size) + + if args.int8 or args.input_graph.endswith("-tune.pb"): + input_graph = args.input_graph + else: + sm = saved_model_pb2.SavedModel() + with tf.io.gfile.GFile(args.input_graph, "rb") as f: + sm.ParseFromString(f.read()) + graph_def = sm.meta_graphs[0].graph_def + input_graph = graph_def + + def eval(model): + preprocess = ShiftRescale() + return evaluate(model, dataloader, preprocess) + + if args.mode == 'performance': + eval(input_graph) + elif args.mode == 'accuracy': + acc_result = 
eval(input_graph) + print("Batch size = %d" % dataloader.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..d86161032c2 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow +neural-compressor \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..2348865d66e --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/run_benchmark.sh @@ -0,0 +1,57 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + if [[ ${int8} == "true" ]]; then + extra_cmd=$extra_cmd" --int8" + fi + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location ${dataset_location} \ + --batch_size ${batch_size} \ + --benchmark \ + --iters ${iters} \ + ${extra_cmd} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..6a9e1b859c9 --- /dev/null +++ b/examples/3.x_api/tensorflow/image_recognition/vision_transformer/quantization/ptq/run_quant.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph ${input_model} \ + --output-graph ${output_model} \ + --dataset_location ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/build_imagenet_data.py b/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/build_imagenet_data.py new file mode 100644 index 00000000000..c52d2bd4218 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/build_imagenet_data.py @@ -0,0 +1,567 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
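The two scripts above simply forward their arguments to `main.py`. As a recap, the sketch below condenses its tuning path into one place; the model file and TFRecord directory are placeholders, and the op names passed to `set_local` are the ones hard-coded in `main.py`:

```python
import tensorflow as tf
from tensorflow.core.protobuf import saved_model_pb2

from neural_compressor.tensorflow import StaticQuantConfig, quantize_model
from data_process import (
    ComposeTransform,
    ImageRecordDataset,
    ResizeCropImagenet,
    TFDataLoader,
    TransposeLastChannel,
)

MODEL_PATH = "./HF-ViT-Base16-Img224-frozen.pb"  # placeholder
DATA_DIR = "/path/to/imagenet_tfrecords"         # placeholder

# Calibration data: the same preprocessing pipeline main.py builds.
dataset = ImageRecordDataset(
    root=DATA_DIR,
    transform=ComposeTransform(transform_list=[
        ResizeCropImagenet(height=224, width=224),
        TransposeLastChannel(),
    ]),
)
calib_dataloader = TFDataLoader(dataset=dataset, batch_size=10)

# Default static PTQ recipe, with two local overrides that keep a sensitive
# MatMul and all Conv2D ops in fp32.
quant_config = StaticQuantConfig()
matmul_config = StaticQuantConfig(weight_dtype="fp32", act_dtype="fp32")
conv_config = StaticQuantConfig(weight_dtype="fp32", act_dtype="fp32")
quant_config.set_local(
    "StatefulPartitionedCall/vit/encoder/layer_._9/output/dense/Tensordot/MatMul",
    matmul_config,
)
quant_config.set_local("Conv2D", conv_config)

# The frozen .pb stores a SavedModel proto; the GraphDef of its first meta
# graph is what gets quantized.
sm = saved_model_pb2.SavedModel()
with tf.io.gfile.GFile(MODEL_PATH, "rb") as f:
    sm.ParseFromString(f.read())

q_model = quantize_model(sm.meta_graphs[0].graph_def, quant_config, calib_dataloader)
q_model.save("./output")
```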
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Converts ImageNet data to TFRecords file format with Example protos. + +The raw ImageNet data set is expected to reside in JPEG files located in the +following directory structure. + + data_dir/n01440764/ILSVRC2012_val_00000293.JPEG + data_dir/n01440764/ILSVRC2012_val_00000543.JPEG + ... + +where 'n01440764' is the unique synset label associated with +these images. + +The training data set consists of 1000 sub-directories (i.e. labels) +each containing 1200 JPEG images for a total of 1.2M JPEG images. + +The evaluation data set consists of 1000 sub-directories (i.e. labels) +each containing 50 JPEG images for a total of 50K JPEG images. + +This TensorFlow script converts the training and evaluation data into +a sharded data set consisting of 1024 and 128 TFRecord files, respectively. + + train_directory/train-00000-of-01024 + train_directory/train-00001-of-01024 + ... + train_directory/train-00127-of-01024 + +and + + validation_directory/validation-00000-of-00128 + validation_directory/validation-00001-of-00128 + ... + validation_directory/validation-00127-of-00128 + +Each validation TFRecord file contains ~390 records. Each training TFREcord +file contains ~1250 records. Each record within the TFRecord file is a +serialized Example proto. The Example proto contains the following fields: + + image/encoded: string containing JPEG encoded image in RGB colorspace + image/height: integer, image height in pixels + image/width: integer, image width in pixels + image/colorspace: string, specifying the colorspace, always 'RGB' + image/channels: integer, specifying the number of channels, always 3 + image/format: string, specifying the format, always'JPEG' + + image/filename: string containing the basename of the image file + e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG' + image/class/label: integer specifying the index in a classification layer. + The label ranges from [1, 1000] where 0 is not used. + image/class/synset: string specifying the unique ID of the label, + e.g. 'n01440764' + image/class/text: string specifying the human-readable version of the label + e.g. 'red fox, Vulpes vulpes' + +Note that the length of xmin is identical to the length of xmax, ymin and ymax +for each example. + +Running this script using 16 threads may take around ~2.5 hours on a HP Z420. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from datetime import datetime +import os +import random +import sys +import threading + +import numpy as np +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf +tf.compat.v1.disable_eager_execution() + + +tf.compat.v1.app.flags.DEFINE_string('raw_directory', None, + 'Raw data directory') + +tf.compat.v1.app.flags.DEFINE_string('output_directory', None, + 'Output data directory') + +tf.compat.v1.app.flags.DEFINE_integer('shards', 1, + 'Number of shards in TFRecord files.') + +tf.compat.v1.app.flags.DEFINE_string('subset', 'validation', + 'Subset of imagenet, can be validation/train') + +tf.compat.v1.app.flags.DEFINE_integer('num_threads', 1, + 'Number of threads to preprocess the images.') + +# The labels file contains a list of valid labels are held in this file. +# Assumes that the file contains entries as such: +# n01440764 +# n01443537 +# n01484850 +# where each line corresponds to a label expressed as a synset. We map +# each synset contained in the file to an integer (based on the alphabetical +# ordering). See below for details. +tf.compat.v1.app.flags.DEFINE_string('labels_file', + 'imagenet_lsvrc_2015_synsets.txt', + 'Labels file') + +# This file containing mapping from synset to human-readable label. +# Assumes each line of the file looks like: +# +# n02119247 black fox +# n02119359 silver fox +# n02119477 red fox, Vulpes fulva +# +# where each line corresponds to a unique mapping. Note that each line is +# formatted as \t. +tf.compat.v1.app.flags.DEFINE_string('imagenet_metadata_file', + 'imagenet_metadata.txt', + 'ImageNet metadata file') + +FLAGS = tf.compat.v1.app.flags.FLAGS + + +def _int64_feature(value): + """Wrapper for inserting int64 features into Example proto.""" + if not isinstance(value, list): + value = [value] + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + + +def _float_feature(value): + """Wrapper for inserting float features into Example proto.""" + if not isinstance(value, list): + value = [value] + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + + +def _bytes_feature(value): + """Wrapper for inserting bytes features into Example proto.""" + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + + +def _convert_to_example(filename, image_buffer, label, synset, human, + height, width): + """Build an Example proto for an example. 
+ + Args: + filename: string, path to an image file, e.g., '/path/to/example.JPG' + image_buffer: string, JPEG encoding of RGB image + label: integer, identifier for the ground truth for the network + synset: string, unique WordNet ID specifying the label, e.g., 'n02323233' + human: string, human-readable label, e.g., 'red fox, Vulpes vulpes' + height: integer, image height in pixels + width: integer, image width in pixels + Returns: + Example proto + """ + + colorspace = b'RGB' + channels = 3 + image_format = b'JPEG' + + example = tf.train.Example(features=tf.train.Features(feature={ + 'image/height': _int64_feature(height), + 'image/width': _int64_feature(width), + 'image/colorspace': _bytes_feature(colorspace), + 'image/channels': _int64_feature(channels), + 'image/class/label': _int64_feature(label), + 'image/class/synset': _bytes_feature(bytes(synset,'utf-8')), + 'image/class/text': _bytes_feature(bytes(human,'utf-8')), + 'image/format': _bytes_feature(image_format), + 'image/filename': _bytes_feature(bytes(os.path.basename(filename),'utf-8')), + 'image/encoded': _bytes_feature(image_buffer)})) + return example + + +class ImageCoder(object): + """Helper class that provides TensorFlow image coding utilities.""" + + def __init__(self): + # Create a single Session to run all image coding calls. + self._sess = tf.compat.v1.Session() + + # Initializes function that converts PNG to JPEG data. + self._png_data = tf.compat.v1.placeholder(dtype=tf.string) + image = tf.image.decode_png(self._png_data, channels=3) + self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that converts CMYK JPEG data to RGB JPEG data. + self._cmyk_data = tf.compat.v1.placeholder(dtype=tf.string) + image = tf.image.decode_jpeg(self._cmyk_data, channels=0) + self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100) + + # Initializes function that decodes RGB JPEG data. + self._decode_jpeg_data = tf.compat.v1.placeholder(dtype=tf.string) + self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3) + + def png_to_jpeg(self, image_data): + return self._sess.run(self._png_to_jpeg, + feed_dict={self._png_data: image_data}) + + def cmyk_to_rgb(self, image_data): + return self._sess.run(self._cmyk_to_rgb, + feed_dict={self._cmyk_data: image_data}) + + def decode_jpeg(self, image_data): + image = self._sess.run(self._decode_jpeg, + feed_dict={self._decode_jpeg_data: image_data}) + assert len(image.shape) == 3 + assert image.shape[2] == 3 + return image + + +def _is_png(filename): + """Determine if a file contains a PNG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a PNG. + """ + # File list from: + # https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU + return 'n02105855_2933.JPEG' in filename + + +def _is_cmyk(filename): + """Determine if file contains a CMYK JPEG format image. + + Args: + filename: string, path of the image file. + + Returns: + boolean indicating if the image is a JPEG encoded with CMYK color space. 
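A brief usage sketch for `ImageCoder`; the JPEG path is a placeholder, and importing `build_imagenet_data` also disables eager execution, which these v1-session helpers expect:

```python
import tensorflow as tf

# Importing the module also runs tf.compat.v1.disable_eager_execution().
from build_imagenet_data import ImageCoder

IMAGE_PATH = "/PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG"  # placeholder

coder = ImageCoder()
image_data = tf.io.gfile.GFile(IMAGE_PATH, "rb").read()

image = coder.decode_jpeg(image_data)  # numpy array, shape (height, width, 3)
print(image.shape)
```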
+ """ + # File list from: + # https://github.com/cytsai/ilsvrc-cmyk-image-list + blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG', + 'n02447366_23489.JPEG', 'n02492035_15739.JPEG', + 'n02747177_10752.JPEG', 'n03018349_4028.JPEG', + 'n03062245_4620.JPEG', 'n03347037_9675.JPEG', + 'n03467068_12171.JPEG', 'n03529860_11437.JPEG', + 'n03544143_17228.JPEG', 'n03633091_5218.JPEG', + 'n03710637_5125.JPEG', 'n03961711_5286.JPEG', + 'n04033995_2932.JPEG', 'n04258138_17003.JPEG', + 'n04264628_27969.JPEG', 'n04336792_7448.JPEG', + 'n04371774_5854.JPEG', 'n04596742_4225.JPEG', + 'n07583066_647.JPEG', 'n13037406_4650.JPEG'] + return filename.split('/')[-1] in blacklist + + +def _process_image(filename, coder): + """Process a single image file. + + Args: + filename: string, path to an image file e.g., '/path/to/example.JPG'. + coder: instance of ImageCoder to provide TensorFlow image coding utils. + Returns: + image_buffer: string, JPEG encoding of RGB image. + height: integer, image height in pixels. + width: integer, image width in pixels. + """ + # Read the image file. + image_data = tf.io.gfile.GFile(filename, 'rb').read() + + # Clean the dirty data. + if _is_png(filename): + # 1 image is a PNG. + print('Converting PNG to JPEG for %s' % filename) + image_data = coder.png_to_jpeg(image_data) + elif _is_cmyk(filename): + # 22 JPEG images are in CMYK colorspace. + print('Converting CMYK to RGB for %s' % filename) + image_data = coder.cmyk_to_rgb(image_data) + + # Decode the RGB JPEG. + image = coder.decode_jpeg(image_data) + + # Check that image converted to RGB + assert len(image.shape) == 3 + height = image.shape[0] + width = image.shape[1] + assert image.shape[2] == 3 + + return image_data, height, width + + +def _process_image_files_batch(coder, thread_index, ranges, name, filenames, + synsets, labels, humans, num_shards): + """Processes and saves list of images as TFRecord in 1 thread. + + Args: + coder: instance of ImageCoder to provide TensorFlow image coding utils. + thread_index: integer, unique batch to run index is within [0, len(ranges)). + ranges: list of pairs of integers specifying ranges of each batches to + analyze in parallel. + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + synsets: list of strings; each string is a unique WordNet ID + labels: list of integer; each integer identifies the ground truth + humans: list of strings; each string is a human-readable label + num_shards: integer number of shards for this data set. + """ + # Each thread produces N shards where N = int(num_shards / num_threads). + # For instance, if num_shards = 128, and the num_threads = 2, then the first + # thread would produce shards [0, 64). + num_threads = len(ranges) + assert not num_shards % num_threads + num_shards_per_batch = int(num_shards / num_threads) + + shard_ranges = np.linspace(ranges[thread_index][0], + ranges[thread_index][1], + num_shards_per_batch + 1).astype(int) + num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0] + + counter = 0 + for s in xrange(num_shards_per_batch): + # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010' + shard = thread_index * num_shards_per_batch + s + output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards) + output_file = os.path.join(FLAGS.output_directory, output_filename) + writer = tf.io.TFRecordWriter(output_file) + + shard_counter = 0 + files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int) # HERE + for i in files_in_shard: + filename = filenames[i] + label = labels[i] + synset = synsets[i] + human = humans[i] + + image_buffer, height, width = _process_image(filename, coder) + + example = _convert_to_example(filename, image_buffer, label, synset, human, height, width) + writer.write(example.SerializeToString()) + shard_counter += 1 + counter += 1 + + if not counter % 1000: + print('%s [thread %d]: Processed %d of %d images in thread batch.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + writer.close() + print('%s [thread %d]: Wrote %d images to %s' % + (datetime.now(), thread_index, shard_counter, output_file)) + sys.stdout.flush() + shard_counter = 0 + print('%s [thread %d]: Wrote %d images to %d shards.' % + (datetime.now(), thread_index, counter, num_files_in_thread)) + sys.stdout.flush() + + +def _process_image_files(name, filenames, synsets, labels, humans, num_shards): + """Process and save list of images as TFRecord of Example protos. + + Args: + name: string, unique identifier specifying the data set + filenames: list of strings; each string is a path to an image file + synsets: list of strings; each string is a unique WordNet ID + labels: list of integer; each integer identifies the ground truth + humans: list of strings; each string is a human-readable label + num_shards: integer number of shards for this data set. + """ + assert len(filenames) == len(synsets) + assert len(filenames) == len(labels) + assert len(filenames) == len(humans) + + # Break all images into batches with a [ranges[i][0], ranges[i][1]]. + spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int) + ranges = [] + threads = [] + for i in xrange(len(spacing) - 1): + ranges.append([spacing[i], spacing[i+1]]) + + # Launch a thread for each batch. + print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)) + sys.stdout.flush() + + # Create a mechanism for monitoring when all threads are finished. + coord = tf.train.Coordinator() + + # Create a generic TensorFlow-based utility for converting all image codings. + coder = ImageCoder() + + threads = [] + for thread_index in xrange(len(ranges)): + args = (coder, thread_index, ranges, name, filenames, + synsets, labels, humans, num_shards) + t = threading.Thread(target=_process_image_files_batch, args=args) + t.start() + threads.append(t) + + # Wait for all the threads to terminate. + coord.join(threads) + print('%s: Finished writing all %d images in data set.' % + (datetime.now(), len(filenames))) + sys.stdout.flush() + + +def _find_image_files(data_dir, labels_file): + """Build a list of all images files and labels in the data set. + + Args: + data_dir: string, path to the root directory of images. + + Assumes that the ImageNet data set resides in JPEG files located in + the following directory structure. + + data_dir/n01440764/ILSVRC2012_val_00000293.JPEG + data_dir/n01440764/ILSVRC2012_val_00000543.JPEG + + where 'n01440764' is the unique synset label associated with these images. + + labels_file: string, path to the labels file. + + The list of valid labels are held in this file. 
Assumes that the file + contains entries as such: + n01440764 + n01443537 + n01484850 + where each line corresponds to a label expressed as a synset. We map + each synset contained in the file to an integer (based on the alphabetical + ordering) starting with the integer 1 corresponding to the synset + contained in the first line. + + The reason we start the integer labels at 1 is to reserve label 0 as an + unused background class. + + Returns: + filenames: list of strings; each string is a path to an image file. + synsets: list of strings; each string is a unique WordNet ID. + labels: list of integer; each integer identifies the ground truth. + """ + print('Determining list of input files and labels from %s.' % data_dir) + challenge_synsets = [l.strip() for l in + tf.compat.v1.gfile.FastGFile(labels_file, 'r').readlines()] + + labels = [] + filenames = [] + synsets = [] + + # Leave label index 0 empty as a background class. + label_index = 1 + + # Construct the list of JPEG files and labels. + for synset in challenge_synsets: + jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset) + matching_files = tf.io.gfile.glob(jpeg_file_path) + + labels.extend([label_index] * len(matching_files)) + synsets.extend([synset] * len(matching_files)) + filenames.extend(matching_files) + + if not label_index % 100: + print('Finished finding files in %d of %d classes.' % ( + label_index, len(challenge_synsets))) + label_index += 1 + + # Shuffle the ordering of all image files in order to guarantee + # random ordering of the images with respect to label in the + # saved TFRecord files. Make the randomization repeatable. + shuffled_index = range(len(filenames)) + random.seed(12345) + + random.shuffle(list(range(len(shuffled_index)))) + + filenames = [filenames[i] for i in shuffled_index] + synsets = [synsets[i] for i in shuffled_index] + labels = [labels[i] for i in shuffled_index] + + print('Found %d JPEG files across %d labels inside %s.' % + (len(filenames), len(challenge_synsets), data_dir)) + return filenames, synsets, labels + + +def _find_human_readable_labels(synsets, synset_to_human): + """Build a list of human-readable labels. + + Args: + synsets: list of strings; each string is a unique WordNet ID. + synset_to_human: dict of synset to human labels, e.g., + 'n02119022' --> 'red fox, Vulpes vulpes' + + Returns: + List of human-readable strings corresponding to each synset. + """ + humans = [] + for s in synsets: + assert s in synset_to_human, ('Failed to find: %s' % s) + humans.append(synset_to_human[s]) + return humans + + +def _process_dataset(name, directory, num_shards, synset_to_human): + """Process a complete data set and save it as a TFRecord. + + Args: + name: string, unique identifier specifying the data set. + directory: string, root path to the data set. + num_shards: integer number of shards for this data set. + synset_to_human: dict of synset to human labels, e.g., + 'n02119022' --> 'red fox, Vulpes vulpes' + """ + filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file) + humans = _find_human_readable_labels(synsets, synset_to_human) + + _process_image_files(name, filenames, synsets, labels, + humans, num_shards) + + +def _build_synset_lookup(imagenet_metadata_file): + """Build lookup for synset to human-readable label. + + Args: + imagenet_metadata_file: string, path to file containing mapping from + synset to human-readable label. 
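The shuffling step above is intended to apply one repeatable, seeded permutation to `filenames`, `synsets` and `labels` in lockstep so the triples stay aligned; a minimal standalone sketch of that pattern with toy lists:

```python
import random

filenames = ["a.JPEG", "b.JPEG", "c.JPEG", "d.JPEG"]
synsets = ["n01", "n01", "n02", "n02"]
labels = [1, 1, 2, 2]

# Build an index permutation, shuffle it once with a fixed seed, then apply
# the same permutation to every list so (filename, synset, label) stay paired.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)

filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]

print(list(zip(filenames, synsets, labels)))
```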
+ + Assumes each line of the file looks like: + + n02119247 black fox + n02119359 silver fox + n02119477 red fox, Vulpes fulva + + where each line corresponds to a unique mapping. Note that each line is + formatted as \t. + + Returns: + Dictionary of synset to human labels, such as: + 'n02119022' --> 'red fox, Vulpes vulpes' + """ + lines = tf.compat.v1.gfile.FastGFile(imagenet_metadata_file, 'r').readlines() + synset_to_human = {} + for l in lines: + if l: + parts = l.strip().split('\t') + assert len(parts) == 2 + synset = parts[0] + human = parts[1] + synset_to_human[synset] = human + return synset_to_human + + +def main(unused_argv): + assert not FLAGS.shards % FLAGS.num_threads, ( + 'Please make the FLAGS.num_threads commensurate with FLAGS.shards') + + print('Saving results to %s' % FLAGS.output_directory) + + # Build a map from synset to human-readable label. + synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file) + + if(FLAGS.raw_directory != None): + _process_dataset(FLAGS.subset, FLAGS.raw_directory,FLAGS.shards, synset_to_human) + +if __name__ == '__main__': + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/download_and_convert_imagenet.sh b/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/download_and_convert_imagenet.sh new file mode 100644 index 00000000000..f9baa85ab07 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/download_and_convert_imagenet.sh @@ -0,0 +1,100 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download and preprocess ImageNet Challenge 2012 +# training and validation data set. +# +# The final output of this script are sharded TFRecord files containing +# serialized Example protocol buffers. See build_imagenet_data.py for +# details of how the Example protocol buffers contain the ImageNet data. +# +# The final output of this script appears as such: +# +# data_dir/train-00000-of-01024 +# data_dir/train-00001-of-01024 +# ... +# data_dir/train-00127-of-01024 +# +# and +# +# data_dir/validation-00000-of-00128 +# data_dir/validation-00001-of-00128 +# ... +# data_dir/validation-00127-of-00128 +# +# Note that this script may take several hours to run to completion. The +# conversion of the ImageNet data to TFRecords alone takes 2-3 hours depending +# on the speed of your machine. Please be patient. +# +# **IMPORTANT** +# To download the raw images, the user must create an account with image-net.org +# and generate a username and access_key. The latter two are required for +# downloading the raw images. +# + +set -e + +if [ -z "$1" ]; then + echo "usage download_and_convert_imagenet.sh [data dir]" + exit +fi + +# Create the output and temporary directories. 
+DATA_DIR="${1%/}" +SCRATCH_DIR="${DATA_DIR}/raw-data/" +mkdir -p "${DATA_DIR}" +mkdir -p "${SCRATCH_DIR}" +WORK_DIR="$0.runfiles/__main__" + +# Download the ImageNet data. +LABELS_FILE="${WORK_DIR}/datasets/imagenet_lsvrc_2015_synsets.txt" +DOWNLOAD_SCRIPT="${WORK_DIR}/datasets/download_imagenet.sh" +"${DOWNLOAD_SCRIPT}" "${SCRATCH_DIR}" "${LABELS_FILE}" + +# Note the locations of the train and validation data. +TRAIN_DIRECTORY="${SCRATCH_DIR}train/" +VALIDATION_DIRECTORY="${SCRATCH_DIR}validation/" + +# Preprocess the validation data by moving the images into the appropriate +# sub-directory based on the label (synset) of the image. +echo "Organizing the validation data into sub-directories." +PREPROCESS_VAL_SCRIPT="${WORK_DIR}/datasets/preprocess_imagenet_validation_data.py" +VAL_LABELS_FILE="${WORK_DIR}/datasets/imagenet_2012_validation_synset_labels.txt" + +"${PREPROCESS_VAL_SCRIPT}" "${VALIDATION_DIRECTORY}" "${VAL_LABELS_FILE}" + +# Convert the XML files for bounding box annotations into a single CSV. +echo "Extracting bounding box information from XML." +BOUNDING_BOX_SCRIPT="${WORK_DIR}/datasets/process_bounding_boxes.py" +BOUNDING_BOX_FILE="${SCRATCH_DIR}/imagenet_2012_bounding_boxes.csv" +BOUNDING_BOX_DIR="${SCRATCH_DIR}bounding_boxes/" + +"${BOUNDING_BOX_SCRIPT}" "${BOUNDING_BOX_DIR}" "${LABELS_FILE}" \ + | sort >"${BOUNDING_BOX_FILE}" +echo "Finished downloading and preprocessing the ImageNet data." + +# Build the TFRecords version of the ImageNet data. +BUILD_SCRIPT="${WORK_DIR}/build_imagenet_data" +OUTPUT_DIRECTORY="${DATA_DIR}" +IMAGENET_METADATA_FILE="${WORK_DIR}/datasets/imagenet_metadata.txt" + +"${BUILD_SCRIPT}" \ + --train_directory="${TRAIN_DIRECTORY}" \ + --validation_directory="${VALIDATION_DIRECTORY}" \ + --output_directory="${OUTPUT_DIRECTORY}" \ + --imagenet_metadata_file="${IMAGENET_METADATA_FILE}" \ + --labels_file="${LABELS_FILE}" \ + --bounding_box_file="${BOUNDING_BOX_FILE}" diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/download_imagenet.sh b/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/download_imagenet.sh new file mode 100644 index 00000000000..c780e179f93 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/imagenet_prepare/download_imagenet.sh @@ -0,0 +1,99 @@ +#!/bin/bash +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Script to download ImageNet Challenge 2012 training and validation data set. +# +# Downloads and decompresses raw images and bounding boxes. +# +# **IMPORTANT** +# To download the raw images, the user must create an account with image-net.org +# and generate a username and access_key. The latter two are required for +# downloading the raw images. 
+# +# usage: +# ./download_imagenet.sh [dirname] +set -e + +if [ "x$IMAGENET_ACCESS_KEY" == x -o "x$IMAGENET_USERNAME" == x ]; then + cat < Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). + +## 2. Prepare Pretrained model + +The pretrained model is provided by [Keras Applications](https://keras.io/api/applications/). prepare the model, Run as follow: + ``` +python prepare_model.py --output_model=./inception_v3_keras + ``` +`--output_model ` the model should be saved as SavedModel format or H5 format. + + +## 3. Prepare Dataset + + TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TF records format. + We also prepared related scripts in `imagenet_prepare` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directory based on the label (synset) of the image. we can use below command ro convert it to tf records format. + + ```shell + cd examples/3.x_api/tensorflow/keras/cv/ + # convert validation subset + bash prepare_dataset.sh --output_dir=./inception_v3/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation + # convert train subset + bash prepare_dataset.sh --output_dir=./inception_v3/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train + cd inception_v3/quantization/ptq + ``` +> **Note**: +> The raw ImageNet dataset resides in JPEG files should be in the following directory structure. Taking validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run Command + +## Quantization + ```shell + bash run_quant.sh --input_model=./inception_v3_keras --output_model=./result --dataset_location=/path/to/evaluation/dataset + ``` + +## Benchmark + ```shell + bash run_benchmark.sh --input_model=./result --dataset_location=/path/to/evaluation/dataset --mode=performance --batch_size=1 + bash run_benchmark.sh --input_model=./result --dataset_location=/path/to/evaluation/dataset --mode=accuracy --batch_size=32 + ``` diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/data_process.py new file mode 100644 index 00000000000..b8cd01593c6 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/data_process.py @@ -0,0 +1,543 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class BilinearImagenetTransform(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
+ + Args: + height: Height of the result + width:Width of the result + central_fraction(float, default=0.875):fraction of size to crop + mean_value(list, default=[0.0,0.0,0.0]):means for each channel + scale(float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__(self, height, width, central_fraction=0.875, mean_value=[0.0, 0.0, 0.0], scale=1.0): + """Initialize `BilinearImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.central_fraction = central_fraction + + # sample is (images, labels) + def __call__(self, sample): + """Convert `BilinearImagenetTransform` feature.""" + image, label = sample + if image.dtype is not tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image containing 87.5% area of the original image. + if self.central_fraction: + image = tf.image.central_crop(image, central_fraction=self.central_fraction) + + if self.height and self.width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [self.height, self.width], method=tf.image.ResizeMethod.BILINEAR) + image = tf.squeeze(image, [0]) + + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ShiftRescale(object): + """Label shift by 1 and rescale. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + label -= 1 + image = (image - 127.5) / 127.5 + return (image, label) + + +class LabelShift(object): + """Convert label to label - label_shift. + + Args: + label_shift(int, default=0): number of label shift + + Returns: + tuple of processed image and label + """ + + def __init__(self, label_shift=0): + """Initialize `LabelShift` class.""" + self.label_shift = label_shift + + def __call__(self, sample): + """Convert label to label_shift.""" + images, labels = sample + if isinstance(labels, np.ndarray): + labels = labels - self.label_shift + elif isinstance(labels, list): + if isinstance(labels[0], tuple): + labels = [tuple(np.array(label) - self.label_shift) for label in labels] + elif isinstance(labels[0], np.ndarray): + labels = [label - self.label_shift for label in labels] + else: + labels = np.array(labels) - self.label_shift + labels = labels.tolist() + else: + labels = np.array(labels) - self.label_shift + return images, labels + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. 
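A hedged sketch of chaining these transforms into an evaluation pipeline for the Keras model; the 299x299 input size and the data directory are illustrative assumptions rather than values taken from this example's scripts:

```python
from data_process import (
    BilinearImagenetTransform,
    ComposeTransform,
    ImageRecordDataset,
)

DATA_DIR = "/path/to/imagenet_tfrecords"  # placeholder

dataset = ImageRecordDataset(
    root=DATA_DIR,
    transform=ComposeTransform(transform_list=[
        # 299x299 is an assumption for illustration; use the size the model expects.
        BilinearImagenetTransform(height=299, width=299),
    ]),
)

for image, label in dataset.take(1):
    print(image.shape, label.numpy())  # (299, 299, 3), label as stored in the TFRecord
```

At evaluation time, a `LabelShift(label_shift=1)` step can then be applied to the batched labels, presumably to line the 1-based TFRecord labels up with the model's 1000-class outputs.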
+ filter (Filter objects, default=None): filter out examples according + to specific conditions. + """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. 
+ """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. + """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/main.py 
b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/main.py new file mode 100644 index 00000000000..5f8b08d6e11 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/main.py @@ -0,0 +1,144 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import time + +import numpy as np +import tensorflow as tf + +from neural_compressor.utils import logger +from data_process import ( + ImageRecordDataset, + ComposeTransform, + BilinearImagenetTransform, + TFDataLoader, + TopKMetric, + LabelShift, +) + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +flags = tf.compat.v1.flags +FLAGS = flags.FLAGS + +## Required parameters +flags.DEFINE_string( + 'input_model', None, 'Run inference with specified keras model.') + +flags.DEFINE_string( + 'output_model', None, 'The output quantized model.') + +flags.DEFINE_string( + 'mode', 'performance', 'define benchmark mode for accuracy or performance') + +flags.DEFINE_bool( + 'tune', False, 'whether to tune the model') + +flags.DEFINE_bool( + 'benchmark', False, 'whether to benchmark the model') + +flags.DEFINE_string( + 'calib_data', None, 'location of calibration dataset') + +flags.DEFINE_string( + 'eval_data', None, 'location of evaluate dataset') + +flags.DEFINE_integer('batch_size', 32, 'batch_size') + +flags.DEFINE_integer( + 'iters', 100, 'maximum iteration when evaluating performance') + +height = width = 299 +eval_dataset = ImageRecordDataset(root=FLAGS.eval_data, transform=ComposeTransform(transform_list= \ + [BilinearImagenetTransform(height=height, width=width)])) + +eval_dataloader = TFDataLoader(dataset=eval_dataset, batch_size=FLAGS.batch_size) + +if FLAGS.calib_data: + calib_dataset = ImageRecordDataset(root=FLAGS.calib_data, transform= \ + ComposeTransform(transform_list= [BilinearImagenetTransform(height=height, width=width)])) + calib_dataloader = TFDataLoader(dataset=calib_dataset, batch_size=10) + +def evaluate(model): + """ + Custom evaluate function to inference the model for specified metric on validation dataset. + + Args: + model (tf.keras.Model): The input model will be the objection of tf.keras.Model. + + Returns: + accuracy (float): evaluation result, the larger is better. 
+ """ + latency_list = [] + metric = TopKMetric() + postprocess = LabelShift(label_shift=1) + + def eval_func(dataloader, metric): + warmup = 5 + iteration = None + if FLAGS.benchmark and FLAGS.mode == 'performance': + iteration = FLAGS.iters + for idx, (inputs, labels) in enumerate(dataloader): + start = time.time() + predictions = model.predict_on_batch(inputs) + end = time.time() + latency_list.append(end - start) + predictions, labels = postprocess((predictions, labels)) + metric.update(predictions, labels) + if iteration and idx >= iteration: + break + latency = np.array(latency_list[warmup:]).mean() / eval_dataloader.batch_size + return latency + + latency = eval_func(eval_dataloader, metric) + if FLAGS.benchmark: + logger.info("\n{} mode benchmark result:".format(FLAGS.mode)) + for i, res in enumerate(latency_list): + logger.debug("Iteration {} result {}:".format(i, res)) + if FLAGS.benchmark and FLAGS.mode == 'performance': + logger.info("Batch size = {}".format(eval_dataloader.batch_size)) + logger.info("Latency: {:.3f} ms".format(latency * 1000)) + logger.info("Throughput: {:.3f} images/sec".format(1. / latency)) + acc = metric.result() + return acc + +def main(_): + if FLAGS.tune: + from neural_compressor.common import set_random_seed + from neural_compressor.tensorflow import quantize_model + from neural_compressor.tensorflow.keras import StaticQuantConfig + + set_random_seed(9527) + quant_config = StaticQuantConfig() + q_model = quantize_model(FLAGS.input_model, quant_config, calib_dataloader) + q_model.save(FLAGS.output_model) + logger.info("Save quantized model to {}.".format(FLAGS.output_model)) + + if FLAGS.benchmark: + from neural_compressor.tensorflow import Model + + inc_model = Model(FLAGS.input_model) + if FLAGS.mode == 'performance': + evaluate(inc_model.model) + else: + accuracy = evaluate(inc_model.model) + logger.info('Batch size = %d' % FLAGS.batch_size) + logger.info("Accuracy: %.5f" % accuracy) + + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/prepare_model.py b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/prepare_model.py new file mode 100644 index 00000000000..abf63dc93b4 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/prepare_model.py @@ -0,0 +1,35 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +from tensorflow.keras.applications.inception_v3 import InceptionV3 +def get_inception_v3_model(saved_path): + model = InceptionV3(weights='imagenet') + model.save(saved_path) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Export pretained keras model', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '--output_model', + type=str, + help='path to exported model file') + + args = parser.parse_args() + get_inception_v3_model(args.output_model) diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..2f0697d8502 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow +intel-extension-for-tensorflow[cpu] diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..43b1636c839 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input_model ${input_model} \ + --benchmark \ + --mode ${mode} \ + --eval_data ${dataset_location} \ + --batch_size ${batch_size} \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..7e3ed727f71 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/inception_v3/quantization/ptq/run_quant.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input_model ${input_model} \ + --output_model ${output_model} \ + --eval_data ${dataset_location} \ + --calib_data ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/prepare_dataset.sh b/examples/3.x_api/tensorflow/keras/image_recognition/prepare_dataset.sh new file mode 100644 index 00000000000..4aad5d69a3f --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/prepare_dataset.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# set -x + +OUTPUT_DIR="./data" +SUBSET="validation" +SHARDS=1 + +help() +{ + cat <<- EOF + Desc: 
Convert the prepared raw ImageNet dataset to TFRecord
+   -h --help           help info
+   --output_dir        Output data directory
+                       default: './data'
+   --raw_dir           Raw data directory
+   --shards            Number of shards in TFRecord files.
+                       default: '1'
+   --subset            Subset of imagenet, can be validation/train.
+                       default: 'validation'
+EOF
+   exit 0
+}
+
+function main {
+   init_params "$@"
+   convert_dataset
+}
+
+# init params
+function init_params {
+   for var in "$@"
+   do
+       case $var in
+           --output_dir=*)
+               OUTPUT_DIR=$(echo $var |cut -f2 -d=)
+               ;;
+           --raw_dir=*)
+               RAW_DIR=$(echo $var |cut -f2 -d=)
+               ;;
+           --shards=*)
+               SHARDS=$(echo $var |cut -f2 -d=)
+               ;;
+           --subset=*)
+               SUBSET=$(echo $var |cut -f2 -d=)
+               ;;
+           -h|--help) help
+               ;;
+           *)
+               echo "Error: No such parameter: ${var}"
+               exit 1
+               ;;
+       esac
+   done
+}
+
+# convert dataset
+function convert_dataset {
+   if [ ! -d ${OUTPUT_DIR} ]; then
+       mkdir ${OUTPUT_DIR}
+   fi
+   python imagenet_prepare/build_imagenet_data.py \
+       --imagenet_metadata_file "imagenet_prepare/imagenet_metadata.txt" \
+       --labels_file "imagenet_prepare/imagenet_lsvrc_2015_synsets.txt" \
+       --output_directory ${OUTPUT_DIR} \
+       --subset ${SUBSET} \
+       --raw_directory ${RAW_DIR} \
+       --shards ${SHARDS}
+}
+
+main "$@"
+
diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/README.md b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/README.md
new file mode 100644
index 00000000000..a276ef7cd0d
--- /dev/null
+++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/README.md
@@ -0,0 +1,65 @@
+Step-by-Step
+============
+
+This document describes the steps to enable TensorFlow Keras models using Intel® Neural Compressor.
+This example can run on Intel CPUs and GPUs.
+
+
+# Prerequisite
+
+## 1. Environment
+
+### Installation
+```shell
+# Install Intel® Neural Compressor
+pip install neural-compressor
+```
+
+### Install Requirements
+TensorFlow and intel-extension-for-tensorflow are required to run this example.
+The Intel Extension for Tensorflow for Intel CPUs is installed by default.
+```shell
+pip install -r requirements.txt
+```
+> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+## 2. Prepare Pretrained model
+
+The pretrained model is provided by [Keras Applications](https://keras.io/api/applications/). To prepare the model, run:
+ ```
+python prepare_model.py --output_model=./resnetv2_50_keras
+ ```
+`--output_model`: the path where the exported model is saved, in SavedModel or H5 format.
+
+## 3. Prepare Dataset
+
+ TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process and convert the ImageNet dataset to the TFRecord format.
+ We also prepared related scripts in the `imagenet_prepare` directory. To download the raw images, the user must create an account with image-net.org. If you have downloaded the raw data and preprocessed the validation data by moving the images into the appropriate sub-directories based on the label (synset) of each image, you can use the commands below to convert them to TFRecord format.
+
+ ```shell
+ cd examples/3.x_api/tensorflow/keras/image_recognition/
+ # convert validation subset
+ bash prepare_dataset.sh --output_dir=./resnet_v2_50/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation
+ # convert train subset
+ bash prepare_dataset.sh --output_dir=./resnet_v2_50/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train
+ cd resnet_v2_50/quantization/ptq
+ ```
+> **Note**:
+> The raw ImageNet dataset consists of JPEG files and should be arranged in the following directory structure. Taking the validation set as an example:
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG
+>         /PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG
+> where 'n01440764' is the unique synset label associated with these images. + +# Run Command + +## Quantization + ```shell + bash run_quant.sh --input_model=./resnetv2_50_keras --output_model=./result --dataset_location=/path/to/evaluation/dataset + ``` + +## Benchmark + ```shell + bash run_benchmark.sh --input_model=./result --mode=accuracy --dataset_location=/path/to/evaluation/dataset --batch_size=32 + bash run_benchmark.sh --input_model=./result --mode=performance --dataset_location=/path/to/evaluation/dataset --batch_size=1 + ``` + diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/data_process.py new file mode 100644 index 00000000000..b8cd01593c6 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/data_process.py @@ -0,0 +1,543 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +class ParseDecodeImagenet: + """Parse features in Example proto. + + Returns: + tuple of parsed image and label + """ + + def __call__(self, sample): + """Parse features in example.""" + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.io.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/class/label": tf.io.FixedLenFeature([1], dtype=tf.int64, default_value=-1), + } + + sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(serialized=sample, features=feature_map) + label = tf.cast(features["image/class/label"], dtype=tf.int32) + image = features["image/encoded"] + image = tf.image.decode_jpeg(image, channels=3, fancy_upscaling=False, dct_method="INTEGER_FAST") + return (image, label) + + +class BilinearImagenetTransform(object): + """Combination of a series of transforms which is applicable to images in Imagenet. 
+ + Args: + height: Height of the result + width:Width of the result + central_fraction(float, default=0.875):fraction of size to crop + mean_value(list, default=[0.0,0.0,0.0]):means for each channel + scale(float, default=1.0):std value + + Returns: + tuple of processed image and label + """ + + def __init__(self, height, width, central_fraction=0.875, mean_value=[0.0, 0.0, 0.0], scale=1.0): + """Initialize `BilinearImagenetTransform` class.""" + self.height = height + self.width = width + self.mean_value = mean_value + self.scale = scale + self.central_fraction = central_fraction + + # sample is (images, labels) + def __call__(self, sample): + """Convert `BilinearImagenetTransform` feature.""" + image, label = sample + if image.dtype is not tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image containing 87.5% area of the original image. + if self.central_fraction: + image = tf.image.central_crop(image, central_fraction=self.central_fraction) + + if self.height and self.width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize(image, [self.height, self.width], method=tf.image.ResizeMethod.BILINEAR) + image = tf.squeeze(image, [0]) + + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + means = tf.broadcast_to(self.mean_value, tf.shape(input=image)) + image = (image - means) * self.scale + return (image, label) + + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ShiftRescale(object): + """Label shift by 1 and rescale. + + Returns: + tuple of processed image and label + """ + + def __call__(self, sample): + image, label = sample + label -= 1 + image = (image - 127.5) / 127.5 + return (image, label) + + +class LabelShift(object): + """Convert label to label - label_shift. + + Args: + label_shift(int, default=0): number of label shift + + Returns: + tuple of processed image and label + """ + + def __init__(self, label_shift=0): + """Initialize `LabelShift` class.""" + self.label_shift = label_shift + + def __call__(self, sample): + """Convert label to label_shift.""" + images, labels = sample + if isinstance(labels, np.ndarray): + labels = labels - self.label_shift + elif isinstance(labels, list): + if isinstance(labels[0], tuple): + labels = [tuple(np.array(label) - self.label_shift) for label in labels] + elif isinstance(labels[0], np.ndarray): + labels = [label - self.label_shift for label in labels] + else: + labels = np.array(labels) - self.label_shift + labels = labels.tolist() + else: + labels = np.array(labels) - self.label_shift + return images, labels + + +class ImageRecordDataset(object): + """Tensorflow imageNet database in tf record format. + + Please arrange data in this way: + root/validation-000-of-100 + root/validation-001-of-100 + ... + root/validation-099-of-100 + The file name needs to follow this pattern: '* - * -of- *' + + Args: root (str): Root directory of dataset. + transform (transform object, default=None): transform to process input data. 
+ filter (Filter objects, default=None): filter out examples according + to specific conditions. + """ + + """Configuration for Imagenet dataset.""" + + def __new__(cls, root, transform=None, filter=None): + """Build a new object of TensorflowImageRecord class.""" + from tensorflow.python.platform import gfile # pylint: disable=no-name-in-module + + glob_pattern = os.path.join(root, "*-*-of-*") + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError("Found no files in --root matching: {}".format(glob_pattern)) + + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False) + ds = ds.apply(parallel_interleave(tf.data.TFRecordDataset, cycle_length=len(file_names))) + + if transform is not None: + transform.transform_list.insert(0, ParseDecodeImagenet()) + else: + transform = ParseDecodeImagenet() + ds = ds.map(transform, num_parallel_calls=None) + ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # this number can be tuned + return ds + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. + """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. 
+ """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. + """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/main.py 
b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/main.py new file mode 100644 index 00000000000..7fc6a2cdf10 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/main.py @@ -0,0 +1,143 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import time + +import numpy as np +import tensorflow as tf + +from neural_compressor.utils import logger +from data_process import ( + ImageRecordDataset, + ComposeTransform, + BilinearImagenetTransform, + TFDataLoader, + TopKMetric, + LabelShift +) + +tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) + +flags = tf.compat.v1.flags +FLAGS = flags.FLAGS + +## Required parameters +flags.DEFINE_string( + 'input_model', None, 'Run inference with specified keras model.') + +flags.DEFINE_string( + 'output_model', None, 'The output quantized model.') + +flags.DEFINE_string( + 'mode', 'performance', 'define benchmark mode for accuracy or performance') + +flags.DEFINE_bool( + 'tune', False, 'whether to tune the model') + +flags.DEFINE_bool( + 'benchmark', False, 'whether to benchmark the model') + +flags.DEFINE_string( + 'calib_data', None, 'location of calibration dataset') + +flags.DEFINE_string( + 'eval_data', None, 'location of evaluate dataset') + +flags.DEFINE_integer('batch_size', 32, 'batch_size') + +flags.DEFINE_integer( + 'iters', 100, 'maximum iteration when evaluating performance') + +height = width = 224 +eval_dataset = ImageRecordDataset(root=FLAGS.eval_data, transform=ComposeTransform(transform_list= \ + [BilinearImagenetTransform(height=height, width=width)])) + +eval_dataloader = TFDataLoader(dataset=eval_dataset, batch_size=FLAGS.batch_size) + +if FLAGS.calib_data: + calib_dataset = ImageRecordDataset(root=FLAGS.calib_data, transform= \ + ComposeTransform(transform_list= [BilinearImagenetTransform(height=height, width=width)])) + calib_dataloader = TFDataLoader(dataset=calib_dataset, batch_size=10) + +def evaluate(model): + """ + Custom evaluate function to inference the model for specified metric on validation dataset. + + Args: + model (tf.keras.Model): The input model will be the objection of tf.keras.Model. + + Returns: + accuracy (float): evaluation result, the larger is better. 
+ """ + latency_list = [] + metric = TopKMetric() + postprocess = LabelShift(label_shift=1) + + def eval_func(dataloader, metric): + warmup = 5 + iteration = None + if FLAGS.benchmark and FLAGS.mode == 'performance': + iteration = FLAGS.iters + for idx, (inputs, labels) in enumerate(dataloader): + start = time.time() + predictions = model.predict_on_batch(inputs) + end = time.time() + latency_list.append(end - start) + predictions, labels = postprocess((predictions, labels)) + metric.update(predictions, labels) + if iteration and idx >= iteration: + break + latency = np.array(latency_list[warmup:]).mean() / eval_dataloader.batch_size + return latency + + latency = eval_func(eval_dataloader, metric) + if FLAGS.benchmark: + logger.info("\n{} mode benchmark result:".format(FLAGS.mode)) + for i, res in enumerate(latency_list): + logger.debug("Iteration {} result {}:".format(i, res)) + if FLAGS.benchmark and FLAGS.mode == 'performance': + logger.info("Batch size = {}".format(eval_dataloader.batch_size)) + logger.info("Latency: {:.3f} ms".format(latency * 1000)) + logger.info("Throughput: {:.3f} images/sec".format(1. / latency)) + acc = metric.result() + return acc + +def main(_): + if FLAGS.tune: + from neural_compressor.common import set_random_seed + from neural_compressor.tensorflow import quantize_model + from neural_compressor.tensorflow.keras import StaticQuantConfig + + set_random_seed(9527) + quant_config = StaticQuantConfig() + q_model = quantize_model(FLAGS.input_model, quant_config, calib_dataloader) + q_model.save(FLAGS.output_model) + logger.info("Save quantized model to {}.".format(FLAGS.output_model)) + + if FLAGS.benchmark: + from neural_compressor.tensorflow import Model + + inc_model = Model(FLAGS.input_model) + if FLAGS.mode == 'performance': + evaluate(inc_model.model) + else: + accuracy = evaluate(inc_model.model) + logger.info('Batch size = %d' % FLAGS.batch_size) + logger.info("Accuracy: %.5f" % accuracy) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/prepare_model.py b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/prepare_model.py new file mode 100644 index 00000000000..f8cd505f965 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/prepare_model.py @@ -0,0 +1,35 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import tensorflow as tf +def get_resnet50_v2_model(saved_path): + model = tf.keras.applications.ResNet50V2(weights='imagenet') + model.save(saved_path) + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description='Export pretained keras model', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + '--output_model', + type=str, + help='path to exported model file') + + args = parser.parse_args() + get_resnet50_v2_model(args.output_model) diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..8b7b47da969 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +tensorflow>=2.11.1 +intel-extension-for-tensorflow[cpu] diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..d464b019f8e --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/run_benchmark.sh @@ -0,0 +1,50 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + esac + done + +} + +# run_tuning +function run_benchmark { + + python main.py \ + --input_model ${input_model} \ + --benchmark \ + --mode ${mode} \ + --eval_data ${dataset_location} \ + --batch_size ${batch_size} \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..7e3ed727f71 --- /dev/null +++ b/examples/3.x_api/tensorflow/keras/image_recognition/resnet_v2_50/quantization/ptq/run_quant.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -x + +function main { + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input_model ${input_model} \ + --output_model ${output_model} \ + --eval_data ${dataset_location} \ + --calib_data ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/README.md b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/README.md new file mode 100644 index 00000000000..41a673fc834 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/README.md @@ -0,0 +1,92 @@ +Step-by-Step +============ + +This document is used to list steps of reproducing 
the Intel® Neural Compressor tuning result for the TensorFlow Intel® Model Zoo BERT-Large model on the SQuAD v1.1 task.
+This example can run on Intel CPUs and GPUs.
+
+
+# Prerequisite
+
+## 1. Environment
+
+### Installation
+```shell
+# Install Intel® Neural Compressor
+pip install neural-compressor
+```
+
+### Install Intel Tensorflow
+```shell
+pip install intel-tensorflow
+```
+
+> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+### Install Intel Extension for Tensorflow
+
+#### Quantizing the model on Intel GPU (installing ITEX is mandatory)
+Intel Extension for Tensorflow must be installed to quantize the model on Intel GPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[xpu]
+```
+For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).
+
+#### Quantizing the model on Intel CPU (installing ITEX is optional)
+Intel Extension for Tensorflow for Intel CPUs is currently experimental and is not required for quantizing the model on Intel CPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+## 2. Prepare Pretrained model
+```shell
+wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_7_0/fp32_bert_squad.pb
+```
+
+## 3. Prepare Dataset
+Please choose one of the following two ways to prepare the dataset: the manual approach or the automatic approach.
+### Manual approach
+```shell
+wget https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip
+```
+
+```shell
+unzip wwm_uncased_L-24_H-1024_A-16.zip
+```
+
+```shell
+wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -P wwm_uncased_L-24_H-1024_A-16
+```
+The `wwm_uncased_L-24_H-1024_A-16` folder will then serve as your data path.
+
+### Automatic dataset download
+Run the `prepare_dataset.sh` script located in `examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq`.
+
+Usage:
+```shell
+cd examples/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq
+bash prepare_dataset.sh --output_dir=./data
+```
+
+### Convert the dataset to TF Record format
+After the dataset has been downloaded by either of the ways above, it should be converted to files in TF Record format.
+```shell
+python create_tf_record.py --vocab_file=data/vocab.txt --predict_file=data/dev-v1.1.json --output_file=data/eval.tf_record
+```
+
+# Run Command
+ Please make sure the commands below are executed with the same TensorFlow runtime version as in the steps above.
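+ For example, one quick way to confirm which TensorFlow runtime is active before running the scripts:
+ ```shell
+ python -c "import tensorflow as tf; print(tf.__version__)"
+ ```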
+ +## Quantization + ```shell + bash run_quant.sh --input_model=./fp32_bert_squad.pb --output_model=./bert_squad_int8.pb --dataset_location=data + ``` + +## Benchmark + ```shell + bash run_benchmark.sh --input_model=./bert_squad_int8.pb --mode=accuracy --dataset_location=data --batch_size=64 + bash run_benchmark.sh --input_model=./bert_squad_int8.pb --mode=performance --dataset_location=data --batch_size=64 + ``` \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_pretraining_data.py b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_pretraining_data.py new file mode 100644 index 00000000000..8adecb971fd --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_pretraining_data.py @@ -0,0 +1,475 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import random +import tokenization +import tensorflow as tf + +from absl import app +#from absl import flags +from absl import logging +flags = tf.compat.v1.flags + +FLAGS = flags.FLAGS + +flags.DEFINE_string("input_file", None, + "Input raw text file (or comma-separated list of files).") + +flags.DEFINE_string( + "output_file", None, + "Output TF example file (or comma-separated list of files).") + +flags.DEFINE_string("vocab_file", None, + "The vocabulary file that the BERT model was trained on.") + +flags.DEFINE_bool( + "do_lower_case", True, + "Whether to lower case the input text. 
Should be True for uncased " + "models and False for cased models.") + +flags.DEFINE_bool( + "do_whole_word_mask", False, + "Whether to use whole word masking rather than per-WordPiece masking.") + +flags.DEFINE_integer("max_seq_length", 128, "Maximum sequence length.") + +flags.DEFINE_integer("max_predictions_per_seq", 20, + "Maximum number of masked LM predictions per sequence.") + +flags.DEFINE_integer("random_seed", 12345, "Random seed for data generation.") + +flags.DEFINE_integer( + "dupe_factor", 10, + "Number of times to duplicate the input data (with different masks).") + +flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.") + +flags.DEFINE_float( + "short_seq_prob", 0.1, + "Probability of creating sequences which are shorter than the " + "maximum length.") + + +class TrainingInstance(object): + """A single training instance (sentence pair).""" + + def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, + is_random_next): + self.tokens = tokens + self.segment_ids = segment_ids + self.is_random_next = is_random_next + self.masked_lm_positions = masked_lm_positions + self.masked_lm_labels = masked_lm_labels + + def __str__(self): + s = "" + s += "tokens: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.tokens])) + s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) + s += "is_random_next: %s\n" % self.is_random_next + s += "masked_lm_positions: %s\n" % (" ".join( + [str(x) for x in self.masked_lm_positions])) + s += "masked_lm_labels: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.masked_lm_labels])) + s += "\n" + return s + + def __repr__(self): + return self.__str__() + + +def write_instance_to_example_files(instances, tokenizer, max_seq_length, + max_predictions_per_seq, output_files): + """Create TF example files from `TrainingInstance`s.""" + writers = [] + for output_file in output_files: + writers.append(tf.io.TFRecordWriter(output_file)) + + writer_index = 0 + + total_written = 0 + for (inst_index, instance) in enumerate(instances): + input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) + input_mask = [1] * len(input_ids) + segment_ids = list(instance.segment_ids) + assert len(input_ids) <= max_seq_length + + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + masked_lm_positions = list(instance.masked_lm_positions) + masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) + masked_lm_weights = [1.0] * len(masked_lm_ids) + + while len(masked_lm_positions) < max_predictions_per_seq: + masked_lm_positions.append(0) + masked_lm_ids.append(0) + masked_lm_weights.append(0.0) + + next_sentence_label = 1 if instance.is_random_next else 0 + + features = collections.OrderedDict() + features["input_ids"] = create_int_feature(input_ids) + features["input_mask"] = create_int_feature(input_mask) + features["segment_ids"] = create_int_feature(segment_ids) + features["masked_lm_positions"] = create_int_feature(masked_lm_positions) + features["masked_lm_ids"] = create_int_feature(masked_lm_ids) + features["masked_lm_weights"] = create_float_feature(masked_lm_weights) + features["next_sentence_labels"] = create_int_feature([next_sentence_label]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + + 
writers[writer_index].write(tf_example.SerializeToString()) + writer_index = (writer_index + 1) % len(writers) + + total_written += 1 + + if inst_index < 20: + tf.compat.v1.logging.info("*** Example ***") + tf.compat.v1.logging.info("tokens: %s" % " ".join( + [tokenization.printable_text(x) for x in instance.tokens])) + + for feature_name in features.keys(): + feature = features[feature_name] + values = [] + if feature.int64_list.value: + values = feature.int64_list.value + elif feature.float_list.value: + values = feature.float_list.value + tf.compat.v1.logging.info( + "%s: %s" % (feature_name, " ".join([str(x) for x in values]))) + + for writer in writers: + writer.close() + + tf.compat.v1.logging.info("Wrote %d total instances", total_written) + + +def create_int_feature(values): + feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return feature + + +def create_float_feature(values): + feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) + return feature + + +def create_training_instances(input_files, tokenizer, max_seq_length, + dupe_factor, short_seq_prob, masked_lm_prob, + max_predictions_per_seq, rng): + """Create `TrainingInstance`s from raw text.""" + all_documents = [[]] + + # Input file format: + # (1) One sentence per line. These should ideally be actual sentences, not + # entire paragraphs or arbitrary spans of text. (Because we use the + # sentence boundaries for the "next sentence prediction" task). + # (2) Blank lines between documents. Document boundaries are needed so + # that the "next sentence prediction" task doesn't span between documents. + for input_file in input_files: + with tf.io.gfile.GFile(input_file, "r") as reader: + while True: + line = tokenization.convert_to_unicode(reader.readline()) + if not line: + break + line = line.strip() + + # Empty lines are used as document delimiters + if not line: + all_documents.append([]) + tokens = tokenizer.tokenize(line) + if tokens: + all_documents[-1].append(tokens) + + # Remove empty documents + all_documents = [x for x in all_documents if x] + rng.shuffle(all_documents) + + vocab_words = list(tokenizer.vocab.keys()) + instances = [] + for _ in range(dupe_factor): + for document_index in range(len(all_documents)): + instances.extend( + create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) + + rng.shuffle(instances) + return instances + + +def create_instances_from_document( + all_documents, document_index, max_seq_length, short_seq_prob, + masked_lm_prob, max_predictions_per_seq, vocab_words, rng): + """Creates `TrainingInstance`s for a single document.""" + document = all_documents[document_index] + + # Account for [CLS], [SEP], [SEP] + max_num_tokens = max_seq_length - 3 + + # We *usually* want to fill up the entire sequence since we are padding + # to `max_seq_length` anyways, so short sequences are generally wasted + # computation. However, we *sometimes* + # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter + # sequences to minimize the mismatch between pre-training and fine-tuning. + # The `target_seq_length` is just a rough target however, whereas + # `max_seq_length` is a hard limit. 
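+  # Pick the target length for this chunk: usually the full max_num_tokens, but a
+  # random shorter length short_seq_prob of the time (see the note above).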
+ target_seq_length = max_num_tokens + if rng.random() < short_seq_prob: + target_seq_length = rng.randint(2, max_num_tokens) + + # We DON'T just concatenate all of the tokens from a document into a long + # sequence and choose an arbitrary split point because this would make the + # next sentence prediction task too easy. Instead, we split the input into + # segments "A" and "B" based on the actual "sentences" provided by the user + # input. + instances = [] + current_chunk = [] + current_length = 0 + i = 0 + while i < len(document): + segment = document[i] + current_chunk.append(segment) + current_length += len(segment) + if i == len(document) - 1 or current_length >= target_seq_length: + if current_chunk: + # `a_end` is how many segments from `current_chunk` go into the `A` + # (first) sentence. + a_end = 1 + if len(current_chunk) >= 2: + a_end = rng.randint(1, len(current_chunk) - 1) + + tokens_a = [] + for j in range(a_end): + tokens_a.extend(current_chunk[j]) + + tokens_b = [] + # Random next + is_random_next = False + if len(current_chunk) == 1 or rng.random() < 0.5: + is_random_next = True + target_b_length = target_seq_length - len(tokens_a) + + # This should rarely go for more than one iteration for large + # corpora. However, just to be careful, we try to make sure that + # the random document is not the same as the document + # we're processing. + for _ in range(10): + random_document_index = rng.randint(0, len(all_documents) - 1) + if random_document_index != document_index: + break + + random_document = all_documents[random_document_index] + random_start = rng.randint(0, len(random_document) - 1) + for j in range(random_start, len(random_document)): + tokens_b.extend(random_document[j]) + if len(tokens_b) >= target_b_length: + break + # We didn't actually use these segments so we "put them back" so + # they don't go to waste. + num_unused_segments = len(current_chunk) - a_end + i -= num_unused_segments + # Actual next + else: + is_random_next = False + for j in range(a_end, len(current_chunk)): + tokens_b.extend(current_chunk[j]) + truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) + + assert len(tokens_a) >= 1 + assert len(tokens_b) >= 1 + + tokens = [] + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in tokens_a: + tokens.append(token) + segment_ids.append(0) + + tokens.append("[SEP]") + segment_ids.append(0) + + for token in tokens_b: + tokens.append(token) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + (tokens, masked_lm_positions, + masked_lm_labels) = create_masked_lm_predictions( + tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) + instance = TrainingInstance( + tokens=tokens, + segment_ids=segment_ids, + is_random_next=is_random_next, + masked_lm_positions=masked_lm_positions, + masked_lm_labels=masked_lm_labels) + instances.append(instance) + current_chunk = [] + current_length = 0 + i += 1 + + return instances + + +MaskedLmInstance = collections.namedtuple("MaskedLmInstance", + ["index", "label"]) + + +def create_masked_lm_predictions(tokens, masked_lm_prob, + max_predictions_per_seq, vocab_words, rng): + """Creates the predictions for the masked LM objective.""" + + cand_indexes = [] + for (i, token) in enumerate(tokens): + if token == "[CLS]" or token == "[SEP]": + continue + # Whole Word Masking means that if we mask all of the wordpieces + # corresponding to an original word. 
When a word has been split into + # WordPieces, the first token does not have any marker and any subsequence + # tokens are prefixed with ##. So whenever we see the ## token, we + # append it to the previous set of word indexes. + # + # Note that Whole Word Masking does *not* change the training code + # at all -- we still predict each WordPiece independently, softmaxed + # over the entire vocabulary. + if (FLAGS.do_whole_word_mask and len(cand_indexes) >= 1 and + token.startswith("##")): + cand_indexes[-1].append(i) + else: + cand_indexes.append([i]) + + rng.shuffle(cand_indexes) + + output_tokens = list(tokens) + + num_to_predict = min(max_predictions_per_seq, + max(1, int(round(len(tokens) * masked_lm_prob)))) + + masked_lms = [] + covered_indexes = set() + for index_set in cand_indexes: + if len(masked_lms) >= num_to_predict: + break + # If adding a whole-word mask would exceed the maximum number of + # predictions, then just skip this candidate. + if len(masked_lms) + len(index_set) > num_to_predict: + continue + is_any_index_covered = False + for index in index_set: + if index in covered_indexes: + is_any_index_covered = True + break + if is_any_index_covered: + continue + for index in index_set: + covered_indexes.add(index) + + masked_token = None + # 80% of the time, replace with [MASK] + if rng.random() < 0.8: + masked_token = "[MASK]" + else: + # 10% of the time, keep original + if rng.random() < 0.5: + masked_token = tokens[index] + # 10% of the time, replace with random word + else: + masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)] + + output_tokens[index] = masked_token + + masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) + assert len(masked_lms) <= num_to_predict + masked_lms = sorted(masked_lms, key=lambda x: x.index) + + masked_lm_positions = [] + masked_lm_labels = [] + for p in masked_lms: + masked_lm_positions.append(p.index) + masked_lm_labels.append(p.label) + + return (output_tokens, masked_lm_positions, masked_lm_labels) + + +def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): + """Truncates a pair of sequences to a maximum sequence length.""" + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_num_tokens: + break + + trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b + assert len(trunc_tokens) >= 1 + + # We want to sometimes truncate from the front and sometimes from the + # back to add more randomness and avoid biases. 
+ if rng.random() < 0.5: + del trunc_tokens[0] + else: + trunc_tokens.pop() + + +def main(_): + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) + + input_files = [] + for input_pattern in FLAGS.input_file.split(","): + input_files.extend(tf.io.gfile.glob(input_pattern)) + + tf.compat.v1.logging.info("*** Reading from input files ***") + for input_file in input_files: + tf.compat.v1.logging.info(" %s", input_file) + + rng = random.Random(FLAGS.random_seed) + instances = create_training_instances( + input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor, + FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, + rng) + + output_files = FLAGS.output_file.split(",") + tf.compat.v1.logging.info("*** Writing to output files ***") + for output_file in output_files: + tf.compat.v1.logging.info(" %s", output_file) + + write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length, + FLAGS.max_predictions_per_seq, output_files) + + +if __name__ == "__main__": + flags.mark_flag_as_required("input_file") + flags.mark_flag_as_required("output_file") + flags.mark_flag_as_required("vocab_file") + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py new file mode 100644 index 00000000000..12c6486283d --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/create_tf_record.py @@ -0,0 +1,509 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""generate bert dataset""" + +import collections +import json +import os +import tokenization +import six +import tensorflow as tf + +from absl import app +#from absl import flags +from absl import logging + +flags = tf.compat.v1.flags +FLAGS = flags.FLAGS + +## Required parameters +flags.DEFINE_string("vocab_file", None, + "The vocabulary file that the BERT model was trained on.") + +flags.DEFINE_string( + "predict_file", None, + "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") + +flags.DEFINE_string( + "output_file", None, "The output tf_record for usage.") + +class SquadExample(object): + """A single training/test example for simple sequence classification. + + For examples without an answer, the start and end position are -1. 
+ """ + + def __init__(self, + qas_id, + question_text, + doc_tokens, + orig_answer_text=None, + start_position=None, + end_position=None, + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.doc_tokens = doc_tokens + self.orig_answer_text = orig_answer_text + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + #self.startpb = 0 + + def __str__(self): + return self.__repr__() + + def __repr__(self): + s = "" + s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) + s += ", question_text: %s" % ( + tokenization.printable_text(self.question_text)) + s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) + if self.start_position: + s += ", start_position: %d" % (self.start_position) + if self.start_position: + s += ", end_position: %d" % (self.end_position) + if self.start_position: + s += ", is_impossible: %r" % (self.is_impossible) + return s + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, + unique_id, + example_index, + doc_span_index, + tokens, + token_to_orig_map, + token_is_max_context, + input_ids, + input_mask, + segment_ids, + start_position=None, + end_position=None, + is_impossible=None): + self.unique_id = unique_id + self.example_index = example_index + self.doc_span_index = doc_span_index + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + self.token_is_max_context = token_is_max_context + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + + +def read_squad_examples(input_file, is_training=None): + """Read a SQuAD json file into a list of SquadExample.""" + with tf.io.gfile.GFile(input_file, "r") as reader: + input_data = json.load(reader)["data"] + + def is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + examples = [] + for entry in input_data: + for paragraph in entry["paragraphs"]: + paragraph_text = paragraph["context"] + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + for c in paragraph_text: + if is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position = None + end_position = None + orig_answer_text = None + is_impossible = False + if is_training: + + if FLAGS.version_2_with_negative: + is_impossible = qa["is_impossible"] + if (len(qa["answers"]) != 1) and (not is_impossible): + raise ValueError( + "For training, each question should have exactly 1 answer.") + if not is_impossible: + answer = qa["answers"][0] + orig_answer_text = answer["text"] + answer_offset = answer["answer_start"] + answer_length = len(orig_answer_text) + start_position = char_to_word_offset[answer_offset] + end_position = char_to_word_offset[answer_offset + answer_length - + 1] + # Only add answers where the text can be exactly recovered from the + # document. If this CAN'T happen it's likely due to weird Unicode + # stuff so we will just skip the example. + # + # Note that this means for training mode, every example is NOT + # guaranteed to be preserved. 
+ actual_text = " ".join( + doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join( + tokenization.whitespace_tokenize(orig_answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + tf.compat.v1.logging.warning("Could not find answer: '%s' vs. '%s'", + actual_text, cleaned_answer_text) + continue + else: + start_position = -1 + end_position = -1 + orig_answer_text = "" + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + doc_tokens=doc_tokens, + orig_answer_text=orig_answer_text, + start_position=start_position, + end_position=end_position, + is_impossible=is_impossible) + examples.append(example) + + return examples + + +def convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training=None, + output_fn=None): + """Loads a data file into a list of `InputBatch`s.""" + + unique_id = 1000000000 + + for (example_index, example) in enumerate(examples): + query_tokens = tokenizer.tokenize(example.question_text) + + if len(query_tokens) > max_query_length: + query_tokens = query_tokens[0:max_query_length] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + tok_start_position = None + tok_end_position = None + if is_training and example.is_impossible: + tok_start_position = -1 + tok_end_position = -1 + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, + example.orig_answer_text) + + # The -3 accounts for [CLS], [SEP] and [SEP] + max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 + + # We can have documents that are longer than the maximum sequence length. + # To deal with this we do a sliding window approach, where we take chunks + # of the up to our max length with a stride of `doc_stride`. 
+ _DocSpan = collections.namedtuple( # pylint: disable=invalid-name + "DocSpan", ["start", "length"]) + doc_spans = [] + start_offset = 0 + while start_offset < len(all_doc_tokens): + length = len(all_doc_tokens) - start_offset + if length > max_tokens_for_doc: + length = max_tokens_for_doc + doc_spans.append(_DocSpan(start=start_offset, length=length)) + if start_offset + length == len(all_doc_tokens): + break + start_offset += min(length, doc_stride) + + for (doc_span_index, doc_span) in enumerate(doc_spans): + tokens = [] + token_to_orig_map = {} + token_is_max_context = {} + segment_ids = [] + tokens.append("[CLS]") + segment_ids.append(0) + for token in query_tokens: + tokens.append(token) + segment_ids.append(0) + tokens.append("[SEP]") + segment_ids.append(0) + + for i in range(doc_span.length): + split_token_index = doc_span.start + i + token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] + + is_max_context = _check_is_max_context(doc_spans, doc_span_index, + split_token_index) + token_is_max_context[len(tokens)] = is_max_context + tokens.append(all_doc_tokens[split_token_index]) + segment_ids.append(1) + tokens.append("[SEP]") + segment_ids.append(1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + while len(input_ids) < max_seq_length: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + start_position = None + end_position = None + if is_training and not example.is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. 
+ doc_start = doc_span.start + doc_end = doc_span.start + doc_span.length - 1 + out_of_span = False + if not (tok_start_position >= doc_start and + tok_end_position <= doc_end): + out_of_span = True + if out_of_span: + start_position = 0 + end_position = 0 + else: + doc_offset = len(query_tokens) + 2 + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + if is_training and example.is_impossible: + start_position = 0 + end_position = 0 + + if example_index < 1: + tf.compat.v1.logging.info("*** Example ***") + tf.compat.v1.logging.info("unique_id: %s" % (unique_id)) + tf.compat.v1.logging.info("example_index: %s" % (example_index)) + tf.compat.v1.logging.info("doc_span_index: %s" % (doc_span_index)) + tf.compat.v1.logging.info("tokens: %s" % " ".join( + [tokenization.printable_text(x) for x in tokens])) + tf.compat.v1.logging.info("token_to_orig_map: %s" % " ".join( + ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) + tf.compat.v1.logging.info("token_is_max_context: %s" % " ".join([ + "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) + ])) + tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + tf.compat.v1.logging.info( + "input_mask: %s" % " ".join([str(x) for x in input_mask])) + tf.compat.v1.logging.info( + "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + if is_training and example.is_impossible: + tf.compat.v1.logging.info("impossible example") + if is_training and not example.is_impossible: + answer_text = " ".join(tokens[start_position:(end_position + 1)]) + tf.compat.v1.logging.info("start_position: %d" % (start_position)) + tf.compat.v1.logging.info("end_position: %d" % (end_position)) + tf.compat.v1.logging.info( + "answer: %s" % (tokenization.printable_text(answer_text))) + + feature = InputFeatures( + unique_id=unique_id, + example_index=example_index, + doc_span_index=doc_span_index, + tokens=tokens, + token_to_orig_map=token_to_orig_map, + token_is_max_context=token_is_max_context, + input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + start_position=start_position, + end_position=end_position, + is_impossible=example.is_impossible) + + # Run callback + output_fn(feature) + + unique_id += 1 + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, + orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + + # The SQuAD annotations are character based. We first project them to + # whitespace-tokenized words. But then after WordPiece tokenization, we can + # often find a "better match". For example: + # + # Question: What year was John Smith born? + # Context: The leader was John Smith (1895-1943). + # Answer: 1895 + # + # The original whitespace-tokenized answer will be "(1895-1943).". However + # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match + # the exact answer, 1895. + # + # However, this is not always possible. Consider the following: + # + # Question: What country is the top exporter of electornics? + # Context: The Japanese electronics industry is the lagest in the world. + # Answer: Japan + # + # In this case, the annotator chose "Japan" as a character sub-span of + # the word "Japanese". Since our WordPiece tokenizer does not split + # "Japanese", we just use "Japanese" as the annotation. This is fairly rare + # in SQuAD, but does happen. 
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + + # Because of the sliding window approach taken to scoring documents, a single + # token can appear in multiple documents. E.g. + # Doc: the man went to the store and bought a gallon of milk + # Span A: the man went to the + # Span B: to the store and bought + # Span C: and bought a gallon of + # ... + # + # Now the word 'bought' will have two scores from spans B and C. We only + # want to consider the score with "maximum context", which we define as + # the *minimum* of its left and right context (the *sum* of left and + # right context will always be the same, of course). + # + # In the example the maximum context for 'bought' would be span C since + # it has 1 left context and 3 right context, while span B has 4 left context + # and 0 right context. + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + +class FeatureWriter(object): + """Writes InputFeature to TF example file.""" + + def __init__(self, filename, is_training): + self.is_training = is_training + self.num_features = 0 + self.filename = filename + self._writer = tf.io.TFRecordWriter(self.filename) + + def process_feature(self, feature): + """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" + self.num_features += 1 + + def create_int_feature(values): + feature = tf.train.Feature( + int64_list=tf.train.Int64List(value=list(values))) + return feature + + features = collections.OrderedDict() + features["unique_ids"] = create_int_feature([feature.unique_id]) + features["input_ids"] = create_int_feature(feature.input_ids) + features["input_mask"] = create_int_feature(feature.input_mask) + features["segment_ids"] = create_int_feature(feature.segment_ids) + + if self.is_training: + features["start_positions"] = create_int_feature([feature.start_position]) + features["end_positions"] = create_int_feature([feature.end_position]) + impossible = 0 + if feature.is_impossible: + impossible = 1 + features["is_impossible"] = create_int_feature([impossible]) + + tf_example = tf.train.Example(features=tf.train.Features(feature=features)) + self._writer.write(tf_example.SerializeToString()) + + def close(self): + self._writer.close() + + def rm_tmp_file(self): + os.remove(self.filename) + +def main(_): + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + + tokenizer = tokenization.FullTokenizer( + vocab_file=FLAGS.vocab_file, do_lower_case=True) + + eval_examples = read_squad_examples( + input_file=FLAGS.predict_file, is_training=False) + + eval_writer = FeatureWriter( + filename=FLAGS.output_file, is_training=False) + + eval_features = [] + def append_feature(feature): + 
eval_features.append(feature) + eval_writer.process_feature(feature) + convert_examples_to_features( + examples=eval_examples, + tokenizer=tokenizer, + max_seq_length=384, + doc_stride=128, + max_query_length=64, + is_training=False, + output_fn=append_feature) + + +if __name__ == "__main__": + flags.mark_flag_as_required("vocab_file") + flags.mark_flag_as_required("predict_file") + flags.mark_flag_as_required("output_file") + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/data_process.py new file mode 100644 index 00000000000..6e9d169ada5 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/data_process.py @@ -0,0 +1,936 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json +import re +import sys +import string +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from collections import Counter +from neural_compressor.tensorflow.utils.data import default_collate, BaseDataLoader, BatchSampler, IterableFetcher + +def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): + """Calculate the max metric for each ground truth. + + For each answer in ground_truths, evaluate the metric of prediction with + this answer, and return the max metric. + + Args: + metric_fn: The function to calculate the metric. + prediction: The prediction result. + ground_truths: A list of correct answers. + + Returns: + The max metric. Float point number. + """ + scores_for_ground_truths = [] + for ground_truth in ground_truths: + score = metric_fn(prediction, ground_truth) + scores_for_ground_truths.append(score) + return max(scores_for_ground_truths) + +def normalize_answer(text: str) -> str: + """Normalize the answer text. + + Lower text, remove punctuation, articles and extra whitespace, + and replace other whitespace (newline, tab, etc.) to space. + + Args: + s: The text to be normalized. + + Returns: + The normalized text. + """ + + def _remove_articles(text): + return re.sub(r"\b(a|an|the)\b", " ", text) + + def _white_space_fix(text): + return " ".join(text.split()) + + def _remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def _lower(text): + return text.lower() + + return _white_space_fix(_remove_articles(_remove_punc(_lower(text)))) + +def exact_match_score(prediction, ground_truth): + """Compute the exact match score between prediction and ground truth. + + Args: + prediction: The result of predictions to be evaluated. + ground_truth: The ground truth. + + Returns: + The exact match score. + """ + return normalize_answer(prediction) == normalize_answer(ground_truth) + +def f1_score(prediction, ground_truth): + """Calculate the F1 score of the prediction and the ground_truth. 
+ + Args: + prediction: The predicted result. + ground_truth: The ground truth. + + Returns: + The F1 score of prediction. Float point number. + """ + prediction_tokens = normalize_answer(prediction).split() + ground_truth_tokens = normalize_answer(ground_truth).split() + common = Counter(prediction_tokens) & Counter(ground_truth_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(ground_truth_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + +def evaluate(dataset, predictions): + """Evaluate the average F1 score and the exact match score for Question-Answering results. + + Args: + dataset: The dataset to evaluate the prediction. A list instance of articles. + An article contains a list of paragraphs, a paragraph contains a list of + question-and-answers (qas), and a question-and-answer contains an id, a question, + and a list of correct answers. For example: + predictions: The result of predictions to be evaluated. A dict mapping the id of + a question to the predicted answer of the question. + + Returns: + The F1 score and the exact match score. + """ + f1 = exact_match = total = 0 + for article in dataset: + for paragraph in article["paragraphs"]: + for qa in paragraph["qas"]: + total += 1 + if qa["id"] not in predictions: + message = "Unanswered question " + qa["id"] + " will receive score 0." + print(message, file=sys.stderr) + continue + ground_truths = list(map(lambda x: x["text"], qa["answers"])) + prediction = predictions[qa["id"]] + exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) + f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) + + exact_match = 100.0 * exact_match / total + f1 = 100.0 * f1 / total + + return {"exact_match": exact_match, "f1": f1} + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. 
+ """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class SquadF1(BaseMetric): + """Evaluate for v1.1 of the SQuAD dataset.""" + + def __init__(self): + """Initialize the score list.""" + self._score_list = [] # squad metric only work when all data preds collected + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + if preds: + if getattr(self, "_hvd", None) is not None: + gathered_preds_list = self._hvd.allgather_object(preds) + gathered_labels_list = self._hvd.allgather_object(labels) + temp_preds_list, temp_labels_list = [], [] + for i in range(0, self._hvd.size()): + temp_preds_list += gathered_preds_list[i] + temp_labels_list += gathered_labels_list[i] + preds = temp_preds_list + labels = temp_labels_list + result = evaluate(labels, preds) + self._score_list.append(result["f1"]) + + def reset(self): + """Reset the score list.""" + self._score_list = [] + + def result(self): + """Compute F1 score.""" + if len(self._score_list) == 0: + return 0.0 + return np.array(self._score_list).mean() + + +class ParseDecodeBert: + """Helper function for TensorflowModelZooBertDataset. + + Parse the features from sample. + """ + + def __call__(self, sample): + """Parse the sample data. + + Args: + sample: Data to be parsed. + """ + # Dense features in Example proto. + feature_map = { + "input_ids": tf.compat.v1.VarLenFeature(dtype=tf.int64), + "input_mask": tf.compat.v1.VarLenFeature(dtype=tf.int64), + "segment_ids": tf.compat.v1.VarLenFeature(dtype=tf.int64), + } + + features = tf.io.parse_single_example(sample, feature_map) + + input_ids = features["input_ids"].values + input_mask = features["input_mask"].values + segment_ids = features["segment_ids"].values + + return (input_ids, input_mask, segment_ids) + + +class TFDataLoader(object): # pragma: no cover + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return + + +class ModelZooBertDataset(object): + """Tensorflow dataset for three-input Bert in tf record format. + + Root is a full path to tfrecord file, which contains the file name. 
+ Please use Resize transform when batch_size > 1 + Args: root (str): path of dataset. + label_file (str): path of label file. + task (str, default='squad'): task type of model. + model_type (str, default='bert'): model type, support 'bert'. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according. + """ + + def __init__(self, root, label_file, task="squad", model_type="bert", transform=None, filter=None, num_cores=28): + """Initialize the attributes of class.""" + with open(label_file) as lf: + label_json = json.load(lf) + assert label_json["version"] == "1.1", "only support squad 1.1" + self.label = label_json["data"] + + record_iterator = tf.compat.v1.python_io.tf_record_iterator(root) + example = tf.train.SequenceExample() + for element in record_iterator: + example.ParseFromString(element) + break + feature = example.context.feature + if len(feature["input_ids"].int64_list.value) == 0 and len(feature["input_mask"].int64_list.value) == 0: + raise ValueError( + "Tfrecord format is incorrect, please refer\ + 'https://github.com/tensorflow/models/blob/master/research/\ + object_detection/dataset_tools/' to create correct tfrecord" + ) + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + tfrecord_paths = [root] + ds = tf.data.TFRecordDataset.list_files(tfrecord_paths) + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, + cycle_length=num_cores, + block_length=5, + sloppy=True, + buffer_output_elements=10000, + prefetch_input_elements=10000, + ) + ) + if transform is not None: + transform.transform_list.insert(0, ParseDecodeBert()) + else: + transform = ParseDecodeBert() + ds = ds.map(transform, num_parallel_calls=None) + if filter is not None: + ds = ds.filter(filter) + ds = ds.prefetch(buffer_size=1000) + ds = TFDataLoader(ds) + self.root = [] + for inputs in ds: + self.root.append(inputs) + self.transform = transform + self.filter = filter + + def __getitem__(self, index): + """Magic method. + + x[i] is roughly equivalent to type(x).__getitem__(x, index) + """ + return self.root[index], self.label + + def __len__(self): + """Length of the dataset.""" + return len(self.root) + + +class TFSquadV1PostTransform(object): + """Postprocess the predictions of bert on SQuAD. + + Args: + label_file (str): path of label file + vocab_file(str): path of vocabulary file + n_best_size (int, default=20): + The total number of n-best predictions to generate in nbest_predictions.json + max_seq_length (int, default=384): + The maximum total input sequence length after WordPiece tokenization. + Sequences longer than this will be truncated, shorter than this will be padded + max_query_length (int, default=64): + The maximum number of tokens for the question. + Questions longer than this will be truncated to this length + max_answer_length (int, default=30): + The maximum length of an answer that can be generated. This is needed because + the start and end predictions are not conditioned on one another + do_lower_case (bool, default=True): + Whether to lower case the input text. 
+ Should be True for uncased models and False for cased models + doc_stride (int, default=128): + When splitting up a long document into chunks, + how much stride to take between chunks + + Returns: + tuple of processed prediction and label + """ + + def __init__( + self, + label_file, + vocab_file, + n_best_size=20, + max_seq_length=384, + max_query_length=64, + max_answer_length=30, + do_lower_case=True, + doc_stride=128, + ): + """Initialize `TFSquadV1PostTransform` class.""" + from tokenization import FullTokenizer + from create_tf_record import read_squad_examples, convert_examples_to_features + self.eval_examples = read_squad_examples(label_file) + tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case) + + self.eval_features = [] + + def append_feature(feature): + self.eval_features.append(feature) + + convert_examples_to_features( + examples=self.eval_examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + output_fn=append_feature, + ) + + self.n_best_size = n_best_size + self.max_answer_length = max_answer_length + self.do_lower_case = do_lower_case + self.RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) + + def process_result(self, results): + """Get the processed results.""" + processed_results = [] + # notice the result list sequence + for unique_id, start_logits, end_logits in zip(*results): + processed_results.append( + self.RawResult( + unique_id=int(unique_id), + start_logits=[float(x) for x in start_logits.flat], + end_logits=[float(x) for x in end_logits.flat], + ) + ) + + return processed_results + + def get_postprocess_result(self, sample): + """Get the post processed results.""" + if sample == (None, None): + return (None, None) + all_results, label = sample + all_results = self.process_result(all_results) + example_index_to_features = collections.defaultdict(list) + for feature in self.eval_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"] + ) + + all_predictions = collections.OrderedDict() + for example_index, example in enumerate(self.eval_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min mull score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for feature_index, feature in enumerate(features): + # skip the case that is not predicted + if feature.unique_id not in unique_id_to_result: + all_predictions[example.qas_id] = "*#skip this example#*" + continue + result = unique_id_to_result[feature.unique_id] + start_indexes = TFSquadV1PostTransform._get_best_indexes(result.start_logits, self.n_best_size) + end_indexes = TFSquadV1PostTransform._get_best_indexes(result.end_logits, self.n_best_size) + + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. 
We throw out all + # invalid predictions. + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > self.max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index], + ) + ) + + prelim_predictions = sorted( + prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True + ) + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"] + ) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= self.n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = TFSquadV1PostTransform.get_final_text(tok_text, orig_text, self.do_lower_case) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit) + ) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
+ if not nbest: + nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + probs = TFSquadV1PostTransform._compute_softmax(total_scores) + + nbest_json = [] + for i, entry in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + all_predictions[example.qas_id] = nbest_json[0]["text"] + return (all_predictions, label) + + @staticmethod + def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + @staticmethod + def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + import math + + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + @staticmethod + def get_final_text(pred_text, orig_text, do_lower_case): + """Project the tokenized prediction back to the original text.""" + import six + + from tokenization import BasicTokenizer + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for i, c in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + tok_text = " ".join(tokenizer.tokenize(orig_text)) + start_position = tok_text.find(pred_text) + if start_position == -1: + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + return orig_text + + tok_s_to_ns_map = {} + for i, tok_index in six.iteritems(tok_ns_to_s_map): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + return orig_text + + output_text = orig_text[orig_start_position : (orig_end_position + 1)] + return output_text + + def __call__(self, sample): + """Call the get_postprocess_result.""" + return self.get_postprocess_result(sample) + + +class CollectTransform(object): + """Postprocess the predictions, collect data.""" + + def __init__(self, length=10833): + """Initialize 
`CollectTransform` class.""" + self.length = length + self.unique_id = [] + self.start_logits = [] + self.end_logits = [] + self.all_sample = (None, None) + self.idx = 1000000000 + + def __call__(self, sample): + """Collect postprocess data.""" + all_results, label = sample + result_list = [np.expand_dims(result, 0) for result in all_results] + for result in result_list: + if len(self.unique_id) < self.length: + result = result.transpose(2, 0, 1) + self.unique_id.append(self.idx) + self.start_logits.append(result[0]) + self.end_logits.append(result[1]) + self.idx += 1 + if len(self.unique_id) == self.length: + self.all_sample = ([self.unique_id, self.start_logits, self.end_logits], label) + return self.all_sample + + +class TFModelZooCollectTransform(CollectTransform): + """Postprocess the predictions of model zoo, collect data.""" + + def __call__(self, sample): + """Collect postprocess data.""" + all_results, label = sample + if len(all_results) == 1: + all_results = all_results.reshape((2, 1, 384)) + all_results = zip(all_results[0], all_results[1]) + for start_logits, end_logits in all_results: + if len(self.unique_id) < self.length: + self.unique_id.append(self.idx) + self.start_logits.append(start_logits) + self.end_logits.append(end_logits) + self.idx += 1 + if len(self.unique_id) == self.length: + self.all_sample = ([self.unique_id, self.start_logits, self.end_logits], label) + return self.all_sample + + +class TFSquadV1ModelZooPostTransform(TFSquadV1PostTransform): + """Postprocess the predictions of bert on SQuADV1.1. + + See class TFSquadV1PostTransform for more details + """ + + def __init__( + self, + label_file, + vocab_file, + n_best_size=20, + max_seq_length=384, + max_query_length=64, + max_answer_length=30, + do_lower_case=True, + doc_stride=128, + ): + """Initialize `TFSquadV1ModelZooPostTransform` class.""" + super().__init__( + label_file, + vocab_file, + n_best_size, + max_seq_length, + max_query_length, + max_answer_length, + do_lower_case, + doc_stride, + ) + self.length = len(self.eval_features) + self.collect_data = TFModelZooCollectTransform(length=self.length) + + def __call__(self, sample): + """Collect data and get postprocess results.""" + sample = self.collect_data(sample) + return self.get_postprocess_result(sample) + + +class ModelZooBertDataLoader(BaseDataLoader): # pragma: no cover + """This dataloader is designed to satisfy the usage of Model Zoo Bert models.""" + + def _generate_dataloader( + self, + dataset, + batch_size, + last_batch, + collate_fn, + sampler, + batch_sampler, + num_workers, + pin_memory, + shuffle, + distributed, + ): + def bert_collate_fn(batch): + input_ids = [] + input_mask = [] + segment_ids = [] + for elem in batch: + input_ids.append(elem[0][0][0]) + input_mask.append(elem[0][1][0]) + segment_ids.append(elem[0][2][0]) + inputs = [input_ids, input_mask, segment_ids] + return inputs, batch[0][1] + + drop_last = False if last_batch == "rollover" else True + sampler = self._generate_sampler(dataset, distributed) + self.batch_sampler = BatchSampler(sampler, batch_size, drop_last) + self.fetcher = IterableFetcher(dataset, bert_collate_fn, drop_last, distributed) + + inputs = [] + for batched_indices in self.batch_sampler: + try: + data = self.fetcher(batched_indices) + yield data + except StopIteration: + return diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/main.py b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/main.py new file mode 100644 index 
00000000000..7f99b3507fc --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/main.py @@ -0,0 +1,138 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" +import os +import time + +import numpy as np +import tensorflow as tf + +from data_process import SquadF1, ModelZooBertDataset, TFSquadV1ModelZooPostTransform, ModelZooBertDataLoader + +flags = tf.compat.v1.flags +FLAGS = flags.FLAGS + +## Required parameters +flags.DEFINE_string( + 'input_model', None, 'Run inference with specified pb graph.') + +flags.DEFINE_string( + 'output_model', None, 'The output model of the quantized model.') + +flags.DEFINE_string( + 'mode', 'performance', 'define benchmark mode for accuracy or performance') + +flags.DEFINE_bool( + 'tune', False, 'whether to tune the model') + +flags.DEFINE_bool( + 'benchmark', False, 'whether to benchmark the model') + +flags.DEFINE_bool( + 'strip_iterator', False, 'whether to strip the iterator of the model') + +flags.DEFINE_string('dataset_location', None, + 'location of calibration dataset and evaluate dataset') + +flags.DEFINE_integer("batch_size", 64, "run batch size") + +flags.DEFINE_integer("iters", 100, "The iteration used for benchmark.") + + +def evaluate(model, dataloader, metric, postprocess): + """Custom evaluate function to estimate the accuracy of the bert model. + + Args: + model (tf.Graph_def): The input model graph + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow.quantization.utils.utility import iterator_sess_run + from neural_compressor.tensorflow.utils import Model, BaseModel + if not isinstance(model, BaseModel): + model = Model(model) + model.input_tensor_names = ['input_ids', 'input_mask', 'segment_ids'] + model.output_tensor_names = ['start_logits', 'end_logits'] + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + iteration = -1 + if FLAGS.benchmark and FLAGS.mode == 'performance': + iteration = FLAGS.iters + + warmup = 5 + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + assert len(input_tensor) == len(inputs), \ + 'inputs len must equal with input_tensor' + feed_dict = dict(zip(input_tensor, inputs)) + start_time = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + latency_list.append(time.time() - start_time) + predictions, labels = postprocess((predictions, labels)) + metric.update(predictions, labels) + if idx + 1 == iteration: + break + + latency = np.array(latency_list[warmup:]).mean() / FLAGS.batch_size + + if FLAGS.benchmark and FLAGS.mode == 'performance': + print("Batch size = {}".format(FLAGS.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + acc = metric.result() + return acc + +def main(_): + tf.compat.v1.disable_eager_execution() + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) + + data_path = os.path.join(FLAGS.dataset_location, 'eval.tf_record') + label_path = os.path.join(FLAGS.dataset_location, 'dev-v1.1.json') + vocab_path = os.path.join(FLAGS.dataset_location, 'vocab.txt') + + dataset = ModelZooBertDataset(root=data_path, label_file=label_path) + dataloader = ModelZooBertDataLoader(dataset=dataset, batch_size=FLAGS.batch_size) + + def eval(model): + metric = SquadF1() + postprocess = TFSquadV1ModelZooPostTransform(label_file=label_path, vocab_file=vocab_path) + return evaluate(model, dataloader, metric, postprocess) + + if FLAGS.benchmark: + if FLAGS.mode == 'performance': + eval(FLAGS.input_model) + elif FLAGS.mode == 'accuracy': + acc_result = eval(FLAGS.input_model) + print("Batch size = %d" % dataloader.batch_size) + print("Accuracy: %.5f" % acc_result) + + elif FLAGS.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + model = Model(FLAGS.input_model) + model.input_tensor_names = ['input_ids', 'input_mask', 'segment_ids'] + model.output_tensor_names = ['start_logits', 'end_logits'] + quant_config = StaticQuantConfig() + q_model = quantize_model(model, quant_config, dataloader) + q_model.save(FLAGS.output_model) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/prepare_dataset.sh b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/prepare_dataset.sh new file mode 100644 index 00000000000..acae8ce944d --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/prepare_dataset.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# set -x + +OUTPUT_DIR="./data" + +help() +{ + cat <<- EOF + Desc: Prepare bert dataset + -h --help help info + --output_dir Output data directory + default: './data' +EOF + exit 0 +} + +function main { + init_params "$@" + convert_dataset +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --output_dir=*) + OUTPUT_DIR=$(echo $var |cut -f2 -d=) + ;; + -h|--help) help + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done +} + +# convert dataset +function convert_dataset { + if [ ! -d ${OUTPUT_DIR} ]; then + echo '${OUTPUT_DIR} already exists, please check...' 
+ fi + wget https://storage.googleapis.com/bert_models/2019_05_30/wwm_uncased_L-24_H-1024_A-16.zip + unzip wwm_uncased_L-24_H-1024_A-16.zip + wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -P wwm_uncased_L-24_H-1024_A-16 + mv wwm_uncased_L-24_H-1024_A-16 ${OUTPUT_DIR} + +} + +main "$@" + diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..d42132a4e87 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/requirements.txt @@ -0,0 +1 @@ +intel-tensorflow>=2.12.0 \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..aa8d269a79a --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_benchmark.sh @@ -0,0 +1,50 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + iters=100 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run_tuning +function run_benchmark { + + python main.py \ + --input_model=${input_model} \ + --mode=${mode} \ + --dataset_location=${dataset_location} \ + --batch_size=${batch_size} \ + --benchmark \ + +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..ddc30b40177 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/run_quant.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# set -x + +function main { + + init_params "$@" + + run_tuning + +} + +# init params +function init_params { + batch_size=64 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input_model=${input_model} \ + --output_model=${output_model} \ + --dataset_location=${dataset_location} \ + --batch_size=${batch_size} \ + --tune \ + +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py new file mode 100644 index 00000000000..77c3175db07 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/bert_large_squad_model_zoo/quantization/ptq/tokenization.py @@ -0,0 +1,402 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Tokenization classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import unicodedata +import six +import tensorflow as tf + + +def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): + """Checks whether the casing config is consistent with the checkpoint name.""" + + # The casing has to be passed in by the user and there is no explicit check + # as to whether it matches the checkpoint. The casing information probably + # should have been stored in the bert_config.json file, but it's not, so + # we have to heuristically detect it to validate. + + if not init_checkpoint: + return + + m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) + if m is None: + return + + model_name = m.group(1) + + lower_models = [ + "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", + "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" + ] + + cased_models = [ + "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", + "multi_cased_L-12_H-768_A-12" + ] + + is_bad_config = False + if model_name in lower_models and not do_lower_case: + is_bad_config = True + actual_flag = "False" + case_name = "lowercased" + opposite_flag = "True" + + if model_name in cased_models and do_lower_case: + is_bad_config = True + actual_flag = "True" + case_name = "cased" + opposite_flag = "False" + + if is_bad_config: + raise ValueError( + "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " + "However, `%s` seems to be a %s model, so you " + "should pass in `--do_lower_case=%s` so that the fine-tuning matches " + "how the model was pre-training. If this error is wrong, please " + "just comment out this check." % (actual_flag, init_checkpoint, + model_name, case_name, opposite_flag)) + + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def printable_text(text): + """Returns text encoded in a way suitable for print or `tf.logging`.""" + + # These functions want `str` for both Python2 and Python3, but in one case + # it's a Unicode string and in the other it's a byte string. 
+ if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return text.encode("utf-8") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + with tf.io.gfile.GFile(vocab_file, "r") as reader: + while True: + token = convert_to_unicode(reader.readline()) + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + +def convert_by_vocab(vocab, items): + """Converts a sequence of [tokens|ids] using the vocab.""" + output = [] + for item in items: + output.append(vocab[item]) + return output + + +def convert_tokens_to_ids(vocab, tokens): + return convert_by_vocab(vocab, tokens) + + +def convert_ids_to_tokens(inv_vocab, ids): + return convert_by_vocab(inv_vocab, ids) + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class FullTokenizer(object): + """Runs end-to-end tokenziation.""" + + def __init__(self, vocab_file, do_lower_case=True): + self.vocab = load_vocab(vocab_file) + self.inv_vocab = {v: k for k, v in self.vocab.items()} + self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + + return split_tokens + + def convert_tokens_to_ids(self, tokens): + return convert_by_vocab(self.vocab, tokens) + + def convert_ids_to_tokens(self, ids): + return convert_by_vocab(self.inv_vocab, ids) + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, do_lower_case=True): + """Constructs a BasicTokenizer. + + Args: + do_lower_case: Whether to lower case the input. + """ + self.do_lower_case = do_lower_case + + def tokenize(self, text): + """Tokenizes a piece of text.""" + text = convert_to_unicode(text) + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
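+        # _tokenize_chinese_chars surrounds every CJK codepoint with whitespace so each
+        # Chinese character is treated as an individual token by the whitespace split below.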
+ text = self._tokenize_chinese_chars(text) + + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if self.do_lower_case: + token = token.lower() + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text): + """Splits punctuation on a piece of text.""" + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ((cp >= 0x4E00 and cp <= 0x9FFF) or # + (cp >= 0x3400 and cp <= 0x4DBF) or # + (cp >= 0x20000 and cp <= 0x2A6DF) or # + (cp >= 0x2A700 and cp <= 0x2B73F) or # + (cp >= 0x2B740 and cp <= 0x2B81F) or # + (cp >= 0x2B820 and cp <= 0x2CEAF) or + (cp >= 0xF900 and cp <= 0xFAFF) or # + (cp >= 0x2F800 and cp <= 0x2FA1F)): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xfffd or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenziation.""" + + def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """Tokenizes a piece of text into its word pieces. + + This uses a greedy longest-match-first algorithm to perform tokenization + using the given vocabulary. + + For example: + input = "unaffable" + output = ["un", "##aff", "##able"] + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through `BasicTokenizer. + + Returns: + A list of wordpiece tokens. 
+ """ + + text = convert_to_unicode(text) + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + +def _is_whitespace(char): + """Checks whether `chars` is a whitespace character.""" + # \t, \n, and \r are technically control characters but we treat them + # as whitespace since they are generally considered as such. + if char == " " or char == "\t" or char == "\n" or char == "\r": + return True + cat = unicodedata.category(char) + if cat == "Zs": + return True + return False + + +def _is_control(char): + """Checks whether `chars` is a control character.""" + # These are technically control characters but we count them as whitespace + # characters. + if char == "\t" or char == "\n" or char == "\r": + return False + cat = unicodedata.category(char) + if cat in ("Cc", "Cf"): + return True + return False + + +def _is_punctuation(char): + """Checks whether `chars` is a punctuation character.""" + cp = ord(char) + # We treat all non-letter/number ASCII as punctuation. + # Characters such as "^", "$", and "`" are not in the Unicode + # Punctuation class but we treat them as punctuation anyways, for + # consistency. + if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/README.md b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/README.md new file mode 100644 index 00000000000..7a8c22631e0 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/README.md @@ -0,0 +1,141 @@ +Step-by-Step +============ + +This document is used to list steps of reproducing TensorFlow Intel® Neural Compressor smooth quantization of language models gpt-j-6B. + +# Prerequisite + +## 1. Environment + +### Installation +```shell +# Install Intel® Neural Compressor +pip install neural-compressor +pip install -r requirements.txt +``` + +## 2. Prepare Pretrained model +Run the follow script to download gpt-j-6B saved_model to ```./gpt-j-6B```: + ``` +bash prepare_model.sh + ``` + +## 3. Install TensorFlow 2.11.dev202242 +Build a TensorFlow pip package from [intel-tensorflow spr_ww42 branch](https://github.com/Intel-tensorflow/tensorflow/tree/spr_ww42) and install it. How to build a TensorFlow pip package from source please refer to this [tutorial](https://www.tensorflow.org/install/source). + +The performance of int8 gpt-j-6B would be better once intel-tensorflow for gnr is released. + +## 4. Prepare Dataset +The dataset will be automatically loaded. 
+
+# Run
+
+## Smooth Quantization
+
+```shell
+bash run_quant.sh --input_model=<fp32_model_path> --output_model=<int8_model_path>
+```
+
+## Benchmark
+
+### Evaluate Performance
+
+```shell
+bash run_benchmark.sh --input_model=<model_path> --mode=performance
+```
+
+### Evaluate Accuracy
+
+```shell
+bash run_benchmark.sh --input_model=<model_path> --mode=accuracy
+```
+
+
+Details of enabling Intel® Neural Compressor on gpt-j-6B for TensorFlow
+=========================
+
+This is a tutorial on how to enable the gpt-j-6B model with Intel® Neural Compressor.
+## User Code Analysis
+
+The user specifies the fp32 *model*, a calibration dataloader *calib_dataloader* and a custom *eval_func* which encapsulates the evaluation dataloader and metric by itself.
+
+### calib_dataloader Part Adaptation
+The dataloader class below uses a generator function to provide the model with input.
+
+```python
+class MyDataloader:
+    def __init__(self, dataset, batch_size=1):
+        self.dataset = dataset
+        self.batch_size = batch_size
+        self.length = math.ceil(len(dataset) / self.batch_size)
+
+    def generate_data(self, data, pad_token_id=50256):
+        input_ids = tf.convert_to_tensor([data[:-1]], dtype=tf.int32)
+        cur_len = len(data) - 1
+        input_ids_padding = tf.ones((self.batch_size, 1), dtype=tf.int32) * (pad_token_id or 0)
+        generated = tf.concat([input_ids, input_ids_padding], axis=-1)
+        model_kwargs = {'attention_mask': prepare_attention_mask_for_generation(input_ids)}
+        if model_kwargs.get("past_key_values") is None:
+            input_ids = generated[:, :cur_len]
+        else:
+            input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
+        return model_kwargs['attention_mask'], input_ids
+
+    def __iter__(self):
+        labels = None
+        for _, data in enumerate(self.dataset):
+            cur_input = self.generate_data(data)
+            yield (cur_input, labels)
+
+    def __len__(self):
+        return self.length
+```
+
+
+### Code Update
+After the preparation step is done, we add the code for quantization tuning to generate a quantized model.
+
+First, load an INC model wrapper from the path of the gpt-j-6B saved_model.
+```python
+    from neural_compressor.tensorflow import Model
+    model = Model(run_args.input_model, modelType='llm_saved_model')
+```
+
+#### Tune
+
+To apply quantization, a function that maps the names of AutoTrackable variables to graph node names must be defined, so that the two naming formats can be matched.
+```python
+    def weight_name_mapping(name):
+        """The function that maps name from AutoTrackable variables to graph nodes"""
+        name = name.replace('tfgptj_for_causal_lm', 'StatefulPartitionedCall')
+        name = name.replace('kernel:0', 'Tensordot/ReadVariableOp')
+        return name
+```
+
+Please use the following recipe to set up smooth quantization.
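+The recipe pairs `SmoothQuantConfig` (with an alpha value chosen for gpt-j-6B) with a `StaticQuantConfig` for int8 activations and weights; `TuningConfig(max_trials=1)` limits `autotune` to a single quantization trial, and the custom `evaluate` function is passed as `eval_fn`.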
+```python + from neural_compressor.tensorflow import StaticQuantConfig, SmoothQuantConfig, autotune + from neural_compressor.tensorflow.quantization import TuningConfig + from neural_compressor.tensorflow.utils import BaseDataLoader + + calib_dataloader = MyDataloader(mydata, batch_size=run_args.batch_size) + quant_config = [SmoothQuantConfig(alpha=0.52705), StaticQuantConfig(act_dtype="int8", weight_dtype="int8")] + tune_config = TuningConfig(config_set=quant_config, max_trials=1) + model.weight_name_mapping = weight_name_mapping + q_model = autotune(model, + tune_config, + eval_fn=evaluate, + calib_dataloader=calib_dataloader) + q_model.save(run_args.output_model) +``` +#### Benchmark +```python + if run_args.mode == "performance": + evaluate(model.model) + elif run_args.mode == "accuracy": + acc_result = evaluate(model.model) + print("Batch size = %d" % run_args.batch_size) + print("Accuracy: %.5f" % acc_result) +``` + +The Intel® Neural Compressor quantization.fit() function will return a best quantized model under time constraint. \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/main.py b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/main.py new file mode 100644 index 00000000000..faf54b65bd0 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/main.py @@ -0,0 +1,349 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import time +import math +import numpy as np +import logging +import datasets +import tensorflow as tf +from typing import Optional +from itertools import chain +from datasets import load_dataset +from collections import defaultdict +from dataclasses import dataclass, field + +import transformers +from transformers import ( + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + AutoConfig, + AutoTokenizer, + HfArgumentParser, + TFAutoModelForCausalLM, + TFTrainingArguments, + set_seed, +) +from transformers.utils.versions import require_version + +logger = logging.getLogger(__name__) +require_version("datasets>=1.8.0", "To fix: pip install -r requirements.txt") +MODEL_CONFIG_CLASSES = list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.keys()) +MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to use. + """ + + model_name_or_path: Optional[str] = field( + default="EleutherAI/gpt-j-6B", + metadata={ + "help": ( + "The model checkpoint for GPT-J weights." + ) + }, + ) + config_overrides: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Override some existing default config settings when a model is trained from scratch. 
Example: " + "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" + ) + }, + ) + checkpoint: Optional[str] = field( + default=None, + metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) + precision: Optional[str] = field( + default="fp32", + metadata={"help": "The precision that we want to run with."}, + ) + + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for evaluation. + """ + + dataset_name: Optional[str] = field( + default="EleutherAI/lambada_openai", metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + block_size: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Optional input sequence length after tokenization. " + "The training dataset will be truncated in block of this size for training. " + "Default to the model max input length for single sentence inputs (take into account special tokens)." + ) + }, + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + + +@dataclass +class RunningArguments: + """ + Arguments for options of running. + """ + + input_model: Optional[str] = field( + default="./gpt-j-6B", + metadata={ + "help": ( + "The path of input model." + ) + }, + ) + output_model: Optional[str] = field( + default="./nc_int8_gpt-j-6B", + metadata={ + "help": ( + "The path save quantized gpt-j-6B int8 model." + ) + }, + ) + tune: bool = field( + default=False, + metadata={"help": "Whether to apply quantization."}, + ) + benchmark: bool = field( + default=False, + metadata={"help": "Whether to apply benchmarking."}, + ) + mode: Optional[str] = field( + default="performance", + metadata={"help": ("Evaluate performance or accuracy benchmark." 
+ "Set it to be accuracy or performance.")}, + ) + batch_size: Optional[int] = field( + default=1, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + iteration: Optional[int] = field( + default=200, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + + + +parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments, RunningArguments)) +model_args, data_args, train_args, run_args = parser.parse_args_into_dataclasses() + +logger.setLevel(logging.INFO) +datasets.utils.logging.set_verbosity_warning() +transformers.utils.logging.set_verbosity_info() + +if train_args.seed is not None: + set_seed(train_args.seed) + +raw_datasets = load_dataset( + data_args.dataset_name, + data_args.dataset_config_name, + cache_dir=model_args.checkpoint, + use_auth_token=None, + ) + +config = AutoConfig.from_pretrained(model_args.model_name_or_path) +tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) +column_names = raw_datasets["test"].column_names +text_column_name = "text" if "text" in column_names else column_names[0] + +mydata = tokenizer(raw_datasets["test"][text_column_name], return_tensors="np").input_ids + + +def prepare_attention_mask_for_generation( + inputs: tf.Tensor, + pad_token_id=50256, + eos_token_id=50256, +) -> tf.Tensor: + """Generate attention_mask from input_ids. + + Args: + inputs (tf.Tensor): The tensor of input_ids. + + Returns: + attention_mask (tf.Tensor): The tensor of attention_mask. + """ + is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64) + is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id) + is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) + + # Check if input is input_ids and padded -> only then is attention_mask defined + attention_mask = tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32) \ + if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id \ + else tf.ones(inputs.shape[:2], dtype=tf.int32) + + return attention_mask + +class MyDataloader: + def __init__(self, dataset, batch_size=1): + self.dataset = dataset + self.batch_size = batch_size + self.length = math.ceil(len(dataset) / self.batch_size) + + def generate_data(self, data, pad_token_id=50256): + input_ids = tf.convert_to_tensor([data[:-1]], dtype=tf.int32) + cur_len = len(data)-1 + input_ids_padding = tf.ones((self.batch_size, 1), dtype=tf.int32) * (pad_token_id or 0) + generated = tf.concat([input_ids, input_ids_padding], axis=-1) + model_kwargs = {'attention_mask': prepare_attention_mask_for_generation(input_ids)} + if model_kwargs.get("past_key_values") is None: + input_ids = generated[:, :cur_len] + else: + input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) + return model_kwargs['attention_mask'], input_ids + + def __iter__(self): + labels = None + for _, data in enumerate(self.dataset): + cur_input = self.generate_data(data) + yield (cur_input, labels) + + def __len__(self): + return self.length + +def postprocess(outputs, generated, batch_size, cur_len): + """The function that processes the inference outputs to prediction""" + finished_sequences = tf.convert_to_tensor([False]) + next_token_logits = outputs['logits'][:, -1] + # pre-process distribution + next_tokens_scores = next_token_logits + # argmax + next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32) + + pad_token_id = 50256 + eos_token_id = [50256] + 
+ unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + return tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + +def evaluate(model, tf_eval_dataset=mydata): + """Evaluate function that inference the model to apply calibration or benchmarking. + + Args: + model (tf.python.trackable.autotrackable): The model to be evaluated. + The object is usually gotten by using tf.saved_model.load(model_dir) API. + + Returns: + accuracy (float): The accuracy result. + """ + warmup = 5 + batch_size = run_args.batch_size + pad_token_id = 50256 + iteration = run_args.iteration + correct = 0 + latency_list = [] + from neural_compressor.tensorflow.utils import BaseModel + + if isinstance(model, BaseModel): + model = model.model + infer = model.signatures["serving_default"] + for idx, data in enumerate(tf_eval_dataset): + input_ids = tf.convert_to_tensor([data[:-1]], dtype=tf.int32) + cur_len = len(data)-1 + input_ids_padding = tf.ones((batch_size, 1), dtype=tf.int32) * (pad_token_id or 0) + generated = tf.concat([input_ids, input_ids_padding], axis=-1) + input_ids = generated[:, :cur_len] + attention_mask = prepare_attention_mask_for_generation(input_ids) + inputs = {'input_ids': input_ids, 'attention_mask': attention_mask} + + start = time.time() + outputs = infer(**inputs) + end = time.time() + dur = end-start + + predictions = postprocess(outputs, generated, batch_size, cur_len) + if data[-1] == predictions[0][-1].numpy(): + correct+=1 + + latency_list.append(dur) + if idx >= iteration: + break + latency = np.array(latency_list[warmup:]).mean() / 1 + acc = correct/(iteration+1) + if run_args.benchmark and run_args.mode == 'performance': + print("Batch size = {}".format(run_args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + return acc + +def weight_name_mapping(name): + """The function that maps name from AutoTrackable variables to graph nodes""" + name = name.replace('tfgptj_for_causal_lm', 'StatefulPartitionedCall') + name = name.replace('kernel:0', 'Tensordot/ReadVariableOp') + return name + +def main(): + with train_args.strategy.scope(): + options = tf.data.Options() + options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + from neural_compressor.tensorflow import Model + model = Model(run_args.input_model, modelType='llm_saved_model') + + if run_args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, SmoothQuantConfig, autotune + from neural_compressor.tensorflow.quantization import TuningConfig + from neural_compressor.tensorflow.utils import BaseDataLoader + + calib_dataloader = MyDataloader(mydata, batch_size=run_args.batch_size) + quant_config = [SmoothQuantConfig(alpha=0.52705), StaticQuantConfig(act_dtype="int8", weight_dtype="int8")] + tune_config = TuningConfig(config_set=quant_config, max_trials=1) + model.weight_name_mapping = weight_name_mapping + q_model = autotune(model, + tune_config, + eval_fn=evaluate, + calib_dataloader=calib_dataloader) + q_model.save(run_args.output_model) + if run_args.benchmark: + if run_args.mode == "performance": + evaluate(model.model) + elif run_args.mode == "accuracy": + acc_result = evaluate(model.model) + print("Batch size = %d" % run_args.batch_size) + print("Accuracy: %.5f" % acc_result) + +if __name__ == "__main__": + main() diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/prepare_model.py b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/prepare_model.py new file mode 100644 index 00000000000..cb4cd7f3f29 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/prepare_model.py @@ -0,0 +1,23 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from transformers import AutoTokenizer, TFAutoModelForCausalLM + +tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B") +model = TFAutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") +model.save_pretrained("./gpt-j-6B", saved_model=True) \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/prepare_model.sh b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/prepare_model.sh new file mode 100644 index 00000000000..67e59f983f5 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/prepare_model.sh @@ -0,0 +1,6 @@ +pip install transformers==4.25.0 +python prepare_model.py +mv ./gpt-j-6B/saved_model/1 ./ +rm -r ./gpt-j-6B +mv ./1 ./gpt-j-6B +pip install transformers==4.35 \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt new file mode 100644 index 00000000000..23c79d8bbd3 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt @@ -0,0 +1,4 @@ +tensorflow==2.12 +transformers +datasets==2.17 +numpy \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/run_benchmark.sh b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/run_benchmark.sh new file mode 100644 index 00000000000..75196199bce --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/run_benchmark.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=1 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + python main.py \ + --input_model ${input_model} \ + --mode ${mode} \ + --batch_size ${batch_size} \ + --benchmark \ + --output_dir "./outputs" + +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/run_quant.sh b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/run_quant.sh new file mode 100644 index 00000000000..e8ad1f1dc19 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/run_quant.sh @@ -0,0 +1,36 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input_model=${input_model} \ + --output_model=${output_model} \ + --output_dir="./outputs" \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/README.md b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/README.md new file mode 100644 index 00000000000..fa45adbd5ef --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/README.md @@ -0,0 +1,52 @@ 
+Step-by-Step +============ + +This document is used to list steps of reproducing TensorFlow Intel® Neural Compressor quantization and smooth quantization of language models such as OPT and GPT2. + +## Prerequisite + +```shell +# Install Intel® Neural Compressor +pip install neural-compressor +pip install -r requirements +``` +## Run + + +### Basic quantization + +``` +python main.py --model_name_or_path +``` + +`` can be following: + +- gpt2-medium +- facebook/opt-125m + +### Smooth quant + +```shell +bash run_quant.sh --input_model= +``` + +Or you can use + +``` +python main.py --model_name_or_path --sq +``` + +## Benchmark + +### Get the FP32 performance + +```shell +bash run_benchmark.sh --input_model= +``` + +### Get the INT8 performance + +```shell +bash run_benchmark.sh --input_model= --int8=true +``` + diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/benchmark.py b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/benchmark.py new file mode 100644 index 00000000000..673d50c034f --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/benchmark.py @@ -0,0 +1,190 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os.path +import transformers +import tensorflow as tf +from tqdm import tqdm +import sys +import argparse +from datasets import load_dataset +import numpy as np +import time + +sys.path.insert(0, './') + +parser = argparse.ArgumentParser() +parser.add_argument('--int8', action='store_true', help="eval fp32 model or int8 model") +parser.add_argument('--model_name_or_path', type=str, default='facebook/opt-125m') +parser.add_argument('--batch_size', type=int, default=16) +parser.add_argument('--warmup', type=int, default=10) +args = parser.parse_args() + +class Evaluator: + def __init__(self, dataset, tokenizer, device, batch_size=args.batch_size): + self.dataset = dataset + self.tokenizer = tokenizer + self.device = device + self.dataloader = INCDataloader(dataset, tokenizer, batch_size, device) + + def evaluate(self, model): + # model.eval() + # The task is to predict the last word of the input. 
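+        # Accuracy is exact match on the final word: take the logits at the last real
+        # (non-padded) position, argmax over the vocabulary, and compare with the stored label.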
+ total, hit = 0, 0 + index = 1 + for input_ids, label, label_indices in tqdm(self.dataloader): + # TFCausalLMOutputWithPast len: 2 + # first element shape (16, 196, 50272) + # second element shape (16, 12, 196, 64) + outputs = model(input_ids) + last_token_logits = outputs[0].numpy()[np.arange(len(label_indices)), label_indices, :] + pred = last_token_logits.argmax(axis=-1) + total += label.shape[0] + hit += (pred == label.numpy()).sum().item() + index += 1 + acc = hit / total + print(acc, flush=True) + return acc + + def get_attention_mask(self, input_ids): + return tf.constant(1 - (input_ids==1).numpy().astype(int)) + + def evaluate_tf_v1(self, model): + total, hit = 0, 0 + index = 1 + infer = model.signatures["serving_default"] + overall_infer_duration = 0 + for input_ids, label, label_indices in tqdm(self.dataloader): + attention_mask = self.get_attention_mask(input_ids) + input_ids = tf.constant(input_ids.numpy(), dtype=infer.inputs[0].dtype) + attention_mask = tf.constant(attention_mask.numpy(), dtype=infer.inputs[0].dtype) + start = time.time() + results = infer(input_ids=input_ids, attention_mask=attention_mask) # len: 25 Identity: [16, 196, 50272], Identity_1: [16, 12, 196, 64] + batch_infer_time = time.time() - start + if index > args.warmup: + overall_infer_duration += batch_infer_time + last_token_logits = results['Identity'].numpy()[np.arange(len(label_indices)), label_indices, :] + pred = last_token_logits.argmax(axis=-1) + total += label.shape[0] + hit += (pred == label.numpy()).sum().item() + index += 1 + acc = hit / total + print("\nEvaluation result: ") + print(f"Batch size = {args.batch_size}") + print(f"Accuracy: {acc}") + print( + f"Throughput: {(len(self.dataloader) - args.warmup * args.batch_size) / overall_infer_duration} samples/sec" + ) + +class INCDataloader: + # for_calib=True in quantization, only input_id is needed, =False in evaluation need label + def __init__(self, dataset, tokenizer, batch_size=1, device='cpu', for_calib=False): + self.dataset = dataset + self.tokenizer = tokenizer + self.device = device + self.batch_size = batch_size + self.for_calib = for_calib + import math + self.length = math.ceil(len(dataset) / self.batch_size) # batch number + self.pad_len = 196 + + # tokenize the dataset + def tokenize_function(examples): + example = self.tokenizer(examples['text']) + return example + + self.dataset = self.dataset.map(tokenize_function, batched=True) + self.dataset.set_format(type='tensorflow', columns=['input_ids']) + def get_attention_mask(self, input_ids): + return 1 - (input_ids==1).numpy().astype(int) + def pad_input(self, input): # input: a record + input_id = input['input_ids'] + if input_id.numpy().shape[0] > self.pad_len: # truncate the sequence to pad_len if the sequence is longer than pad_len + input_id = input_id[:self.pad_len] + label = input_id[-1] + pad_len = self.pad_len - input_id.numpy().shape[0] + label_index = -2 - pad_len # last logit index + input_id = tf.pad(input_id, tf.constant([[0,pad_len]]), constant_values=1) + input_id = tf.expand_dims(input_id, axis=0) + label = tf.expand_dims(label, axis=0) + return (input_id, label, label_index) + + def __iter__(self): + if self.for_calib: + labels = None + # label_indices = None + for idx, record in enumerate(self.dataset): + input_id, label, label_index = self.pad_input(record) + attention_mask = self.get_attention_mask(input_id) + # compose attention_mask and input_id together + # during the calibration, it requires to yield a + # cur_input = tf.constant(np.append(attention_mask, 
input_id.numpy(), axis=0)) + cur_input = {"input_ids": input_id.numpy(), "attention_mask": attention_mask} + assert self.batch_size == 1 + yield (cur_input, label) + else: + input_ids = None + labels = None + label_indices = None + for idx, record in enumerate(self.dataset): + input_id, label, label_index = self.pad_input(record) + if input_ids is None: + input_ids = input_id + labels = label + label_indices = [label_index] + else: + input_ids = tf.concat([input_ids, input_id], 0) + labels = tf.concat([labels, label], 0) + + label_indices.append(label_index) + + if (idx + 1) % self.batch_size == 0: + yield (input_ids, labels, label_indices) + input_ids = None + labels = None + label_indices = None + if (idx + 1) % self.batch_size != 0: + yield (input_ids, labels, label_indices) + + def __len__(self): + return self.length + +from datasets import load_dataset + +model_name = args.model_name_or_path +tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, +) +eval_dataset = load_dataset('lambada', split='validation') + +evaluator = Evaluator(eval_dataset, tokenizer, 'cpu') + +if args.int8: + print("benchmarking int8 model") + int8_folder = model_name.split('/')[-1] + "_int8" + if not os.path.exists(int8_folder): + print(f"could not find int8 folder {int8_folder} ") + exit() + model = tf.saved_model.load(int8_folder) # tensorflow.python.trackable.autotrackable.AutoTrackable object +else: + print("benchmaking fp32 model") + model = transformers.TFAutoModelForCausalLM.from_pretrained(model_name) + from neural_compressor.tensorflow import Model + + model = Model(model).model # tensorflow.python.trackable.autotrackable.AutoTrackable object + +evaluator.evaluate_tf_v1(model) diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py new file mode 100644 index 00000000000..8f012ceb404 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py @@ -0,0 +1,140 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import transformers +import tensorflow as tf +from tqdm import tqdm +import sys +import argparse +from datasets import load_dataset +import numpy as np + +sys.path.insert(0, './') + +parser = argparse.ArgumentParser() +parser.add_argument('--sq', action='store_true', default=False, help="whether to use smooth quant") +parser.add_argument('--model_name_or_path', type=str, default="facebook/opt-125m") +parser.add_argument('--alpha', type=float, default=0.5, help="alpha value for smoothing.") +parser.add_argument('--log_frequency', type=int, default=100) +parser.add_argument('--batch_size', type=int, default=16) +parser.add_argument('--kl', action='store_true', default=False, help="whether to use kl divergence for calibration") +parser.add_argument('--fallback_add', action='store_true', default=False, help="Whether to add fp32 fallback option" ) +args = parser.parse_args() + +class CustomDataloader: + # for_calib=True in quantization, only input_id is needed, =False in evaluation need label + def __init__(self, dataset, tokenizer, batch_size=1, device='cpu', for_calib=False): + self.dataset = dataset + self.tokenizer = tokenizer + self.device = device + self.batch_size = batch_size + self.for_calib = for_calib + import math + self.length = math.ceil(len(dataset) / self.batch_size) # batch number + self.pad_len = 196 + + # tokenize the dataset + def tokenize_function(examples): + example = self.tokenizer(examples['text']) + return example + + self.dataset = self.dataset.map(tokenize_function, batched=True) + self.dataset.set_format(type='tensorflow', columns=['input_ids']) + def get_attention_mask(self, input_ids): + return 1 - (input_ids==1).numpy().astype(int) + def pad_input(self, input): # input: a record + input_id = input['input_ids'] + if input_id.numpy().shape[0] > self.pad_len: # truncate the sequence to pad_len if the sequence is longer than pad_len + input_id = input_id[:self.pad_len] + label = input_id[-1] + pad_len = self.pad_len - input_id.numpy().shape[0] + label_index = -2 - pad_len # last logit index + input_id = tf.pad(input_id, tf.constant([[0,pad_len]]), constant_values=1) # TODO need to check why pad with 1 + input_id = tf.expand_dims(input_id, axis=0) + label = tf.expand_dims(label, axis=0) + return (input_id, label, label_index) + + def __iter__(self): + if self.for_calib: + labels = None + for idx, record in enumerate(self.dataset): + input_id, label, label_index = self.pad_input(record) + attention_mask = self.get_attention_mask(input_id) + cur_input = {"input_ids": input_id.numpy(), "attention_mask": attention_mask} + assert self.batch_size == 1 + yield (cur_input, label) + else: + input_ids = None + labels = None + label_indices = None + for idx, record in enumerate(self.dataset): + input_id, label, label_index = self.pad_input(record) + if input_ids is None: + input_ids = input_id + labels = label + label_indices = [label_index] + else: + input_ids = tf.concat([input_ids, input_id], 0) + labels = tf.concat([labels, label], 0) + + label_indices.append(label_index) + + if (idx + 1) % self.batch_size == 0: + yield (input_ids, labels, label_indices) + input_ids = None + labels = None + label_indices = None + if (idx + 1) % self.batch_size != 0: + yield (input_ids, labels, label_indices) + + def __len__(self): + return self.length + + +model_name = args.model_name_or_path + +tokenizer = transformers.AutoTokenizer.from_pretrained(model_name) +model = transformers.TFAutoModelForCausalLM.from_pretrained(model_name) + +calib_dataset = load_dataset('lambada', 
split='validation') +calib_dataset = calib_dataset.shuffle(seed=42) +calib_dataloader = CustomDataloader(calib_dataset, tokenizer, device='cpu', batch_size=1, for_calib=True) + +from neural_compressor.tensorflow import StaticQuantConfig, SmoothQuantConfig, quantize_model + +ptq_config = None +quant_config = [] + +if args.sq: + quant_config.append(SmoothQuantConfig(alpha=args.alpha)) +if args.kl: + ptq_config = StaticQuantConfig(act_dtype="int8", weight_dtype="int8", act_algorithm="kl") +if args.fallback_add: + ptq_config = StaticQuantConfig(act_dtype="int8", weight_dtype="int8") + ptq_config.set_local("Add", StaticQuantConfig(act_dtype="fp32", weight_dtype="fp32")) + +if not ptq_config: + ptq_config = StaticQuantConfig(act_dtype="int8", weight_dtype="int8") +quant_config.append(ptq_config) + +q_model = quantize_model(model, + quant_config, + calib_dataloader=calib_dataloader) + +save_model_name = model_name.split("/")[-1] +q_model.save(f"{save_model_name}_int8") diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/requirements.txt b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/requirements.txt new file mode 100644 index 00000000000..3486c09473c --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/requirements.txt @@ -0,0 +1,3 @@ +tensorflow==2.15 +datasets +transformers==4.35 \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/run_benchmark.sh b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/run_benchmark.sh new file mode 100644 index 00000000000..b8fad17eebd --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/run_benchmark.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + int8=false + batch_size=16 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --int8=*) + int8=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_benchmark { + if [[ "${int8}" == "true" ]]; then + python benchmark.py \ + --model_name_or_path ${input_model} \ + --batch_size ${batch_size} \ + --int8 + else + python benchmark.py \ + --model_name_or_path ${input_model} \ + --batch_size ${batch_size} + fi + +} + +main "$@" \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/run_quant.sh b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/run_quant.sh new file mode 100644 index 00000000000..4295060acb9 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/run_quant.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --sq=*) + sq=$(echo ${var} |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + + ext_cmd="" + if [[ ${sq} == "True" ]]; then + ext_cmd="--sq" + fi + python main.py \ + --model_name_or_path ${input_model} \ + ${ext_cmd} +} + +main "$@" \ No newline at end of file diff --git 
a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/README.md b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/README.md new file mode 100644 index 00000000000..544e954371e --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/README.md @@ -0,0 +1,130 @@ +Step-by-Step +============ + +This document lists the steps to reproduce the Intel® Neural Compressor tuning result for the TensorFlow Transformer-LT model. This example can run on Intel CPUs and GPUs. + +## Prerequisite + +### 1. Installation +```shell +# Install Intel® Neural Compressor +pip install neural-compressor +``` + +### 2. Install Tensorflow +```shell +pip install tensorflow +``` +> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). + +### 3. Install Intel Extension for Tensorflow + +#### Quantizing the model on Intel GPU (Mandatory to install ITEX) +Intel Extension for Tensorflow must be installed to quantize the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +Please refer to the [Installation Guides](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-dc.html) for the latest Intel GPU driver installation. +For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers). + +#### Quantizing the model on Intel CPU (Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is currently experimental. It is not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` + +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +### 4. Prepare Dataset & Pretrained model + +```shell +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_2_0/transformer-lt-official-fp32-inference.tar.gz +tar -zxvf transformer-lt-official-fp32-inference.tar.gz +cd transformer-lt-official-fp32-inference +tar -zxvf transformer_lt_official_fp32_pretrained_model.tar.gz +``` + +The dataset is in the `data` folder and the pretrained model is in the `graph` folder. + +#### Automatic dataset & model download +Run the `prepare_dataset_model.sh` script located in `examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq`. + +```shell +cd examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq +bash prepare_dataset_model.sh +``` + +## Run Command +### Quantization + +```shell +bash run_quant.sh --input_model=./model/fp32_graphdef.pb --dataset_location=./data --output_model=./model/int8_graphdef.pb +``` +### Benchmark +```shell +bash run_benchmark.sh --input_model=./model/int8_graphdef.pb --dataset_location=./data --mode=performance + +bash run_benchmark.sh --input_model=./model/int8_graphdef.pb --dataset_location=./data --mode=accuracy --batch_size=1 +``` + +Details of enabling Intel® Neural Compressor on transformer-lt for Tensorflow. +========================= + +This is a tutorial on how to enable the Transformer-LT model with Intel® Neural Compressor. + +### q_dataloader Part Adaption +The dataset class below uses `__getitem__` to provide the model with input. + +```python +class Dataset(object): + def __init__(self, *args): + # initialize dataset related info here + ...
+ + def __getitem__(self, index): + data = self.batch[index] + label = self.ref_lines[index] + return data[0], label + + def __len__(self): + return len(self.batch) +``` + +### Evaluation Part Adaption +We evaluate the model with the BLEU score, whose implementation comes from: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py + +Here we set the input and output tensor names via the *inputs* and *outputs* args. +In this case, we calibrate and quantize the model, using a calibration dataloader initialized from a 'Dataset' object. + +### Code update +After the preparation step is done, we add the tuning code to generate the quantized model. + +#### Tune +```python + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + dataset = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file) + calib_dataloader = BaseDataLoader(dataset=dataset, batch_size=FLAGS.batch_size, collate_fn=collate_fn) + + quant_config = StaticQuantConfig() + model = Model(graph) + model.input_tensor_names = ['input_tensor'] + model.output_tensor_names = ['model/Transformer/strided_slice_19'] + q_model = quantize_model(model, quant_config, calib_dataloader) + try: + q_model.save(FLAGS.output_model) + except Exception as e: + print("Failed to save model due to {}".format(str(e))) +``` +#### Benchmark +```python + if FLAGS.benchmark: + assert FLAGS.mode == 'performance' or FLAGS.mode == 'accuracy', \ + "Benchmark only supports performance or accuracy mode." + acc = eval_func(graph) + if FLAGS.mode == 'accuracy': + print('Accuracy is {:.3f}'.format(acc)) +``` diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/main.py b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/main.py new file mode 100644 index 00000000000..58a93090e7a --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/main.py @@ -0,0 +1,258 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +# +import re +import six +import sys +import time +import numpy as np +import unicodedata +import pandas as pd +from absl import app +import tensorflow as tf +from argparse import ArgumentParser + +from utils import metrics +from utils import tokenizer +from utils.tokenizer import Subtokenizer +from neural_compressor.tensorflow.utils import BaseDataLoader + +flags = tf.compat.v1.flags +FLAGS = flags.FLAGS + +flags.DEFINE_integer("batch_size", 64, + "run batch size") + +flags.DEFINE_string("input_graph", None, + "The path of input model file.") + +flags.DEFINE_string("inputs_file", None, + "File saved to an output file.") + +flags.DEFINE_string("reference_file", None, + "File containing reference translation.") + +flags.DEFINE_string("vocab_file", None, + "Path to subtoken vocabulary file.") + +flags.DEFINE_string("output_model", None, + "The output model of the quantized model.") + +flags.DEFINE_bool('tune', False, + 'whether to tune the model') + +flags.DEFINE_bool('benchmark', False, + 'whether to benchmark the model') + +flags.DEFINE_string("mode", 'performance', + "One of three options: 'performance'/'accuracy'.") + +flags.DEFINE_integer("iters", 100, + "The iteration used for benchmark.") + +class UnicodeRegex(object): + def __init__(self): + punctuation = self.property_chars("P") + self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])") + self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])") + self.symbol_re = re.compile("([" + self.property_chars("S") + "])") + + def property_chars(self, prefix): + return "".join(six.unichr(x) for x in range(sys.maxunicode) + if unicodedata.category(six.unichr(x)).startswith(prefix)) + +uregex = UnicodeRegex() + +def bleu_tokenize(string): + string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) + string = uregex.punct_nondigit_re.sub(r" \1 \2", string) + string = uregex.symbol_re.sub(r" \1 ", string) + return string.split() + +class bleu(object): + def __init__(self): + self.translations = [] + self.labels = [] + + def reset(self): + self.translations = [] + self.labels = [] + + def update(self, pred, label): + if len(label) != len(pred): + raise ValueError("Reference and translation files have different number " + "of lines. 
If training only a few steps (100-200), the " + "translation may be empty.") + label = [x.lower() for x in label] + pred = [x.lower() for x in pred] + label = [bleu_tokenize(x) for x in label] + pred = [bleu_tokenize(x) for x in pred] + self.labels.extend(label) + self.translations.extend(pred) + + def result(self): + return metrics.compute_bleu(self.labels, self.translations) * 100 + +def collate_fn(batch): + """Puts each data field into a pd frame with outer dimension batch size""" + elem = batch[0] + if isinstance(elem, tuple): + batch = zip(*batch) + return [collate_fn(samples) for samples in batch] + elif isinstance(elem, np.ndarray): + return [list(elem) for elem in batch] + elif isinstance(elem, str): + return batch + else: + return pd.DataFrame(batch).fillna(0).values.astype(np.int32) + +def load_graph(file_name): + tf.compat.v1.logging.info('Loading graph from: ' + file_name) + with tf.io.gfile.GFile(file_name, "rb") as f: + graph_def = tf.compat.v1.GraphDef() + graph_def.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.import_graph_def(graph_def, name='') + return graph + +def eval_func(infer_graph, iteration=-1): + if isinstance(infer_graph, tf.compat.v1.GraphDef): + graph = tf.Graph() + with graph.as_default(): + tf.import_graph_def(infer_graph, name='') + infer_graph = graph + + subtokenizer = Subtokenizer(FLAGS.vocab_file) + input_tensor = infer_graph.get_tensor_by_name('input_tensor:0') + output_tensor = infer_graph.get_tensor_by_name(\ + 'model/Transformer/strided_slice_19:0') + + ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file) + dataloader = BaseDataLoader(dataset=ds, batch_size=FLAGS.batch_size, collate_fn=collate_fn) + config = tf.compat.v1.ConfigProto() + config.use_per_session_threads = 1 + config.inter_op_parallelism_threads = 1 + sess = tf.compat.v1.Session(graph=infer_graph, config=config) + iteration=-1 + time_list = [] + bleu_eval = bleu() + predictions = [] + labels = [] + warmup = 10 + if FLAGS.benchmark and FLAGS.mode == 'performance': + iteration = FLAGS.iters + assert iteration >= warmup, 'iteration must be larger than warmup' + + for idx, (input_data, label) in enumerate(dataloader): + if idx < iteration or iteration == -1: + time_start = time.time() + out = sess.run([output_tensor], {input_tensor: input_data}) + duration = time.time() - time_start + time_list.append(duration) + predictions.append(out) + labels.extend(label) + else: + break + + latency = np.array(time_list[warmup: ]).mean() / FLAGS.batch_size + if FLAGS.benchmark and FLAGS.mode == 'performance': + print('Batch size = {}'.format(FLAGS.batch_size)) + print('Latency: {:.3f} ms'.format(latency * 1000)) + print('Throughput: {:.3f} items/sec'.format(1./ latency)) + + # only calculate accuracy when running out all predictions + if iteration == -1: + decode = [] + for i,tr in enumerate(predictions): + for j,itr in enumerate(tr): + for k, otr in enumerate(itr): + try: + index = list(otr).index(tokenizer.EOS_ID) + decode.append(subtokenizer.decode(otr[:index])) + except: + decode.append(subtokenizer.decode(otr)) + bleu_eval.update(decode, labels) + return bleu_eval.result() + +class Dataset(object): + def __init__(self, inputs_file, reference_file, vocab_file): + with tf.io.gfile.GFile(inputs_file) as f: + records = f.read().split("\n") + inputs = [record.strip() for record in records] + if not inputs[-1]: + inputs.pop() + + self.ref_lines = tokenizer.native_to_unicode( + tf.io.gfile.GFile(reference_file).read()).strip().splitlines() + + subtokenizer = 
Subtokenizer(vocab_file) + self.batch = [] + token_lens=[] + for i, line in enumerate(inputs): + enc = subtokenizer.encode(line, add_eos=True) + token_lens.append((i, len(enc))) + + sorted_by_token_input_lens = sorted(token_lens, key=lambda x: x[1], reverse=True) + + sorted_inputs = [None] * len(sorted_by_token_input_lens) + sorted_keys = [0] * len(sorted_by_token_input_lens) + + lines = [] + for i, (index, _) in enumerate(sorted_by_token_input_lens): + sorted_inputs[i] = inputs[index] + sorted_keys[index] = i + enc=subtokenizer.encode(sorted_inputs[i], add_eos=True) + lines.append([enc]) + for i in sorted_keys: + self.batch.append(lines[i]) + + def __getitem__(self, index): + data = self.batch[index] + label = self.ref_lines[index] + return data[0], label + + def __len__(self): + return len(self.batch) + +def main(_): + graph = load_graph(FLAGS.input_graph) + if FLAGS.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + dataset = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file) + calib_dataloader = BaseDataLoader(dataset=dataset, batch_size=FLAGS.batch_size, collate_fn=collate_fn) + + quant_config = StaticQuantConfig() + model = Model(graph) + model.input_tensor_names = ['input_tensor'] + model.output_tensor_names = ['model/Transformer/strided_slice_19'] + q_model = quantize_model(model, quant_config, calib_dataloader) + try: + q_model.save(FLAGS.output_model) + except Exception as e: + print("Failed to save model due to {}".format(str(e))) + + if FLAGS.benchmark: + assert FLAGS.mode == 'performance' or FLAGS.mode == 'accuracy', \ + "Benchmark only supports performance or accuracy mode." + acc = eval_func(graph) + if FLAGS.mode == 'accuracy': + print('Accuracy is {:.3f}'.format(acc)) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/prepare_dataset_model.sh b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/prepare_dataset_model.sh new file mode 100644 index 00000000000..3d47dbad80c --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/prepare_dataset_model.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# set -x + +DATA_DIR="../data" +MODEL_DIR="../model" + +help() +{ + cat <<- EOF + Desc: Prepare bert dataset + -h --help help info + --data_dir Output data directory + default: './data' + --model_dir Output model directory + default: './model' +EOF + exit 0 +} + +function main { + init_params "$@" + prepare +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --data_dir=*) + DATA_DIR=$(echo $var |cut -f2 -d=) + ;; + --model_dir=*) + MODEL_DIR=$(echo $var |cut -f2 -d=) + ;; + -h|--help) help + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done +} + +# prepare data and model +function prepare { + if [ ! -d ${DATA_DIR} ]; then + echo '${DATA_DIR} already exists, please check...' + fi + if [ ! -d ${MODEL_DIR} ]; then + echo '${MODEL_DIR} already exists, please check...' 
+ fi + wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_2_0/transformer-lt-official-fp32-inference.tar.gz + tar -zxvf transformer-lt-official-fp32-inference.tar.gz + cd transformer-lt-official-fp32-inference + tar -zxvf transformer_lt_official_fp32_pretrained_model.tar.gz + mv transformer_lt_official_fp32_pretrained_model/data ${DATA_DIR} + mv transformer_lt_official_fp32_pretrained_model/graph ${MODEL_DIR} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..87bc4c7d5c1 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/run_benchmark.sh @@ -0,0 +1,64 @@ +#!/bin/bash +# set -x + +function main { + + init_params "$@" + + run_benchmark + +} + +# init params +function init_params { + iters=100 + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) + ;; + esac + done + +} + +function define_mode { + + if [[ ${mode} == "accuracy" ]]; then + mode="accuracy" + elif [[ ${mode} == "performance" ]]; then + mode="performance" + else + echo "Error: No such mode: ${mode}" + exit 1 + fi +} + +# run_benchmark +function run_benchmark { + python main.py \ + --input_graph=${input_model} \ + --inputs_file=${dataset_location}/newstest2014.en \ + --reference_file=${dataset_location}/newstest2014.de \ + --vocab_file=${dataset_location}/vocab.txt \ + --benchmark \ + --mode=${mode} \ + --iters=${iters} \ + --batch_size=${batch_size} +} + +main "$@" + diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..2f2075cf346 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/run_quant.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# set -x + +function main { + + init_params "$@" + + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input_graph=${input_model} \ + --inputs_file=${dataset_location}/newstest2014.en \ + --reference_file=${dataset_location}/newstest2014.de \ + --vocab_file=${dataset_location}/vocab.txt \ + --output_model=${output_model} \ + --tune +} + +main "$@" + diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/__init__.py b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/metrics.py b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/metrics.py new file mode 100644 index 00000000000..3e41f985c63 --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/metrics.py @@ -0,0 +1,490 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions for calculating loss, accuracy, and other model metrics. + +Metrics: + - Padded loss, accuracy, and negative log perplexity. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py + - BLEU approximation. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py + - ROUGE score. Source: + https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import math + +import numpy as np +import six +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + + +def _pad_tensors_to_same_length(x, y): + """Pad x and y so that the results have the same length (second dimension).""" + with tf.name_scope("pad_to_same_length"): + x_length = tf.shape(x)[1] + y_length = tf.shape(y)[1] + + max_length = tf.maximum(x_length, y_length) + + x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]]) + y = tf.pad(y, [[0, 0], [0, max_length - y_length]]) + return x, y + + +def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): + """Calculate cross entropy loss while ignoring padding. + + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch_size, length_labels] + smoothing: Label smoothing constant, used to determine the on and off values + vocab_size: int size of the vocabulary + Returns: + Returns the cross entropy loss and weight tensors: float32 tensors with + shape [batch_size, max(length_logits, length_labels)] + """ + with tf.name_scope("loss", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + + # Calculate smoothing cross entropy + with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]): + confidence = 1.0 - smoothing + low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1) + soft_targets = tf.one_hot( + tf.cast(labels, tf.int32), + depth=vocab_size, + on_value=confidence, + off_value=low_confidence) + xentropy = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=logits, labels=soft_targets) + + # Calculate the best (lowest) possible value of cross entropy, and + # subtract from the cross entropy loss. + normalizing_constant = -( + confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) * + low_confidence * tf.log(low_confidence + 1e-20)) + xentropy -= normalizing_constant + + weights = tf.to_float(tf.not_equal(labels, 0)) + return xentropy * weights, weights + + +def _convert_to_eval_metric(metric_fn): + """Wrap a metric fn that returns scores and weights as an eval metric fn. + + The input metric_fn returns values for the current batch. The wrapper + aggregates the return values collected over all of the batches evaluated. 
+ + Args: + metric_fn: function that returns scores and weights for the current batch's + logits and predicted labels. + + Returns: + function that aggregates the scores and weights from metric_fn. + """ + def problem_metric_fn(*args): + """Returns an aggregation of the metric_fn's returned values.""" + (scores, weights) = metric_fn(*args) + + # The tf.metrics.mean function assures correct aggregation. + return tf.metrics.mean(scores, weights) + return problem_metric_fn + + +def get_eval_metrics(logits, labels, params): + """Return dictionary of model evaluation metrics.""" + metrics = { + "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels), + "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)( + logits, labels), + "accuracy_per_sequence": _convert_to_eval_metric( + padded_sequence_accuracy)(logits, labels), + "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)( + logits, labels, params["vocab_size"]), + } + + if not params["use_tpu"]: + # TPU does not support tf.py_func + metrics.update({ + "approx_bleu_score": _convert_to_eval_metric( + bleu_score)(logits, labels), + "rouge_2_fscore": _convert_to_eval_metric( + rouge_2_fscore)(logits, labels), + "rouge_L_fscore": _convert_to_eval_metric( + rouge_l_fscore)(logits, labels), + }) + + # Prefix each of the metric names with "metrics/". This allows the metric + # graphs to display under the "metrics" category in TensorBoard. + metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)} + return metrics + + +def padded_accuracy(logits, labels): + """Percentage of times that predictions matches labels on non-0s.""" + with tf.variable_scope("padded_accuracy", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.to_float(tf.not_equal(labels, 0)) + outputs = tf.to_int32(tf.argmax(logits, axis=-1)) + padded_labels = tf.to_int32(labels) + return tf.to_float(tf.equal(outputs, padded_labels)), weights + + +def padded_accuracy_topk(logits, labels, k): + """Percentage of times that top-k predictions matches labels on non-0s.""" + with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.to_float(tf.not_equal(labels, 0)) + effective_k = tf.minimum(k, tf.shape(logits)[-1]) + _, outputs = tf.nn.top_k(logits, k=effective_k) + outputs = tf.to_int32(outputs) + padded_labels = tf.to_int32(labels) + padded_labels = tf.expand_dims(padded_labels, axis=-1) + padded_labels += tf.zeros_like(outputs) # Pad to same shape. 
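+    # Each label id can match at most one of the k distinct top-k prediction indices, +    # so the reduce_sum over the last axis below yields 1.0 for a top-k hit and 0.0 otherwise.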
+ same = tf.to_float(tf.equal(outputs, padded_labels)) + same_topk = tf.reduce_sum(same, axis=-1) + return same_topk, weights + + +def padded_accuracy_top5(logits, labels): + return padded_accuracy_topk(logits, labels, 5) + + +def padded_sequence_accuracy(logits, labels): + """Percentage of times that predictions matches labels everywhere (non-0).""" + with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]): + logits, labels = _pad_tensors_to_same_length(logits, labels) + weights = tf.to_float(tf.not_equal(labels, 0)) + outputs = tf.to_int32(tf.argmax(logits, axis=-1)) + padded_labels = tf.to_int32(labels) + not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights + axis = list(range(1, len(outputs.get_shape()))) + correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) + return correct_seq, tf.constant(1.0) + + +def padded_neg_log_perplexity(logits, labels, vocab_size): + """Average log-perplexity excluding padding 0s. No smoothing.""" + num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size) + return -num, den + + +def bleu_score(logits, labels): + """Approximate BLEU score computation between labels and predictions. + + An approximate BLEU scoring method since we do not glue word pieces or + decode the ids and tokenize the output. By default, we use ngram order of 4 + and use brevity penalty. Also, this does not have beam search. + + Args: + logits: Tensor of size [batch_size, length_logits, vocab_size] + labels: Tensor of size [batch-size, length_labels] + + Returns: + bleu: int, approx bleu score + """ + predictions = tf.to_int32(tf.argmax(logits, axis=-1)) + # TODO: Look into removing use of py_func + bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32) + return bleu, tf.constant(1.0) + + +def _get_ngrams_with_counter(segment, max_order): + """Extracts all n-grams up to a given maximum order from an input segment. + + Args: + segment: text segment from which n-grams will be extracted. + max_order: maximum length in tokens of the n-grams returned by this + methods. + + Returns: + The Counter containing all n-grams upto max_order in segment + with a count of how many times each n-gram occurred. + """ + ngram_counts = collections.Counter() + for order in xrange(1, max_order + 1): + for i in xrange(0, len(segment) - order + 1): + ngram = tuple(segment[i:i + order]) + ngram_counts[ngram] += 1 + return ngram_counts + + +def compute_bleu(reference_corpus, translation_corpus, max_order=4, + use_bp=True): + """Computes BLEU score of translated segments against one or more references. + + Args: + reference_corpus: list of references for each translation. Each + reference should be tokenized into a list of tokens. + translation_corpus: list of translations to score. Each translation + should be tokenized into a list of tokens. + max_order: Maximum n-gram order to use when computing BLEU score. + use_bp: boolean, whether to apply brevity penalty. + + Returns: + BLEU score. 
+ """ + reference_length = 0 + translation_length = 0 + bp = 1.0 + geo_mean = 0 + + matches_by_order = [0] * max_order + possible_matches_by_order = [0] * max_order + precisions = [] + + for (references, translations) in zip(reference_corpus, translation_corpus): + reference_length += len(references) + translation_length += len(translations) + ref_ngram_counts = _get_ngrams_with_counter(references, max_order) + translation_ngram_counts = _get_ngrams_with_counter(translations, max_order) + + overlap = dict((ngram, + min(count, translation_ngram_counts[ngram])) + for ngram, count in ref_ngram_counts.items()) + + for ngram in overlap: + matches_by_order[len(ngram) - 1] += overlap[ngram] + for ngram in translation_ngram_counts: + possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[ + ngram] + + precisions = [0] * max_order + smooth = 1.0 + + for i in xrange(0, max_order): + if possible_matches_by_order[i] > 0: + precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i] + if matches_by_order[i] > 0: + precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[ + i] + else: + smooth *= 2 + precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) + else: + precisions[i] = 0.0 + + if max(precisions) > 0: + p_log_sum = sum(math.log(p) for p in precisions if p) + geo_mean = math.exp(p_log_sum / max_order) + + if use_bp: + ratio = translation_length / reference_length + bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0 + bleu = geo_mean * bp + return np.float32(bleu) + + +def rouge_2_fscore(logits, labels): + """ROUGE-2 F1 score computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + logits: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge2_fscore: approx rouge-2 f1 score. + """ + predictions = tf.to_int32(tf.argmax(logits, axis=-1)) + # TODO: Look into removing use of py_func + rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32) + return rouge_2_f_score, tf.constant(1.0) + + +def _get_ngrams(n, text): + """Calculates n-grams. + + Args: + n: which n-grams to calculate + text: An array of tokens + + Returns: + A set of n-grams + """ + ngram_set = set() + text_length = len(text) + max_index_ngram_start = text_length - n + for i in range(max_index_ngram_start + 1): + ngram_set.add(tuple(text[i:i + n])) + return ngram_set + + +def rouge_n(eval_sentences, ref_sentences, n=2): + """Computes ROUGE-N f1 score of two text collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Args: + eval_sentences: Predicted sentences. + ref_sentences: Sentences from the reference set + n: Size of ngram. Defaults to 2. + + Returns: + f1 score for ROUGE-N + """ + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + eval_ngrams = _get_ngrams(n, eval_sentence) + ref_ngrams = _get_ngrams(n, ref_sentence) + ref_count = len(ref_ngrams) + eval_count = len(eval_ngrams) + + # Count the overlapping ngrams between evaluated and reference + overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) + overlapping_count = len(overlapping_ngrams) + + # Handle edge case. 
This isn't mathematically correct, but it's good enough + if eval_count == 0: + precision = 0.0 + else: + precision = float(overlapping_count) / eval_count + if ref_count == 0: + recall = 0.0 + else: + recall = float(overlapping_count) / ref_count + f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8))) + + # return overlapping_count / reference_count + return np.mean(f1_scores, dtype=np.float32) + + +def rouge_l_fscore(predictions, labels): + """ROUGE scores computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge_l_fscore: approx rouge-l f1 score. + """ + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), + tf.float32) + return rouge_l_f_score, tf.constant(1.0) + + +def rouge_l_sentence_level(eval_sentences, ref_sentences): + """Computes ROUGE-L (sentence level) of two collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Calculated according to: + R_lcs = LCS(X,Y)/m + P_lcs = LCS(X,Y)/n + F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) + + where: + X = reference summary + Y = Candidate summary + m = length of reference summary + n = length of candidate summary + + Args: + eval_sentences: The sentences that have been picked by the summarizer + ref_sentences: The sentences from the reference set + + Returns: + A float: F_lcs + """ + + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + m = float(len(ref_sentence)) + n = float(len(eval_sentence)) + lcs = _len_lcs(eval_sentence, ref_sentence) + f1_scores.append(_f_lcs(lcs, m, n)) + return np.mean(f1_scores, dtype=np.float32) + + +def _len_lcs(x, y): + """Returns the length of the Longest Common Subsequence between two seqs. + + Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: sequence of words + y: sequence of words + + Returns + integer: Length of LCS between x and y + """ + table = _lcs(x, y) + n, m = len(x), len(y) + return table[n, m] + + +def _lcs(x, y): + """Computes the length of the LCS between two seqs. + + The implementation below uses a DP programming algorithm and runs + in O(nm) time where n = len(x) and m = len(y). + Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: collection of words + y: collection of words + + Returns: + Table of dictionary of coord and len lcs + """ + n, m = len(x), len(y) + table = dict() + for i in range(n + 1): + for j in range(m + 1): + if i == 0 or j == 0: + table[i, j] = 0 + elif x[i - 1] == y[j - 1]: + table[i, j] = table[i - 1, j - 1] + 1 + else: + table[i, j] = max(table[i - 1, j], table[i, j - 1]) + return table + + +def _f_lcs(llcs, m, n): + """Computes the LCS-based F-measure score. + + Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/ + rouge-working-note-v1.3.1.pdf + + Args: + llcs: Length of LCS + m: number of words in reference summary + n: number of words in candidate summary + + Returns: + Float. 
LCS-based F-measure score + """ + r_lcs = llcs / m + p_lcs = llcs / n + beta = p_lcs / (r_lcs + 1e-12) + num = (1 + (beta ** 2)) * r_lcs * p_lcs + denom = r_lcs + ((beta ** 2) * p_lcs) + f_lcs = num / (denom + 1e-12) + return f_lcs diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py new file mode 100644 index 00000000000..33f144b23fd --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer.py @@ -0,0 +1,620 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines Subtokenizer class to encode and decode strings.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import re +import sys +import unicodedata + +import numpy as np +import six +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +PAD = "" +PAD_ID = 0 +EOS = "" +EOS_ID = 1 +RESERVED_TOKENS = [PAD, EOS] + +# Set of characters that will be used in the function _escape_token() (see func +# docstring for more details). +# This set is added to the alphabet list to ensure that all escaped tokens can +# be encoded. +_ESCAPE_CHARS = set(u"\\_u;0123456789") +# Regex for the function _unescape_token(), the inverse of _escape_token(). +# This is used to find "\u", "\\", and "\###;" substrings in the token. +_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") + +_UNDEFINED_UNICODE = u"\u3013" + +# Set contains all letter and number characters. +_ALPHANUMERIC_CHAR_SET = set( + six.unichr(i) for i in xrange(sys.maxunicode) + if (unicodedata.category(six.unichr(i)).startswith("L") or + unicodedata.category(six.unichr(i)).startswith("N"))) + +# min_count is the minimum number of times a subtoken must appear in the data +# before before it is added to the vocabulary. The value is found using binary +# search to obtain the target vocabulary size. +_MIN_MIN_COUNT = 1 # min value to use when binary searching for min_count +_MAX_MIN_COUNT = 1000 # max value to use when binary searching for min_count + + +class Subtokenizer(object): + """Encodes and decodes strings to/from integer IDs.""" + + def __init__(self, vocab_file, reserved_tokens=None): + """Initializes class, creating a vocab file if data_files is provided.""" + tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." 
% + vocab_file) + + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens) + self.alphabet = _generate_alphabet_dict(self.subtoken_list) + self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list) + + self.max_subtoken_length = 0 + for subtoken in self.subtoken_list: + self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken)) + + # Create cache to speed up subtokenization + self._cache_size = 2 ** 20 + self._cache = [(None, None)] * self._cache_size + + @staticmethod + def init_from_files( + vocab_file, files, target_vocab_size, threshold, min_count=None, + file_byte_limit=1e6, reserved_tokens=None, correct_strip=True): + """Create subtoken vocabulary based on files, and save vocab to file. + + Args: + vocab_file: String name of vocab file to store subtoken vocabulary. + files: List of file paths that will be used to generate vocabulary. + target_vocab_size: target vocabulary size to generate. + threshold: int threshold of vocabulary size to accept. + min_count: int minimum count to use for generating the vocabulary. The min + count is the minimum number of times a subtoken should appear in the + files before it is added to the vocabulary. If set to none, this value + is found using binary search. + file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that + will be drawn from the files. + reserved_tokens: List of string tokens that are guaranteed to be at the + beginning of the subtoken vocabulary list. + correct_strip: Whether to convert text to unicode before strip. + + Returns: + Subtokenizer object + """ + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + if tf.io.gfile.exists(vocab_file): + tf.compat.v1.logging.info("Vocab file already exists (%s)" % vocab_file) + else: + tf.compat.v1.logging.info("Begin steps to create subtoken vocabulary...") + token_counts = _count_tokens(files, file_byte_limit, correct_strip) + alphabet = _generate_alphabet_dict(token_counts) + subtoken_list = _generate_subtokens_with_target_vocab_size( + token_counts, alphabet, target_vocab_size, threshold, min_count, + reserved_tokens) + tf.compat.v1.logging.info("Generated vocabulary with %d subtokens." % + len(subtoken_list)) + _save_vocab_file(vocab_file, subtoken_list) + return Subtokenizer(vocab_file) + + def encode(self, raw_string, add_eos=False): + """Encodes a string into a list of int subtoken ids.""" + ret = [] + tokens = _split_string_to_tokens(native_to_unicode(raw_string)) + for token in tokens: + ret.extend(self._token_to_subtoken_ids(token)) + if add_eos: + ret.append(EOS_ID) + return ret + + def _token_to_subtoken_ids(self, token): + """Encode a single token into a list of subtoken ids.""" + cache_location = hash(token) % self._cache_size + cache_key, cache_value = self._cache[cache_location] + if cache_key == token: + return cache_value + + ret = _split_token_to_subtokens( + _escape_token(token, self.alphabet), self.subtoken_to_id_dict, + self.max_subtoken_length) + ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret] + + self._cache[cache_location] = (token, ret) + return ret + + def decode(self, subtokens): + """Converts list of int subtokens ids into a string.""" + if isinstance(subtokens, np.ndarray): + # Note that list(subtokens) converts subtokens to a python list, but the + # items remain as np.int32. This converts both the array and its items. 
+ subtokens = subtokens.tolist() + + if not subtokens: + return "" + + assert isinstance(subtokens, list) and isinstance(subtokens[0], int), ( + "Subtokens argument passed into decode() must be a list of integers.") + + return _unicode_to_native( + _join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens))) + + def _subtoken_ids_to_tokens(self, subtokens): + """Convert list of int subtoken ids to a list of string tokens.""" + escaped_tokens = "".join([ + self.subtoken_list[s] for s in subtokens + if s < len(self.subtoken_list)]) + escaped_tokens = escaped_tokens.split("_") + + # All tokens in the vocabulary list have been escaped (see _escape_token()) + # so each token must be unescaped when decoding. + ret = [] + for token in escaped_tokens: + if token: + ret.append(_unescape_token(token)) + return ret + + +def _save_vocab_file(vocab_file, subtoken_list): + """Save subtokens to file.""" + with tf.io.gfile.GFile(vocab_file, mode="w") as f: + for subtoken in subtoken_list: + f.write("'%s'\n" % _unicode_to_native(subtoken)) + + +def _load_vocab_file(vocab_file, reserved_tokens=None): + """Load vocabulary while ensuring reserved tokens are at the top.""" + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + subtoken_list = [] + with tf.io.gfile.GFile(vocab_file, mode="r") as f: + for line in f: + subtoken = native_to_unicode(line.strip()) + subtoken = subtoken[1:-1] # Remove surrounding single-quotes + if subtoken in reserved_tokens: + continue + subtoken_list.append(native_to_unicode(subtoken)) + return reserved_tokens + subtoken_list + + +def native_to_unicode(s): + """Convert string to unicode (required in Python 2).""" + try: # Python 2 + return s if isinstance(s, unicode) else s.decode("utf-8") + except NameError: # Python 3 + return s + + +def _unicode_to_native(s): + """Convert string from unicode to native format (required in Python 2).""" + try: # Python 2 + return s.encode("utf-8") if isinstance(s, unicode) else s + except NameError: # Python 3 + return s + + +def _split_string_to_tokens(text): + """Splits text to a list of string tokens.""" + if not text: + return [] + ret = [] + token_start = 0 + # Classify each character in the input string + is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text] + for pos in xrange(1, len(text)): + if is_alnum[pos] != is_alnum[pos - 1]: + token = text[token_start:pos] + if token != u" " or token_start == 0: + ret.append(token) + token_start = pos + final_token = text[token_start:] + ret.append(final_token) + return ret + + +def _join_tokens_to_string(tokens): + """Join a list of string tokens into a single string.""" + token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens] + ret = [] + for i, token in enumerate(tokens): + if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]: + ret.append(u" ") + ret.append(token) + return "".join(ret) + + +def _escape_token(token, alphabet): + r"""Replace characters that aren't in the alphabet and append "_" to token. + + Apply three transformations to the token: + 1. Replace underline character "_" with "\u", and backslash "\" with "\\". + 2. Replace characters outside of the alphabet with "\###;", where ### is the + character's Unicode code point. + 3. Appends "_" to mark the end of a token. 
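+  For example, a token u"abc" whose characters are all in the alphabet is left unchanged by steps 1 and 2 and is escaped to u"abc_".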
+ + Args: + token: unicode string to be escaped + alphabet: list of all known characters + + Returns: + escaped string + """ + token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") + ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] + return u"".join(ret) + "_" + + +def _unescape_token(token): + r"""Replaces escaped characters in the token with their unescaped versions. + + Applies inverse transformations as _escape_token(): + 1. Replace "\u" with "_", and "\\" with "\". + 2. Replace "\###;" with the unicode character the ### refers to. + + Args: + token: escaped string + + Returns: + unescaped string + """ + + def match(m): + r"""Returns replacement string for matched object. + + Matched objects contain one of the strings that matches the regex pattern: + r"\\u|\\\\|\\([0-9]+);" + The strings can be '\u', '\\', or '\###;' (### is any digit number). + + m.group(0) refers to the entire matched string ('\u', '\\', or '\###;'). + m.group(1) refers to the first parenthesized subgroup ('###'). + + m.group(0) exists for all match objects, while m.group(1) exists only for + the string '\###;'. + + This function looks to see if m.group(1) exists. If it doesn't, then the + matched string must be '\u' or '\\' . In this case, the corresponding + replacement ('_' and '\') are returned. Note that in python, a single + backslash is written as '\\', and double backslash as '\\\\'. + + If m.group(1) exists, then use the integer in m.group(1) to return a + unicode character. + + Args: + m: match object + + Returns: + String to replace matched object with. + """ + # Check if the matched strings are '\u' or '\\'. + if m.group(1) is None: + return u"_" if m.group(0) == u"\\u" else u"\\" + + # If m.group(1) exists, try and return unicode character. + try: + return six.unichr(int(m.group(1))) + except (ValueError, OverflowError) as _: + return _UNDEFINED_UNICODE + + # Use match function to replace escaped substrings in the token. + return _UNESCAPE_REGEX.sub(match, token) + + +def _count_tokens(files, file_byte_limit=1e6, correct_strip=True): + """Return token counts of words in the files. + + Samples file_byte_limit bytes from each file, and counts the words that appear + in the samples. The samples are semi-evenly distributed across the file. + + Args: + files: List of filepaths + file_byte_limit: Max number of bytes that will be read from each file. + correct_strip: Whether to convert text to unicode before strip. This affects + vocabulary generation for PY2. Sets correct_strip to False in PY2 to + reproduce previous common public result. Sets correct_strip to True will + let PY2 and PY3 get a consistent vocabulary. + + Returns: + Dictionary mapping tokens to the number of times they appear in the sampled + lines from the files. 
+ """ + token_counts = collections.defaultdict(int) + + for filepath in files: + with tf.io.gfile.GFile(filepath, mode="r") as reader: + file_byte_budget = file_byte_limit + counter = 0 + lines_to_skip = int(reader.size() / (file_byte_budget * 2)) + for line in reader: + if counter < lines_to_skip: + counter += 1 + else: + if file_byte_budget < 0: + break + if correct_strip: + line = native_to_unicode(line) + line = line.strip() + file_byte_budget -= len(line) + counter = 0 + + # Add words to token counts + for token in _split_string_to_tokens(native_to_unicode(line)): + token_counts[token] += 1 + return token_counts + + +def _list_to_index_dict(lst): + """Create dictionary mapping list items to their indices in the list.""" + return {item: n for n, item in enumerate(lst)} + + +def _split_token_to_subtokens(token, subtoken_dict, max_subtoken_length): + """Splits a token into subtokens defined in the subtoken dict.""" + ret = [] + start = 0 + token_len = len(token) + while start < token_len: + # Find the longest subtoken, so iterate backwards. + for end in xrange(min(token_len, start + max_subtoken_length), start, -1): + subtoken = token[start:end] + if subtoken in subtoken_dict: + ret.append(subtoken) + start = end + break + else: # Did not break + # If there is no possible encoding of the escaped token then one of the + # characters in the token is not in the alphabet. This should be + # impossible and would be indicative of a bug. + raise ValueError("Was unable to split token \"%s\" into subtokens." % + token) + return ret + + +def _generate_subtokens_with_target_vocab_size( + token_counts, alphabet, target_size, threshold, min_count=None, + reserved_tokens=None): + """Generate subtoken vocabulary close to the target size.""" + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + if min_count is not None: + tf.compat.v1.logging.info( + "Using min_count=%d to generate vocab with target size %d" % + (min_count, target_size)) + return _generate_subtokens( + token_counts, alphabet, min_count, reserved_tokens=reserved_tokens) + + def bisect(min_val, max_val): + """Recursive function to binary search for subtoken vocabulary.""" + cur_count = (min_val + max_val) // 2 + tf.compat.v1.logging.info("Binary search: trying min_count=%d (%d %d)" % + (cur_count, min_val, max_val)) + subtoken_list = _generate_subtokens( + token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens) + + val = len(subtoken_list) + tf.compat.v1.logging.info( + "Binary search: min_count=%d resulted in %d tokens" % (cur_count, val)) + + within_threshold = abs(val - target_size) < threshold + if within_threshold or min_val >= max_val or cur_count < 2: + return subtoken_list + if val > target_size: + other_subtoken_list = bisect(cur_count + 1, max_val) + else: + other_subtoken_list = bisect(min_val, cur_count - 1) + + # Return vocabulary dictionary with the closest number of tokens. 
+ other_val = len(other_subtoken_list) + if abs(other_val - target_size) < abs(val - target_size): + return other_subtoken_list + return subtoken_list + + tf.compat.v1.logging.info("Finding best min_count to get target size of %d" % + target_size) + return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT) + + +def _generate_alphabet_dict(iterable, reserved_tokens=None): + """Create set of characters that appear in any element in the iterable.""" + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + alphabet = {c for token in iterable for c in token} + alphabet |= {c for token in reserved_tokens for c in token} + alphabet |= _ESCAPE_CHARS # Add escape characters to alphabet set. + return alphabet + + +def _count_and_gen_subtokens( + token_counts, alphabet, subtoken_dict, max_subtoken_length): + """Count number of times subtokens appear, and generate new subtokens. + + Args: + token_counts: dict mapping tokens to the number of times they appear in the + original files. + alphabet: list of allowed characters. Used to escape the tokens, which + guarantees that all tokens can be split into subtokens. + subtoken_dict: dict mapping subtokens to ids. + max_subtoken_length: maximum length of subtoken in subtoken_dict. + + Returns: + A defaultdict mapping subtokens to the number of times they appear in the + tokens. The dict may contain new subtokens. + """ + subtoken_counts = collections.defaultdict(int) + for token, count in six.iteritems(token_counts): + token = _escape_token(token, alphabet) + subtokens = _split_token_to_subtokens( + token, subtoken_dict, max_subtoken_length) + + # Generate new subtokens by taking substrings from token. + start = 0 + for subtoken in subtokens: + for end in xrange(start + 1, len(token) + 1): + new_subtoken = token[start:end] + subtoken_counts[new_subtoken] += count + start += len(subtoken) + + return subtoken_counts + + +def _filter_and_bucket_subtokens(subtoken_counts, min_count): + """Return a bucketed list of subtokens that are filtered by count. + + Args: + subtoken_counts: defaultdict mapping subtokens to their counts + min_count: int count used to filter subtokens + + Returns: + List of subtoken sets, where subtokens in set i have the same length=i. + """ + # Create list of buckets, where subtokens in bucket i have length i. + subtoken_buckets = [] + for subtoken, count in six.iteritems(subtoken_counts): + if count < min_count: # Filter out subtokens that don't appear enough + continue + while len(subtoken_buckets) <= len(subtoken): + subtoken_buckets.append(set()) + subtoken_buckets[len(subtoken)].add(subtoken) + return subtoken_buckets + + +def _gen_new_subtoken_list( + subtoken_counts, min_count, alphabet, reserved_tokens=None): + """Generate candidate subtokens ordered by count, and new max subtoken length. + + Add subtokens to the candidate list in order of length (longest subtokens + first). When a subtoken is added, the counts of each of its prefixes are + decreased. Prefixes that don't appear much outside the subtoken are not added + to the candidate list. + + For example: + subtoken being added to candidate list: 'translate' + subtoken_counts: {'translate':10, 't':40, 'tr':16, 'tra':12, ...} + min_count: 5 + + When 'translate' is added, subtoken_counts is updated to: + {'translate':0, 't':30, 'tr':6, 'tra': 2, ...} + + The subtoken 'tra' will not be added to the candidate list, because it appears + twice (less than min_count) outside of 'translate'. 
+ + Args: + subtoken_counts: defaultdict mapping str subtokens to int counts + min_count: int minimum count requirement for subtokens + alphabet: set of characters. Each character is added to the subtoken list to + guarantee that all tokens can be encoded. + reserved_tokens: list of tokens that will be added to the beginning of the + returned subtoken list. + + Returns: + List of candidate subtokens in decreasing count order, and maximum subtoken + length + """ + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + # Create a list of (count, subtoken) for each candidate subtoken. + subtoken_candidates = [] + + # Use bucketted list to iterate through subtokens in order of length. + # subtoken_buckets[i] = set(subtokens), where each subtoken has length i. + subtoken_buckets = _filter_and_bucket_subtokens(subtoken_counts, min_count) + max_subtoken_length = len(subtoken_buckets) - 1 + + # Go through the list in reverse order to consider longer subtokens first. + for subtoken_len in xrange(max_subtoken_length, 0, -1): + for subtoken in subtoken_buckets[subtoken_len]: + count = subtoken_counts[subtoken] + + # Possible if this subtoken is a prefix of another token. + if count < min_count: + continue + + # Ignore alphabet/reserved tokens, which will be added manually later. + if subtoken not in alphabet and subtoken not in reserved_tokens: + subtoken_candidates.append((count, subtoken)) + + # Decrement count of the subtoken's prefixes (if a longer subtoken is + # added, its prefixes lose priority to be added). + for end in xrange(1, subtoken_len): + subtoken_counts[subtoken[:end]] -= count + + # Add alphabet subtokens (guarantees that all strings are encodable). + subtoken_candidates.extend((subtoken_counts.get(a, 0), a) for a in alphabet) + + # Order subtoken candidates by decreasing count. + subtoken_list = [t for _, t in sorted(subtoken_candidates, reverse=True)] + + # Add reserved tokens to beginning of the list. + subtoken_list = reserved_tokens + subtoken_list + return subtoken_list, max_subtoken_length + + +def _generate_subtokens( + token_counts, alphabet, min_count, num_iterations=4, + reserved_tokens=None): + """Create a list of subtokens in decreasing order of frequency. + + Args: + token_counts: dict mapping str tokens -> int count + alphabet: set of characters + min_count: int minimum number of times a subtoken must appear before it is + added to the vocabulary. + num_iterations: int number of iterations to generate new tokens. + reserved_tokens: list of tokens that will be added to the beginning to the + returned subtoken list. + + Returns: + Sorted list of subtokens (most frequent first) + """ + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + # Use alphabet set to create initial list of subtokens + subtoken_list = reserved_tokens + list(alphabet) + max_subtoken_length = 1 + + # On each iteration, segment all words using the subtokens defined in + # subtoken_dict, count how often the resulting subtokens appear, and update + # the dictionary with subtokens w/ high enough counts. + for i in xrange(num_iterations): + tf.compat.v1.logging.info("\tGenerating subtokens: iteration %d" % i) + # Generate new subtoken->id dictionary using the new subtoken list. + subtoken_dict = _list_to_index_dict(subtoken_list) + + # Create dict mapping subtoken->count, with additional subtokens created + # from substrings taken from the tokens. 
+ subtoken_counts = _count_and_gen_subtokens( + token_counts, alphabet, subtoken_dict, max_subtoken_length) + + # Generate new list of subtokens sorted by subtoken count. + subtoken_list, max_subtoken_length = _gen_new_subtoken_list( + subtoken_counts, min_count, alphabet, reserved_tokens) + + tf.compat.v1.logging.info("\tVocab size: %d" % len(subtoken_list)) + return subtoken_list diff --git a/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer_test.py b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer_test.py new file mode 100644 index 00000000000..f757389f30d --- /dev/null +++ b/examples/3.x_api/tensorflow/nlp/transformer_lt/quantization/ptq/utils/tokenizer_test.py @@ -0,0 +1,182 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test Subtokenizer and string helper methods.""" + +import collections +import tempfile + +import tensorflow as tf # pylint: disable=g-bad-import-order + +from official.transformer.utils import tokenizer + + +class SubtokenizerTest(tf.test.TestCase): + + def _init_subtokenizer(self, vocab_list): + temp_file = tempfile.NamedTemporaryFile(delete=False) + with tf.io.gfile.GFile(temp_file.name, "w") as w: + for subtoken in vocab_list: + w.write("'%s'" % subtoken) + w.write("\n") + return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[]) + + def test_encode(self): + vocab_list = ["123_", "test", "ing_"] + subtokenizer = self._init_subtokenizer(vocab_list) + s = "testing 123" + encoded_list = subtokenizer.encode(s) + self.assertEqual([1, 2, 0], encoded_list) + + def test_decode(self): + vocab_list = ["123_", "test", "ing_"] + subtokenizer = self._init_subtokenizer(vocab_list) + encoded_list = [1, 2, 0] # testing 123 + decoded_str = subtokenizer.decode(encoded_list) + self.assertEqual("testing 123", decoded_str) + + def test_subtoken_ids_to_tokens(self): + vocab_list = ["123_", "test", "ing_"] + subtokenizer = self._init_subtokenizer(vocab_list) + encoded_list = [1, 2, 0] # testing 123 + token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list) + self.assertEqual([u"testing", u"123"], token_list) + + +class StringHelperTest(tf.test.TestCase): + + def test_split_string_to_tokens(self): + text = "test? testing 123." + + tokens = tokenizer._split_string_to_tokens(text) + self.assertEqual(["test", "? ", "testing", "123", "."], tokens) + + def test_join_tokens_to_string(self): + tokens = ["test", "? ", "testing", "123", "."] + + s = tokenizer._join_tokens_to_string(tokens) + self.assertEqual("test? 
testing 123.", s) + + def test_escape_token(self): + token = u"abc_\\4" + alphabet = set("abc_\\u;") + + escaped_token = tokenizer._escape_token(token, alphabet) + self.assertEqual("abc\\u\\\\\\52;_", escaped_token) + + def test_unescape_token(self): + escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;" + + unescaped_token = tokenizer._unescape_token(escaped_token) + self.assertEqual( + "Underline: _, Backslash: \\, Unicode: 4", unescaped_token) + + def test_list_to_index_dict(self): + lst = ["test", "strings"] + + d = tokenizer._list_to_index_dict(lst) + self.assertDictEqual({"test": 0, "strings": 1}, d) + + def test_split_token_to_subtokens(self): + token = "abc" + subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3} + max_subtoken_length = 2 + + subtokens = tokenizer._split_token_to_subtokens( + token, subtoken_dict, max_subtoken_length) + self.assertEqual(["ab", "c"], subtokens) + + def test_generate_alphabet_dict(self): + s = ["testing", "123"] + reserved_tokens = ["???"] + + alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens) + self.assertIn("?", alphabet) + self.assertIn("t", alphabet) + self.assertIn("e", alphabet) + self.assertIn("s", alphabet) + self.assertIn("i", alphabet) + self.assertIn("n", alphabet) + self.assertIn("g", alphabet) + self.assertIn("1", alphabet) + self.assertIn("2", alphabet) + self.assertIn("3", alphabet) + + def test_count_and_gen_subtokens(self): + token_counts = {"abc": 5} + alphabet = set("abc_") + subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3} + max_subtoken_length = 2 + + subtoken_counts = tokenizer._count_and_gen_subtokens( + token_counts, alphabet, subtoken_dict, max_subtoken_length) + + self.assertIsInstance(subtoken_counts, collections.defaultdict) + self.assertDictEqual( + {"a": 5, "b": 5, "c": 5, "_": 5, "ab": 5, "bc": 5, "c_": 5, + "abc": 5, "bc_": 5, "abc_": 5}, subtoken_counts) + + def test_filter_and_bucket_subtokens(self): + subtoken_counts = collections.defaultdict( + int, {"a": 2, "b": 4, "c": 1, "ab": 6, "ac": 3, "abbc": 5}) + min_count = 3 + + subtoken_buckets = tokenizer._filter_and_bucket_subtokens( + subtoken_counts, min_count) + + self.assertEqual(len(subtoken_buckets[0]), 0) + self.assertEqual(set("b"), subtoken_buckets[1]) + self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2]) + self.assertEqual(len(subtoken_buckets[3]), 0) + self.assertEqual(set(["abbc"]), subtoken_buckets[4]) + + def test_gen_new_subtoken_list(self): + subtoken_counts = collections.defaultdict( + int, {"translate": 10, "t": 40, "tr": 16, "tra": 12}) + min_count = 5 + alphabet = set("translate") + reserved_tokens = ["reserved", "tokens"] + + subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list( + subtoken_counts, min_count, alphabet, reserved_tokens) + + # Check that "tra" isn"t in the list (its count should be decremented to 2, + # so it should not be added to the canddiate list). 
+ self.assertNotIn("tra", subtoken_list) + + self.assertIn("tr", subtoken_list) + self.assertIn("t", subtoken_list) + + self.assertEqual(len("translate"), max_token_length) + + def test_generate_subtokens(self): + token_counts = {"ab": 1, "bc": 3, "abc": 5} + alphabet = set("abc_") + min_count = 100 + num_iterations = 1 + reserved_tokens = ["reserved", "tokens"] + + vocab_list = tokenizer._generate_subtokens( + token_counts, alphabet, min_count, num_iterations, reserved_tokens) + + # Check that reserved tokens are at the front of the list + self.assertEqual(vocab_list[:2], reserved_tokens) + + # Check that each character in alphabet is in the vocab list + for c in alphabet: + self.assertIn(c, vocab_list) + + +if __name__ == "__main__": + tf.test.main() diff --git a/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/README.md b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/README.md new file mode 100644 index 00000000000..b7b90b6f8ec --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/README.md @@ -0,0 +1,133 @@ +Step-by-Step +============ + +This document is used to list steps of reproducing TensorFlow Object Detection models tuning results. This example can run on Intel CPUs and GPUs. + +# Prerequisite + + +## 1. Environment +Recommend python 3.9 or higher version. + +### Install Intel® Neural Compressor +```shell +pip install neural-compressor +``` + +### Install Intel Tensorflow +```shell +pip install tensorflow +``` +> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). + +### Installation Dependency packages +```shell +cd examples/3.x_api/tensorflow/object_detection +pip install -r requirements.txt +cd faster_rcnn_resnet50/quantization/ptq +``` + +### Install Protocol Buffer Compiler + +`Protocol Buffer Compiler` in version higher than 3.0.0 is necessary ingredient for automatic COCO dataset preparation. To install please follow +[Protobuf installation instructions](https://grpc.io/docs/protoc-installation/#install-using-a-package-manager). + +### Install Intel Extension for Tensorflow + +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers) + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[cpu] +``` + +> **Note**: +> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX. + +## 2. Prepare Model + +```shell +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz +tar -xvf faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz +``` + +## 3. 
Prepare Dataset + + ### Automatic dataset download + + > **_Note: The `prepare_dataset.sh` script works with TF version 1.x._** + + Run the `prepare_dataset.sh` script located in `examples/3.x_api/tensorflow/object_detection`. + + Usage: + ```shell + cd examples/3.x_api/tensorflow/object_detection + . prepare_dataset.sh + cd faster_rcnn_resnet50/quantization/ptq + ``` + + This script will download the *train*, *validation* and *test* COCO datasets. Furthermore, it will convert them to + TensorFlow records using the dedicated script from `https://github.com/tensorflow/models.git`. + + ### Manual dataset download + Download the COCO dataset from the [Official Website](https://cocodataset.org/#download). + + + # Run + + ## 1. Quantization + +  ```shell + bash run_quant.sh --input_model=./faster_rcnn_resnet50_fp32_coco_pretrained_model/frozen_inference_graph.pb --output_model=./tensorflow-faster_rcnn_resnet50-tune.pb --dataset_location=/path/to/dataset/coco_val.record + ``` + + ## 2. Benchmark +  ```shell + # run performance benchmark + bash run_benchmark.sh --input_model=./tensorflow-faster_rcnn_resnet50-tune.pb --dataset_location=/path/to/dataset/coco_val.record --mode=performance + + # run accuracy benchmark + bash run_benchmark.sh --input_model=./tensorflow-faster_rcnn_resnet50-tune.pb --dataset_location=/path/to/dataset/coco_val.record --mode=accuracy + ``` + + Details of enabling Intel® Neural Compressor on faster_rcnn_resnet50 for Tensorflow. + ========================= + + This is a tutorial of how to enable the faster_rcnn_resnet50 model with Intel® Neural Compressor. + ## User Code Analysis + The user specifies the fp32 *model*, a calibration dataset *q_dataloader* and a custom *eval_func* which encapsulates the evaluation dataset and metric by itself. + + For faster_rcnn_resnet50, we apply the custom *eval_func* approach because our philosophy is to enable the model with minimal changes. Hence we need to make two changes to the original code: implement the *q_dataloader* and make the necessary changes to *eval_func*. + + ### Code update + + After the preparation steps are done, we just need to update main.py as below. + ```python + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + quant_config = StaticQuantConfig(weight_granularity="per_channel") + model = Model(args.input_graph) + model.input_tensor_names = ['image_tensor'] + model.output_tensor_names = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"] + q_model = quantize_model(model, quant_config, calib_dataloader) + q_model.save(args.output_model) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_graph) + else: + accuracy = evaluate(args.input_graph) + print('Batch size = %d' % args.batch_size) + print("Accuracy: %.5f" % accuracy) + ``` + + The `quantize_model` function will return a best quantized model within the timeout constraint. diff --git a/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/coco_tools.py b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/coco_tools.py new file mode 100644 index 00000000000..2f9369798df --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/coco_tools.py @@ -0,0 +1,694 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Wrappers for third party pycocotools to be used within object_detection. + +Note that nothing in this file is tensorflow related and thus cannot +be called directly as a slim metric, for example. + +TODO(jonathanhuang): wrap as a slim metric in metrics.py + + +Usage example: given a set of images with ids in the list image_ids +and corresponding lists of numpy arrays encoding groundtruth (boxes and classes) +and detections (boxes, scores and classes), where elements of each list +correspond to detections/annotations of a single image, +then evaluation (in multi-class mode) can be invoked as follows: + + groundtruth_dict = coco_tools.ExportGroundtruthToCOCO( + image_ids, groundtruth_boxes_list, groundtruth_classes_list, + max_num_classes, output_path=None) + detections_list = coco_tools.ExportDetectionsToCOCO( + image_ids, detection_boxes_list, detection_scores_list, + detection_classes_list, output_path=None) + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() +""" + +import copy +import time +from collections import OrderedDict +from typing import Any, Dict, List, Set, Union + +import numpy as np +from pycocotools import coco, cocoeval, mask + +from neural_compressor.utils import logger + + +class COCOWrapper(coco.COCO): + """Wrapper for the pycocotools COCO class. + + Attributes: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + """ + + def __init__(self, dataset: Dict[str, Any], detection_type: str = "bbox"): + """Construct a COCOWrapper. + + See http://mscoco.org/dataset/#format for a description of the format. + By default, the coco.COCO class constructor reads from a JSON file. + This function duplicates the same behavior but loads from a dictionary, + allowing us to perform evaluation without writing to external storage. + + Args: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + + Raises: + ValueError: if detection_type is unsupported. + """ + supported_detection_types = ["bbox", "segmentation"] + if detection_type not in supported_detection_types: + raise ValueError( + "Unsupported detection type: {}. " + "Supported values are: {}".format(detection_type, supported_detection_types) + ) + self._detection_type = detection_type + coco.COCO.__init__(self) + self.dataset = dataset + self.createIndex() + + def LoadAnnotations(self, annotations: list) -> coco.COCO: + """Load annotations dictionary into COCO datastructure. + + See http://mscoco.org/dataset/#format for a description of the annotations + format. As above, this function replicates the default behavior of the API + but does not require writing to external storage. 
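For illustration, here is a minimal sketch of the in-memory structures this wrapper works with; the image id, category, and box values below are made up:

```python
# Made-up COCO-format groundtruth and detections, for illustration only.
groundtruth_dict = {
    "images": [{"id": 1}],
    "categories": [{"id": 1, "name": "person"}],
    "annotations": [
        {"id": 1, "image_id": 1, "category_id": 1,
         "bbox": [10.0, 20.0, 30.0, 40.0],   # COCO format: [xmin, ymin, width, height]
         "area": 1200.0, "iscrowd": 0}
    ],
}
detections_list = [
    {"image_id": 1, "category_id": 1, "bbox": [12.0, 22.0, 28.0, 38.0], "score": 0.9}
]

groundtruth = COCOWrapper(groundtruth_dict)               # no JSON file on disk needed
detections = groundtruth.LoadAnnotations(detections_list)
```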
+ + Args: + annotations: python list holding object detection results where each + detection is encoded as a dict with required keys ['image_id', + 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on + `detection_type`. + + Returns: + a coco.COCO datastructure holding object detection annotations results + + Raises: + ValueError: if (1) annotations is not a list or annotations do not + correspond to the images contained in self. + """ + results = coco.COCO() + results.dataset["images"] = [img for img in self.dataset["images"]] + + logger.info("Load and prepare annotation results.") + tic = time.time() + + if not isinstance(annotations, list): + raise ValueError("annotations is not a list of objects") + annotation_img_ids = [ann["image_id"] for ann in annotations] + if set(annotation_img_ids) != (set(annotation_img_ids) & set(self.getImgIds())): + raise ValueError("Results do not correspond to current coco set") + results.dataset["categories"] = copy.deepcopy(self.dataset["categories"]) + if self._detection_type == "bbox": + for idx, ann in enumerate(annotations): + bb = ann["bbox"] + ann["area"] = bb[2] * bb[3] + ann["id"] = idx + 1 + ann["iscrowd"] = 0 + elif self._detection_type == "segmentation": + for idx, ann in enumerate(annotations): + ann["area"] = mask.area(ann["segmentation"]) + ann["bbox"] = mask.toBbox(ann["segmentation"]) + ann["id"] = idx + 1 + ann["iscrowd"] = 0 + logger.info("DONE (t=%0.2fs)", (time.time() - tic)) + + results.dataset["annotations"] = annotations + results.createIndex() + return results + + +class COCOEvalWrapper(cocoeval.COCOeval): + """Wrapper for the pycocotools COCOeval class. + + To evaluate, create two objects (groundtruth_dict and detections_list) + using the conventions listed at http://mscoco.org/dataset/#format. + Then call evaluation as follows: + + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() + """ + + def __init__( + self, + groundtruth: coco.COCO = None, + detections: coco.COCO = None, + agnostic_mode=False, + iou_type: str = "bbox", + iou_thrs: Union[str, float] = None, + map_points=None, + ): + """Construct a COCOEvalWrapper. + + Note that for the area-based metrics to be meaningful, detection and + groundtruth boxes must be in image coordinates measured in pixels. + + Args: + groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding + groundtruth annotations + detections: a coco.COCO (or coco_tools.COCOWrapper) object holding + detections + agnostic_mode: boolean (default: False). If True, evaluation ignores + class labels, treating all detections as proposals. + iou_thrs: Minimal value for intersection over union that allows to + make decision that prediction bounding box is true positive. + You can specify one float value between 0 to 1 or + string "05:0.05:0.95" for standard COCO thresholds. + iou_type: IOU type to use for evaluation. Supports `bbox` or `segm`. + map_points: The way to calculate mAP. 101 for 101-point interpolated AP, 11 for + 11-point interpolated AP, 0 for area under PR curve. 
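Continuing that sketch, the wrapped objects could then be evaluated with the standard COCO settings described above (IoU thresholds 0.5:0.05:0.95 and 101-point interpolated AP):

```python
# Sketch only: evaluate the groundtruth/detections wrapped in the earlier example.
evaluator = COCOEvalWrapper(
    groundtruth,                # COCOWrapper holding groundtruth annotations
    detections,                 # result of groundtruth.LoadAnnotations(...)
    agnostic_mode=False,
    iou_type="bbox",
    iou_thrs="0.5:0.05:0.95",   # standard COCO IoU sweep
    map_points=101,             # 101-point interpolated AP
)
summary_metrics, per_category_ap = evaluator.ComputeMetrics()
print(summary_metrics["Precision/mAP"])
```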
+ """ + cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type) + if agnostic_mode: + self.params.useCats = 0 + if iou_thrs == "0.5:0.05:0.95": + self.params.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + elif isinstance(iou_thrs, float): + self.params.iouThrs = [iou_thrs] + + if map_points == 101: + self.params.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + if map_points == 11: + self.params.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.1)) + 1, endpoint=True) + if map_points == 0: + self.params.recThrs = [-1] + + def GetCategory(self, category_id: int) -> dict: + """Fetch dictionary holding category information given category id. + + Args: + category_id: integer id + + Returns: + dictionary holding 'id', 'name'. + """ + return self.cocoGt.cats[category_id] + + def GetAgnosticMode(self) -> bool: + """Return whether COCO Eval is configured to evaluate in agnostic mode.""" + return self.params.useCats == 0 + + def GetCategoryIdList(self) -> List[int]: + """Return the list of IDs of all valid categories.""" + return self.params.catIds + + def accumulate(self, p: cocoeval.Params = None): + """Accumulate evaluation results per image and store it to self.eval. + + Args: + p: input params for evaluation + """ + print("Accumulating evaluation results...") + tic = time.time() + if not self.evalImgs: + print("Please run evaluate() first") + # allows input customized parameters + if p is None: + p = self.params + p.catIds = p.catIds if p.useCats == 1 else [-1] + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) if p.useCats else 1 + A = len(p.areaRng) + M = len(p.maxDets) + precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories + recall = -np.ones((T, K, A, M)) + scores = -np.ones((T, R, K, A, M)) + + # create dictionary for future indexing + _pe = self._paramsEval + print("-pe", _pe) + catIds = _pe.catIds if _pe.useCats else [-1] + setK = set(catIds) + setA = set(map(tuple, _pe.areaRng)) + setM = set(_pe.maxDets) + setI = set(_pe.imgIds) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIds) if i in setI] + I0 = len(_pe.imgIds) + A0 = len(_pe.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. 
+ inds = np.argsort(-dtScores, kind="mergesort") + dtScoresSorted = dtScores[inds] + + dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e["gtIgnore"] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + tps = np.logical_and(dtm, np.logical_not(dtIg)) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) + + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + + # calculate precision + if R == 1: + rc = np.concatenate(([0.0], rc, [1.0])) + pr = np.concatenate(([0.0], pr, [0.0])) + + # compute the precision envelope + for i in range(pr.size - 1, 0, -1): + pr[i - 1] = np.maximum(pr[i - 1], pr[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + change_point = np.where(rc[1:] != rc[:-1])[0] + # and sum (\Delta recall) * recall + res = np.sum((rc[change_point + 1] - rc[change_point]) * pr[change_point + 1]) + precision[t, :, k, a, m] = np.array([res]) + else: + q = np.zeros((R,)) + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + except: + pass + precision[t, :, k, a, m] = np.array(q) + + # calculate recall + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # calculate score + ss = np.zeros((R,)) + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + ss[ri] = dtScoresSorted[pi] + except: + pass + scores[t, :, k, a, m] = np.array(ss) + # exit(0) + self.eval = { + "params": p, + "counts": [T, R, K, A, M], + "precision": precision, + "recall": recall, + "scores": scores, + } + toc = time.time() + print("DONE (t={:0.2f}s).".format(toc - tic)) + + def ComputeMetrics( + self, include_metrics_per_category: bool = False, all_metrics_per_category: bool = False + ): # pragma: no cover + """Compute detection metrics. + + Args: + include_metrics_per_category: Whether include metrics per category. + all_metrics_per_category: Whether include all the summery metrics for + each category in per_category_ap. Be careful with setting it to true if + you have more than handful of categories, because it will pollute + your mldash. 
+ + Returns: + A tuple of (summary_metrics, per_category_ap), in which + (1) summary_metrics is a dictionary holding: + 'Precision/mAP': mean average precision over classes averaged over IOU + thresholds ranging from .5 to .95 with .05 increments; + 'Precision/mAP@.50IOU': mean average precision at 50% IOU; + 'Precision/mAP@.75IOU': mean average precision at 75% IOU; + 'Precision/mAP (small)': mean average precision for small objects + (area < 32^2 pixels); + 'Precision/mAP (medium)': mean average precision for medium sized + objects (32^2 pixels < area < 96^2 pixels); + 'Precision/mAP (large)': mean average precision for large objects + (96^2 pixels < area < 10000^2 pixels); + 'Recall/AR@1': average recall with 1 detection; + 'Recall/AR@10': average recall with 10 detections; + 'Recall/AR@100': average recall with 100 detections; + 'Recall/AR@100 (small)': average recall for small objects with 100 + detections; + 'Recall/AR@100 (medium)': average recall for medium objects with 100 + detections; + 'Recall/AR@100 (large)': average recall for large objects with 100 + detections; + and (2) per_category_ap is a dictionary holding category specific results with + keys of the form: 'Precision mAP ByCategory/category' + (without the supercategory part if no supercategories exist). + + For backward compatibility 'PerformanceByCategory' is included in the + output regardless of all_metrics_per_category. If evaluating class-agnostic + mode, per_category_ap is an empty dictionary. + + Raises: + ValueError: If category_stats does not exist. + """ + self.evaluate() + self.accumulate() + self.summarize() + + summary_metrics = OrderedDict( + [ + ("Precision/mAP", self.stats[0]), + ("Precision/mAP@.50IOU", self.stats[1]), + ("Precision/mAP@.75IOU", self.stats[2]), + ("Precision/mAP (small)", self.stats[3]), + ("Precision/mAP (medium)", self.stats[4]), + ("Precision/mAP (large)", self.stats[5]), + ("Recall/AR@1", self.stats[6]), + ("Recall/AR@10", self.stats[7]), + ("Recall/AR@100", self.stats[8]), + ("Recall/AR@100 (small)", self.stats[9]), + ("Recall/AR@100 (medium)", self.stats[10]), + ("Recall/AR@100 (large)", self.stats[11]), + ] + ) + if not include_metrics_per_category: + return summary_metrics, {} + if not hasattr(self, "category_stats"): + raise ValueError("Category stats do not exist") + per_category_ap = OrderedDict([]) + if self.GetAgnosticMode(): + return summary_metrics, per_category_ap + for category_index, category_id in enumerate(self.GetCategoryIdList()): + category = self.GetCategory(category_id)["name"] + # Kept for backward compatilbility + # pylint: disable=no-member + per_category_ap["PerformanceByCategory/mAP/{}".format(category)] = self.category_stats[0][category_index] + if all_metrics_per_category: + per_category_ap["Precision mAP ByCategory/{}".format(category)] = self.category_stats[0][category_index] + per_category_ap["Precision mAP@.50IOU ByCategory/{}".format(category)] = self.category_stats[1][ + category_index + ] + per_category_ap["Precision mAP@.75IOU ByCategory/{}".format(category)] = self.category_stats[2][ + category_index + ] + per_category_ap["Precision mAP (small) ByCategory/{}".format(category)] = self.category_stats[3][ + category_index + ] + per_category_ap["Precision mAP (medium) ByCategory/{}".format(category)] = self.category_stats[4][ + category_index + ] + per_category_ap["Precision mAP (large) ByCategory/{}".format(category)] = self.category_stats[5][ + category_index + ] + per_category_ap["Recall AR@1 ByCategory/{}".format(category)] = 
self.category_stats[6][category_index] + per_category_ap["Recall AR@10 ByCategory/{}".format(category)] = self.category_stats[7][category_index] + per_category_ap["Recall AR@100 ByCategory/{}".format(category)] = self.category_stats[8][category_index] + per_category_ap["Recall AR@100 (small) ByCategory/{}".format(category)] = self.category_stats[9][ + category_index + ] + per_category_ap["Recall AR@100 (medium) ByCategory/{}".format(category)] = self.category_stats[10][ + category_index + ] + per_category_ap["Recall AR@100 (large) ByCategory/{}".format(category)] = self.category_stats[11][ + category_index + ] + + return summary_metrics, per_category_ap + + +def _ConvertBoxToCOCOFormat(box): + """Convert a box in [ymin, xmin, ymax, xmax] format to COCO format. + + This is a utility function for converting from our internal + [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API + i.e., [xmin, ymin, width, height]. + + Args: + box: a numpy array in format of [ymin, xmin, ymax, xmax] + + Returns: + A list of floats, in COCO format, representing [xmin, ymin, width, height] + """ + return [float(box[1]), float(box[0]), float(box[3] - box[1]), float(box[2] - box[0])] + + +def _RleCompress(masks): + """Compresses mask using Run-length encoding provided by pycocotools. + + Args: + masks: uint8 numpy array of shape [mask_height, mask_width] with values in + {0, 1}. + + Returns: + A pycocotools Run-length encoding of the mask. + """ + return mask.encode(np.asfortranarray(masks)) + + +def ExportSingleImageGroundtruthToCoco( + image_id: Union[int, str], + next_annotation_id: int, + category_id_set: Set[str], + groundtruth_boxes: np.array, + groundtruth_classes: np.array, + groundtruth_masks: Union[np.array, None] = None, + groundtruth_is_crowd: Union[np.array, None] = None, +) -> list: + """Export groundtruth of a single image to COCO format. + + This function converts groundtruth detection annotations represented as numpy + arrays to dictionaries that can be ingested by the COCO evaluation API. Note + that the image_ids provided here must match the ones given to + ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in + correspondence - that is: groundtruth_boxes[i, :], and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box. + + Args: + image_id: a unique image identifier either of type integer or string. + next_annotation_id: integer specifying the first id to use for the + groundtruth annotations. All annotations are assigned a continuous integer + id starting from this value. + category_id_set: A set of valid class ids. Groundtruth with classes not in + category_id_set are dropped. + groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4] + groundtruth_classes: numpy array (int) with shape [num_gt_boxes] + groundtruth_masks: optional uint8 numpy array of shape [num_detections, + image_height, image_width] containing detection_masks. + groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes] + indicating whether groundtruth boxes are crowd. + + Returns: + A list of groundtruth annotations for a single image in the COCO format. 
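A hedged usage sketch with made-up values; note how the [ymin, xmin, ymax, xmax] input box comes out in COCO [xmin, ymin, width, height] order:

```python
import numpy as np

# Made-up groundtruth for a single image, for illustration only.
gt_boxes = np.array([[10.0, 20.0, 50.0, 80.0]], dtype=np.float32)  # [ymin, xmin, ymax, xmax]
gt_classes = np.array([1], dtype=np.int32)                         # category id 1

gt_annotations = ExportSingleImageGroundtruthToCoco(
    image_id=1,
    next_annotation_id=1,
    category_id_set={1},
    groundtruth_boxes=gt_boxes,
    groundtruth_classes=gt_classes,
)
# gt_annotations[0]["bbox"] is [20.0, 10.0, 60.0, 40.0], i.e. [xmin, ymin, width, height].
```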
+ + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + if len(groundtruth_classes.shape) != 1: + raise ValueError("groundtruth_classes is " "expected to be of rank 1.") + if len(groundtruth_boxes.shape) != 2: + raise ValueError("groundtruth_boxes is expected to be of " "rank 2.") + if groundtruth_boxes.shape[1] != 4: + raise ValueError("groundtruth_boxes should have " "shape[1] == 4.") + num_boxes = groundtruth_classes.shape[0] + if num_boxes != groundtruth_boxes.shape[0]: + raise ValueError( + "Corresponding entries in groundtruth_classes, " + "and groundtruth_boxes should have " + "compatible shapes (i.e., agree on the 0th dimension)." + "Classes shape: %d. Boxes shape: %d. Image ID: %s" + % (groundtruth_classes.shape[0], groundtruth_boxes.shape[0], image_id) + ) + has_is_crowd = groundtruth_is_crowd is not None + if has_is_crowd and len(groundtruth_is_crowd.shape) != 1: + raise ValueError("groundtruth_is_crowd is expected to be of rank 1.") + groundtruth_list = [] + for i in range(num_boxes): + if groundtruth_classes[i] in category_id_set: + iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0 + export_dict = { + "id": next_annotation_id + i, + "image_id": image_id, + "category_id": int(groundtruth_classes[i]), + "bbox": list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), + "area": float( + (groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) + * (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]) + ), + "iscrowd": iscrowd, + } + if groundtruth_masks is not None: + export_dict["segmentation"] = _RleCompress(groundtruth_masks[i]) + groundtruth_list.append(export_dict) + return groundtruth_list + + +def ExportSingleImageDetectionBoxesToCoco( + image_id: Union[int, str], + category_id_set: Set[int], + detection_boxes: np.array, + detection_scores: np.array, + detection_classes: np.array, +) -> list: + """Export detections of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. Note that the image_ids + provided here must match the ones given to the + ExporSingleImageDetectionBoxesToCoco. We assume that boxes, and classes are in + correspondence - that is: boxes[i, :], and classes[i] + are associated with the same groundtruth annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_boxes: float numpy array of shape [num_detections, 4] containing + detection boxes. + detection_scores: float numpy array of shape [num_detections] containing + scored for the detection boxes. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection boxes. + + Returns: + A list of detection annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_boxes, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. 
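And the matching sketch for the detection side, again with fabricated numbers:

```python
import numpy as np

det_boxes = np.array([[12.0, 22.0, 48.0, 78.0]], dtype=np.float32)  # [ymin, xmin, ymax, xmax]
det_scores = np.array([0.9], dtype=np.float32)
det_classes = np.array([1], dtype=np.int32)

detections_list = ExportSingleImageDetectionBoxesToCoco(
    image_id=1,
    category_id_set={1},
    detection_boxes=det_boxes,
    detection_scores=det_scores,
    detection_classes=det_classes,
)
# Each entry carries image_id, category_id, a COCO-format bbox, and the score.
```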
+ """ + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError("All entries in detection_classes and detection_scores" "expected to be of rank 1.") + if len(detection_boxes.shape) != 2: + raise ValueError("All entries in detection_boxes expected to be of " "rank 2.") + if detection_boxes.shape[1] != 4: + raise ValueError("All entries in detection_boxes should have " "shape[1] == 4.") + num_boxes = detection_classes.shape[0] + if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]: + raise ValueError( + "Corresponding entries in detection_classes, " + "detection_scores and detection_boxes should have " + "compatible shapes (i.e., agree on the 0th dimension). " + "Classes shape: %d. Boxes shape: %d. " + "Scores shape: %d" % (detection_classes.shape[0], detection_boxes.shape[0], detection_scores.shape[0]) + ) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append( + { + "image_id": image_id, + "category_id": int(detection_classes[i]), + "bbox": list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])), + "score": float(detection_scores[i]), + } + ) + return detections_list + + +def ExportSingleImageDetectionMasksToCoco( + image_id: Union[str, int], + category_id_set: Set[int], + detection_masks: np.array, + detection_scores: np.array, + detection_classes: np.array, +) -> list: + """Export detection masks of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. We assume that + detection_masks, detection_scores, and detection_classes are in correspondence + - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i] + are associated with the same annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_masks: uint8 numpy array of shape [num_detections, image_height, + image_width] containing detection_masks. + detection_scores: float numpy array of shape [num_detections] containing + scores for detection masks. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection masks. + + Returns: + A list of detection mask annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_masks, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError("All entries in detection_classes and detection_scores" "expected to be of rank 1.") + num_boxes = detection_classes.shape[0] + if not num_boxes == len(detection_masks) == detection_scores.shape[0]: + raise ValueError( + "Corresponding entries in detection_classes, " + "detection_scores and detection_masks should have " + "compatible lengths and shapes " + "Classes length: %d. Masks length: %d. 
" + "Scores length: %d" % (detection_classes.shape[0], len(detection_masks), detection_scores.shape[0]) + ) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append( + { + "image_id": image_id, + "category_id": int(detection_classes[i]), + "segmentation": _RleCompress(detection_masks[i]), + "score": float(detection_scores[i]), + } + ) + return detections_list diff --git a/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/data_process.py new file mode 100644 index 00000000000..32e55adb3fd --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/data_process.py @@ -0,0 +1,655 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import cv2 +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +interpolation_map = { + "nearest": cv2.INTER_NEAREST, + "bilinear": cv2.INTER_LINEAR, + "bicubic": cv2.INTER_CUBIC, +} + +category_map = { + 1: "person", + 2: "bicycle", + 3: "car", + 4: "motorcycle", + 5: "airplane", + 6: "bus", + 7: "train", + 8: "truck", + 9: "boat", + 10: "traffic light", + 11: "fire hydrant", + 13: "stop sign", + 14: "parking meter", + 15: "bench", + 16: "bird", + 17: "cat", + 18: "dog", + 19: "horse", + 20: "sheep", + 21: "cow", + 22: "elephant", + 23: "bear", + 24: "zebra", + 25: "giraffe", + 27: "backpack", + 28: "umbrella", + 31: "handbag", + 32: "tie", + 33: "suitcase", + 34: "frisbee", + 35: "skis", + 36: "snowboard", + 37: "sports ball", + 38: "kite", + 39: "baseball bat", + 40: "baseball glove", + 41: "skateboard", + 42: "surfboard", + 43: "tennis racket", + 44: "bottle", + 46: "wine glass", + 47: "cup", + 48: "fork", + 49: "knife", + 50: "spoon", + 51: "bowl", + 52: "banana", + 53: "apple", + 54: "sandwich", + 55: "orange", + 56: "broccoli", + 57: "carrot", + 58: "hot dog", + 59: "pizza", + 60: "donut", + 61: "cake", + 62: "chair", + 63: "couch", + 64: "potted plant", + 65: "bed", + 67: "dining table", + 70: "toilet", + 72: "tv", + 73: "laptop", + 74: "mouse", + 75: "remote", + 76: "keyboard", + 77: "cell phone", + 78: "microwave", + 79: "oven", + 80: "toaster", + 81: "sink", + 82: "refrigerator", + 84: "book", + 85: "clock", + 86: "vase", + 87: "scissors", + 88: "teddy bear", + 89: "hair drier", + 90: "toothbrush", +} + +class ComposeTransform(object): + """Composes several transforms together. 
+ + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ResizeTFTransform(object): + """Resize the input image to the given size. + + Args: + size (list or int): Size of the result + interpolation (str, default='bilinear'):Desired interpolation type, + support 'bilinear', 'nearest', 'bicubic' + + Returns: + tuple of processed image and label + """ + + def __init__(self, size, interpolation="bilinear"): + """Initialize `ResizeTFTransform` class.""" + if isinstance(size, int): + self.size = size, size + elif isinstance(size, list): + if len(size) == 1: + self.size = size[0], size[0] + elif len(size) == 2: + self.size = size[0], size[1] + self.interpolation = interpolation + + if self.interpolation not in ["bilinear", "nearest", "bicubic"]: + raise ValueError("Unsupported interpolation type!") + + def __call__(self, sample): + """Resize the input image in sample to the given size.""" + image, label = sample + if isinstance(image, tf.Tensor): + image = tf.image.resize(image, self.size, method=self.interpolation) + else: + image = cv2.resize(image, self.size, interpolation=interpolation_map[self.interpolation]) + return (image, label) + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. 
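To make the abstract update/reset/result contract concrete, here is a minimal, hypothetical subclass (not part of this example) that tracks top-1 accuracy:

```python
import numpy as np

class SimpleAccuracy(BaseMetric):
    """Hypothetical metric: fraction of argmax predictions that match their labels."""

    def __init__(self):
        super().__init__(metric=None)  # the wrapped metric class is unused in this sketch
        self.correct = 0
        self.total = 0

    def update(self, preds, labels=None, sample_weight=None):
        preds = np.asarray(preds).argmax(axis=-1)
        labels = np.asarray(labels)
        self.correct += int((preds == labels).sum())
        self.total += labels.size

    def reset(self):
        self.correct = 0
        self.total = 0

    def result(self):
        return self.correct / self.total if self.total else 0.0
```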
+ """ + self._hvd = hvd + + +class COCOmAPv2(BaseMetric): + """Compute mean average precision of the detection task.""" + + def __init__( + self, + anno_path=None, + iou_thrs="0.5:0.05:0.95", + map_points=101, + map_key="DetectionBoxes_Precision/mAP", + output_index_mapping={"num_detections": -1, "boxes": 0, "scores": 1, "classes": 2}, + ): + """Initialize the metric. + + Args: + anno_path: The path of annotation file. + iou_thrs: Minimal value for intersection over union that allows to make decision + that prediction bounding box is true positive. You can specify one float value + between 0 to 1 or string "05:0.05:0.95" for standard COCO thresholds. + map_points: The way to calculate mAP. 101 for 101-point interpolated AP, 11 for + 11-point interpolated AP, 0 for area under PR curve. + map_key: The key that mapping to pycocotools COCOeval. + Defaults to 'DetectionBoxes_Precision/mAP'. + output_index_mapping: The output index mapping. + Defaults to {'num_detections':-1, 'boxes':0, 'scores':1, 'classes':2}. + """ + self.output_index_mapping = output_index_mapping + + if anno_path: + import os + import yaml + + assert os.path.exists(anno_path), "Annotation path does not exists!" + with open(anno_path, "r") as f: + label_map = yaml.safe_load(f.read()) + self.category_map_reverse = {k: v for k, v in label_map.items()} + else: + # label: index + self.category_map_reverse = {v: k for k, v in category_map.items()} + self.image_ids = [] + self.ground_truth_list = [] + self.detection_list = [] + self.annotation_id = 1 + self.category_map = category_map + self.category_id_set = set([cat for cat in self.category_map]) # index + self.iou_thrs = iou_thrs + self.map_points = map_points + self.map_key = map_key + + def update(self, predicts, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + predicts: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. Defaults to None. 
+ """ + from coco_tools import ExportSingleImageDetectionBoxesToCoco, ExportSingleImageGroundtruthToCoco + + detections = [] + if "num_detections" in self.output_index_mapping and self.output_index_mapping["num_detections"] > -1: + for item in zip(*predicts): + detection = {} + num = int(item[self.output_index_mapping["num_detections"]]) + detection["boxes"] = np.asarray(item[self.output_index_mapping["boxes"]])[0:num] + detection["scores"] = np.asarray(item[self.output_index_mapping["scores"]])[0:num] + detection["classes"] = np.asarray(item[self.output_index_mapping["classes"]])[0:num] + detections.append(detection) + else: + for item in zip(*predicts): + detection = {} + detection["boxes"] = np.asarray(item[self.output_index_mapping["boxes"]]) + detection["scores"] = np.asarray(item[self.output_index_mapping["scores"]]) + detection["classes"] = np.asarray(item[self.output_index_mapping["classes"]]) + detections.append(detection) + + bboxes, str_labels, int_labels, image_ids = labels + labels = [] + if len(int_labels[0]) == 0: + for str_label in str_labels: + str_label = [x if type(x) == "str" else x.decode("utf-8") for x in str_label] + labels.append([self.category_map_reverse[x] for x in str_label]) + elif len(str_labels[0]) == 0: + for int_label in int_labels: + labels.append([x for x in int_label]) + + for idx, image_id in enumerate(image_ids): + image_id = image_id if type(image_id) == "str" else image_id.decode("utf-8") + if image_id in self.image_ids: + continue + self.image_ids.append(image_id) + + ground_truth = {} + ground_truth["boxes"] = np.asarray(bboxes[idx]) + ground_truth["classes"] = np.asarray(labels[idx]) + + self.ground_truth_list.extend( + ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self.annotation_id, + category_id_set=self.category_id_set, + groundtruth_boxes=ground_truth["boxes"], + groundtruth_classes=ground_truth["classes"], + ) + ) + self.annotation_id += ground_truth["boxes"].shape[0] + + self.detection_list.extend( + ExportSingleImageDetectionBoxesToCoco( + image_id=image_id, + category_id_set=self.category_id_set, + detection_boxes=detections[idx]["boxes"], + detection_scores=detections[idx]["scores"], + detection_classes=detections[idx]["classes"], + ) + ) + + def reset(self): + """Reset the prediction and labels.""" + self.image_ids = [] + self.ground_truth_list = [] + self.detection_list = [] + self.annotation_id = 1 + + def result(self): + """Compute mean average precision. + + Returns: + The mean average precision score. 
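A rough end-to-end sketch of driving this metric by hand with one fabricated image; boxes are in [ymin, xmin, ymax, xmax] order, and the label tuple mirrors what the COCO record dataset yields (per-image boxes, string labels, integer labels, byte-string image ids):

```python
import numpy as np

metric = COCOmAPv2()  # default mapping: boxes at index 0, scores at 1, classes at 2

# Fabricated predictions for a batch of one image.
pred_boxes = np.array([[[10.0, 20.0, 50.0, 80.0]]])  # shape [batch, num_boxes, 4]
pred_scores = np.array([[0.9]])                      # shape [batch, num_boxes]
pred_classes = np.array([[1]])                       # category 1 ("person")
predicts = (pred_boxes, pred_scores, pred_classes)

# Fabricated groundtruth in the same layout the dataloader produces.
gt_boxes = [np.array([[10.0, 20.0, 50.0, 80.0]])]
str_labels = [[]]          # empty, so the integer labels below are used
int_labels = [[1]]
image_ids = [b"0001"]
labels = (gt_boxes, str_labels, int_labels, image_ids)

metric.update(predicts, labels)
print(metric.result())     # mAP over the single fabricated image
```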
+ """ + from coco_tools import COCOEvalWrapper, COCOWrapper + + if len(self.ground_truth_list) == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + else: + groundtruth_dict = { + "annotations": self.ground_truth_list, + "images": [{"id": image_id} for image_id in self.image_ids], + "categories": [{"id": k, "name": v} for k, v in self.category_map.items()], + } + coco_wrapped_groundtruth = COCOWrapper(groundtruth_dict) + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(self.detection_list) + box_evaluator = COCOEvalWrapper( + coco_wrapped_groundtruth, + coco_wrapped_detections, + agnostic_mode=False, + iou_thrs=self.iou_thrs, + map_points=self.map_points, + ) + box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics( + include_metrics_per_category=False, all_metrics_per_category=False + ) + box_metrics.update(box_per_category_ap) + box_metrics = {"DetectionBoxes_" + key: value for key, value in iter(box_metrics.items())} + + return box_metrics[self.map_key] + + +class ParseDecodeCoco: # pragma: no cover + """Helper function for TensorflowModelZooBertDataset. + + Parse the features from sample. + """ + + def __call__(self, sample): + """Parse the sample data. + + Args: + sample: Data to be parsed. + """ + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/object/class/text": tf.compat.v1.VarLenFeature(dtype=tf.string), + "image/object/class/label": tf.compat.v1.VarLenFeature(dtype=tf.int64), + "image/source_id": tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=""), + } + sparse_float32 = tf.compat.v1.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(sample, feature_map) + + xmin = tf.expand_dims(features["image/object/bbox/xmin"].values, 0) + ymin = tf.expand_dims(features["image/object/bbox/ymin"].values, 0) + xmax = tf.expand_dims(features["image/object/bbox/xmax"].values, 0) + ymax = tf.expand_dims(features["image/object/bbox/ymax"].values, 0) + + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + encoded_image = features["image/encoded"] + image_tensor = tf.image.decode_image(encoded_image, channels=3) + image_tensor.set_shape([None, None, 3]) + + str_label = features["image/object/class/text"].values + int_label = features["image/object/class/label"].values + image_id = features["image/source_id"] + + return image_tensor, (bbox[0], str_label, int_label, image_id) + + +class COCORecordDataset(object): + """Tensorflow COCO dataset in tf record format. + + Root is a full path to tfrecord file, which contains the file name. + Please use Resize transform when batch_size > 1 + + Args: root (str): Root directory of dataset. + num_cores (int, default=28):The number of input Datasets to interleave from in parallel. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. 
+ """ + + def __new__(cls, root, num_cores=28, transform=None, filter=filter): + """Build a new object.""" + record_iterator = tf.compat.v1.python_io.tf_record_iterator(root) + example = tf.train.SequenceExample() + for element in record_iterator: + example.ParseFromString(element) + break + feature = example.context.feature + if ( + len(feature["image/object/class/text"].bytes_list.value) == 0 + and len(feature["image/object/class/label"].int64_list.value) == 0 + ): + raise ValueError( + "Tfrecord format is incorrect, please refer\ + 'https://github.com/tensorflow/models/blob/master/research/\ + object_detection/dataset_tools/create_coco_tf_record.py' to\ + create correct tfrecord" + ) + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + tfrecord_paths = [root] + ds = tf.data.TFRecordDataset.list_files(tfrecord_paths) + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, + cycle_length=num_cores, + block_length=5, + sloppy=True, + buffer_output_elements=10000, + prefetch_input_elements=10000, + ) + ) + if transform is not None: + transform.transform_list.insert(0, ParseDecodeCoco()) + else: + transform = ParseDecodeCoco() + ds = ds.map(transform, num_parallel_calls=None) + if filter is not None: + ds = ds.filter(filter) + ds = ds.prefetch(buffer_size=1000) + return ds + + +class TFDataLoader(object): + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + 
samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/main.py b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/main.py new file mode 100644 index 00000000000..0ca37671fd6 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/main.py @@ -0,0 +1,128 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +from __future__ import division + +import time + +import numpy as np +import tensorflow as tf + +from argparse import ArgumentParser +from data_process import( + COCOmAPv2, + COCORecordDataset, + ComposeTransform, + ResizeTFTransform, + TFDataLoader, +) + +arg_parser = ArgumentParser(description='Parse args') + +arg_parser.add_argument('-g', + "--input-graph", + help='Specify the input graph.', + dest='input_graph') +arg_parser.add_argument('--config', type=str, default='') +arg_parser.add_argument('--dataset_location', type=str, default='') +arg_parser.add_argument('--output_model', type=str, default='') +arg_parser.add_argument('--mode', type=str, default='performance') +arg_parser.add_argument('--batch_size', type=int, default=10) +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='iterations') +arg_parser.add_argument('--tune', action='store_true', default=False) +arg_parser.add_argument('--benchmark', dest='benchmark', + action='store_true', help='run benchmark') +args = arg_parser.parse_args() + +def evaluate(model): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph): The input model graph. + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow import Model + model = Model(model) + model.input_tensor_names = ["image_tensor:0"] + model.output_tensor_names = ["num_detections:0", "detection_boxes:0", \ + "detection_scores:0", "detection_classes:0"] + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + warmup = 5 + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + metric = COCOmAPv2(output_index_mapping={'num_detections':0, 'boxes':1, 'scores':2, 'classes':3}) + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list[warmup:]).mean() / args.batch_size + return latency + + eval_dataset = COCORecordDataset(root=args.dataset_location, filter=None, \ + transform=ComposeTransform(transform_list=[ResizeTFTransform(size=600)])) + eval_dataloader=TFDataLoader(dataset=eval_dataset, batch_size=args.batch_size) + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + acc = metric.result() + return acc + +def main(_): + calib_dataset = COCORecordDataset(root=args.dataset_location, filter=None, \ + transform=ComposeTransform(transform_list=[ResizeTFTransform(size=600)])) + calib_dataloader = TFDataLoader(dataset=calib_dataset, batch_size=args.batch_size) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + quant_config = StaticQuantConfig(weight_granularity="per_channel") + model = Model(args.input_graph) + model.input_tensor_names = ['image_tensor'] + model.output_tensor_names = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"] + q_model = quantize_model(model, quant_config, calib_dataloader) + q_model.save(args.output_model) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_graph) + else: + accuracy = evaluate(args.input_graph) + print('Batch size = %d' % args.batch_size) + print("Accuracy: %.5f" % accuracy) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..6c2115f58ff --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location "${dataset_location}" \ + --batch_size ${batch_size} \ + --iters ${iters} \ + --benchmark +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..559d695f768 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/faster_rcnn_resnet50/quantization/ptq/run_quant.sh @@ -0,0 +1,41 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph "${input_model}" \ + --output_model "${output_model}" \ + --dataset_location "${dataset_location}" \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/README.md b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/README.md new file mode 100644 index 00000000000..9ec8ae2ad78 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/README.md @@ 
-0,0 +1,142 @@
+Step-by-Step
+============
+
+This document lists the steps to reproduce the tuning results of TensorFlow Object Detection models. This example can run on Intel CPUs and GPUs.
+
+# Prerequisite
+
+## 1. Environment
+Python 3.6 or a higher version is recommended.
+
+### Install Intel® Neural Compressor
+```shell
+pip install neural-compressor
+```
+
+### Install Intel Tensorflow
+```shell
+pip install intel-tensorflow
+```
+> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+### Install Dependency Packages
+```shell
+cd examples/3.x_api/tensorflow/object_detection
+pip install -r requirements.txt
+cd mask_rcnn_inception_v2/quantization/ptq
+```
+
+### Install Protocol Buffer Compiler
+
+`Protocol Buffer Compiler` version 3.0.0 or higher is required for the automatic COCO dataset preparation. To install it, please follow the
+[Protobuf installation instructions](https://grpc.io/docs/protoc-installation/#install-using-a-package-manager).
+
+### Install Intel Extension for Tensorflow
+
+#### Quantizing the model on Intel GPU (installing ITEX is mandatory)
+Intel Extension for Tensorflow must be installed to quantize the model on Intel GPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[xpu]
+```
+For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).
+
+#### Quantizing the model on Intel CPU (installing ITEX is optional)
+Intel Extension for Tensorflow for Intel CPUs is currently experimental and is not required for quantizing the model on Intel CPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+## 2. Prepare Model
+
+```shell
+wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
+tar -xvzf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz
+```
+
+## 3. Prepare Dataset
+
+### Automatic dataset download
+
+> **_Note: the `prepare_dataset.sh` script works with TF version 1.x._**
+
+Run the `prepare_dataset.sh` script located in `examples/3.x_api/tensorflow/object_detection`.
+
+Usage:
+```shell
+cd examples/3.x_api/tensorflow/object_detection/
+. prepare_dataset.sh
+cd mask_rcnn_inception_v2/quantization/ptq
+```
+
+This script downloads the *train*, *validation* and *test* COCO datasets. Furthermore, it converts them to
+TensorFlow records using the dedicated script from `https://github.com/tensorflow/models.git`.
+
+### Manual dataset download
+Download the COCO dataset from the [official website](https://cocodataset.org/#download).
+
+# Run
+
+Both pb and ckpt formats are supported.
+
+## 1. Quantization
+### For PB format
+
+  ```shell
+  bash run_quant.sh --input_model=./mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb --output_model=./tensorflow-mask_rcnn_inception_v2-tune.pb --dataset_location=/path/to/dataset/coco_val.record
+  ```
+
+### For ckpt format
+
+  ```shell
+  bash run_quant.sh --input_model=./mask_rcnn_inception_v2_coco_2018_01_28/ --output_model=./tensorflow-mask_rcnn_inception_v2-tune.pb --dataset_location=/path/to/dataset/coco_val.record
+  ```
+
+## 2. Benchmark
+  ```shell
+  # run performance benchmark
+  bash run_benchmark.sh --input_model=./tensorflow-mask_rcnn_inception_v2-tune.pb --dataset_location=/path/to/dataset/coco_val.record --mode=performance
+
+  # run accuracy benchmark
+  bash run_benchmark.sh --input_model=./tensorflow-mask_rcnn_inception_v2-tune.pb --dataset_location=/path/to/dataset/coco_val.record --mode=accuracy
+  ```
+
+Details of enabling Intel® Neural Compressor on mask_rcnn_inception_v2 for Tensorflow
+=========================
+
+This is a tutorial on how to enable the mask_rcnn_inception_v2 model with Intel® Neural Compressor.
+## User Code Analysis
+The user specifies the fp32 *model*, a calibration dataloader *calib_dataloader*, and a custom *eval_func* that encapsulates the evaluation dataset and metric.
+
+For mask_rcnn_inception_v2, our philosophy is to enable the model with minimal changes. Hence, only two changes to the original code are needed: building the *calib_dataloader* and implementing the *eval_func*. A sketch of the evaluation side is given at the end of this document.
+
+### Code update
+
+After the preparation steps are done, we just need to update main.py as below.
+```python
+if args.tune:
+    from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model
+
+    quant_config = StaticQuantConfig(weight_granularity="per_channel")
+    model = Model(args.input_graph)
+    model.input_tensor_names = ['image_tensor']
+    model.output_tensor_names = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"]
+    q_model = quantize_model(model, quant_config, calib_dataloader)
+    q_model.save(args.output_model)
+
+if args.benchmark:
+    if args.mode == 'performance':
+        evaluate(args.input_graph)
+    else:
+        accuracy = evaluate(args.input_graph)
+        print('Batch size = %d' % args.batch_size)
+        print("Accuracy: %.5f" % accuracy)
+```
+
+The quantize_model() API returns the quantized model produced with the given configuration, which is then saved to the specified output path.
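+
+### Evaluation sketch
+
+For reference, the evaluation side can be written as below. This is a condensed, illustrative sketch of the loop in `main.py` of this directory, reusing the dataset helpers and the `COCOmAPv2` metric from `data_process.py`; the `eval_func` name and the record path are placeholders rather than part of the shipped scripts.
+
+```python
+import numpy as np
+from neural_compressor.tensorflow import Model
+from data_process import (COCOmAPv2, COCORecordDataset, ComposeTransform,
+                          TensorflowResizeWithRatio, TFDataLoader)
+
+def eval_func(graph_path, dataset_location="/path/to/dataset/coco_val.record"):
+    # Wrap the frozen graph and declare its input/output tensors.
+    model = Model(graph_path)
+    model.input_tensor_names = ["image_tensor:0"]
+    model.output_tensor_names = ["num_detections:0", "detection_boxes:0",
+                                 "detection_scores:0", "detection_classes:0"]
+    # The metric maps each output tensor to the field expected by pycocotools.
+    metric = COCOmAPv2(output_index_mapping={"num_detections": 0, "boxes": 1,
+                                             "scores": 2, "classes": 3})
+    dataset = COCORecordDataset(
+        root=dataset_location, filter=None,
+        transform=ComposeTransform(transform_list=[
+            TensorflowResizeWithRatio(min_dim=800, max_dim=1356, padding=False)]))
+    for inputs, labels in TFDataLoader(dataset=dataset, batch_size=1):
+        # Feed one image per step and accumulate detections in the mAP metric.
+        feed_dict = dict(zip(model.input_tensor, np.array([inputs])))
+        predictions = model.sess.run(model.output_tensor, feed_dict)
+        metric.update(predictions, labels)
+    return metric.result()
+```
+
+`run_benchmark.sh --mode=accuracy` drives the equivalent code path in `main.py`.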
diff --git a/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/coco_tools.py b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/coco_tools.py
new file mode 100644
index 00000000000..2f9369798df
--- /dev/null
+++ b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/coco_tools.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Wrappers for third party pycocotools to be used within object_detection.
+
+Note that nothing in this file is tensorflow related and thus cannot
+be called directly as a slim metric, for example.
+ +TODO(jonathanhuang): wrap as a slim metric in metrics.py + + +Usage example: given a set of images with ids in the list image_ids +and corresponding lists of numpy arrays encoding groundtruth (boxes and classes) +and detections (boxes, scores and classes), where elements of each list +correspond to detections/annotations of a single image, +then evaluation (in multi-class mode) can be invoked as follows: + + groundtruth_dict = coco_tools.ExportGroundtruthToCOCO( + image_ids, groundtruth_boxes_list, groundtruth_classes_list, + max_num_classes, output_path=None) + detections_list = coco_tools.ExportDetectionsToCOCO( + image_ids, detection_boxes_list, detection_scores_list, + detection_classes_list, output_path=None) + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() +""" + +import copy +import time +from collections import OrderedDict +from typing import Any, Dict, List, Set, Union + +import numpy as np +from pycocotools import coco, cocoeval, mask + +from neural_compressor.utils import logger + + +class COCOWrapper(coco.COCO): + """Wrapper for the pycocotools COCO class. + + Attributes: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + """ + + def __init__(self, dataset: Dict[str, Any], detection_type: str = "bbox"): + """Construct a COCOWrapper. + + See http://mscoco.org/dataset/#format for a description of the format. + By default, the coco.COCO class constructor reads from a JSON file. + This function duplicates the same behavior but loads from a dictionary, + allowing us to perform evaluation without writing to external storage. + + Args: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + + Raises: + ValueError: if detection_type is unsupported. + """ + supported_detection_types = ["bbox", "segmentation"] + if detection_type not in supported_detection_types: + raise ValueError( + "Unsupported detection type: {}. " + "Supported values are: {}".format(detection_type, supported_detection_types) + ) + self._detection_type = detection_type + coco.COCO.__init__(self) + self.dataset = dataset + self.createIndex() + + def LoadAnnotations(self, annotations: list) -> coco.COCO: + """Load annotations dictionary into COCO datastructure. + + See http://mscoco.org/dataset/#format for a description of the annotations + format. As above, this function replicates the default behavior of the API + but does not require writing to external storage. + + Args: + annotations: python list holding object detection results where each + detection is encoded as a dict with required keys ['image_id', + 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on + `detection_type`. + + Returns: + a coco.COCO datastructure holding object detection annotations results + + Raises: + ValueError: if (1) annotations is not a list or annotations do not + correspond to the images contained in self. 
+ """ + results = coco.COCO() + results.dataset["images"] = [img for img in self.dataset["images"]] + + logger.info("Load and prepare annotation results.") + tic = time.time() + + if not isinstance(annotations, list): + raise ValueError("annotations is not a list of objects") + annotation_img_ids = [ann["image_id"] for ann in annotations] + if set(annotation_img_ids) != (set(annotation_img_ids) & set(self.getImgIds())): + raise ValueError("Results do not correspond to current coco set") + results.dataset["categories"] = copy.deepcopy(self.dataset["categories"]) + if self._detection_type == "bbox": + for idx, ann in enumerate(annotations): + bb = ann["bbox"] + ann["area"] = bb[2] * bb[3] + ann["id"] = idx + 1 + ann["iscrowd"] = 0 + elif self._detection_type == "segmentation": + for idx, ann in enumerate(annotations): + ann["area"] = mask.area(ann["segmentation"]) + ann["bbox"] = mask.toBbox(ann["segmentation"]) + ann["id"] = idx + 1 + ann["iscrowd"] = 0 + logger.info("DONE (t=%0.2fs)", (time.time() - tic)) + + results.dataset["annotations"] = annotations + results.createIndex() + return results + + +class COCOEvalWrapper(cocoeval.COCOeval): + """Wrapper for the pycocotools COCOeval class. + + To evaluate, create two objects (groundtruth_dict and detections_list) + using the conventions listed at http://mscoco.org/dataset/#format. + Then call evaluation as follows: + + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() + """ + + def __init__( + self, + groundtruth: coco.COCO = None, + detections: coco.COCO = None, + agnostic_mode=False, + iou_type: str = "bbox", + iou_thrs: Union[str, float] = None, + map_points=None, + ): + """Construct a COCOEvalWrapper. + + Note that for the area-based metrics to be meaningful, detection and + groundtruth boxes must be in image coordinates measured in pixels. + + Args: + groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding + groundtruth annotations + detections: a coco.COCO (or coco_tools.COCOWrapper) object holding + detections + agnostic_mode: boolean (default: False). If True, evaluation ignores + class labels, treating all detections as proposals. + iou_thrs: Minimal value for intersection over union that allows to + make decision that prediction bounding box is true positive. + You can specify one float value between 0 to 1 or + string "05:0.05:0.95" for standard COCO thresholds. + iou_type: IOU type to use for evaluation. Supports `bbox` or `segm`. + map_points: The way to calculate mAP. 101 for 101-point interpolated AP, 11 for + 11-point interpolated AP, 0 for area under PR curve. + """ + cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type) + if agnostic_mode: + self.params.useCats = 0 + if iou_thrs == "0.5:0.05:0.95": + self.params.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + elif isinstance(iou_thrs, float): + self.params.iouThrs = [iou_thrs] + + if map_points == 101: + self.params.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + if map_points == 11: + self.params.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.1)) + 1, endpoint=True) + if map_points == 0: + self.params.recThrs = [-1] + + def GetCategory(self, category_id: int) -> dict: + """Fetch dictionary holding category information given category id. 
+ + Args: + category_id: integer id + + Returns: + dictionary holding 'id', 'name'. + """ + return self.cocoGt.cats[category_id] + + def GetAgnosticMode(self) -> bool: + """Return whether COCO Eval is configured to evaluate in agnostic mode.""" + return self.params.useCats == 0 + + def GetCategoryIdList(self) -> List[int]: + """Return the list of IDs of all valid categories.""" + return self.params.catIds + + def accumulate(self, p: cocoeval.Params = None): + """Accumulate evaluation results per image and store it to self.eval. + + Args: + p: input params for evaluation + """ + print("Accumulating evaluation results...") + tic = time.time() + if not self.evalImgs: + print("Please run evaluate() first") + # allows input customized parameters + if p is None: + p = self.params + p.catIds = p.catIds if p.useCats == 1 else [-1] + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) if p.useCats else 1 + A = len(p.areaRng) + M = len(p.maxDets) + precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories + recall = -np.ones((T, K, A, M)) + scores = -np.ones((T, R, K, A, M)) + + # create dictionary for future indexing + _pe = self._paramsEval + print("-pe", _pe) + catIds = _pe.catIds if _pe.useCats else [-1] + setK = set(catIds) + setA = set(map(tuple, _pe.areaRng)) + setM = set(_pe.maxDets) + setI = set(_pe.imgIds) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIds) if i in setI] + I0 = len(_pe.imgIds) + A0 = len(_pe.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. 
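+                    # With detections sorted by descending confidence, the cumulative
+                    # TP/FP sums computed below trace out the precision/recall curve for
+                    # this (category, area range, maxDet) combination.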
+ inds = np.argsort(-dtScores, kind="mergesort") + dtScoresSorted = dtScores[inds] + + dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e["gtIgnore"] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + tps = np.logical_and(dtm, np.logical_not(dtIg)) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) + + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + + # calculate precision + if R == 1: + rc = np.concatenate(([0.0], rc, [1.0])) + pr = np.concatenate(([0.0], pr, [0.0])) + + # compute the precision envelope + for i in range(pr.size - 1, 0, -1): + pr[i - 1] = np.maximum(pr[i - 1], pr[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + change_point = np.where(rc[1:] != rc[:-1])[0] + # and sum (\Delta recall) * recall + res = np.sum((rc[change_point + 1] - rc[change_point]) * pr[change_point + 1]) + precision[t, :, k, a, m] = np.array([res]) + else: + q = np.zeros((R,)) + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + except: + pass + precision[t, :, k, a, m] = np.array(q) + + # calculate recall + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # calculate score + ss = np.zeros((R,)) + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + ss[ri] = dtScoresSorted[pi] + except: + pass + scores[t, :, k, a, m] = np.array(ss) + # exit(0) + self.eval = { + "params": p, + "counts": [T, R, K, A, M], + "precision": precision, + "recall": recall, + "scores": scores, + } + toc = time.time() + print("DONE (t={:0.2f}s).".format(toc - tic)) + + def ComputeMetrics( + self, include_metrics_per_category: bool = False, all_metrics_per_category: bool = False + ): # pragma: no cover + """Compute detection metrics. + + Args: + include_metrics_per_category: Whether include metrics per category. + all_metrics_per_category: Whether include all the summery metrics for + each category in per_category_ap. Be careful with setting it to true if + you have more than handful of categories, because it will pollute + your mldash. 
+ + Returns: + A tuple of (summary_metrics, per_category_ap), in which + (1) summary_metrics is a dictionary holding: + 'Precision/mAP': mean average precision over classes averaged over IOU + thresholds ranging from .5 to .95 with .05 increments; + 'Precision/mAP@.50IOU': mean average precision at 50% IOU; + 'Precision/mAP@.75IOU': mean average precision at 75% IOU; + 'Precision/mAP (small)': mean average precision for small objects + (area < 32^2 pixels); + 'Precision/mAP (medium)': mean average precision for medium sized + objects (32^2 pixels < area < 96^2 pixels); + 'Precision/mAP (large)': mean average precision for large objects + (96^2 pixels < area < 10000^2 pixels); + 'Recall/AR@1': average recall with 1 detection; + 'Recall/AR@10': average recall with 10 detections; + 'Recall/AR@100': average recall with 100 detections; + 'Recall/AR@100 (small)': average recall for small objects with 100 + detections; + 'Recall/AR@100 (medium)': average recall for medium objects with 100 + detections; + 'Recall/AR@100 (large)': average recall for large objects with 100 + detections; + and (2) per_category_ap is a dictionary holding category specific results with + keys of the form: 'Precision mAP ByCategory/category' + (without the supercategory part if no supercategories exist). + + For backward compatibility 'PerformanceByCategory' is included in the + output regardless of all_metrics_per_category. If evaluating class-agnostic + mode, per_category_ap is an empty dictionary. + + Raises: + ValueError: If category_stats does not exist. + """ + self.evaluate() + self.accumulate() + self.summarize() + + summary_metrics = OrderedDict( + [ + ("Precision/mAP", self.stats[0]), + ("Precision/mAP@.50IOU", self.stats[1]), + ("Precision/mAP@.75IOU", self.stats[2]), + ("Precision/mAP (small)", self.stats[3]), + ("Precision/mAP (medium)", self.stats[4]), + ("Precision/mAP (large)", self.stats[5]), + ("Recall/AR@1", self.stats[6]), + ("Recall/AR@10", self.stats[7]), + ("Recall/AR@100", self.stats[8]), + ("Recall/AR@100 (small)", self.stats[9]), + ("Recall/AR@100 (medium)", self.stats[10]), + ("Recall/AR@100 (large)", self.stats[11]), + ] + ) + if not include_metrics_per_category: + return summary_metrics, {} + if not hasattr(self, "category_stats"): + raise ValueError("Category stats do not exist") + per_category_ap = OrderedDict([]) + if self.GetAgnosticMode(): + return summary_metrics, per_category_ap + for category_index, category_id in enumerate(self.GetCategoryIdList()): + category = self.GetCategory(category_id)["name"] + # Kept for backward compatilbility + # pylint: disable=no-member + per_category_ap["PerformanceByCategory/mAP/{}".format(category)] = self.category_stats[0][category_index] + if all_metrics_per_category: + per_category_ap["Precision mAP ByCategory/{}".format(category)] = self.category_stats[0][category_index] + per_category_ap["Precision mAP@.50IOU ByCategory/{}".format(category)] = self.category_stats[1][ + category_index + ] + per_category_ap["Precision mAP@.75IOU ByCategory/{}".format(category)] = self.category_stats[2][ + category_index + ] + per_category_ap["Precision mAP (small) ByCategory/{}".format(category)] = self.category_stats[3][ + category_index + ] + per_category_ap["Precision mAP (medium) ByCategory/{}".format(category)] = self.category_stats[4][ + category_index + ] + per_category_ap["Precision mAP (large) ByCategory/{}".format(category)] = self.category_stats[5][ + category_index + ] + per_category_ap["Recall AR@1 ByCategory/{}".format(category)] = 
self.category_stats[6][category_index] + per_category_ap["Recall AR@10 ByCategory/{}".format(category)] = self.category_stats[7][category_index] + per_category_ap["Recall AR@100 ByCategory/{}".format(category)] = self.category_stats[8][category_index] + per_category_ap["Recall AR@100 (small) ByCategory/{}".format(category)] = self.category_stats[9][ + category_index + ] + per_category_ap["Recall AR@100 (medium) ByCategory/{}".format(category)] = self.category_stats[10][ + category_index + ] + per_category_ap["Recall AR@100 (large) ByCategory/{}".format(category)] = self.category_stats[11][ + category_index + ] + + return summary_metrics, per_category_ap + + +def _ConvertBoxToCOCOFormat(box): + """Convert a box in [ymin, xmin, ymax, xmax] format to COCO format. + + This is a utility function for converting from our internal + [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API + i.e., [xmin, ymin, width, height]. + + Args: + box: a numpy array in format of [ymin, xmin, ymax, xmax] + + Returns: + A list of floats, in COCO format, representing [xmin, ymin, width, height] + """ + return [float(box[1]), float(box[0]), float(box[3] - box[1]), float(box[2] - box[0])] + + +def _RleCompress(masks): + """Compresses mask using Run-length encoding provided by pycocotools. + + Args: + masks: uint8 numpy array of shape [mask_height, mask_width] with values in + {0, 1}. + + Returns: + A pycocotools Run-length encoding of the mask. + """ + return mask.encode(np.asfortranarray(masks)) + + +def ExportSingleImageGroundtruthToCoco( + image_id: Union[int, str], + next_annotation_id: int, + category_id_set: Set[str], + groundtruth_boxes: np.array, + groundtruth_classes: np.array, + groundtruth_masks: Union[np.array, None] = None, + groundtruth_is_crowd: Union[np.array, None] = None, +) -> list: + """Export groundtruth of a single image to COCO format. + + This function converts groundtruth detection annotations represented as numpy + arrays to dictionaries that can be ingested by the COCO evaluation API. Note + that the image_ids provided here must match the ones given to + ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in + correspondence - that is: groundtruth_boxes[i, :], and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box. + + Args: + image_id: a unique image identifier either of type integer or string. + next_annotation_id: integer specifying the first id to use for the + groundtruth annotations. All annotations are assigned a continuous integer + id starting from this value. + category_id_set: A set of valid class ids. Groundtruth with classes not in + category_id_set are dropped. + groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4] + groundtruth_classes: numpy array (int) with shape [num_gt_boxes] + groundtruth_masks: optional uint8 numpy array of shape [num_detections, + image_height, image_width] containing detection_masks. + groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes] + indicating whether groundtruth boxes are crowd. + + Returns: + A list of groundtruth annotations for a single image in the COCO format. 
+ + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + if len(groundtruth_classes.shape) != 1: + raise ValueError("groundtruth_classes is " "expected to be of rank 1.") + if len(groundtruth_boxes.shape) != 2: + raise ValueError("groundtruth_boxes is expected to be of " "rank 2.") + if groundtruth_boxes.shape[1] != 4: + raise ValueError("groundtruth_boxes should have " "shape[1] == 4.") + num_boxes = groundtruth_classes.shape[0] + if num_boxes != groundtruth_boxes.shape[0]: + raise ValueError( + "Corresponding entries in groundtruth_classes, " + "and groundtruth_boxes should have " + "compatible shapes (i.e., agree on the 0th dimension)." + "Classes shape: %d. Boxes shape: %d. Image ID: %s" + % (groundtruth_classes.shape[0], groundtruth_boxes.shape[0], image_id) + ) + has_is_crowd = groundtruth_is_crowd is not None + if has_is_crowd and len(groundtruth_is_crowd.shape) != 1: + raise ValueError("groundtruth_is_crowd is expected to be of rank 1.") + groundtruth_list = [] + for i in range(num_boxes): + if groundtruth_classes[i] in category_id_set: + iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0 + export_dict = { + "id": next_annotation_id + i, + "image_id": image_id, + "category_id": int(groundtruth_classes[i]), + "bbox": list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), + "area": float( + (groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) + * (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]) + ), + "iscrowd": iscrowd, + } + if groundtruth_masks is not None: + export_dict["segmentation"] = _RleCompress(groundtruth_masks[i]) + groundtruth_list.append(export_dict) + return groundtruth_list + + +def ExportSingleImageDetectionBoxesToCoco( + image_id: Union[int, str], + category_id_set: Set[int], + detection_boxes: np.array, + detection_scores: np.array, + detection_classes: np.array, +) -> list: + """Export detections of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. Note that the image_ids + provided here must match the ones given to the + ExporSingleImageDetectionBoxesToCoco. We assume that boxes, and classes are in + correspondence - that is: boxes[i, :], and classes[i] + are associated with the same groundtruth annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_boxes: float numpy array of shape [num_detections, 4] containing + detection boxes. + detection_scores: float numpy array of shape [num_detections] containing + scored for the detection boxes. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection boxes. + + Returns: + A list of detection annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_boxes, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. 
+ """ + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError("All entries in detection_classes and detection_scores" "expected to be of rank 1.") + if len(detection_boxes.shape) != 2: + raise ValueError("All entries in detection_boxes expected to be of " "rank 2.") + if detection_boxes.shape[1] != 4: + raise ValueError("All entries in detection_boxes should have " "shape[1] == 4.") + num_boxes = detection_classes.shape[0] + if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]: + raise ValueError( + "Corresponding entries in detection_classes, " + "detection_scores and detection_boxes should have " + "compatible shapes (i.e., agree on the 0th dimension). " + "Classes shape: %d. Boxes shape: %d. " + "Scores shape: %d" % (detection_classes.shape[0], detection_boxes.shape[0], detection_scores.shape[0]) + ) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append( + { + "image_id": image_id, + "category_id": int(detection_classes[i]), + "bbox": list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])), + "score": float(detection_scores[i]), + } + ) + return detections_list + + +def ExportSingleImageDetectionMasksToCoco( + image_id: Union[str, int], + category_id_set: Set[int], + detection_masks: np.array, + detection_scores: np.array, + detection_classes: np.array, +) -> list: + """Export detection masks of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. We assume that + detection_masks, detection_scores, and detection_classes are in correspondence + - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i] + are associated with the same annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_masks: uint8 numpy array of shape [num_detections, image_height, + image_width] containing detection_masks. + detection_scores: float numpy array of shape [num_detections] containing + scores for detection masks. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection masks. + + Returns: + A list of detection mask annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_masks, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError("All entries in detection_classes and detection_scores" "expected to be of rank 1.") + num_boxes = detection_classes.shape[0] + if not num_boxes == len(detection_masks) == detection_scores.shape[0]: + raise ValueError( + "Corresponding entries in detection_classes, " + "detection_scores and detection_masks should have " + "compatible lengths and shapes " + "Classes length: %d. Masks length: %d. 
" + "Scores length: %d" % (detection_classes.shape[0], len(detection_masks), detection_scores.shape[0]) + ) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append( + { + "image_id": image_id, + "category_id": int(detection_classes[i]), + "segmentation": _RleCompress(detection_masks[i]), + "score": float(detection_scores[i]), + } + ) + return detections_list diff --git a/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/data_process.py new file mode 100644 index 00000000000..8d0a074ee82 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/data_process.py @@ -0,0 +1,767 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import cv2 +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +interpolation_map = { + "nearest": cv2.INTER_NEAREST, + "bilinear": cv2.INTER_LINEAR, + "bicubic": cv2.INTER_CUBIC, +} + +category_map = { + 1: "person", + 2: "bicycle", + 3: "car", + 4: "motorcycle", + 5: "airplane", + 6: "bus", + 7: "train", + 8: "truck", + 9: "boat", + 10: "traffic light", + 11: "fire hydrant", + 13: "stop sign", + 14: "parking meter", + 15: "bench", + 16: "bird", + 17: "cat", + 18: "dog", + 19: "horse", + 20: "sheep", + 21: "cow", + 22: "elephant", + 23: "bear", + 24: "zebra", + 25: "giraffe", + 27: "backpack", + 28: "umbrella", + 31: "handbag", + 32: "tie", + 33: "suitcase", + 34: "frisbee", + 35: "skis", + 36: "snowboard", + 37: "sports ball", + 38: "kite", + 39: "baseball bat", + 40: "baseball glove", + 41: "skateboard", + 42: "surfboard", + 43: "tennis racket", + 44: "bottle", + 46: "wine glass", + 47: "cup", + 48: "fork", + 49: "knife", + 50: "spoon", + 51: "bowl", + 52: "banana", + 53: "apple", + 54: "sandwich", + 55: "orange", + 56: "broccoli", + 57: "carrot", + 58: "hot dog", + 59: "pizza", + 60: "donut", + 61: "cake", + 62: "chair", + 63: "couch", + 64: "potted plant", + 65: "bed", + 67: "dining table", + 70: "toilet", + 72: "tv", + 73: "laptop", + 74: "mouse", + 75: "remote", + 76: "keyboard", + 77: "cell phone", + 78: "microwave", + 79: "oven", + 80: "toaster", + 81: "sink", + 82: "refrigerator", + 84: "book", + 85: "clock", + 86: "vase", + 87: "scissors", + 88: "teddy bear", + 89: "hair drier", + 90: "toothbrush", +} + +class ComposeTransform(object): + """Composes several transforms together. 
+ + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ResizeWithRatio(): + """Resize image with aspect ratio and pad it to max shape(optional). + + If the image is padded, the label will be processed at the same time. + The input image should be np.array. + + Args: + min_dim (int, default=800): + Resizes the image such that its smaller dimension == min_dim + max_dim (int, default=1365): + Ensures that the image longest side doesn't exceed this value + padding (bool, default=False): + If true, pads image with zeros so its size is max_dim x max_dim + + Returns: + tuple of processed image and label + """ + + def __init__(self, min_dim=800, max_dim=1365, padding=False, constant_value=0): + """Initialize `ResizeWithRatio` class.""" + self.min_dim = min_dim + self.max_dim = max_dim + self.padding = padding + self.constant_value = constant_value + + def __call__(self, sample): + """Resize the image with ratio in sample.""" + image, label = sample + height, width = image.shape[:2] + scale = 1 + if self.min_dim: + scale = max(1, self.min_dim / min(height, width)) + if self.max_dim: + image_max = max(height, width) + if round(image_max * scale) > self.max_dim: + scale = self.max_dim / image_max + if scale != 1: + image = cv2.resize(image, (round(height * scale), round(width * scale))) + + bbox, str_label, int_label, image_id = label + + if self.padding: + h, w = image.shape[:2] + pad_param = [ + [(self.max_dim - h) // 2, self.max_dim - h - (self.max_dim - h) // 2], + [(self.max_dim - w) // 2, self.max_dim - w - (self.max_dim - w) // 2], + [0, 0], + ] + if not isinstance(bbox, np.ndarray): + bbox = np.array(bbox) + resized_box = bbox * [height, width, height, width] * scale + moved_box = resized_box + [ + (self.max_dim - h) // 2, + (self.max_dim - w) // 2, + (self.max_dim - h) // 2, + (self.max_dim - w) // 2, + ] + bbox = moved_box / [self.max_dim, self.max_dim, self.max_dim, self.max_dim] + image = np.pad(image, pad_param, mode="constant", constant_values=self.constant_value) + return image, (bbox, str_label, int_label, image_id) + + +class TensorflowResizeWithRatio(): + """Resize image with aspect ratio and pad it to max shape(optional). + + If the image is padded, the label will be processed at the same time. + The input image should be np.array or tf.Tensor. 
+ + Args: + min_dim (int, default=800): + Resizes the image such that its smaller dimension == min_dim + max_dim (int, default=1365): + Ensures that the image longest side doesn't exceed this value + padding (bool, default=False): + If true, pads image with zeros so its size is max_dim x max_dim + + Returns: + tuple of processed image and label + """ + + def __init__(self, min_dim=800, max_dim=1365, padding=False, constant_value=0): + """Initialize `TensorflowResizeWithRatio` class.""" + self.min_dim = min_dim + self.max_dim = max_dim + self.padding = padding + self.constant_value = constant_value + + def __call__(self, sample): + """Resize the image with ratio in sample.""" + image, label = sample + if isinstance(image, tf.Tensor): + shape = tf.shape(input=image) + height = tf.cast(shape[0], dtype=tf.float32) + width = tf.cast(shape[1], dtype=tf.float32) + scale = 1 + if self.min_dim: + scale = tf.maximum(1.0, tf.cast(self.min_dim / tf.math.minimum(height, width), dtype=tf.float32)) + if self.max_dim: + image_max = tf.cast(tf.maximum(height, width), dtype=tf.float32) + scale = tf.cond( + pred=tf.greater(tf.math.round(image_max * scale), self.max_dim), + true_fn=lambda: self.max_dim / image_max, + false_fn=lambda: scale, + ) + image = tf.image.resize(image, (tf.math.round(height * scale), tf.math.round(width * scale))) + bbox, str_label, int_label, image_id = label + + if self.padding: + shape = tf.shape(input=image) + h = tf.cast(shape[0], dtype=tf.float32) + w = tf.cast(shape[1], dtype=tf.float32) + pad_param = [ + [(self.max_dim - h) // 2, self.max_dim - h - (self.max_dim - h) // 2], + [(self.max_dim - w) // 2, self.max_dim - w - (self.max_dim - w) // 2], + [0, 0], + ] + resized_box = bbox * [height, width, height, width] * scale + moved_box = resized_box + [ + (self.max_dim - h) // 2, + (self.max_dim - w) // 2, + (self.max_dim - h) // 2, + (self.max_dim - w) // 2, + ] + bbox = moved_box / [self.max_dim, self.max_dim, self.max_dim, self.max_dim] + image = tf.pad(image, pad_param, constant_values=self.constant_value) + else: + transform = ResizeWithRatio(self.min_dim, self.max_dim, self.padding) + image, (bbox, str_label, int_label, image_id) = transform(sample) + return image, (bbox, str_label, int_label, image_id) + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. 
+ """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class LabelBalanceCOCORecordFilter(object): + """The label balance filter for COCO Record.""" + + def __init__(self, size=1): + """Initialize the attribute of class.""" + self.size = size + + def __call__(self, image, label): + """Execute the filter. + + Args: + image: Not used. + label: label of a sample. + """ + return tf.math.equal(len(label[0]), self.size) + + +class COCOmAPv2(BaseMetric): + """Compute mean average precision of the detection task.""" + + def __init__( + self, + anno_path=None, + iou_thrs="0.5:0.05:0.95", + map_points=101, + map_key="DetectionBoxes_Precision/mAP", + output_index_mapping={"num_detections": -1, "boxes": 0, "scores": 1, "classes": 2}, + ): + """Initialize the metric. + + Args: + anno_path: The path of annotation file. + iou_thrs: Minimal value for intersection over union that allows to make decision + that prediction bounding box is true positive. You can specify one float value + between 0 to 1 or string "05:0.05:0.95" for standard COCO thresholds. + map_points: The way to calculate mAP. 101 for 101-point interpolated AP, 11 for + 11-point interpolated AP, 0 for area under PR curve. + map_key: The key that mapping to pycocotools COCOeval. + Defaults to 'DetectionBoxes_Precision/mAP'. + output_index_mapping: The output index mapping. + Defaults to {'num_detections':-1, 'boxes':0, 'scores':1, 'classes':2}. + """ + self.output_index_mapping = output_index_mapping + + if anno_path: + import os + import yaml + + assert os.path.exists(anno_path), "Annotation path does not exists!" + with open(anno_path, "r") as f: + label_map = yaml.safe_load(f.read()) + self.category_map_reverse = {k: v for k, v in label_map.items()} + else: + # label: index + self.category_map_reverse = {v: k for k, v in category_map.items()} + self.image_ids = [] + self.ground_truth_list = [] + self.detection_list = [] + self.annotation_id = 1 + self.category_map = category_map + self.category_id_set = set([cat for cat in self.category_map]) # index + self.iou_thrs = iou_thrs + self.map_points = map_points + self.map_key = map_key + + def update(self, predicts, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + predicts: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. Defaults to None. 
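+
+        Note:
+            `predicts` is assumed to follow `output_index_mapping`, and `labels` to be
+            the (bbox, str_label, int_label, image_id) tuple produced by
+            `ParseDecodeCoco`, matching how this metric is driven in main.py.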
+ """ + from coco_tools import ExportSingleImageDetectionBoxesToCoco, ExportSingleImageGroundtruthToCoco + + detections = [] + if "num_detections" in self.output_index_mapping and self.output_index_mapping["num_detections"] > -1: + for item in zip(*predicts): + detection = {} + num = int(item[self.output_index_mapping["num_detections"]]) + detection["boxes"] = np.asarray(item[self.output_index_mapping["boxes"]])[0:num] + detection["scores"] = np.asarray(item[self.output_index_mapping["scores"]])[0:num] + detection["classes"] = np.asarray(item[self.output_index_mapping["classes"]])[0:num] + detections.append(detection) + else: + for item in zip(*predicts): + detection = {} + detection["boxes"] = np.asarray(item[self.output_index_mapping["boxes"]]) + detection["scores"] = np.asarray(item[self.output_index_mapping["scores"]]) + detection["classes"] = np.asarray(item[self.output_index_mapping["classes"]]) + detections.append(detection) + + bboxes, str_labels, int_labels, image_ids = labels + labels = [] + if len(int_labels[0]) == 0: + for str_label in str_labels: + str_label = [x if type(x) == "str" else x.decode("utf-8") for x in str_label] + labels.append([self.category_map_reverse[x] for x in str_label]) + elif len(str_labels[0]) == 0: + for int_label in int_labels: + labels.append([x for x in int_label]) + + for idx, image_id in enumerate(image_ids): + image_id = image_id if type(image_id) == "str" else image_id.decode("utf-8") + if image_id in self.image_ids: + continue + self.image_ids.append(image_id) + + ground_truth = {} + ground_truth["boxes"] = np.asarray(bboxes[idx]) + ground_truth["classes"] = np.asarray(labels[idx]) + + self.ground_truth_list.extend( + ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self.annotation_id, + category_id_set=self.category_id_set, + groundtruth_boxes=ground_truth["boxes"], + groundtruth_classes=ground_truth["classes"], + ) + ) + self.annotation_id += ground_truth["boxes"].shape[0] + + self.detection_list.extend( + ExportSingleImageDetectionBoxesToCoco( + image_id=image_id, + category_id_set=self.category_id_set, + detection_boxes=detections[idx]["boxes"], + detection_scores=detections[idx]["scores"], + detection_classes=detections[idx]["classes"], + ) + ) + + def reset(self): + """Reset the prediction and labels.""" + self.image_ids = [] + self.ground_truth_list = [] + self.detection_list = [] + self.annotation_id = 1 + + def result(self): + """Compute mean average precision. + + Returns: + The mean average precision score. 
+ """ + from coco_tools import COCOEvalWrapper, COCOWrapper + + if len(self.ground_truth_list) == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + else: + groundtruth_dict = { + "annotations": self.ground_truth_list, + "images": [{"id": image_id} for image_id in self.image_ids], + "categories": [{"id": k, "name": v} for k, v in self.category_map.items()], + } + coco_wrapped_groundtruth = COCOWrapper(groundtruth_dict) + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(self.detection_list) + box_evaluator = COCOEvalWrapper( + coco_wrapped_groundtruth, + coco_wrapped_detections, + agnostic_mode=False, + iou_thrs=self.iou_thrs, + map_points=self.map_points, + ) + box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics( + include_metrics_per_category=False, all_metrics_per_category=False + ) + box_metrics.update(box_per_category_ap) + box_metrics = {"DetectionBoxes_" + key: value for key, value in iter(box_metrics.items())} + + return box_metrics[self.map_key] + + +class ParseDecodeCoco: # pragma: no cover + """Helper function for TensorflowModelZooBertDataset. + + Parse the features from sample. + """ + + def __call__(self, sample): + """Parse the sample data. + + Args: + sample: Data to be parsed. + """ + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/object/class/text": tf.compat.v1.VarLenFeature(dtype=tf.string), + "image/object/class/label": tf.compat.v1.VarLenFeature(dtype=tf.int64), + "image/source_id": tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=""), + } + sparse_float32 = tf.compat.v1.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(sample, feature_map) + + xmin = tf.expand_dims(features["image/object/bbox/xmin"].values, 0) + ymin = tf.expand_dims(features["image/object/bbox/ymin"].values, 0) + xmax = tf.expand_dims(features["image/object/bbox/xmax"].values, 0) + ymax = tf.expand_dims(features["image/object/bbox/ymax"].values, 0) + + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + encoded_image = features["image/encoded"] + image_tensor = tf.image.decode_image(encoded_image, channels=3) + image_tensor.set_shape([None, None, 3]) + + str_label = features["image/object/class/text"].values + int_label = features["image/object/class/label"].values + image_id = features["image/source_id"] + + return image_tensor, (bbox[0], str_label, int_label, image_id) + + +class COCORecordDataset(object): + """Tensorflow COCO dataset in tf record format. + + Root is a full path to tfrecord file, which contains the file name. + Please use Resize transform when batch_size > 1 + + Args: root (str): Root directory of dataset. + num_cores (int, default=28):The number of input Datasets to interleave from in parallel. + transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. 
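+
+    Example:
+        A typical `root` is the `coco_val.record` file generated by
+        `prepare_dataset.sh` (see the README in this directory).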
+ """ + + def __new__(cls, root, num_cores=28, transform=None, filter=filter): + """Build a new object.""" + record_iterator = tf.compat.v1.python_io.tf_record_iterator(root) + example = tf.train.SequenceExample() + for element in record_iterator: + example.ParseFromString(element) + break + feature = example.context.feature + if ( + len(feature["image/object/class/text"].bytes_list.value) == 0 + and len(feature["image/object/class/label"].int64_list.value) == 0 + ): + raise ValueError( + "Tfrecord format is incorrect, please refer\ + 'https://github.com/tensorflow/models/blob/master/research/\ + object_detection/dataset_tools/create_coco_tf_record.py' to\ + create correct tfrecord" + ) + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + tfrecord_paths = [root] + ds = tf.data.TFRecordDataset.list_files(tfrecord_paths) + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, + cycle_length=num_cores, + block_length=5, + sloppy=True, + buffer_output_elements=10000, + prefetch_input_elements=10000, + ) + ) + if transform is not None: + transform.transform_list.insert(0, ParseDecodeCoco()) + else: + transform = ParseDecodeCoco() + ds = ds.map(transform, num_parallel_calls=None) + if filter is not None: + ds = ds.filter(filter) + ds = ds.prefetch(buffer_size=1000) + return ds + + +class TFDataLoader(object): + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + 
samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/main.py b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/main.py new file mode 100644 index 00000000000..632d66ac25a --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/main.py @@ -0,0 +1,133 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
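+#
+# This entry point quantizes the Mask R-CNN Inception v2 frozen graph with the
+# Neural Compressor 3.x TensorFlow API (--tune) and evaluates a graph for
+# accuracy or performance (--benchmark); see run_quant.sh and run_benchmark.sh
+# in this directory for typical invocations.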
+# +# +from __future__ import division + +import time + +import numpy as np +import tensorflow as tf + +from argparse import ArgumentParser +from data_process import( + COCOmAPv2, + COCORecordDataset, + ComposeTransform, + TFDataLoader, + LabelBalanceCOCORecordFilter, + TensorflowResizeWithRatio, +) + +arg_parser = ArgumentParser(description='Parse args') + +arg_parser.add_argument('-g', + "--input-graph", + help='Specify the input graph.', + dest='input_graph') +arg_parser.add_argument('--config', type=str, default='') +arg_parser.add_argument('--dataset_location', type=str, default='') +arg_parser.add_argument('--output_model', type=str, default='') +arg_parser.add_argument('--mode', type=str, default='accuracy') +arg_parser.add_argument('--batch_size', type=int, default=10) +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='iterations') +arg_parser.add_argument('--tune', action='store_true', default=False) +arg_parser.add_argument('--benchmark', dest='benchmark', + action='store_true', help='run benchmark') +args = arg_parser.parse_args() + +def evaluate(model): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph or string or INC.model.TensorflowCheckpointModel): The input model. + + Returns: + accuracy (float): evaluation result, the larger is better. + """ + from neural_compressor.tensorflow import Model + if isinstance(model, str) or isinstance(model, tf.compat.v1.Graph): + model = Model(model) + model.input_tensor_names = ["image_tensor:0"] + model.output_tensor_names = ["num_detections:0", "detection_boxes:0", \ + "detection_scores:0", "detection_classes:0"] + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + warmup = 5 + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + metric = COCOmAPv2(output_index_mapping={'num_detections':0, 'boxes':1, 'scores':2, 'classes':3}) + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list[warmup:]).mean() / args.batch_size + return latency + + use_padding = True if args.mode == 'performance' else False + eval_dataset = COCORecordDataset(root=args.dataset_location, filter=None, \ + transform=ComposeTransform(transform_list=[TensorflowResizeWithRatio( + min_dim=800, max_dim=1356, padding=use_padding)])) + batch_size = 1 if args.mode == 'accuracy' else args.batch_size + eval_dataloader=TFDataLoader(dataset=eval_dataset, batch_size=batch_size) + + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. 
/ latency)) + acc = metric.result() + return acc + +def main(_): + calib_dataset = COCORecordDataset(root=args.dataset_location, filter=LabelBalanceCOCORecordFilter(size=1)) + calib_dataloader = TFDataLoader(dataset=calib_dataset, batch_size=1) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + quant_config = StaticQuantConfig(weight_granularity="per_channel") + model = Model(args.input_graph) + model.input_tensor_names = ['image_tensor'] + model.output_tensor_names = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"] + q_model = quantize_model(model, quant_config, calib_dataloader) + q_model.save(args.output_model) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_graph) + else: + accuracy = evaluate(args.input_graph) + print('Batch size = %d' % args.batch_size) + print("Accuracy: %.5f" % accuracy) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..6c2115f58ff --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/run_benchmark.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location "${dataset_location}" \ + --batch_size ${batch_size} \ + --iters ${iters} \ + --benchmark +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..559d695f768 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/mask_rcnn_inception_v2/quantization/ptq/run_quant.sh @@ -0,0 +1,41 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph "${input_model}" \ + --output_model "${output_model}" \ + --dataset_location "${dataset_location}" \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/prepare_dataset.sh b/examples/3.x_api/tensorflow/object_detection/prepare_dataset.sh new file mode 100644 index 00000000000..fea0ff1c373 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/prepare_dataset.sh @@ -0,0 +1,136 @@ +!/bin/bash +# set -x + +DATA_DIR="${PWD}/data" +DATA_NAME="val2017" 
+DATA_URL_LIST='http://images.cocodataset.org/zips/val2017.zip http://images.cocodataset.org/annotations/annotations_trainval2017.zip'
+PACKAGES_LIST='val2017.zip annotations_trainval2017.zip'
+VAL_IMAGE_DIR=$DATA_DIR/val2017
+TRAIN_ANNOTATIONS_FILE=$DATA_DIR/annotations/empty.json
+VAL_ANNOTATIONS_FILE=$DATA_DIR/annotations/instances_val2017.json
+TESTDEV_ANNOTATIONS_FILE=$DATA_DIR/annotations/empty.json
+OUTPUT_DIR=$DATA_DIR
+
+help()
+{
+  cat <<- EOF
+
+  Desc: Prepare dataset for Tensorflow COCO object detection.
+
+  -h --help              help info
+
+  --dataset_location     set dataset location, default is ./data
+
+EOF
+  exit 0
+}
+
+function main {
+  init_params "$@"
+  download_dataset
+  convert_to_tf_record
+}
+
+# init params
+function init_params {
+
+  for var in "$@"
+  do
+    case $var in
+      --dataset_location=*)
+        DATA_DIR=$(echo "$var" |cut -f2 -d=)
+        ;;
+      -h|--help) help
+        ;;
+      *)
+        echo "Error: No such parameter: ${var}"
+        exit 1
+        ;;
+    esac
+  done
+
+}
+
+# removes files that will not be used anymore
+function remove_zipped_packages {
+  for package in $PACKAGES_LIST; do
+    rm "$package"
+  done
+}
+
+function download_tf_models_repo {
+  if [ ! -d models ]; then
+    git clone https://github.com/tensorflow/models.git
+  fi
+  cd models || exit
+  git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40
+  cd ..
+}
+
+function divide_tf_records_by_dataset {
+  if [ ! -d "${DATA_DIR}/tf_test2017" ]; then
+    mkdir "${DATA_DIR}/tf_test2017"
+  fi
+  if [ ! -d "${DATA_DIR}/tf_train2017" ]; then
+    mkdir "${DATA_DIR}/tf_train2017"
+  fi
+  if [ ! -d "${DATA_DIR}/tf_val2017" ]; then
+    mkdir "${DATA_DIR}/tf_val2017"
+  fi
+  mv ${DATA_DIR}/coco_testdev.record* ${DATA_DIR}/tf_test2017
+  mv ${DATA_DIR}/coco_train.record* ${DATA_DIR}/tf_train2017
+  mv ${DATA_DIR}/coco_val.record* ${DATA_DIR}/tf_val2017
+}
+
+function convert {
+  cd models/research
+  protoc object_detection/protos/*.proto --python_out=.
+  export PYTHONPATH=$PYTHONPATH:$(pwd)
+  export PYTHONPATH=$PYTHONPATH:$(pwd)/slim
+  python ./object_detection/dataset_tools/create_coco_tf_record.py --logtostderr \
+    --train_image_dir=empty_dir \
+    --val_image_dir="${VAL_IMAGE_DIR}" \
+    --test_image_dir=empty_dir \
+    --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
+    --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
+    --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
+    --output_dir="${OUTPUT_DIR}"
+}
+
+function convert_to_tf_record {
+  download_tf_models_repo
+  convert
+  divide_tf_records_by_dataset
+}
+
+# download_dataset
+function download_dataset {
+  if [ ! -d "${DATA_DIR}" ]; then
+    mkdir "${DATA_DIR}"
+  fi
+
+  cd "${DATA_DIR}" || exit
+  if [ ! -d "${VAL_IMAGE_DIR}" ]; then
+
+    for dataset_download_link in $DATA_URL_LIST; do
+      wget "$dataset_download_link"
+    done
+    for package in $PACKAGES_LIST; do
+      unzip -o "$package"
+    done
+    remove_zipped_packages
+    if [ ! -d empty_dir ]; then
+      mkdir empty_dir
+    fi
+
+    cd annotations || exit
+    echo "{ \"images\": {}, \"categories\": {}}" > empty.json
+    cd ..
+  else
+    echo "Dataset ${DATA_NAME} already exists!"
+  fi
+
+  cd ../
+}
+
+main "$@"
diff --git a/examples/3.x_api/tensorflow/object_detection/requirements.txt b/examples/3.x_api/tensorflow/object_detection/requirements.txt
new file mode 100644
index 00000000000..865df0f3a6b
--- /dev/null
+++ b/examples/3.x_api/tensorflow/object_detection/requirements.txt
@@ -0,0 +1,8 @@
+Cython
+contextlib2
+pillow>=8.2.0
+lxml>=4.6.2
+matplotlib
+numpy>=1.17.4
+pycocotools
+protobuf
diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/README.md b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/README.md
new file mode 100644
index 00000000000..1b52ecf8b17
--- /dev/null
+++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/README.md
@@ -0,0 +1,160 @@
+Step-by-Step
+============
+
+This document lists the steps to reproduce the tuning results of the TensorFlow Object Detection models. This example can run on Intel CPUs and GPUs.
+
+# Prerequisite
+
+
+## 1. Environment
+Python 3.6 or a higher version is recommended.
+
+### Install Intel® Neural Compressor
+```shell
+pip install neural-compressor
+```
+
+### Install Intel Tensorflow
+```shell
+pip install intel-tensorflow
+```
+> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+### Install Dependency Packages
+```shell
+cd examples/3.x_api/tensorflow/object_detection
+pip install -r requirements.txt
+cd ssd_mobilenet_v1/quantization/ptq
+```
+
+### Install Protocol Buffer Compiler
+
+The `Protocol Buffer Compiler` (version higher than 3.0.0) is a necessary ingredient for the automatic COCO dataset preparation. To install it, please follow the
+[Protobuf installation instructions](https://grpc.io/docs/protoc-installation/#install-using-a-package-manager).
+
+### Install Intel Extension for Tensorflow
+
+#### Quantizing the model on Intel GPU (Mandatory to install ITEX)
+Intel Extension for Tensorflow must be installed to quantize the model on Intel GPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[xpu]
+```
+For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).
+
+#### Quantizing the model on Intel CPU (Optional to install ITEX)
+Intel Extension for Tensorflow for Intel CPUs is currently experimental. It is not mandatory for quantizing the model on Intel CPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+## 2. Prepare Model
+
+### Automated approach
+Run the `prepare_model.py` script located in `examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq`.
+
+```
+python prepare_model.py --model_name=ssd_mobilenet_v1 --model_path=./
+
+Prepare pre-trained model for COCO object detection
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --model_name {ssd_resnet50_v1,ssd_mobilenet_v1}
+                        model to download, default is ssd_resnet50_v1
+  --model_path MODEL_PATH
+                        directory to put models, default is ./model
+```
+
+### Manual approach
+
+```shell
+wget http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz
+tar -xvzf ssd_mobilenet_v1_coco_2018_01_28.tar.gz
+```
+
+## 3. Prepare Dataset
+
+### Automatic dataset download
+
+> **_Note: the `prepare_dataset.sh` script works with TF version 1.x._**
+
+Run the `prepare_dataset.sh` script located in `examples/3.x_api/tensorflow/object_detection`.
+
+Usage:
+```shell
+cd examples/3.x_api/tensorflow/object_detection
+. prepare_dataset.sh
+cd ssd_mobilenet_v1/quantization/ptq
+```
+
+This script will download the *train*, *validation* and *test* COCO datasets. Furthermore, it will convert them to
+TensorFlow records using the dedicated script from `https://github.com/tensorflow/models.git`.
+
+### Manual dataset download
+Download the COCO dataset from the [Official Website](https://cocodataset.org/#download).
+
+
+# Run Command
+
+Both pb and ckpt formats are now supported.
+
+## 1. Quantization
+### For PB format
+
+  ```shell
+  bash run_quant.sh --input_model=./ssd_mobilenet_v1_coco_2018_01_28/frozen_inference_graph.pb --output_model=./tensorflow-ssd_mobilenet_v1-tune.pb --dataset_location=/path/to/dataset/coco_val.record
+  ```
+
+### For ckpt format
+
+  ```shell
+  bash run_quant.sh --input_model=./ssd_mobilenet_v1_coco_2018_01_28/ --output_model=./tensorflow-ssd_mobilenet_v1-tune.pb --dataset_location=/path/to/dataset/coco_val.record
+  ```
+
+## 2. Benchmark
+  ```shell
+  # run performance benchmark
+  bash run_benchmark.sh --input_model=./tensorflow-ssd_mobilenet_v1-tune.pb --dataset_location=/path/to/dataset/coco_val.record --mode=performance
+
+  # run accuracy benchmark
+  bash run_benchmark.sh --input_model=./tensorflow-ssd_mobilenet_v1-tune.pb --dataset_location=/path/to/dataset/coco_val.record --mode=accuracy
+  ```
+
+Details of enabling Intel® Neural Compressor on ssd_mobilenet_v1 for Tensorflow
+=========================
+
+This is a tutorial on how to enable the ssd_mobilenet_v1 model with Intel® Neural Compressor.
+## User Code Analysis
+The user specifies the fp32 *model*, the calibration dataloader *q_dataloader* and a custom *eval_func* that encapsulates the evaluation dataset and metric by itself.
+
+For ssd_mobilenet_v1, we apply the custom *eval_func* approach because our philosophy is to enable the model with minimal changes. Hence we need to make two changes to the original code: the first is to implement the calibration dataloader *q_dataloader* (a minimal sketch follows below), and the second is to make the necessary changes to *eval_func*.
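+
+A minimal sketch of the calibration dataloader, built from the helper classes provided in `data_process.py` of this example (it mirrors what `main.py` already does; the dataset path and batch size below are placeholders):
+
+```python
+from data_process import COCORecordDataset, ComposeTransform, ResizeTFTransform, TFDataLoader
+
+# Calibration data: COCO validation TF records, resized to the 300x300 input expected by ssd_mobilenet_v1.
+calib_dataset = COCORecordDataset(
+    root="/path/to/dataset/coco_val.record",
+    filter=None,
+    transform=ComposeTransform(transform_list=[ResizeTFTransform(size=300)]),
+)
+calib_dataloader = TFDataLoader(dataset=calib_dataset, batch_size=10)
+```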
+
+### Code update
+
+After the preparation step is done, we just need to update main.py as shown below.
+```python
+    if args.tune:
+        from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model
+
+        quant_config = StaticQuantConfig(weight_granularity="per_channel")
+        model = Model(args.input_graph)
+        model.input_tensor_names = ['image_tensor']
+        model.output_tensor_names = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"]
+        q_model = quantize_model(model, quant_config, calib_dataloader)
+        q_model.save(args.output_model)
+
+    if args.benchmark:
+        if args.mode == 'performance':
+            evaluate(args.input_graph)
+        else:
+            accuracy = evaluate(args.input_graph)
+            print('Batch size = %d' % args.batch_size)
+            print("Accuracy: %.5f" % accuracy)
+```
+
+The `quantize_model` function shown above returns the quantized model, which is then saved to the specified output path.
diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/coco_tools.py b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/coco_tools.py
new file mode 100644
index 00000000000..2f9369798df
--- /dev/null
+++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/coco_tools.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Wrappers for third party pycocotools to be used within object_detection.
+
+Note that nothing in this file is tensorflow related and thus it cannot
+be called directly as a slim metric, for example.
+
+TODO(jonathanhuang): wrap as a slim metric in metrics.py
+
+
+Usage example: given a set of images with ids in the list image_ids
+and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
+and detections (boxes, scores and classes), where elements of each list
+correspond to detections/annotations of a single image,
+then evaluation (in multi-class mode) can be invoked as follows:
+
+  groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
+      image_ids, groundtruth_boxes_list, groundtruth_classes_list,
+      max_num_classes, output_path=None)
+  detections_list = coco_tools.ExportDetectionsToCOCO(
+      image_ids, detection_boxes_list, detection_scores_list,
+      detection_classes_list, output_path=None)
+  groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
+  detections = groundtruth.LoadAnnotations(detections_list)
+  evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
+                                         agnostic_mode=False)
+  metrics = evaluator.ComputeMetrics()
+"""
+
+import copy
+import time
+from collections import OrderedDict
+from typing import Any, Dict, List, Set, Union
+
+import numpy as np
+from pycocotools import coco, cocoeval, mask
+
+from neural_compressor.utils import logger
+
+
+class COCOWrapper(coco.COCO):
+    """Wrapper for the pycocotools COCO class.
+
+    Attributes:
+        dataset: a dictionary holding bounding box annotations in the COCO format.
+        detection_type: type of detections being wrapped.
Can be one of ['bbox', + 'segmentation'] + """ + + def __init__(self, dataset: Dict[str, Any], detection_type: str = "bbox"): + """Construct a COCOWrapper. + + See http://mscoco.org/dataset/#format for a description of the format. + By default, the coco.COCO class constructor reads from a JSON file. + This function duplicates the same behavior but loads from a dictionary, + allowing us to perform evaluation without writing to external storage. + + Args: + dataset: a dictionary holding bounding box annotations in the COCO format. + detection_type: type of detections being wrapped. Can be one of ['bbox', + 'segmentation'] + + Raises: + ValueError: if detection_type is unsupported. + """ + supported_detection_types = ["bbox", "segmentation"] + if detection_type not in supported_detection_types: + raise ValueError( + "Unsupported detection type: {}. " + "Supported values are: {}".format(detection_type, supported_detection_types) + ) + self._detection_type = detection_type + coco.COCO.__init__(self) + self.dataset = dataset + self.createIndex() + + def LoadAnnotations(self, annotations: list) -> coco.COCO: + """Load annotations dictionary into COCO datastructure. + + See http://mscoco.org/dataset/#format for a description of the annotations + format. As above, this function replicates the default behavior of the API + but does not require writing to external storage. + + Args: + annotations: python list holding object detection results where each + detection is encoded as a dict with required keys ['image_id', + 'category_id', 'score'] and one of ['bbox', 'segmentation'] based on + `detection_type`. + + Returns: + a coco.COCO datastructure holding object detection annotations results + + Raises: + ValueError: if (1) annotations is not a list or annotations do not + correspond to the images contained in self. + """ + results = coco.COCO() + results.dataset["images"] = [img for img in self.dataset["images"]] + + logger.info("Load and prepare annotation results.") + tic = time.time() + + if not isinstance(annotations, list): + raise ValueError("annotations is not a list of objects") + annotation_img_ids = [ann["image_id"] for ann in annotations] + if set(annotation_img_ids) != (set(annotation_img_ids) & set(self.getImgIds())): + raise ValueError("Results do not correspond to current coco set") + results.dataset["categories"] = copy.deepcopy(self.dataset["categories"]) + if self._detection_type == "bbox": + for idx, ann in enumerate(annotations): + bb = ann["bbox"] + ann["area"] = bb[2] * bb[3] + ann["id"] = idx + 1 + ann["iscrowd"] = 0 + elif self._detection_type == "segmentation": + for idx, ann in enumerate(annotations): + ann["area"] = mask.area(ann["segmentation"]) + ann["bbox"] = mask.toBbox(ann["segmentation"]) + ann["id"] = idx + 1 + ann["iscrowd"] = 0 + logger.info("DONE (t=%0.2fs)", (time.time() - tic)) + + results.dataset["annotations"] = annotations + results.createIndex() + return results + + +class COCOEvalWrapper(cocoeval.COCOeval): + """Wrapper for the pycocotools COCOeval class. + + To evaluate, create two objects (groundtruth_dict and detections_list) + using the conventions listed at http://mscoco.org/dataset/#format. 
+ Then call evaluation as follows: + + groundtruth = coco_tools.COCOWrapper(groundtruth_dict) + detections = groundtruth.LoadAnnotations(detections_list) + evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections, + agnostic_mode=False) + metrics = evaluator.ComputeMetrics() + """ + + def __init__( + self, + groundtruth: coco.COCO = None, + detections: coco.COCO = None, + agnostic_mode=False, + iou_type: str = "bbox", + iou_thrs: Union[str, float] = None, + map_points=None, + ): + """Construct a COCOEvalWrapper. + + Note that for the area-based metrics to be meaningful, detection and + groundtruth boxes must be in image coordinates measured in pixels. + + Args: + groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding + groundtruth annotations + detections: a coco.COCO (or coco_tools.COCOWrapper) object holding + detections + agnostic_mode: boolean (default: False). If True, evaluation ignores + class labels, treating all detections as proposals. + iou_thrs: Minimal value for intersection over union that allows to + make decision that prediction bounding box is true positive. + You can specify one float value between 0 to 1 or + string "05:0.05:0.95" for standard COCO thresholds. + iou_type: IOU type to use for evaluation. Supports `bbox` or `segm`. + map_points: The way to calculate mAP. 101 for 101-point interpolated AP, 11 for + 11-point interpolated AP, 0 for area under PR curve. + """ + cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type) + if agnostic_mode: + self.params.useCats = 0 + if iou_thrs == "0.5:0.05:0.95": + self.params.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + elif isinstance(iou_thrs, float): + self.params.iouThrs = [iou_thrs] + + if map_points == 101: + self.params.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + if map_points == 11: + self.params.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.1)) + 1, endpoint=True) + if map_points == 0: + self.params.recThrs = [-1] + + def GetCategory(self, category_id: int) -> dict: + """Fetch dictionary holding category information given category id. + + Args: + category_id: integer id + + Returns: + dictionary holding 'id', 'name'. + """ + return self.cocoGt.cats[category_id] + + def GetAgnosticMode(self) -> bool: + """Return whether COCO Eval is configured to evaluate in agnostic mode.""" + return self.params.useCats == 0 + + def GetCategoryIdList(self) -> List[int]: + """Return the list of IDs of all valid categories.""" + return self.params.catIds + + def accumulate(self, p: cocoeval.Params = None): + """Accumulate evaluation results per image and store it to self.eval. 
+ + Args: + p: input params for evaluation + """ + print("Accumulating evaluation results...") + tic = time.time() + if not self.evalImgs: + print("Please run evaluate() first") + # allows input customized parameters + if p is None: + p = self.params + p.catIds = p.catIds if p.useCats == 1 else [-1] + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) if p.useCats else 1 + A = len(p.areaRng) + M = len(p.maxDets) + precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories + recall = -np.ones((T, K, A, M)) + scores = -np.ones((T, R, K, A, M)) + + # create dictionary for future indexing + _pe = self._paramsEval + print("-pe", _pe) + catIds = _pe.catIds if _pe.useCats else [-1] + setK = set(catIds) + setA = set(map(tuple, _pe.areaRng)) + setM = set(_pe.maxDets) + setI = set(_pe.imgIds) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIds) if i in setI] + I0 = len(_pe.imgIds) + A0 = len(_pe.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. + inds = np.argsort(-dtScores, kind="mergesort") + dtScoresSorted = dtScores[inds] + + dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e["gtIgnore"] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + tps = np.logical_and(dtm, np.logical_not(dtIg)) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) + + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + + # calculate precision + if R == 1: + rc = np.concatenate(([0.0], rc, [1.0])) + pr = np.concatenate(([0.0], pr, [0.0])) + + # compute the precision envelope + for i in range(pr.size - 1, 0, -1): + pr[i - 1] = np.maximum(pr[i - 1], pr[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + change_point = np.where(rc[1:] != rc[:-1])[0] + # and sum (\Delta recall) * recall + res = np.sum((rc[change_point + 1] - rc[change_point]) * pr[change_point + 1]) + precision[t, :, k, a, m] = np.array([res]) + else: + q = np.zeros((R,)) + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + except: + pass + precision[t, :, k, a, m] = np.array(q) + + # calculate recall + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # 
calculate score + ss = np.zeros((R,)) + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + ss[ri] = dtScoresSorted[pi] + except: + pass + scores[t, :, k, a, m] = np.array(ss) + # exit(0) + self.eval = { + "params": p, + "counts": [T, R, K, A, M], + "precision": precision, + "recall": recall, + "scores": scores, + } + toc = time.time() + print("DONE (t={:0.2f}s).".format(toc - tic)) + + def ComputeMetrics( + self, include_metrics_per_category: bool = False, all_metrics_per_category: bool = False + ): # pragma: no cover + """Compute detection metrics. + + Args: + include_metrics_per_category: Whether include metrics per category. + all_metrics_per_category: Whether include all the summery metrics for + each category in per_category_ap. Be careful with setting it to true if + you have more than handful of categories, because it will pollute + your mldash. + + Returns: + A tuple of (summary_metrics, per_category_ap), in which + (1) summary_metrics is a dictionary holding: + 'Precision/mAP': mean average precision over classes averaged over IOU + thresholds ranging from .5 to .95 with .05 increments; + 'Precision/mAP@.50IOU': mean average precision at 50% IOU; + 'Precision/mAP@.75IOU': mean average precision at 75% IOU; + 'Precision/mAP (small)': mean average precision for small objects + (area < 32^2 pixels); + 'Precision/mAP (medium)': mean average precision for medium sized + objects (32^2 pixels < area < 96^2 pixels); + 'Precision/mAP (large)': mean average precision for large objects + (96^2 pixels < area < 10000^2 pixels); + 'Recall/AR@1': average recall with 1 detection; + 'Recall/AR@10': average recall with 10 detections; + 'Recall/AR@100': average recall with 100 detections; + 'Recall/AR@100 (small)': average recall for small objects with 100 + detections; + 'Recall/AR@100 (medium)': average recall for medium objects with 100 + detections; + 'Recall/AR@100 (large)': average recall for large objects with 100 + detections; + and (2) per_category_ap is a dictionary holding category specific results with + keys of the form: 'Precision mAP ByCategory/category' + (without the supercategory part if no supercategories exist). + + For backward compatibility 'PerformanceByCategory' is included in the + output regardless of all_metrics_per_category. If evaluating class-agnostic + mode, per_category_ap is an empty dictionary. + + Raises: + ValueError: If category_stats does not exist. 
+ """ + self.evaluate() + self.accumulate() + self.summarize() + + summary_metrics = OrderedDict( + [ + ("Precision/mAP", self.stats[0]), + ("Precision/mAP@.50IOU", self.stats[1]), + ("Precision/mAP@.75IOU", self.stats[2]), + ("Precision/mAP (small)", self.stats[3]), + ("Precision/mAP (medium)", self.stats[4]), + ("Precision/mAP (large)", self.stats[5]), + ("Recall/AR@1", self.stats[6]), + ("Recall/AR@10", self.stats[7]), + ("Recall/AR@100", self.stats[8]), + ("Recall/AR@100 (small)", self.stats[9]), + ("Recall/AR@100 (medium)", self.stats[10]), + ("Recall/AR@100 (large)", self.stats[11]), + ] + ) + if not include_metrics_per_category: + return summary_metrics, {} + if not hasattr(self, "category_stats"): + raise ValueError("Category stats do not exist") + per_category_ap = OrderedDict([]) + if self.GetAgnosticMode(): + return summary_metrics, per_category_ap + for category_index, category_id in enumerate(self.GetCategoryIdList()): + category = self.GetCategory(category_id)["name"] + # Kept for backward compatilbility + # pylint: disable=no-member + per_category_ap["PerformanceByCategory/mAP/{}".format(category)] = self.category_stats[0][category_index] + if all_metrics_per_category: + per_category_ap["Precision mAP ByCategory/{}".format(category)] = self.category_stats[0][category_index] + per_category_ap["Precision mAP@.50IOU ByCategory/{}".format(category)] = self.category_stats[1][ + category_index + ] + per_category_ap["Precision mAP@.75IOU ByCategory/{}".format(category)] = self.category_stats[2][ + category_index + ] + per_category_ap["Precision mAP (small) ByCategory/{}".format(category)] = self.category_stats[3][ + category_index + ] + per_category_ap["Precision mAP (medium) ByCategory/{}".format(category)] = self.category_stats[4][ + category_index + ] + per_category_ap["Precision mAP (large) ByCategory/{}".format(category)] = self.category_stats[5][ + category_index + ] + per_category_ap["Recall AR@1 ByCategory/{}".format(category)] = self.category_stats[6][category_index] + per_category_ap["Recall AR@10 ByCategory/{}".format(category)] = self.category_stats[7][category_index] + per_category_ap["Recall AR@100 ByCategory/{}".format(category)] = self.category_stats[8][category_index] + per_category_ap["Recall AR@100 (small) ByCategory/{}".format(category)] = self.category_stats[9][ + category_index + ] + per_category_ap["Recall AR@100 (medium) ByCategory/{}".format(category)] = self.category_stats[10][ + category_index + ] + per_category_ap["Recall AR@100 (large) ByCategory/{}".format(category)] = self.category_stats[11][ + category_index + ] + + return summary_metrics, per_category_ap + + +def _ConvertBoxToCOCOFormat(box): + """Convert a box in [ymin, xmin, ymax, xmax] format to COCO format. + + This is a utility function for converting from our internal + [ymin, xmin, ymax, xmax] convention to the convention used by the COCO API + i.e., [xmin, ymin, width, height]. + + Args: + box: a numpy array in format of [ymin, xmin, ymax, xmax] + + Returns: + A list of floats, in COCO format, representing [xmin, ymin, width, height] + """ + return [float(box[1]), float(box[0]), float(box[3] - box[1]), float(box[2] - box[0])] + + +def _RleCompress(masks): + """Compresses mask using Run-length encoding provided by pycocotools. + + Args: + masks: uint8 numpy array of shape [mask_height, mask_width] with values in + {0, 1}. + + Returns: + A pycocotools Run-length encoding of the mask. 
+ """ + return mask.encode(np.asfortranarray(masks)) + + +def ExportSingleImageGroundtruthToCoco( + image_id: Union[int, str], + next_annotation_id: int, + category_id_set: Set[str], + groundtruth_boxes: np.array, + groundtruth_classes: np.array, + groundtruth_masks: Union[np.array, None] = None, + groundtruth_is_crowd: Union[np.array, None] = None, +) -> list: + """Export groundtruth of a single image to COCO format. + + This function converts groundtruth detection annotations represented as numpy + arrays to dictionaries that can be ingested by the COCO evaluation API. Note + that the image_ids provided here must match the ones given to + ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in + correspondence - that is: groundtruth_boxes[i, :], and + groundtruth_classes[i] are associated with the same groundtruth annotation. + + In the exported result, "area" fields are always set to the area of the + groundtruth bounding box. + + Args: + image_id: a unique image identifier either of type integer or string. + next_annotation_id: integer specifying the first id to use for the + groundtruth annotations. All annotations are assigned a continuous integer + id starting from this value. + category_id_set: A set of valid class ids. Groundtruth with classes not in + category_id_set are dropped. + groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4] + groundtruth_classes: numpy array (int) with shape [num_gt_boxes] + groundtruth_masks: optional uint8 numpy array of shape [num_detections, + image_height, image_width] containing detection_masks. + groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes] + indicating whether groundtruth boxes are crowd. + + Returns: + A list of groundtruth annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the + right lengths or (2) if each of the elements inside these lists do not + have the correct shapes or (3) if image_ids are not integers + """ + if len(groundtruth_classes.shape) != 1: + raise ValueError("groundtruth_classes is " "expected to be of rank 1.") + if len(groundtruth_boxes.shape) != 2: + raise ValueError("groundtruth_boxes is expected to be of " "rank 2.") + if groundtruth_boxes.shape[1] != 4: + raise ValueError("groundtruth_boxes should have " "shape[1] == 4.") + num_boxes = groundtruth_classes.shape[0] + if num_boxes != groundtruth_boxes.shape[0]: + raise ValueError( + "Corresponding entries in groundtruth_classes, " + "and groundtruth_boxes should have " + "compatible shapes (i.e., agree on the 0th dimension)." + "Classes shape: %d. Boxes shape: %d. 
Image ID: %s" + % (groundtruth_classes.shape[0], groundtruth_boxes.shape[0], image_id) + ) + has_is_crowd = groundtruth_is_crowd is not None + if has_is_crowd and len(groundtruth_is_crowd.shape) != 1: + raise ValueError("groundtruth_is_crowd is expected to be of rank 1.") + groundtruth_list = [] + for i in range(num_boxes): + if groundtruth_classes[i] in category_id_set: + iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0 + export_dict = { + "id": next_annotation_id + i, + "image_id": image_id, + "category_id": int(groundtruth_classes[i]), + "bbox": list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])), + "area": float( + (groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) + * (groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]) + ), + "iscrowd": iscrowd, + } + if groundtruth_masks is not None: + export_dict["segmentation"] = _RleCompress(groundtruth_masks[i]) + groundtruth_list.append(export_dict) + return groundtruth_list + + +def ExportSingleImageDetectionBoxesToCoco( + image_id: Union[int, str], + category_id_set: Set[int], + detection_boxes: np.array, + detection_scores: np.array, + detection_classes: np.array, +) -> list: + """Export detections of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. Note that the image_ids + provided here must match the ones given to the + ExporSingleImageDetectionBoxesToCoco. We assume that boxes, and classes are in + correspondence - that is: boxes[i, :], and classes[i] + are associated with the same groundtruth annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_boxes: float numpy array of shape [num_detections, 4] containing + detection boxes. + detection_scores: float numpy array of shape [num_detections] containing + scored for the detection boxes. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection boxes. + + Returns: + A list of detection annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_boxes, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError("All entries in detection_classes and detection_scores" "expected to be of rank 1.") + if len(detection_boxes.shape) != 2: + raise ValueError("All entries in detection_boxes expected to be of " "rank 2.") + if detection_boxes.shape[1] != 4: + raise ValueError("All entries in detection_boxes should have " "shape[1] == 4.") + num_boxes = detection_classes.shape[0] + if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]: + raise ValueError( + "Corresponding entries in detection_classes, " + "detection_scores and detection_boxes should have " + "compatible shapes (i.e., agree on the 0th dimension). " + "Classes shape: %d. Boxes shape: %d. 
" + "Scores shape: %d" % (detection_classes.shape[0], detection_boxes.shape[0], detection_scores.shape[0]) + ) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append( + { + "image_id": image_id, + "category_id": int(detection_classes[i]), + "bbox": list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])), + "score": float(detection_scores[i]), + } + ) + return detections_list + + +def ExportSingleImageDetectionMasksToCoco( + image_id: Union[str, int], + category_id_set: Set[int], + detection_masks: np.array, + detection_scores: np.array, + detection_classes: np.array, +) -> list: + """Export detection masks of a single image to COCO format. + + This function converts detections represented as numpy arrays to dictionaries + that can be ingested by the COCO evaluation API. We assume that + detection_masks, detection_scores, and detection_classes are in correspondence + - that is: detection_masks[i, :], detection_classes[i] and detection_scores[i] + are associated with the same annotation. + + Args: + image_id: unique image identifier either of type integer or string. + category_id_set: A set of valid class ids. Detections with classes not in + category_id_set are dropped. + detection_masks: uint8 numpy array of shape [num_detections, image_height, + image_width] containing detection_masks. + detection_scores: float numpy array of shape [num_detections] containing + scores for detection masks. + detection_classes: integer numpy array of shape [num_detections] containing + the classes for detection masks. + + Returns: + A list of detection mask annotations for a single image in the COCO format. + + Raises: + ValueError: if (1) detection_masks, detection_scores and detection_classes + do not have the right lengths or (2) if each of the elements inside these + lists do not have the correct shapes or (3) if image_ids are not integers. + """ + if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1: + raise ValueError("All entries in detection_classes and detection_scores" "expected to be of rank 1.") + num_boxes = detection_classes.shape[0] + if not num_boxes == len(detection_masks) == detection_scores.shape[0]: + raise ValueError( + "Corresponding entries in detection_classes, " + "detection_scores and detection_masks should have " + "compatible lengths and shapes " + "Classes length: %d. Masks length: %d. " + "Scores length: %d" % (detection_classes.shape[0], len(detection_masks), detection_scores.shape[0]) + ) + detections_list = [] + for i in range(num_boxes): + if detection_classes[i] in category_id_set: + detections_list.append( + { + "image_id": image_id, + "category_id": int(detection_classes[i]), + "segmentation": _RleCompress(detection_masks[i]), + "score": float(detection_scores[i]), + } + ) + return detections_list diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/data_process.py new file mode 100644 index 00000000000..32e55adb3fd --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/data_process.py @@ -0,0 +1,655 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import cv2 +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + +interpolation_map = { + "nearest": cv2.INTER_NEAREST, + "bilinear": cv2.INTER_LINEAR, + "bicubic": cv2.INTER_CUBIC, +} + +category_map = { + 1: "person", + 2: "bicycle", + 3: "car", + 4: "motorcycle", + 5: "airplane", + 6: "bus", + 7: "train", + 8: "truck", + 9: "boat", + 10: "traffic light", + 11: "fire hydrant", + 13: "stop sign", + 14: "parking meter", + 15: "bench", + 16: "bird", + 17: "cat", + 18: "dog", + 19: "horse", + 20: "sheep", + 21: "cow", + 22: "elephant", + 23: "bear", + 24: "zebra", + 25: "giraffe", + 27: "backpack", + 28: "umbrella", + 31: "handbag", + 32: "tie", + 33: "suitcase", + 34: "frisbee", + 35: "skis", + 36: "snowboard", + 37: "sports ball", + 38: "kite", + 39: "baseball bat", + 40: "baseball glove", + 41: "skateboard", + 42: "surfboard", + 43: "tennis racket", + 44: "bottle", + 46: "wine glass", + 47: "cup", + 48: "fork", + 49: "knife", + 50: "spoon", + 51: "bowl", + 52: "banana", + 53: "apple", + 54: "sandwich", + 55: "orange", + 56: "broccoli", + 57: "carrot", + 58: "hot dog", + 59: "pizza", + 60: "donut", + 61: "cake", + 62: "chair", + 63: "couch", + 64: "potted plant", + 65: "bed", + 67: "dining table", + 70: "toilet", + 72: "tv", + 73: "laptop", + 74: "mouse", + 75: "remote", + 76: "keyboard", + 77: "cell phone", + 78: "microwave", + 79: "oven", + 80: "toaster", + 81: "sink", + 82: "refrigerator", + 84: "book", + 85: "clock", + 86: "vase", + 87: "scissors", + 88: "teddy bear", + 89: "hair drier", + 90: "toothbrush", +} + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + + +class ResizeTFTransform(object): + """Resize the input image to the given size. 
+ + Args: + size (list or int): Size of the result + interpolation (str, default='bilinear'):Desired interpolation type, + support 'bilinear', 'nearest', 'bicubic' + + Returns: + tuple of processed image and label + """ + + def __init__(self, size, interpolation="bilinear"): + """Initialize `ResizeTFTransform` class.""" + if isinstance(size, int): + self.size = size, size + elif isinstance(size, list): + if len(size) == 1: + self.size = size[0], size[0] + elif len(size) == 2: + self.size = size[0], size[1] + self.interpolation = interpolation + + if self.interpolation not in ["bilinear", "nearest", "bicubic"]: + raise ValueError("Unsupported interpolation type!") + + def __call__(self, sample): + """Resize the input image in sample to the given size.""" + image, label = sample + if isinstance(image, tf.Tensor): + image = tf.image.resize(image, self.size, method=self.interpolation) + else: + image = cv2.resize(image, self.size, interpolation=interpolation_map[self.interpolation]) + return (image, label) + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class COCOmAPv2(BaseMetric): + """Compute mean average precision of the detection task.""" + + def __init__( + self, + anno_path=None, + iou_thrs="0.5:0.05:0.95", + map_points=101, + map_key="DetectionBoxes_Precision/mAP", + output_index_mapping={"num_detections": -1, "boxes": 0, "scores": 1, "classes": 2}, + ): + """Initialize the metric. + + Args: + anno_path: The path of annotation file. + iou_thrs: Minimal value for intersection over union that allows to make decision + that prediction bounding box is true positive. You can specify one float value + between 0 to 1 or string "05:0.05:0.95" for standard COCO thresholds. + map_points: The way to calculate mAP. 
101 for 101-point interpolated AP, 11 for + 11-point interpolated AP, 0 for area under PR curve. + map_key: The key that mapping to pycocotools COCOeval. + Defaults to 'DetectionBoxes_Precision/mAP'. + output_index_mapping: The output index mapping. + Defaults to {'num_detections':-1, 'boxes':0, 'scores':1, 'classes':2}. + """ + self.output_index_mapping = output_index_mapping + + if anno_path: + import os + import yaml + + assert os.path.exists(anno_path), "Annotation path does not exists!" + with open(anno_path, "r") as f: + label_map = yaml.safe_load(f.read()) + self.category_map_reverse = {k: v for k, v in label_map.items()} + else: + # label: index + self.category_map_reverse = {v: k for k, v in category_map.items()} + self.image_ids = [] + self.ground_truth_list = [] + self.detection_list = [] + self.annotation_id = 1 + self.category_map = category_map + self.category_id_set = set([cat for cat in self.category_map]) # index + self.iou_thrs = iou_thrs + self.map_points = map_points + self.map_key = map_key + + def update(self, predicts, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + predicts: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. Defaults to None. + """ + from coco_tools import ExportSingleImageDetectionBoxesToCoco, ExportSingleImageGroundtruthToCoco + + detections = [] + if "num_detections" in self.output_index_mapping and self.output_index_mapping["num_detections"] > -1: + for item in zip(*predicts): + detection = {} + num = int(item[self.output_index_mapping["num_detections"]]) + detection["boxes"] = np.asarray(item[self.output_index_mapping["boxes"]])[0:num] + detection["scores"] = np.asarray(item[self.output_index_mapping["scores"]])[0:num] + detection["classes"] = np.asarray(item[self.output_index_mapping["classes"]])[0:num] + detections.append(detection) + else: + for item in zip(*predicts): + detection = {} + detection["boxes"] = np.asarray(item[self.output_index_mapping["boxes"]]) + detection["scores"] = np.asarray(item[self.output_index_mapping["scores"]]) + detection["classes"] = np.asarray(item[self.output_index_mapping["classes"]]) + detections.append(detection) + + bboxes, str_labels, int_labels, image_ids = labels + labels = [] + if len(int_labels[0]) == 0: + for str_label in str_labels: + str_label = [x if type(x) == "str" else x.decode("utf-8") for x in str_label] + labels.append([self.category_map_reverse[x] for x in str_label]) + elif len(str_labels[0]) == 0: + for int_label in int_labels: + labels.append([x for x in int_label]) + + for idx, image_id in enumerate(image_ids): + image_id = image_id if type(image_id) == "str" else image_id.decode("utf-8") + if image_id in self.image_ids: + continue + self.image_ids.append(image_id) + + ground_truth = {} + ground_truth["boxes"] = np.asarray(bboxes[idx]) + ground_truth["classes"] = np.asarray(labels[idx]) + + self.ground_truth_list.extend( + ExportSingleImageGroundtruthToCoco( + image_id=image_id, + next_annotation_id=self.annotation_id, + category_id_set=self.category_id_set, + groundtruth_boxes=ground_truth["boxes"], + groundtruth_classes=ground_truth["classes"], + ) + ) + self.annotation_id += ground_truth["boxes"].shape[0] + + self.detection_list.extend( + ExportSingleImageDetectionBoxesToCoco( + image_id=image_id, + category_id_set=self.category_id_set, + detection_boxes=detections[idx]["boxes"], + detection_scores=detections[idx]["scores"], + detection_classes=detections[idx]["classes"], + ) + ) + + def reset(self): 
+ """Reset the prediction and labels.""" + self.image_ids = [] + self.ground_truth_list = [] + self.detection_list = [] + self.annotation_id = 1 + + def result(self): + """Compute mean average precision. + + Returns: + The mean average precision score. + """ + from coco_tools import COCOEvalWrapper, COCOWrapper + + if len(self.ground_truth_list) == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + else: + groundtruth_dict = { + "annotations": self.ground_truth_list, + "images": [{"id": image_id} for image_id in self.image_ids], + "categories": [{"id": k, "name": v} for k, v in self.category_map.items()], + } + coco_wrapped_groundtruth = COCOWrapper(groundtruth_dict) + coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(self.detection_list) + box_evaluator = COCOEvalWrapper( + coco_wrapped_groundtruth, + coco_wrapped_detections, + agnostic_mode=False, + iou_thrs=self.iou_thrs, + map_points=self.map_points, + ) + box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics( + include_metrics_per_category=False, all_metrics_per_category=False + ) + box_metrics.update(box_per_category_ap) + box_metrics = {"DetectionBoxes_" + key: value for key, value in iter(box_metrics.items())} + + return box_metrics[self.map_key] + + +class ParseDecodeCoco: # pragma: no cover + """Helper function for TensorflowModelZooBertDataset. + + Parse the features from sample. + """ + + def __call__(self, sample): + """Parse the sample data. + + Args: + sample: Data to be parsed. + """ + # Dense features in Example proto. + feature_map = { + "image/encoded": tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=""), + "image/object/class/text": tf.compat.v1.VarLenFeature(dtype=tf.string), + "image/object/class/label": tf.compat.v1.VarLenFeature(dtype=tf.int64), + "image/source_id": tf.compat.v1.FixedLenFeature([], dtype=tf.string, default_value=""), + } + sparse_float32 = tf.compat.v1.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + { + k: sparse_float32 + for k in [ + "image/object/bbox/xmin", + "image/object/bbox/ymin", + "image/object/bbox/xmax", + "image/object/bbox/ymax", + ] + } + ) + + features = tf.io.parse_single_example(sample, feature_map) + + xmin = tf.expand_dims(features["image/object/bbox/xmin"].values, 0) + ymin = tf.expand_dims(features["image/object/bbox/ymin"].values, 0) + xmax = tf.expand_dims(features["image/object/bbox/xmax"].values, 0) + ymax = tf.expand_dims(features["image/object/bbox/ymax"].values, 0) + + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + encoded_image = features["image/encoded"] + image_tensor = tf.image.decode_image(encoded_image, channels=3) + image_tensor.set_shape([None, None, 3]) + + str_label = features["image/object/class/text"].values + int_label = features["image/object/class/label"].values + image_id = features["image/source_id"] + + return image_tensor, (bbox[0], str_label, int_label, image_id) + + +class COCORecordDataset(object): + """Tensorflow COCO dataset in tf record format. + + Root is a full path to tfrecord file, which contains the file name. + Please use Resize transform when batch_size > 1 + + Args: root (str): Root directory of dataset. + num_cores (int, default=28):The number of input Datasets to interleave from in parallel. 
+ transform (transform object, default=None): transform to process input data. + filter (Filter objects, default=None): filter out examples according + to specific conditions. + """ + + def __new__(cls, root, num_cores=28, transform=None, filter=filter): + """Build a new object.""" + record_iterator = tf.compat.v1.python_io.tf_record_iterator(root) + example = tf.train.SequenceExample() + for element in record_iterator: + example.ParseFromString(element) + break + feature = example.context.feature + if ( + len(feature["image/object/class/text"].bytes_list.value) == 0 + and len(feature["image/object/class/label"].int64_list.value) == 0 + ): + raise ValueError( + "Tfrecord format is incorrect, please refer\ + 'https://github.com/tensorflow/models/blob/master/research/\ + object_detection/dataset_tools/create_coco_tf_record.py' to\ + create correct tfrecord" + ) + # pylint: disable=no-name-in-module + from tensorflow.python.data.experimental import parallel_interleave + + tfrecord_paths = [root] + ds = tf.data.TFRecordDataset.list_files(tfrecord_paths) + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, + cycle_length=num_cores, + block_length=5, + sloppy=True, + buffer_output_elements=10000, + prefetch_input_elements=10000, + ) + ) + if transform is not None: + transform.transform_list.insert(0, ParseDecodeCoco()) + else: + transform = ParseDecodeCoco() + ds = ds.map(transform, num_parallel_calls=None) + if filter is not None: + ds = ds.filter(filter) + ds = ds.prefetch(buffer_size=1000) + return ds + + +class TFDataLoader(object): + """Tensorflow dataloader class. + + In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict + method to do session run, this dataloader is designed to satisfy the usage of feed dict + in tf1.x. Although it's a general dataloader and can be used in MXNet and PyTorch. + + Args: + dataset: obj. wrapper of needed data. + batch_size: int. 
batch size + """ + + def __init__(self, dataset, batch_size=1, last_batch="rollover"): + """Initialize `TFDataDataLoader` class.""" + self.dataset = dataset + self.last_batch = last_batch + self.batch_size = batch_size + dataset = dataset.batch(batch_size) + + def batch(self, batch_size, last_batch="rollover"): + """Dataset return data per batch.""" + drop_last = False if last_batch == "rollover" else True + self.batch_size = batch_size + self.dataset = self.dataset.batch(batch_size, drop_last) + + def __iter__(self): + """Iterate dataloader.""" + return self._generate_dataloader( + self.dataset, + batch_size=self.batch_size, + last_batch=self.last_batch, + ) + + def _generate_dataloader( + self, + dataset, + batch_size=1, + last_batch="rollover", + collate_fn=None, + sampler=None, + batch_sampler=None, + num_workers=None, + pin_memory=None, + distributed=False, + ): + """Yield data.""" + drop_last = False if last_batch == "rollover" else True + + def check_dynamic_shape(element_spec): + if isinstance(element_spec, collections.abc.Sequence): + return any([check_dynamic_shape(ele) for ele in element_spec]) + elif isinstance(element_spec, tf.TensorSpec): + return True if element_spec.shape.num_elements() is None else False + else: + raise ValueError("unrecognized element spec...") + + def squeeze_output(output): + if isinstance(output, collections.abc.Sequence): + return [squeeze_output(ele) for ele in output] + elif isinstance(output, np.ndarray): + return np.squeeze(output, axis=0) + else: + raise ValueError("not supported output format....") + + if tf.executing_eagerly(): + index = 0 + outputs = [] + for iter_tensors in dataset: + samples = [] + iter_inputs, iter_labels = iter_tensors[0], iter_tensors[1] + if isinstance(iter_inputs, tf.Tensor): + samples.append(iter_inputs.numpy()) + else: + samples.append(tuple(iter_input.numpy() for iter_input in iter_inputs)) + if isinstance(iter_labels, tf.Tensor): + samples.append(iter_labels.numpy()) + else: + samples.append([np.array(l) for l in iter_labels]) + index += 1 + outputs.append(samples) + if index == batch_size: + outputs = default_collate(outputs) + yield outputs + outputs = [] + index = 0 + if len(outputs) > 0: + outputs = default_collate(outputs) + yield outputs + else: + try_single_batch = check_dynamic_shape(dataset.element_spec) + dataset = dataset.batch(1 if try_single_batch else batch_size, drop_last) + ds_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) + iter_tensors = ds_iterator.get_next() + data_config = tf.compat.v1.ConfigProto() + data_config.use_per_session_threads = 1 + data_config.intra_op_parallelism_threads = 1 + data_config.inter_op_parallelism_threads = 16 + data_sess = tf.compat.v1.Session(config=data_config) + # pylint: disable=no-name-in-module + from tensorflow.python.framework.errors_impl import OutOfRangeError + + while True: + if not try_single_batch: + try: + outputs = data_sess.run(iter_tensors) + yield outputs + except OutOfRangeError: + data_sess.close() + return + else: + try: + outputs = [] + for i in range(0, batch_size): + outputs.append(squeeze_output(data_sess.run(iter_tensors))) + outputs = default_collate(outputs) + yield outputs + except OutOfRangeError: + if len(outputs) == 0: + data_sess.close() + return + else: + outputs = default_collate(outputs) + yield outputs + data_sess.close() + return diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/main.py b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/main.py new 
file mode 100644 index 00000000000..dbced65f2d7 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/main.py @@ -0,0 +1,129 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2023 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +from __future__ import division + +import time + +import numpy as np +import tensorflow as tf + +from argparse import ArgumentParser +from data_process import( + COCOmAPv2, + COCORecordDataset, + ComposeTransform, + ResizeTFTransform, + TFDataLoader, +) + +arg_parser = ArgumentParser(description='Parse args') + +arg_parser.add_argument('-g', + "--input-graph", + help='Specify the input graph.', + dest='input_graph') +arg_parser.add_argument('--config', type=str, default='') +arg_parser.add_argument('--dataset_location', type=str, default='') +arg_parser.add_argument('--output_model', type=str, default='') +arg_parser.add_argument('--mode', type=str, default='performance') +arg_parser.add_argument('--batch_size', type=int, default=10) +arg_parser.add_argument('--iters', type=int, default=100, dest='iters', help='iterations') +arg_parser.add_argument('--tune', action='store_true', default=False) +arg_parser.add_argument('--benchmark', dest='benchmark', + action='store_true', help='run benchmark') +args = arg_parser.parse_args() + +def evaluate(model): + """Custom evaluate function to estimate the accuracy of the model. + + Args: + model (tf.Graph or string or INC.model.TensorflowCheckpointModel): The input model. + + Returns: + accuracy (float): evaluation result, the larger is better. 
+ """ + from neural_compressor.tensorflow import Model + if isinstance(model, str) or isinstance(model, tf.compat.v1.Graph): + model = Model(model) + model.input_tensor_names = ["image_tensor:0"] + model.output_tensor_names = ["num_detections:0", "detection_boxes:0", \ + "detection_scores:0", "detection_classes:0"] + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor)>1 else \ + model.output_tensor[0] + warmup = 5 + iteration = -1 + if args.benchmark and args.mode == 'performance': + iteration = args.iters + metric = COCOmAPv2(output_index_mapping={'num_detections':0, 'boxes':1, 'scores':2, 'classes':3}) + + def eval_func(dataloader): + latency_list = [] + for idx, (inputs, labels) in enumerate(dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + metric.update(predictions, labels) + latency_list.append(end-start) + if idx + 1 == iteration: + break + latency = np.array(latency_list[warmup:]).mean() / args.batch_size + return latency + + eval_dataset = COCORecordDataset(root=args.dataset_location, filter=None, \ + transform=ComposeTransform(transform_list=[ResizeTFTransform(size=300)])) + eval_dataloader=TFDataLoader(dataset=eval_dataset, batch_size=args.batch_size) + latency = eval_func(eval_dataloader) + if args.benchmark and args.mode == 'performance': + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency * 1000)) + print("Throughput: {:.3f} images/sec".format(1. / latency)) + acc = metric.result() + return acc + +def main(_): + calib_dataset = COCORecordDataset(root=args.dataset_location, filter=None, \ + transform=ComposeTransform(transform_list=[ResizeTFTransform(size=300)])) + calib_dataloader = TFDataLoader(dataset=calib_dataset, batch_size=args.batch_size) + + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + quant_config = StaticQuantConfig(weight_granularity="per_channel") + model = Model(args.input_graph) + model.input_tensor_names = ['image_tensor'] + model.output_tensor_names = ["num_detections", "detection_boxes", "detection_scores", "detection_classes"] + q_model = quantize_model(model, quant_config, calib_dataloader) + q_model.save(args.output_model) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_graph) + else: + accuracy = evaluate(args.input_graph) + print('Batch size = %d' % args.batch_size) + print("Accuracy: %.5f" % accuracy) + +if __name__ == "__main__": + tf.compat.v1.app.run() diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/prepare_model.py b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/prepare_model.py new file mode 100644 index 00000000000..51882cf0bfe --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/prepare_model.py @@ -0,0 +1,99 @@ +import os +import argparse +import enum +import tarfile +import abc + + +class SupportedModels(enum.Enum): + """ + Enumeration containing supported models + """ + ssd_resnet50_v1 = 'ssd_resnet50_v1' + ssd_mobilnet_v1 = 'ssd_mobilenet_v1' + + +class Model(abc.ABC): + """ + Base model class used to obtain the model (and perform any necessary operations to make it usable) + """ + + @abc.abstractmethod + def 
get_pretrained_model(self, destination): + """ + Base method for obtaining a ready to use model + Args: + destination: path to where the file should be stored + """ + pass + + +class SsdMobilenetV1(Model): + """ Concrete implementation of the Model base class for ssd_mobilenet_v1""" + + def get_pretrained_model(self, destination): + """ + Obtains a ready to use ssd_mobilenet_v1 model file. + Args: + destination: path to where the file should be stored + """ + url = 'http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz' + os.system("curl -o ssd_mobilenet_v1_coco_2018_01_28.tar.gz {0}".format(url)) + with tarfile.open("ssd_mobilenet_v1_coco_2018_01_28.tar.gz") as tar: + if not os.path.exists(destination): + os.makedirs(destination) + tar.extractall(destination) + + +class SsdResnet50(Model): + """ Concrete implementation of the Model base class for ssd_resnet_50""" + + def get_pretrained_model(self, destination): + """ + Obtains a ready to use ssd_resnet_50 model file. + Args: + destination: path to where the file should be stored + """ + url = "http://download.tensorflow.org/models/object_detection/" \ + "ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz" + os.system("curl -o ssd_resnet50_v1.tar.gz {0}".format(url)) + with tarfile.open("ssd_resnet50_v1.tar.gz") as tar: + if not os.path.exists(destination): + os.makedirs(destination) + tar.extractall(destination) + + +def get_model(model: SupportedModels) -> Model: + """ + Factory method that returns the requested model object + Args: + model: model from SupportedModels enumeration + + Returns: Concrete object inheriting the Model base class + + """ + if model == SupportedModels.ssd_resnet50_v1: + return SsdResnet50() + if model == SupportedModels.ssd_mobilnet_v1: + return SsdMobilenetV1() + else: + raise AttributeError("The model {0} is not supported. Supported models: {1}" + .format(model_name, SupportedModels.__members__.keys())) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Prepare pre-trained model for COCO object detection') + parser.add_argument('--model_name', type=str, default='ssd_resnet50_v1', + help='model to download, default is ssd_resnet50_v1', + choices=["ssd_resnet50_v1", "ssd_mobilenet_v1"]) + parser.add_argument('--model_path', type=str, default='./model', help='directory to put models, default is ./model') + + args = parser.parse_args() + model_name = args.model_name + model_path = args.model_path + try: + model = get_model(SupportedModels(model_name)) + model.get_pretrained_model(model_path) + except AttributeError: + print("The model {0} is not supported. 
Supported models: {1}" + .format(model_name, SupportedModels.__members__.keys())) diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..8ee728de373 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/run_benchmark.sh @@ -0,0 +1,52 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=32 + iters=100 + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run_tuning +function run_benchmark { + + python main.py \ + --input-graph ${input_model} \ + --mode ${mode} \ + --dataset_location "${dataset_location}" \ + --batch_size ${batch_size} \ + --benchmark \ + --iters ${iters} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..559d695f768 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/ssd_mobilenet_v1/quantization/ptq/run_quant.sh @@ -0,0 +1,41 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --input_model=*) + input_model=$(echo "$var" |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo "$var" |cut -f2 -d=) + ;; + --dataset_location=*) + dataset_location=$(echo "$var" |cut -f2 -d=) + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-graph "${input_model}" \ + --output_model "${output_model}" \ + --dataset_location "${dataset_location}" \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/README.md b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/README.md new file mode 100644 index 00000000000..845e383cd59 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/README.md @@ -0,0 +1,96 @@ +This document describes the step-by-step to reproduce Yolo-v5 tuning result with Neural Compressor. This example can run on Intel CPUs and GPUs. + +# Prerequisite + + +## 1. Environment +Recommend python 3.10 or higher version. + +### Install Intel® Neural Compressor +```shell +pip install neural-compressor +``` + +### Install Tensorflow +```shell +pip install tensorflow +``` +> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). + +### Installation Dependency packages +```shell +cd examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow + +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. 
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[xpu]
+```
+For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).
+
+#### Quantizing the model on Intel CPU (Optional to install ITEX)
+Intel Extension for Tensorflow for Intel CPUs is currently experimental. It's not mandatory for quantizing the model on Intel CPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+## 2. Prepare model
+
+Users can choose to automatically or manually download the model.
+### Automatic download
+
+Run the `prepare_model.sh` script.
+```shell
+. prepare_model.sh
+```
+
+This script will export the yolov5 model to `./yolov5/yolov5s.pb`.
+
+### Manual download
+
+To get a TensorFlow pretrained model, you need to export it from a PyTorch model. Clone the [Ultralytics yolov5 repository](https://github.com/ultralytics/yolov5.git).
+Generate the pretrained PyTorch model and then export it to a TensorFlow-supported format with the following commands:
+```shell
+python yolov5/models/tf.py --weights yolov5/yolov5s.pt
+python yolov5/export.py --weights yolov5/yolov5s.pt --include pb
+```
+
+The yolov5 model will be exported to `./yolov5/yolov5s.pb`.
+
+## 3. Prepare Dataset
+
+Users can choose to automatically or manually download the dataset.
+### Automatic download
+
+Run the `prepare_dataset.sh` script.
+```shell
+. prepare_dataset.sh
+```
+The validation set of coco2017 will be downloaded into a `./coco` folder.
+
+# Run
+
+## 1. Quantization
+```shell
+bash run_quant.sh --input_model=./yolov5/yolov5s.pb --output_model=yolov5s_int8.pb --dataset_location=/path/to/dataset
+```
+
+## 2. Benchmark
+```shell
+# run performance benchmark
+bash run_benchmark.sh --input_model=yolov5s_int8.pb --dataset_location=/path/to/dataset --mode=performance
+
+# run accuracy benchmark
+bash run_benchmark.sh --input_model=yolov5s_int8.pb --dataset_location=/path/to/dataset --mode=accuracy
+```
+
+Finally, the program will generate a quantized Yolo-v5 model with about 1% relative accuracy loss. diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/main.py b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/main.py new file mode 100644 index 00000000000..50d750344bc --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/main.py @@ -0,0 +1,304 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# + +import argparse +import os +import sys +import torch +import numpy as np + +from pathlib import Path +from tqdm import tqdm + + +from yolov5.models.common import DetectMultiBackend +from yolov5.utils.callbacks import Callbacks +from yolov5.utils.dataloaders import create_dataloader +from yolov5.utils.general import ( + LOGGER, + TQDM_BAR_FORMAT, + Profile, + check_dataset, + check_img_size, + check_requirements, + check_yaml, + coco80_to_coco91_class, + colorstr, + increment_path, + non_max_suppression, + print_args, + scale_boxes, + xywh2xyxy, +) +from yolov5.utils.metrics import ap_per_class, box_iou +from yolov5.utils.plots import output_to_target, plot_images, plot_val_study +from yolov5.utils.torch_utils import select_device, smart_inference_mode + +from neural_compressor.tensorflow.utils import BaseModel, CpuInfo + + +parser = argparse.ArgumentParser() +parser.add_argument('--dataset_location', type=str, default='/datasets/mnist', help='dataset path') +parser.add_argument('--input_model', type=str, default='yolov5s.pb', help='input model path(s)') +parser.add_argument('--output_model', type=str, default='yolov5s_int8.pb', help='output model path(s)') +parser.add_argument('--batch_size', type=int, default=128, help='batch size') +parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') +parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') +parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') +parser.add_argument('--verbose', nargs='?', const=True, default=False, help='verbose output') +parser.add_argument('--project', default='evaluate/val-cls', help='save to project/name') +parser.add_argument('--name', default='exp', help='save to project/name') +parser.add_argument('--tune', action="store_true", help='whether to apply quantization') +parser.add_argument('--benchmark', action="store_true", help='whether to run benchmark') +parser.add_argument('--mode', type=str, default='performance', help='run performance or accuracy benchmark') +parser.add_argument('--iteration', type=int, default=100, help='iteration for calibration or evaluation') +args = parser.parse_args() + +def process_batch(detections, labels, iouv): + """ + Return correct prediction matrix. 
+ + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + iou = box_iou(labels[:, 1:], detections[:, :4]) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + +@smart_inference_mode() +def evaluate( + model, # model.pt path(s) + source=args.dataset_location, + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task="val", # train, val, test, speed or study + device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + verbose=False, # verbose output + project=args.project, # save to project/name + name="exp", # save to project/name + exist_ok=False, # existing project/name ok, do not increment + save_dir=Path(""), + callbacks=Callbacks(), + compute_loss=None, +): + if isinstance(model, BaseModel): + model.save("./yolov5s_eval.pb") + model = "./yolov5s_eval.pb" + device = select_device(device) + + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / "labels").mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(model, device=device) + stride, pt = model.stride, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + device = model.device + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models") + + # Data + #data = check_dataset(yaml_path) # check + + # Configure + model.eval() + nc = 1 if single_cls else 80 # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.5, pt) # square inference for benchmarks + + dataloader = create_dataloader( + source, + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f"{task}: "), + )[0] + + seen = 0 + names = model.names if hasattr(model, "names") else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + s = ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95") + p, r, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times + loss = torch.zeros(3, device=device) + stats, ap, ap_class = [], [], [] + callbacks.run("on_val_start") + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + iters = -1 if args.mode == "accuracy" 
else args.iteration + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + if batch_i == iters: + break + + callbacks.run("on_val_batch_start") + with dt[0]: + im = im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + _, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im) if compute_loss else (model(im, augment=False), None) + + # Batch size 1 inference drops the batch dim + if isinstance(preds, list): + preds = preds[0] + + if preds.dim() == 2: + preds=preds.unsqueeze(0) + + # Loss + if compute_loss: + loss += compute_loss(train_out, targets)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [] # for autolabelling + with dt[2]: + preds = non_max_suppression( + preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det + ) + + if args.benchmark: + # Metrics + for si, pred in enumerate(preds): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) + continue + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) + + callbacks.run("on_val_image_end", pred, predn, path, names, im[si]) + + + callbacks.run("on_val_batch_end", batch_i, im, targets, paths, shapes, preds) + + if args.tune: + return 1 + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + _, _, p, r, _, ap, ap_class = ap_per_class(*stats, plot=False, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class + if nt.sum() == 0: + LOGGER.warning(f"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels") + + pf = "%22s" + "%11i" * 2 + "%11.4g" * 4 # print format + + # Print results per class + if (verbose or (nc < 50)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x.t / seen * 1e3 for x in dt) # images per second + latency = t[2] + if args.benchmark and args.mode == "performance": + print("Batch size = {}".format(args.batch_size)) + print("Latency: {:.3f} ms".format(latency)) + print("Throughput: {:.3f} images/sec".format(1000/latency)) + + # Return results + model.float() # for training + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return map50 + + +def main(): + if args.tune: + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + excluded_conv_names = [ + 
"functional_16_1/tf_conv_1/sequential_1/conv2d_1/convolution", + "functional_16_1/tf_conv_1_2/sequential_1_1/conv2d_1_1/convolution", + "functional_16_1/tfc3_1/tf_conv_2_1/conv2d_2_1/convolution", + "functional_16_1/tfc3_1/sequential_2_1/tf_bottleneck_1/tf_conv_5_1/conv2d_5_1/convolution", + "functional_16_1/tfc3_1/tf_conv_3_1/conv2d_3_1/convolution", + "functional_16_1/tfc3_1/tf_conv_4_1/conv2d_4_1/convolution" + ] + quant_config = StaticQuantConfig(weight_granularity="per_channel") + local_dtype = "bf16" if CpuInfo().bf16 or os.getenv("FORCE_BF16") == "1" else "fp32" + local_config = StaticQuantConfig(weight_dtype=local_dtype, act_dtype=local_dtype) + for conv_name in excluded_conv_names: + quant_config.set_local(conv_name, local_config) + + q_model = quantize_model(args.input_model, quant_config, calib_func=evaluate) + q_model.save(args.output_model) + + if args.benchmark: + if args.mode == 'performance': + evaluate(args.input_model) + elif args.mode == 'accuracy': + map50 = evaluate(args.input_model) + print("Batch size = %d" % args.batch_size) + LOGGER.info("Accuracy: %.4g" % map50) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/prepare_dataset.sh b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/prepare_dataset.sh new file mode 100644 index 00000000000..acdf8a2b5e8 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/prepare_dataset.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# set -x + +wget https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels.zip +unzip -o coco2017labels.zip +rm coco2017labels.zip + +cd coco +mkdir images +cd images +wget http://images.cocodataset.org/zips/val2017.zip +unzip -o val2017.zip +rm val2017.zip diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/prepare_model.sh b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/prepare_model.sh new file mode 100644 index 00000000000..3446739939d --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/prepare_model.sh @@ -0,0 +1,3 @@ +INSTALLATION_PATH=$(python3 -c "import sys; import yolov5; p=sys.modules['yolov5'].__file__; print(p.replace('/__init__.py', ''))") +python $INSTALLATION_PATH/models/tf.py --weights yolov5/yolov5s.pt +python $INSTALLATION_PATH/export.py --weights yolov5/yolov5s.pt --include pb \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..2c40b972bcd --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/requirements.txt @@ -0,0 +1 @@ +yolov5 diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..df8009e115b --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/run_benchmark.sh @@ -0,0 +1,62 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + batch_size=128 + iters=100 + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var 
|cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run_tuning +function run_benchmark { + if [[ ${mode} == "accuracy" ]]; then + python main.py \ + --input_model ${input_model} \ + --dataset_location ${dataset_location} \ + --mode ${mode} \ + --batch_size ${batch_size} \ + --benchmark + elif [[ ${mode} == "performance" ]]; then + incbench --num_c 4 main.py \ + --input_model ${input_model} \ + --dataset_location ${dataset_location} \ + --mode ${mode} \ + --batch_size ${batch_size} \ + --iteration ${iters} \ + --benchmark + else + echo "Error: No such mode: ${mode}" + exit 1 + fi +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..8d1f6807138 --- /dev/null +++ b/examples/3.x_api/tensorflow/object_detection/yolo_v5/quantization/ptq/run_quant.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_benchmark + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + + +# run tuning +function run_benchmark { + python main.py \ + --input_model ${input_model} \ + --output_model ${output_model} \ + --dataset_location ${dataset_location} \ + --tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md new file mode 100644 index 00000000000..7bff08a2f84 --- /dev/null +++ b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md @@ -0,0 +1,98 @@ +Step-by-Step +============ + +This document is used to list steps of reproducing TensorFlow Wide & Deep tuning zoo result. +This example can run on Intel CPUs and GPUs. + +# Prerequisite + +## 1. Environment + +### Installation +```shell +# Install Intel® Neural Compressor +pip install neural-compressor +``` +### Install Intel Tensorflow +```shell +pip install intel-tensorflow +``` +> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment). + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. + +```shell +pip install --upgrade intel-extension-for-tensorflow[xpu] +``` +Please refer to the [Installation Guides](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-dc.html) for latest Intel GPU driver installation. +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers). + +#### Quantizing the model on Intel CPU(Optional to install ITEX) +Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. 
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+### Install Additional Dependency packages
+```shell
+cd examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq
+pip install -r requirements.txt
+```
+
+## 2. Download Frozen PB
+```shell
+wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/wide_deep_fp32_pretrained_model.pb
+```
+
+## 3. Prepare Dataset
+Download the training dataset (8 million samples):
+```bash
+$ wget https://storage.googleapis.com/dataset-uploader/criteo-kaggle/large_version/train.csv
+```
+Download the evaluation dataset (2 million samples):
+```bash
+$ wget https://storage.googleapis.com/dataset-uploader/criteo-kaggle/large_version/eval.csv
+```
+
+## 4. Process Dataset
+Process the calibration dataset:
+```bash
+python preprocess_csv_tfrecords.py \
+        --inputcsv-datafile train.csv \
+        --calibrationcsv-datafile eval.csv \
+        --outputfile-name processed_data
+```
+Process the evaluation dataset:
+```bash
+python preprocess_csv_tfrecords.py \
+        --inputcsv-datafile eval.csv \
+        --calibrationcsv-datafile train.csv \
+        --outputfile-name processed_data
+```
+Two .tfrecords files are generated and will be used later:
+1) train_processed_data.tfrecords
+2) eval_processed_data.tfrecords
+
+
+# Run Command
+
+## Quantization
+  ```shell
+  bash run_quant.sh --dataset_location=/path/to/datasets --input_model=/path/to/wide_deep_fp32_pretrained_model.pb --output_model=./wnd_int8_opt.pb
+  ```
+
+## Benchmark
+  ```shell
+  bash run_benchmark.sh --dataset_location=/path/to/datasets --input_model=./wnd_int8_opt.pb --mode=accuracy --batch_size=500
+  bash run_benchmark.sh --dataset_location=/path/to/datasets --input_model=./wnd_int8_opt.pb --mode=performance --batch_size=500
+  ```
+
+# Other
+This example is adapted from https://github.com/IntelAI/models/tree/master/benchmarks/recommendation/tensorflow/wide_deep_large_ds.
+The pretrained model was trained with preprocessed data from the Criteo dataset. diff --git a/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/main.py b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/main.py new file mode 100644 index 00000000000..a89efd25537 --- /dev/null +++ b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/main.py @@ -0,0 +1,347 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import sys +import os +import numpy as np +import argparse +import collections +import time +import math +import json +import datetime + +import tensorflow as tf + +from tensorflow.python.framework import ops +from tensorflow.core.framework import graph_pb2 +from google.protobuf import text_format +from argparse import ArgumentParser +from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference +from tensorflow.compat.v1 import graph_util + + +def load_graph(model_file): + """This is a function to load TF graph from pb file + + Args: + model_file (string): TF pb file local path + + Returns: + graph: TF graph object + """ + graph = tf.Graph() + #graph_def = tf.compat.v1.GraphDef() + graph_def = graph_pb2.GraphDef() + + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + + +numeric_feature_names = ["numeric_1"] +string_feature_names = ["string_1"] + +def get_feature_name(compute_accuracy): + + if compute_accuracy: + full_features_names = numeric_feature_names + string_feature_names + ["label"] + feature_datatypes = [tf.io.FixedLenSequenceFeature([], tf.float32, default_value=0.0, allow_missing=True)]+[tf.io.FixedLenSequenceFeature( + [], tf.int64, default_value=0, allow_missing=True)]+[tf.io.FixedLenSequenceFeature([], tf.int64, default_value=0, allow_missing=True)] + else: + full_features_names = numeric_feature_names + string_feature_names + feature_datatypes = [tf.io.FixedLenSequenceFeature([], tf.float32, default_value=0.0, allow_missing=True)]+[tf.io.FixedLenSequenceFeature( + [], tf.int64, default_value=0, allow_missing=True)] + return full_features_names, feature_datatypes + +def input_fn(data_file, num_epochs, shuffle, batch_size, compute_accuracy=True): + """Generate an input function for the Estimator.""" + full_features_names, feature_datatypes = get_feature_name(compute_accuracy) + def _parse_function(proto): + f = collections.OrderedDict( + zip(full_features_names, feature_datatypes)) + parsed_features = tf.io.parse_example(proto, f) + parsed_feature_vals_num = [tf.reshape( + parsed_features["numeric_1"], shape=[-1, 13])] + parsed_feature_vals_str = [tf.reshape( + parsed_features["string_1"], shape=[-1, 2]) for i in string_feature_names] + parsed_feature_vals = parsed_feature_vals_num + parsed_feature_vals_str + if compute_accuracy: + parsed_feature_vals_label = [tf.reshape(parsed_features[i], shape=[-1]) for i in ["label"]] + parsed_feature_vals = parsed_feature_vals + parsed_feature_vals_label + return parsed_feature_vals + + # Extract lines from input files using the Dataset API. 
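+    # Note: the records are batched *before* _parse_function is applied, so the parser
+    # receives a whole batch of serialized tf.train.Example protos at a time; shuffling
+    # (when enabled) uses a 20000-record buffer, and prefetching keeps the input pipeline
+    # ahead of the session that consumes it.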
+ dataset = tf.data.TFRecordDataset([data_file]) + if shuffle: + dataset = dataset.shuffle(buffer_size=20000) + dataset = dataset.batch(batch_size) + dataset = dataset.map(_parse_function, num_parallel_calls=28) + dataset = dataset.prefetch(batch_size*10) + return dataset + +def evaluation_func(model, measurer=None): + return evaluate_opt_graph.eval_inference(model) + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph""" + + def __init__(self): + arg_parser = ArgumentParser(description='Parse args') + arg_parser.add_argument('-i', '--input_graph', type=str, + help='Specify the input of the model', + dest='input_graph', + required=True) + arg_parser.add_argument('-o', '--output_graph', type=str, + help='Specify the output of the model', + dest='output_graph') + arg_parser.add_argument('--calibration_data_location', type=str, + help='full path of calibration data file', + dest='calib_data') + arg_parser.add_argument('--evaluation_data_location', type=str, + help='full path of validation data file', + dest='eval_data', + required=True) + arg_parser.add_argument('--batch_size', type=int, + help='batch size for inference.Default is 512', + default=512, + dest='batch_size') + arg_parser.add_argument('--num_intra_threads', type=int, + help='number of threads for an operator', + required=False, + default=0, + dest='num_intra_threads') + arg_parser.add_argument('--num_inter_threads', type=int, + help='number of threads across operators', + required=False, + default=0, + dest='num_inter_threads') + arg_parser.add_argument('--kmp_blocktime', type=str, + help='KMP_BLOCKTIME value', + required=False, + default=None, + dest='kmp_blocktime') + arg_parser.add_argument('-r', "--accuracy", + help='For accuracy measurement only.', + dest='accuracy', action='store_true') + arg_parser.add_argument("--config", default=None, + help="tuning config") + arg_parser.add_argument('--performance', + dest='performance', + action='store_true', + help='run performance') + arg_parser.add_argument('--tune', + dest='tune', + action='store_true', + help='use neural_compressor to tune.') + arg_parser.add_argument("--warmup-steps", + type=int, default=50, + help="number of warmup steps") + arg_parser.add_argument("--steps", + type=int, default=2000, + help="number of iterations") + + arg_parser.add_argument('--env', + dest='env', + help='specific Tensorflow env', + default='mkl') + + + self.args = arg_parser.parse_args() + + def auto_tune(self): + """This is neural_compressor tuning part to generate a quantized pb + Returns: + graph: it will return a quantized pb + """ + from neural_compressor.common import set_random_seed + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model, Model + + set_random_seed(9527) + infer_graph = load_graph(self.args.input_graph) + model = Model(infer_graph) + model.input_tensor_names = ["new_numeric_placeholder", "new_categorical_placeholder"] + model.output_tensor_names = ["import/head/predictions/probabilities"] + + if self.args.calib_data: + quant_config = StaticQuantConfig() + calib_dataloader=Dataloader(self.args.calib_data, self.args.batch_size) + q_model = quantize_model(model, quant_config, calib_dataloader) + return q_model + print("Please provide calibration dataset!") + + def eval_inference(self, infer_graph): + print("Run inference") + if isinstance(infer_graph, tf.compat.v1.GraphDef): + graph = tf.Graph() + with graph.as_default(): + tf.import_graph_def(infer_graph, name='') + infer_graph = graph + + data_config 
= tf.compat.v1.ConfigProto() + data_config.intra_op_parallelism_threads = self.args.num_intra_threads + data_config.inter_op_parallelism_threads = self.args.num_inter_threads + data_config.use_per_session_threads = 1 + + infer_config = tf.compat.v1.ConfigProto() + if self.args.env == 'mkl': + print("Set inter and intra for mkl: ") + print("intra_op_parallelism_threads = ", self.args.num_intra_threads) + print("inter_op_parallelism_threads = ", self.args.num_inter_threads) + infer_config.intra_op_parallelism_threads = self.args.num_intra_threads + infer_config.inter_op_parallelism_threads = self.args.num_inter_threads + infer_config.use_per_session_threads = 1 + + total_test_samples = sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(self.args.eval_data)) + total_batches = math.ceil(float(total_test_samples)/self.args.batch_size) + placeholder_list = ['new_numeric_placeholder','new_categorical_placeholder'] + input_tensor = [infer_graph.get_tensor_by_name(name + ":0") for name in placeholder_list] + output_name = "import/head/predictions/probabilities" + output_tensor = infer_graph.get_tensor_by_name(output_name + ":0" ) + correctly_predicted = 0 + evaluate_duration = 0.0 + + features_list = [] + data_graph = tf.Graph() + with data_graph.as_default(): + res_dataset = input_fn(self.args.eval_data, 1, False, self.args.batch_size) + iterator = tf.compat.v1.data.make_one_shot_iterator(res_dataset) + next_element = iterator.get_next() + with tf.compat.v1.Session(config=data_config, graph=data_graph) as data_sess: + for i in range(int(total_batches)): + batch = data_sess.run(next_element) + features=batch[0:3] + features_list.append(features) + + if self.args.performance: + iteration = 0 + warm_up_iteration = self.args.warmup_steps + total_run = self.args.steps + + if total_run > total_batches: + total_run = total_batches + + with tf.compat.v1.Session(config=infer_config, graph=infer_graph) as infer_sess: + i = 0 + for i in range(int(total_run)): + start_time = time.time() + logistic = infer_sess.run(output_tensor, dict(zip(input_tensor, features_list[iteration][0:2]))) + time_consume = time.time() - start_time + + if iteration > warm_up_iteration: + evaluate_duration += time_consume + + iteration += 1 + if iteration > total_batches: + iteration = 0 + test_batches = total_run - warm_up_iteration + else: + with tf.compat.v1.Session(config=infer_config, graph=infer_graph) as infer_sess: + i = 0 + for i in range(int(total_batches)): + start_time = time.time() + logistic = infer_sess.run(output_tensor, dict(zip(input_tensor, features_list[i][0:2]))) + time_consume = time.time() - start_time + evaluate_duration += time_consume + + predicted_labels = np.argmax(logistic,1) + correctly_predicted=correctly_predicted+np.sum(features_list[i][2] == predicted_labels) + + i=i+1 + + accuracy = float(correctly_predicted) / float(total_test_samples) + test_batches = total_batches + + no_of_test_samples = test_batches * self.args.batch_size + latency = 1000 * float(evaluate_duration) / float(test_batches) + throughput = no_of_test_samples / evaluate_duration + + print('--------------------------------------------------') + print('Total test records: %d' % no_of_test_samples) + print('Number of batches: %d' % test_batches) + print('Batch size = %d' % self.args.batch_size) + print('Latency: %.3f ms' % latency) + print('Throughput: %.3f records/sec' % throughput) + print('--------------------------------------------------') + + return accuracy + + def run(self): + """ This is neural_compressor function include 
tuning and benchmark option """ + + if self.args.tune: + q_model = evaluate_opt_graph.auto_tune() + q_model.save(self.args.output_graph) + else: + if self.args.accuracy: + infer_graph = load_graph(self.args.input_graph) + acc = evaluation_func(infer_graph) + print("Accuracy: %.5f" % acc) + if self.args.performance: + infer_graph = load_graph(self.args.input_graph) + evaluation_func(infer_graph) + + +class Dataloader(object): + def __init__(self, data_location, batch_size): + """dataloader generator + + Args: + data_location (str): tf recorder local path + batch_size (int): dataloader batch size + """ + self.batch_size = batch_size + self.data_file = data_location + self.total_samples = sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(data_location)) + self.n = math.ceil(float(self.total_samples) / batch_size) + print("batch size is " + str(self.batch_size) + "," + str(self.n) + " iteration") + + def __iter__(self): + data_graph = tf.Graph() + with data_graph.as_default(): + self.dataset = input_fn(self.data_file, 1, False, self.batch_size) + self.dataset_iterator = tf.compat.v1.data.make_one_shot_iterator(self.dataset) + next_element = self.dataset_iterator.get_next() + + with tf.compat.v1.Session(graph=data_graph) as sess: + for i in range(self.n): + batch = sess.run(next_element) + yield (batch[0:2], batch[2]) + + def __len__(self): + return self.n + + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() diff --git a/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/preprocess_csv_tfrecords.py b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/preprocess_csv_tfrecords.py new file mode 100644 index 00000000000..e1a82cd674c --- /dev/null +++ b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/preprocess_csv_tfrecords.py @@ -0,0 +1,155 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import sys +import pandas +import argparse +import numpy as np +import tensorflow as tf +if tf.version.VERSION < '2.0': + tf.enable_eager_execution() +parser = argparse.ArgumentParser() +parser.add_argument('--inputcsv-datafile', type=str, + help='full path of data file e.g. eval.csv', + dest='evaldatafile_path', + required=True) +parser.add_argument('--calibrationcsv-datafile', type=str, + help='full path of data file of calibration/train dataset to get normalization ranges', + dest='traindatafile_path', + default='NULL', + required=False) + +parser.add_argument('--outputfile-name', type=str, + help='output tfrecord file name e.g. 
processed_eval.[tfrecords]', + dest='outputfile_path', + default="processed_data.tfrecords", + required=False) + +args = parser.parse_args() + +eval_csv_file = args.evaldatafile_path +train_csv_file = args.traindatafile_path +output_file = args.outputfile_path + +if not os.path.isfile(eval_csv_file): + print("Please input a valid csv file") + sys.exit(1) + +filename, file_ext = os.path.splitext(output_file) +in_filename, _ = os.path.splitext(os.path.basename(eval_csv_file)) + +if file_ext != ".tfrecords": + output_file = output_file + ".tfrecords" + +output_file = "{}_{}".format(in_filename,output_file) +csv = pandas.read_csv(eval_csv_file, header=None) +if len(csv.columns)==39: + dataset_type = 'test' +else: + dataset_type = 'eval' +fill_na_dict = {} +if dataset_type=='test': + for i in range(0,13): + fill_na_dict[i]=0.0 + for i in range(13,39): + fill_na_dict[i]="" +else: + for i in range(1,14): + fill_na_dict[i]=0.0 + for i in range(14,40): + fill_na_dict[i]="" +csv=csv.fillna(value=fill_na_dict).values +numeric_feature_names = ["numeric_1"] +string_feature_names = ["string_1"] +LABEL_COLUMN =["clicked"] +CATEGORICAL_COLUMNS1 = ["C"+str(i)+"_embedding" for i in range(1, 27)] +NUMERIC_COLUMNS1 = ["I"+str(i) for i in range(1, 14)] +if dataset_type=='eval': + DATA_COLUMNS = LABEL_COLUMN + NUMERIC_COLUMNS1 + CATEGORICAL_COLUMNS1 +else: + DATA_COLUMNS = NUMERIC_COLUMNS1 + CATEGORICAL_COLUMNS1 +CATEGORICAL_COLUMNS2 = ["C"+str(i)+"_embedding" for i in range(1, 27)] +NUMERIC_COLUMNS2 = ["I"+str(i) for i in range(1, 14)] + +CATEGORICAL_COLUMNS1.sort() +NUMERIC_COLUMNS1.sort() +no_of_rows = 0 +with open(eval_csv_file, 'r') as f: + if not os.path.isfile(train_csv_file): + nums=[line.strip('\n\r').split(',') for line in f.readlines()] + else: + f1 = open(train_csv_file, 'r') + nums=[line.strip('\n\r').split(',') for line in f.readlines( + )]+[line.strip('\n\t').split(',') for line in f1.readlines()] + numpy_arr = np.array(nums) + numpy_arr[numpy_arr=='']='0' + min_list,max_list,range_list = [],[],[] + for i in range(len(DATA_COLUMNS)): + if DATA_COLUMNS[i] in NUMERIC_COLUMNS1: + col_min = numpy_arr[:,i].astype(np.float32).min() + col_max = numpy_arr[:,i].astype(np.float32).max() + min_list.append(col_min) + max_list.append(col_max) + range_list.append(col_max-col_min) + if os.path.isfile(train_csv_file): + f1.close() + print('min list',min_list) + print('max list',max_list) + print('range list',range_list) + + +with tf.compat.v1.python_io.TFRecordWriter(output_file) as writer: + print('*****Processing data******') + for row in csv: + no_of_rows = no_of_rows+1 + if dataset_type == 'eval': + unnormalized_vals = np.array(row[1:14]) + else: + unnormalized_vals = np.array(row[0:13]) + normalized_vals = (unnormalized_vals-min_list)/range_list + if dataset_type == 'eval': + new_categorical_dict = dict(zip(CATEGORICAL_COLUMNS2, row[14:40])) + else: + new_categorical_dict = dict(zip(CATEGORICAL_COLUMNS2, row[13:39])) + new_categorical_list = [] + for i in CATEGORICAL_COLUMNS1: + if pandas.isnull(new_categorical_dict[i]): + new_categorical_list.append("") + else: + new_categorical_list.append(new_categorical_dict[i]) + hash_values = tf.compat.v1.string_to_hash_bucket_fast( + new_categorical_list, 1000).numpy() + new_numerical_dict = dict(zip(NUMERIC_COLUMNS2, normalized_vals)) + example = tf.train.Example() + for i in NUMERIC_COLUMNS1: + example.features.feature[numeric_feature_names[0]].float_list.value.extend([new_numerical_dict[i]]) + for i in range(0, 26): + 
example.features.feature[string_feature_names[0]].int64_list.value.extend([i]) + example.features.feature[string_feature_names[0]].int64_list.value.extend([hash_values[i]]) + if dataset_type == 'eval': + example.features.feature["label"].int64_list.value.append(row[0]) + writer.write(example.SerializeToString()) + +print('Total number of rows ', no_of_rows) +print('Generated output file name :'+output_file) diff --git a/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..e2f0ef81736 --- /dev/null +++ b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/requirements.txt @@ -0,0 +1,9 @@ +intel-tensorflow>=2.12.0 +Cython +contextlib2 +pillow>=8.2.0 +lxml>=4.6.2 +matplotlib +numpy>=1.17.4 +pycocotools +protobuf diff --git a/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..72ab01f2a19 --- /dev/null +++ b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh @@ -0,0 +1,57 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + define_mode + run_benchmark + +} + +# init params +function init_params { + iters=1000 + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + esac + done + +} + +function define_mode { + if [[ ${mode} == "accuracy" ]]; then + mode_cmd=" --accuracy" + elif [[ ${mode} == "performance" ]]; then + mode_cmd=" --performance" + else + echo "Error: No such mode: ${mode}" + exit 1 + fi +} + +# run_tuning +function run_benchmark { + #numactl -N 0 -m 0 \ + python main.py \ + --input_graph ${input_model} \ + --evaluation_data_location ${dataset_location}/eval_processed_data.tfrecords \ + --batch_size ${batch_size} \ + --num_inter_threads 4 \ + ${mode_cmd} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..a8068917a27 --- /dev/null +++ b/examples/3.x_api/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_quant.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + run_tuning + +} + +# init params +function init_params { + + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_tuning +function run_tuning { + python main.py \ + --input_graph ${input_model} \ + --evaluation_data_location ${dataset_location}/eval_processed_data.tfrecords \ + --calibration_data_location ${dataset_location}/train_processed_data.tfrecords \ + --accuracy \ + --batch_size 1000 \ + --output_graph ${output_model} \ + --tune +} + +main "$@" diff --git 
a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md new file mode 100644 index 00000000000..4307ec85480 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md @@ -0,0 +1,76 @@
+Step-by-Step
+============
+
+This document lists the steps to reproduce the Intel® Neural Compressor tuning result for the TensorFlow 3dunet-mlperf model.
+This example can run on Intel CPUs and GPUs.
+
+# Prerequisite
+
+## 1. Environment
+
+### Installation
+```shell
+# Install Intel® Neural Compressor
+pip install neural-compressor
+```
+
+### Install requirements
+```shell
+pip install -r requirements.txt
+```
+> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).
+
+### Install Intel Extension for Tensorflow
+#### Quantizing the model on Intel GPU (Mandatory to install ITEX)
+Installing Intel Extension for Tensorflow is mandatory for quantizing the model on Intel GPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[xpu]
+```
+Please refer to the [Installation Guides](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-dc.html) for the latest Intel GPU driver installation.
+For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).
+
+#### Quantizing the model on Intel CPU (Optional to install ITEX)
+Intel Extension for Tensorflow for Intel CPUs is currently experimental. It's not mandatory for quantizing the model on Intel CPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+## 2. Prepare Pre-trained model
+ Download the pre-trained model from
+ [3DUnetCNN](https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_7_0/3dunet_dynamic_ndhwc.pb).
+ This example uses the model trained on fold 1 of the BraTS 2019 data.
+ The validation files have been copied from [here](https://github.com/mlcommons/inference/tree/r0.7/vision/medical_imaging/3d-unet/folds).
+
+## 3. Prepare dataset
+
+### Download BraTS 2019 dataset
+ Please download [BraTS 2019](https://www.med.upenn.edu/cbica/brats2019/data.html)
+ separately and unzip the dataset. The directory that contains the dataset files will be
+ passed to the launch script when running the benchmarking script.
+
+### Prepare Calibration set
+ The calibration set is the forty images listed in brats_cal_images_list.txt. They are randomly selected from Fold 0, Fold 2, Fold 3, and Fold 4 of the BraTS 2019 Training Dataset.
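+
+ For reference only, the snippet below is a minimal sketch of how the forty calibration cases could be
+ selected from a directory of preprocessed BraTS 2019 cases. The directory path and file suffix are
+ assumptions for illustration; the example scripts handle the actual selection.
+```python
+import os
+
+# Hypothetical locations; adjust to your preprocessed data layout.
+preprocessed_dir = "/build/preprocessed_data"
+calib_list_file = "brats_cal_images_list.txt"
+
+# Read the forty calibration case names shipped with this example.
+with open(calib_list_file) as f:
+    calib_cases = {line.strip() for line in f if line.strip()}
+
+# Keep only the preprocessed files that belong to the calibration set.
+calib_files = [
+    os.path.join(preprocessed_dir, name + ".pkl")
+    for name in sorted(calib_cases)
+    if os.path.exists(os.path.join(preprocessed_dir, name + ".pkl"))
+]
+print("Found {} calibration cases".format(len(calib_files)))
+```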
+ + +# Run command +Please set the following environment variables before running quantization or benchmark commands: + +* `export nnUNet_preprocessed=/build/preprocessed_data` +* `export nnUNet_raw_data_base=/build/raw_data` +* `export RESULTS_FOLDER=/build/result` + +## Quantization + +`bash run_quant.sh --input_model=3dunet_dynamic_ndhwc.pb --dataset_location=/build --output_model=3dunet_dynamic_ndhwc_int8.pb` + +## Benchmark + +`bash run_benchmark.sh --input_model=3dunet_dynamic_ndhwc_int8.pb --dataset_location=/build --batch_size=100 --iters=500 --mode=benchmark` + +`bash run_benchmark.sh --input_model=3dunet_dynamic_ndhwc_int8.pb --dataset_location=/build --batch_size=1 --mode=accuracy` diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/__init__.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/__init__.py new file mode 100644 index 00000000000..0a18c579d8b --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/brats_cal_images_list.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/brats_cal_images_list.txt new file mode 100644 index 00000000000..69276e67b6a --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/brats_cal_images_list.txt @@ -0,0 +1,40 @@ +HGG__BraTS19_2013_18_1 +HGG__BraTS19_2013_20_1 +HGG__BraTS19_CBICA_AAP_1 +HGG__BraTS19_CBICA_ABN_1 +HGG__BraTS19_CBICA_ABO_1 +HGG__BraTS19_CBICA_ALU_1 +HGG__BraTS19_CBICA_ANZ_1 +HGG__BraTS19_CBICA_APY_1 +HGG__BraTS19_CBICA_AQJ_1 +HGG__BraTS19_CBICA_AQZ_1 +HGG__BraTS19_CBICA_ASN_1 +HGG__BraTS19_CBICA_ASY_1 +HGG__BraTS19_CBICA_AUW_1 +HGG__BraTS19_CBICA_AXJ_1 +HGG__BraTS19_CBICA_AXM_1 +HGG__BraTS19_CBICA_AYG_1 +HGG__BraTS19_CBICA_AYU_1 +HGG__BraTS19_CBICA_AZD_1 +HGG__BraTS19_CBICA_BAX_1 +HGG__BraTS19_CBICA_BGR_1 +HGG__BraTS19_CBICA_BHV_1 +HGG__BraTS19_TCIA01_235_1 +HGG__BraTS19_TCIA02_394_1 +HGG__BraTS19_TCIA02_473_1 +HGG__BraTS19_TCIA02_606_1 +HGG__BraTS19_TCIA03_419_1 +HGG__BraTS19_TCIA04_192_1 +HGG__BraTS19_TCIA04_479_1 +HGG__BraTS19_TCIA06_372_1 +HGG__BraTS19_TCIA08_278_1 +LGG__BraTS19_2013_28_1 +LGG__BraTS19_TCIA09_462_1 +LGG__BraTS19_TCIA10_130_1 +LGG__BraTS19_TCIA10_202_1 +LGG__BraTS19_TCIA10_346_1 +LGG__BraTS19_TCIA10_387_1 +LGG__BraTS19_TCIA10_628_1 +LGG__BraTS19_TCIA12_470_1 +LGG__BraTS19_TCIA13_621_1 +LGG__BraTS19_TCIA13_653_1 diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/main.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/main.py new file mode 100644 index 00000000000..bc8ce8edc07 --- /dev/null +++ 
b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/main.py @@ -0,0 +1,219 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import time +from argparse import ArgumentParser +import os +import pickle +import sys +import math +import array + +import numpy as np +import tensorflow as tf +from tensorflow.python.framework import dtypes +from tensorflow.core.protobuf import rewriter_config_pb2 +from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference +from nnunet.evaluation.region_based_evaluation import evaluate_regions, get_brats_regions + +from nnUNet.setup import setup +from nnUNet.postprocess import postprocess_output + +INPUTS = 'input' +OUTPUTS = 'Identity' + +if __name__ == "__main__": + """Evaluate 3d_unet with optimized TensorFlow graph""" + def get_args(): + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument('-m', "--mode", + help="One of three options: 'benchmark'/'accuracy'/'tune'.") + arg_parser.add_argument('-n', "--iters", + help='The number of iteration. 
shall > warmup num(10)', + type=int, default=100) + arg_parser.add_argument('-e', "--num-inter-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + arg_parser.add_argument('-a', "--num-intra-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) + arg_parser.add_argument('-i', "--input-model", + help='Specify the input graph.', + dest='input_model') + arg_parser.add_argument('-o', "--output-model", + help='Specify the output graph.', + dest='output_model') + arg_parser.add_argument('-c', "--calib-preprocess", + help='Specify calibration preprocess dir.', + dest='calib_preprocess') + arg_parser.add_argument('-d', "--data-location", + help='Specify the location of the data.', + dest="data_location", default=None) + arg_parser.add_argument("--batch-size", dest="batch_size", type=int, default=1) + arg_parser.add_argument("--bfloat16", type=int, default=0) + + args = arg_parser.parse_args() + print(args) + return args + + def eval_func(graph): + print("Run inference for accuracy") + args = get_args() + #setup(args.data_location, args.input_model) + + output_graph = optimize_for_inference(graph.as_graph_def(), [INPUTS], [OUTPUTS], + dtypes.float32.as_datatype_enum, False) + tf.import_graph_def(output_graph, name="") + + input_tensor = graph.get_tensor_by_name('input:0') + output_tensor = graph.get_tensor_by_name('Identity:0') + + config = tf.compat.v1.ConfigProto() + config.intra_op_parallelism_threads=args.num_intra_threads + config.inter_op_parallelism_threads=args.num_inter_threads + if args.bfloat16: + config.graph_options.rewrite_options.auto_mixed_precision_mkl = rewriter_config_pb2.RewriterConfig.ON + + sess = tf.compat.v1.Session(graph=graph, config=config) + if args.mode: + print("Inference with real data") + preprocessed_data_dir = os.path.join(args.data_location, "preprocessed_data") + with open(os.path.join(preprocessed_data_dir, "preprocessed_files.pkl"), "rb") as f: + preprocessed_files = pickle.load(f) + + dictionaries = [] + for preprocessed_file in preprocessed_files: + with open(os.path.join(preprocessed_data_dir, preprocessed_file + ".pkl"), "rb") as f: + dct = pickle.load(f)[1] + dictionaries.append(dct) + + count = len(preprocessed_files) + predictions = [None] * count + validation_indices = list(range(0,count)) + print("Found {:d} preprocessed files".format(count)) + loaded_files = {} + batch_size = args.batch_size + + # Get the number of steps based on batch size + steps = count#math.ceil(count/batch_size) + warmup = 10 + assert args.iters >= warmup, 'iteration must be larger than warmup' + time_list=[] + for i in range(steps): + print("Iteration {} ...".format(i)) + test_data_index = validation_indices[i]#validation_indices[i * batch_size:(i + 1) * batch_size] + file_name = preprocessed_files[test_data_index] + with open(os.path.join(preprocessed_data_dir, "{:}.pkl".format(file_name)), "rb") as f: + data = pickle.load(f)[0] + if args.mode == 'performance' and i < args.iters: + time_start = time.time() + predictions[i] = sess.run(output_tensor, feed_dict={input_tensor: data[np.newaxis, ...]})[0].astype(np.float32) + duration = time.time() - time_start + time_list.append(duration) + else: + predictions[i] = sess.run(output_tensor, feed_dict={input_tensor: data[np.newaxis, ...]})[0].astype(np.float32) + if args.mode == 'performance': + latency = np.array(time_list[warmup: ]).mean() / args.batch_size + print('Batch size = {}'.format(args.batch_size)) + print('Latency: {:.3f} 
ms'.format(latency * 1000)) + print('Throughput: {:.3f} items/sec'.format(1./ latency)) + else: + output_folder = os.path.join(args.data_location, "postprocessed_data") + output_files = preprocessed_files + # Post Process + postprocess_output(predictions, dictionaries, validation_indices, output_folder, output_files) + + ground_truths = os.path.join(args.data_location, \ + "raw_data/nnUNet_raw_data/Task043_BraTS2019/labelsTr") + # Run evaluation + print("Running evaluation...") + evaluate_regions(output_folder, ground_truths, get_brats_regions()) + # Load evaluation summary + print("Loading evaluation summary...") + accuracy=0.0 + with open(os.path.join(output_folder, "summary.csv")) as f: + for line in f: + words = line.split(",") + if words[0] == "mean": + whole = float(words[1]) + core = float(words[2]) + enhancing = float(words[3]) + mean = (whole + core + enhancing) / 3 + accuracy=mean + print("Batch size =", args.batch_size) + print("Accuracy is {:.5f}".format(mean)) + break + print("Done!") + return accuracy + + def load_graph(file_name): + tf.compat.v1.logging.info('Loading graph from: ' + file_name) + with tf.io.gfile.GFile(file_name, "rb") as f: + graph_def = tf.compat.v1.GraphDef() + graph_def.ParseFromString(f.read()) + with tf.Graph().as_default() as graph: + tf.import_graph_def(graph_def, name='') + return graph + + class CalibrationDL(): + def __init__(self): + path = os.path.abspath(os.path.expanduser( + './brats_cal_images_list.txt')) + with open(path, 'r') as f: + self.preprocess_files = [line.rstrip() for line in f] + + self.loaded_files = {} + self.batch_size = 1 + + def __getitem__(self, sample_id): + file_name = self.preprocess_files[sample_id] + print("Loading file {:}".format(file_name)) + with open(os.path.join(args.calib_preprocess, "{:}.pkl".format(file_name)), "rb") as f: + self.loaded_files[sample_id] = pickle.load(f)[0] + # note that calibration phase does not care label, here we return 0 for label free case. + return self.loaded_files[sample_id], 0 + + def __len__(self): + self.count = len(self.preprocess_files) + return self.count + + + args = get_args() + print(args) + graph = load_graph(args.input_model) + if args.mode == 'tune': + from neural_compressor.common import set_random_seed + from neural_compressor.tensorflow.utils import BaseDataLoader + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + set_random_seed(9527) + quant_config = StaticQuantConfig() + calib_dataloader=BaseDataLoader(dataset=CalibrationDL()) + q_model = quantize_model(graph, quant_config, calib_dataloader) + try: + q_model.save(args.output_model) + except Exception as e: + print("Failed to save model due to {}".format(str(e))) + else: + eval_func(graph) diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/Task043_BraTS_2019.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/Task043_BraTS_2019.py new file mode 100644 index 00000000000..d26521276d6 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/Task043_BraTS_2019.py @@ -0,0 +1,125 @@ +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is copied from nnUnet/nnunet/dataset_conversion/Task043_BraTS_2019.py, except that +# the validation/test set part is removed and downloaded_data_dir is now configurable. + +import argparse +import numpy as np +from collections import OrderedDict +import os +import sys + +from batchgenerators.utilities.file_and_folder_operations import * +from nnunet.paths import nnUNet_raw_data +import SimpleITK as sitk +import shutil + +def copy_BraTS_segmentation_and_convert_labels(in_file, out_file): + # use this for segmentation only!!! + # nnUNet wants the labels to be continuous. BraTS is 0, 1, 2, 4 -> we make that into 0, 1, 2, 3 + img = sitk.ReadImage(in_file) + img_npy = sitk.GetArrayFromImage(img) + + uniques = np.unique(img_npy) + for u in uniques: + if u not in [0, 1, 2, 4]: + raise RuntimeError('unexpected label') + + seg_new = np.zeros_like(img_npy) + seg_new[img_npy == 4] = 3 + seg_new[img_npy == 2] = 1 + seg_new[img_npy == 1] = 2 + img_corr = sitk.GetImageFromArray(seg_new) + img_corr.CopyInformation(img) + sitk.WriteImage(img_corr, out_file) + +def task_setup(downloaded_data_dir): + """ + REMEMBER TO CONVERT LABELS BACK TO BRATS CONVENTION AFTER PREDICTION! + """ + + task_name = "Task043_BraTS2019" + print(task_name) + print(downloaded_data_dir) + print(nnUNet_raw_data) + + target_base = join(nnUNet_raw_data, task_name) + if not os.path.isdir(target_base): + target_imagesTr = join(target_base, "imagesTr") + target_imagesVal = join(target_base, "imagesVal") + target_imagesTs = join(target_base, "imagesTs") + target_labelsTr = join(target_base, "labelsTr") + + maybe_mkdir_p(target_imagesTr) + maybe_mkdir_p(target_imagesVal) + maybe_mkdir_p(target_imagesTs) + maybe_mkdir_p(target_labelsTr) + + patient_names = [] + for tpe in ["HGG", "LGG"]: + cur = join(downloaded_data_dir, tpe) + for p in subdirs(cur, join=False): + patdir = join(cur, p) + patient_name = tpe + "__" + p + patient_names.append(patient_name) + t1 = join(patdir, p + "_t1.nii.gz") + t1c = join(patdir, p + "_t1ce.nii.gz") + t2 = join(patdir, p + "_t2.nii.gz") + flair = join(patdir, p + "_flair.nii.gz") + seg = join(patdir, p + "_seg.nii.gz") + + assert all([ + isfile(t1), + isfile(t1c), + isfile(t2), + isfile(flair), + isfile(seg) + ]), "%s" % patient_name + + shutil.copy(t1, join(target_imagesTr, patient_name + "_0000.nii.gz")) + shutil.copy(t1c, join(target_imagesTr, patient_name + "_0001.nii.gz")) + shutil.copy(t2, join(target_imagesTr, patient_name + "_0002.nii.gz")) + shutil.copy(flair, join(target_imagesTr, patient_name + "_0003.nii.gz")) + + copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, patient_name + ".nii.gz")) + + json_dict = OrderedDict() + json_dict['name'] = "BraTS2019" + json_dict['description'] = "nothing" + json_dict['tensorImageSize'] = "4D" + json_dict['reference'] = "see BraTS2019" + json_dict['licence'] = "see BraTS2019 license" + json_dict['release'] = "0.0" + json_dict['modality'] = { + "0": "T1", + "1": "T1ce", + "2": "T2", + "3": "FLAIR" + } + json_dict['labels'] = { + "0": "background", + "1": "edema", + "2": "non-enhancing", + "3": "enhancing", + } + 
json_dict['numTraining'] = len(patient_names) + json_dict['numTest'] = 0 + json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in + patient_names] + json_dict['test'] = [] + + save_json(json_dict, join(target_base, "dataset.json")) + print("DONE") diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/__init__.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/__init__.py new file mode 100644 index 00000000000..0a18c579d8b --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold0_validation.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold0_validation.txt new file mode 100644 index 00000000000..57eeeb651c5 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold0_validation.txt @@ -0,0 +1,67 @@ +HGG__BraTS19_2013_22_1 +HGG__BraTS19_2013_23_1 +HGG__BraTS19_2013_3_1 +HGG__BraTS19_2013_5_1 +HGG__BraTS19_2013_7_1 +HGG__BraTS19_CBICA_AAB_1 +HGG__BraTS19_CBICA_AAL_1 +HGG__BraTS19_CBICA_ABN_1 +HGG__BraTS19_CBICA_ALU_1 +HGG__BraTS19_CBICA_AME_1 +HGG__BraTS19_CBICA_ANG_1 +HGG__BraTS19_CBICA_AOC_1 +HGG__BraTS19_CBICA_AOD_1 +HGG__BraTS19_CBICA_APZ_1 +HGG__BraTS19_CBICA_AQD_1 +HGG__BraTS19_CBICA_AQJ_1 +HGG__BraTS19_CBICA_AQN_1 +HGG__BraTS19_CBICA_ASA_1 +HGG__BraTS19_CBICA_ASK_1 +HGG__BraTS19_CBICA_ASO_1 +HGG__BraTS19_CBICA_AWH_1 +HGG__BraTS19_CBICA_AWV_1 +HGG__BraTS19_CBICA_AYA_1 +HGG__BraTS19_CBICA_AYC_1 +HGG__BraTS19_CBICA_AYI_1 +HGG__BraTS19_CBICA_BFB_1 +HGG__BraTS19_CBICA_BGN_1 +HGG__BraTS19_CBICA_BGR_1 +HGG__BraTS19_CBICA_BJY_1 +HGG__BraTS19_TCIA01_231_1 +HGG__BraTS19_TCIA01_378_1 +HGG__BraTS19_TCIA01_390_1 +HGG__BraTS19_TCIA01_412_1 +HGG__BraTS19_TCIA02_135_1 +HGG__BraTS19_TCIA02_179_1 +HGG__BraTS19_TCIA02_208_1 +HGG__BraTS19_TCIA02_274_1 +HGG__BraTS19_TCIA02_314_1 +HGG__BraTS19_TCIA02_430_1 +HGG__BraTS19_TCIA02_608_1 +HGG__BraTS19_TCIA03_121_1 +HGG__BraTS19_TCIA03_138_1 +HGG__BraTS19_TCIA03_375_1 +HGG__BraTS19_TCIA03_498_1 +HGG__BraTS19_TCIA06_184_1 +HGG__BraTS19_TCIA06_372_1 +HGG__BraTS19_TCIA08_113_1 +HGG__BraTS19_TCIA08_162_1 +HGG__BraTS19_TCIA08_218_1 +HGG__BraTS19_TCIA08_469_1 +LGG__BraTS19_2013_6_1 +LGG__BraTS19_TCIA09_141_1 +LGG__BraTS19_TCIA09_255_1 +LGG__BraTS19_TCIA09_402_1 +LGG__BraTS19_TCIA09_451_1 +LGG__BraTS19_TCIA09_462_1 +LGG__BraTS19_TCIA09_620_1 +LGG__BraTS19_TCIA10_266_1 +LGG__BraTS19_TCIA10_413_1 +LGG__BraTS19_TCIA10_628_1 +LGG__BraTS19_TCIA10_629_1 +LGG__BraTS19_TCIA10_640_1 +LGG__BraTS19_TCIA12_298_1 +LGG__BraTS19_TCIA12_470_1 
+LGG__BraTS19_TCIA13_621_1 +LGG__BraTS19_TCIA13_624_1 +LGG__BraTS19_TCIA13_654_1 diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold1_validation.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold1_validation.txt new file mode 100644 index 00000000000..d24f39b67c4 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold1_validation.txt @@ -0,0 +1,67 @@ +HGG__BraTS19_2013_13_1 +HGG__BraTS19_2013_19_1 +HGG__BraTS19_2013_27_1 +HGG__BraTS19_CBICA_AAG_1 +HGG__BraTS19_CBICA_ALN_1 +HGG__BraTS19_CBICA_ANV_1 +HGG__BraTS19_CBICA_AOH_1 +HGG__BraTS19_CBICA_APK_1 +HGG__BraTS19_CBICA_APR_1 +HGG__BraTS19_CBICA_AQG_1 +HGG__BraTS19_CBICA_AQP_1 +HGG__BraTS19_CBICA_ARZ_1 +HGG__BraTS19_CBICA_ASF_1 +HGG__BraTS19_CBICA_ASG_1 +HGG__BraTS19_CBICA_ATP_1 +HGG__BraTS19_CBICA_ATX_1 +HGG__BraTS19_CBICA_AUA_1 +HGG__BraTS19_CBICA_AVJ_1 +HGG__BraTS19_CBICA_AVV_1 +HGG__BraTS19_CBICA_AWG_1 +HGG__BraTS19_CBICA_AXL_1 +HGG__BraTS19_CBICA_AXQ_1 +HGG__BraTS19_CBICA_BAN_1 +HGG__BraTS19_CBICA_BBG_1 +HGG__BraTS19_CBICA_BGE_1 +HGG__BraTS19_CBICA_BHQ_1 +HGG__BraTS19_CBICA_BIC_1 +HGG__BraTS19_CBICA_BNR_1 +HGG__BraTS19_TCIA01_131_1 +HGG__BraTS19_TCIA01_147_1 +HGG__BraTS19_TCIA01_180_1 +HGG__BraTS19_TCIA01_190_1 +HGG__BraTS19_TCIA01_221_1 +HGG__BraTS19_TCIA01_335_1 +HGG__BraTS19_TCIA01_411_1 +HGG__BraTS19_TCIA02_151_1 +HGG__BraTS19_TCIA02_321_1 +HGG__BraTS19_TCIA02_331_1 +HGG__BraTS19_TCIA02_368_1 +HGG__BraTS19_TCIA02_471_1 +HGG__BraTS19_TCIA03_257_1 +HGG__BraTS19_TCIA03_474_1 +HGG__BraTS19_TCIA04_111_1 +HGG__BraTS19_TCIA04_328_1 +HGG__BraTS19_TCIA04_343_1 +HGG__BraTS19_TCIA05_277_1 +HGG__BraTS19_TCIA05_478_1 +HGG__BraTS19_TCIA06_165_1 +HGG__BraTS19_TCIA08_105_1 +HGG__BraTS19_TCIA08_280_1 +HGG__BraTS19_TMC_15477_1 +HGG__BraTS19_TMC_21360_1 +HGG__BraTS19_TMC_30014_1 +LGG__BraTS19_TCIA09_428_1 +LGG__BraTS19_TCIA10_175_1 +LGG__BraTS19_TCIA10_276_1 +LGG__BraTS19_TCIA10_393_1 +LGG__BraTS19_TCIA10_408_1 +LGG__BraTS19_TCIA10_410_1 +LGG__BraTS19_TCIA10_449_1 +LGG__BraTS19_TCIA10_490_1 +LGG__BraTS19_TCIA10_625_1 +LGG__BraTS19_TCIA10_637_1 +LGG__BraTS19_TCIA12_249_1 +LGG__BraTS19_TCIA12_466_1 +LGG__BraTS19_TCIA13_615_1 +LGG__BraTS19_TCIA13_630_1 diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold2_validation.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold2_validation.txt new file mode 100644 index 00000000000..c468e57417d --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold2_validation.txt @@ -0,0 +1,67 @@ +HGG__BraTS19_2013_11_1 +HGG__BraTS19_2013_21_1 +HGG__BraTS19_2013_2_1 +HGG__BraTS19_2013_4_1 +HGG__BraTS19_CBICA_ABB_1 +HGG__BraTS19_CBICA_ABE_1 +HGG__BraTS19_CBICA_ABM_1 +HGG__BraTS19_CBICA_ANZ_1 +HGG__BraTS19_CBICA_AOP_1 +HGG__BraTS19_CBICA_APY_1 +HGG__BraTS19_CBICA_AQA_1 +HGG__BraTS19_CBICA_AQO_1 +HGG__BraTS19_CBICA_AQU_1 +HGG__BraTS19_CBICA_ARW_1 +HGG__BraTS19_CBICA_ASV_1 +HGG__BraTS19_CBICA_AUN_1 +HGG__BraTS19_CBICA_AUW_1 +HGG__BraTS19_CBICA_AUX_1 +HGG__BraTS19_CBICA_AVB_1 +HGG__BraTS19_CBICA_AVF_1 +HGG__BraTS19_CBICA_AWX_1 +HGG__BraTS19_CBICA_AXO_1 +HGG__BraTS19_CBICA_AYW_1 +HGG__BraTS19_CBICA_BAX_1 +HGG__BraTS19_CBICA_BEM_1 +HGG__BraTS19_CBICA_BHK_1 +HGG__BraTS19_CBICA_BHM_1 +HGG__BraTS19_CBICA_BLJ_1 +HGG__BraTS19_TCIA01_150_1 
+HGG__BraTS19_TCIA01_203_1 +HGG__BraTS19_TCIA01_235_1 +HGG__BraTS19_TCIA01_401_1 +HGG__BraTS19_TCIA01_448_1 +HGG__BraTS19_TCIA01_499_1 +HGG__BraTS19_TCIA02_168_1 +HGG__BraTS19_TCIA02_222_1 +HGG__BraTS19_TCIA02_226_1 +HGG__BraTS19_TCIA02_283_1 +HGG__BraTS19_TCIA02_290_1 +HGG__BraTS19_TCIA02_309_1 +HGG__BraTS19_TCIA02_394_1 +HGG__BraTS19_TCIA02_455_1 +HGG__BraTS19_TCIA02_606_1 +HGG__BraTS19_TCIA03_133_1 +HGG__BraTS19_TCIA04_192_1 +HGG__BraTS19_TCIA04_361_1 +HGG__BraTS19_TCIA06_332_1 +HGG__BraTS19_TCIA08_167_1 +HGG__BraTS19_TCIA08_205_1 +HGG__BraTS19_TCIA08_234_1 +HGG__BraTS19_TCIA08_242_1 +HGG__BraTS19_TCIA08_278_1 +HGG__BraTS19_TCIA08_436_1 +HGG__BraTS19_TMC_12866_1 +LGG__BraTS19_2013_15_1 +LGG__BraTS19_2013_1_1 +LGG__BraTS19_TCIA09_312_1 +LGG__BraTS19_TCIA10_109_1 +LGG__BraTS19_TCIA10_130_1 +LGG__BraTS19_TCIA10_152_1 +LGG__BraTS19_TCIA10_241_1 +LGG__BraTS19_TCIA10_282_1 +LGG__BraTS19_TCIA10_325_1 +LGG__BraTS19_TCIA10_639_1 +LGG__BraTS19_TCIA13_618_1 +LGG__BraTS19_TCIA13_633_1 +LGG__BraTS19_TMC_09043_1 diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold3_validation.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold3_validation.txt new file mode 100644 index 00000000000..171a51a02a8 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold3_validation.txt @@ -0,0 +1,67 @@ +HGG__BraTS19_2013_12_1 +HGG__BraTS19_2013_14_1 +HGG__BraTS19_2013_18_1 +HGG__BraTS19_2013_20_1 +HGG__BraTS19_2013_26_1 +HGG__BraTS19_CBICA_ABO_1 +HGG__BraTS19_CBICA_ALX_1 +HGG__BraTS19_CBICA_ANP_1 +HGG__BraTS19_CBICA_AOS_1 +HGG__BraTS19_CBICA_AOZ_1 +HGG__BraTS19_CBICA_AQT_1 +HGG__BraTS19_CBICA_ARF_1 +HGG__BraTS19_CBICA_ASE_1 +HGG__BraTS19_CBICA_ASW_1 +HGG__BraTS19_CBICA_ATN_1 +HGG__BraTS19_CBICA_ATV_1 +HGG__BraTS19_CBICA_AUQ_1 +HGG__BraTS19_CBICA_AVG_1 +HGG__BraTS19_CBICA_AVT_1 +HGG__BraTS19_CBICA_AWI_1 +HGG__BraTS19_CBICA_AXW_1 +HGG__BraTS19_CBICA_AYG_1 +HGG__BraTS19_CBICA_AYU_1 +HGG__BraTS19_CBICA_BAP_1 +HGG__BraTS19_CBICA_BCL_1 +HGG__BraTS19_CBICA_BDK_1 +HGG__BraTS19_CBICA_BGG_1 +HGG__BraTS19_CBICA_BGT_1 +HGG__BraTS19_CBICA_BGW_1 +HGG__BraTS19_CBICA_BGX_1 +HGG__BraTS19_TCIA01_186_1 +HGG__BraTS19_TCIA01_429_1 +HGG__BraTS19_TCIA01_460_1 +HGG__BraTS19_TCIA02_171_1 +HGG__BraTS19_TCIA02_370_1 +HGG__BraTS19_TCIA02_374_1 +HGG__BraTS19_TCIA02_377_1 +HGG__BraTS19_TCIA02_473_1 +HGG__BraTS19_TCIA02_491_1 +HGG__BraTS19_TCIA02_607_1 +HGG__BraTS19_TCIA03_296_1 +HGG__BraTS19_TCIA03_338_1 +HGG__BraTS19_TCIA03_419_1 +HGG__BraTS19_TCIA04_437_1 +HGG__BraTS19_TCIA04_479_1 +HGG__BraTS19_TCIA06_247_1 +HGG__BraTS19_TCIA06_603_1 +HGG__BraTS19_TMC_11964_1 +LGG__BraTS19_2013_28_1 +LGG__BraTS19_2013_29_1 +LGG__BraTS19_2013_9_1 +LGG__BraTS19_TCIA09_177_1 +LGG__BraTS19_TCIA09_254_1 +LGG__BraTS19_TCIA10_103_1 +LGG__BraTS19_TCIA10_299_1 +LGG__BraTS19_TCIA10_310_1 +LGG__BraTS19_TCIA10_330_1 +LGG__BraTS19_TCIA10_346_1 +LGG__BraTS19_TCIA10_351_1 +LGG__BraTS19_TCIA10_420_1 +LGG__BraTS19_TCIA10_442_1 +LGG__BraTS19_TCIA10_632_1 +LGG__BraTS19_TCIA10_644_1 +LGG__BraTS19_TCIA12_480_1 +LGG__BraTS19_TCIA13_623_1 +LGG__BraTS19_TCIA13_642_1 +LGG__BraTS19_TCIA13_645_1 diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold4_validation.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold4_validation.txt new file mode 100644 index 00000000000..0fc2a8bc9cc --- 
/dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/folds/fold4_validation.txt @@ -0,0 +1,67 @@ +HGG__BraTS19_2013_10_1 +HGG__BraTS19_2013_17_1 +HGG__BraTS19_2013_25_1 +HGG__BraTS19_CBICA_AAP_1 +HGG__BraTS19_CBICA_ABY_1 +HGG__BraTS19_CBICA_AMH_1 +HGG__BraTS19_CBICA_ANI_1 +HGG__BraTS19_CBICA_AOO_1 +HGG__BraTS19_CBICA_AQQ_1 +HGG__BraTS19_CBICA_AQR_1 +HGG__BraTS19_CBICA_AQV_1 +HGG__BraTS19_CBICA_AQY_1 +HGG__BraTS19_CBICA_AQZ_1 +HGG__BraTS19_CBICA_ASH_1 +HGG__BraTS19_CBICA_ASN_1 +HGG__BraTS19_CBICA_ASR_1 +HGG__BraTS19_CBICA_ASU_1 +HGG__BraTS19_CBICA_ASY_1 +HGG__BraTS19_CBICA_ATB_1 +HGG__BraTS19_CBICA_ATD_1 +HGG__BraTS19_CBICA_ATF_1 +HGG__BraTS19_CBICA_AUR_1 +HGG__BraTS19_CBICA_AXJ_1 +HGG__BraTS19_CBICA_AXM_1 +HGG__BraTS19_CBICA_AXN_1 +HGG__BraTS19_CBICA_AZD_1 +HGG__BraTS19_CBICA_AZH_1 +HGG__BraTS19_CBICA_BCF_1 +HGG__BraTS19_CBICA_BFP_1 +HGG__BraTS19_CBICA_BGO_1 +HGG__BraTS19_CBICA_BHB_1 +HGG__BraTS19_CBICA_BHV_1 +HGG__BraTS19_CBICA_BHZ_1 +HGG__BraTS19_CBICA_BKV_1 +HGG__BraTS19_TCIA01_201_1 +HGG__BraTS19_TCIA01_425_1 +HGG__BraTS19_TCIA02_117_1 +HGG__BraTS19_TCIA02_118_1 +HGG__BraTS19_TCIA02_198_1 +HGG__BraTS19_TCIA02_300_1 +HGG__BraTS19_TCIA02_322_1 +HGG__BraTS19_TCIA02_605_1 +HGG__BraTS19_TCIA03_199_1 +HGG__BraTS19_TCIA03_265_1 +HGG__BraTS19_TCIA04_149_1 +HGG__BraTS19_TCIA05_396_1 +HGG__BraTS19_TCIA05_444_1 +HGG__BraTS19_TCIA06_211_1 +HGG__BraTS19_TCIA06_409_1 +HGG__BraTS19_TCIA08_319_1 +HGG__BraTS19_TCIA08_406_1 +HGG__BraTS19_TMC_06290_1 +HGG__BraTS19_TMC_06643_1 +HGG__BraTS19_TMC_27374_1 +LGG__BraTS19_2013_0_1 +LGG__BraTS19_2013_16_1 +LGG__BraTS19_2013_24_1 +LGG__BraTS19_2013_8_1 +LGG__BraTS19_TCIA09_493_1 +LGG__BraTS19_TCIA10_202_1 +LGG__BraTS19_TCIA10_261_1 +LGG__BraTS19_TCIA10_307_1 +LGG__BraTS19_TCIA10_387_1 +LGG__BraTS19_TCIA12_101_1 +LGG__BraTS19_TCIA13_634_1 +LGG__BraTS19_TCIA13_650_1 +LGG__BraTS19_TCIA13_653_1 diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/postprocess.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/postprocess.py new file mode 100644 index 00000000000..e5590bdb338 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/postprocess.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +from multiprocessing import Pool +import os +from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax + +def load_predictions(predictions, dictionaries, validation_indices): + assert len(predictions) == len(dictionaries),"Number of predictions does not match number of samples in validation set!" 
+ padded_shape = [224,224,160] + results = [None for i in range(len(predictions))] + for i in range(len(predictions)): + qsl_idx = validation_indices[i] + prediction = predictions[qsl_idx] + assert qsl_idx >= 0 and qsl_idx < len(predictions), "Invalid qsl_idx!" + raw_shape = list(dictionaries[qsl_idx]["size_after_cropping"]) + # Remove the padded part + pad_before = [(p - r) // 2 for p, r in zip(padded_shape, raw_shape)] + pad_after = [-(p - r - b) for p, r, b in zip(padded_shape, raw_shape, pad_before)] + result_shape = (4,) + tuple(padded_shape) + result = np.reshape(prediction, result_shape).astype(np.float32) + results[qsl_idx] = result[:, pad_before[0]:pad_after[0], pad_before[1]:pad_after[1], pad_before[2]:pad_after[2]] + assert all([i is not None for i in results]), "Missing some results!" + return results + +def postprocess_output(predictions, dictionaries, validation_indices, output_folder, output_files): + processed_predictions = load_predictions(predictions, dictionaries, validation_indices) + print("Running postprocessing with multiple threads...") + force_separate_z=None + interp_order=3 + interp_order_z=0 + num_threads_nifti_save = 12 + all_in_gpu = "None" + print("Saving predictions...") + pool = Pool(num_threads_nifti_save) + results = [] + for i, output_filename in enumerate(output_files): + print(i, "/", len(output_files)) + output_filename = os.path.join(output_folder, output_filename + ".nii.gz") + softmax_mean = processed_predictions[i] + dct = dictionaries[i] + bytes_per_voxel = 4 + if all_in_gpu: + bytes_per_voxel = 2 # if all_in_gpu then the return value is half (float16) + if np.prod(softmax_mean.shape) > (2e9 / bytes_per_voxel * 0.85): # * 0.85 just to be save + print( + "This output is too large for python process-process communication. Saving output temporarily to disk") + np.save(output_filename[:-7] + ".npy", softmax_mean) + softmax_mean = output_filename[:-7] + ".npy" + + results.append(pool.starmap_async(save_segmentation_nifti_from_softmax, + ((softmax_mean, output_filename, dct, interp_order, None, None, None, + None, None, force_separate_z, interp_order_z),) + )) + + _ = [i.get() for i in results] + pool.close() + pool.join() diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py new file mode 100644 index 00000000000..048eb0e91cb --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/preprocess.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This file has been copied from +# https://github.com/mlcommons/inference/blob/r0.7/vision/medical_imaging/3d-unet/preprocess.py + +import argparse +import numpy +import os +import pickle +import sys +import torch + +from batchgenerators.augmentations.utils import pad_nd_image +from batchgenerators.utilities.file_and_folder_operations import subfiles +from nnunet.training.model_restore import load_model_and_checkpoint_files +from nnunet.inference.predict import preprocess_multithreaded + +def preprocess_MLPerf(model, checkpoint_name, folds, fp16, list_of_lists, output_filenames, preprocessing_folder, num_threads_preprocessing): + assert len(list_of_lists) == len(output_filenames) + print("loading parameters for folds", folds) + trainer, params = load_model_and_checkpoint_files(model, folds, fp16, checkpoint_name=checkpoint_name) + + print("starting preprocessing generator") + preprocessing = preprocess_multithreaded(trainer, list_of_lists, output_filenames, num_threads_preprocessing, None) + print("Preprocessing images...") + all_output_files = [] + + for preprocessed in preprocessing: + output_filename, (d, dct) = preprocessed + + all_output_files.append(output_filename) + if isinstance(d, str): + data = np.load(d) + os.remove(d) + d = data + + # Pad to the desired full volume + d = pad_nd_image(d, trainer.patch_size, "constant", None, False, None) + + with open(os.path.join(preprocessing_folder, output_filename+ ".pkl"), "wb") as f: + pickle.dump([d, dct], f) + f.close() + + return all_output_files + + +def preprocess_setup(preprocessed_data_dir): + print("Preparing for preprocessing data...") + + # Validation set is fold 1 + fold = 1 + import sys + import os + CURRENT_DIR = os.path.split(os.path.abspath(__file__))[0] + #validation_fold_file = '/workspace/intelai_models/inference/nnUNet/folds/fold1_validation.txt' + validation_fold_file = os.path.join(CURRENT_DIR, 'folds/fold1_validation.txt') + # Make sure the model exists + model_dir = 'build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1' + model_path = os.path.join(model_dir, "plans.pkl") + assert os.path.isfile(model_path), "Cannot find the model file {:}!".format(model_path) + checkpoint_name = "model_final_checkpoint" + + # Other settings + fp16 = False + num_threads_preprocessing = 12 + raw_data_dir = 'build/raw_data/nnUNet_raw_data/Task043_BraTS2019/imagesTr' + + # Open list containing validation images from specific fold (e.g. 1) + validation_files = [] + with open(validation_fold_file) as f: + for line in f: + validation_files.append(line.rstrip()) + + # Create output and preprocessed directory + if not os.path.isdir(preprocessed_data_dir): + os.makedirs(preprocessed_data_dir) + + # Create list of images locations (i.e. 
4 images per case => 4 modalities) + all_files = subfiles(raw_data_dir, suffix=".nii.gz", join=False, sort=True) + list_of_lists = [[os.path.join(raw_data_dir, i) for i in all_files if i[:len(j)].startswith(j) and + len(i) == (len(j) + 12)] for j in validation_files] + + # Preprocess images, returns filenames list + # This runs in multiprocess + print("Actually preprocessing data...") + + preprocessed_files = preprocess_MLPerf(model_dir, checkpoint_name, fold, fp16, list_of_lists, + validation_files, preprocessed_data_dir, num_threads_preprocessing) + + print("Saving metadata of the preprocessed data...") + with open(os.path.join(preprocessed_data_dir, "preprocessed_files.pkl"), "wb") as f: + pickle.dump(preprocessed_files, f) + + print("Preprocessed data saved to {:}".format(preprocessed_data_dir)) + print("Done!") diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/setup.py b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/setup.py new file mode 100644 index 00000000000..bf4d5981497 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/nnUNet/setup.py @@ -0,0 +1,81 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +import os, shutil +import argparse +import sys +import zipfile +#import sys +#print(sys.path) +#sys.path.append('/home/sys_dltest/lpot/lz/frameworks.ai.models.intel-models/models/image_segmentation/tensorflow/3d_unet_mlperf') +from nnUNet.Task043_BraTS_2019 import task_setup +from nnUNet.preprocess import preprocess_setup + +BUILD_DIR = 'build' +RAW_DATA_DIR = BUILD_DIR + '/raw_data' +PREPROCESSED_DATA_DIR = BUILD_DIR + '/preprocessed_data' +POSTPROCESSED_DATA_DIR = BUILD_DIR + '/postprocessed_data' +MODEL_DIR = BUILD_DIR + '/model' +RESULT_DIR = BUILD_DIR + '/result' +TF_MODEL = '224_224_160.pb' +OTHER_FILES = 'fold_1.zip' + +def create_directories(): + print("Creating directories") + if not os.path.isdir(BUILD_DIR): + os.makedirs(BUILD_DIR) + if not os.path.isdir(RAW_DATA_DIR): + os.makedirs(RAW_DATA_DIR) + if not os.path.isdir(PREPROCESSED_DATA_DIR): + os.makedirs(PREPROCESSED_DATA_DIR) + if not os.path.isdir(POSTPROCESSED_DATA_DIR): + os.makedirs(POSTPROCESSED_DATA_DIR) + if not os.path.isdir(RESULT_DIR): + os.makedirs(RESULT_DIR) + if not os.path.isdir(MODEL_DIR): + os.makedirs(MODEL_DIR) + +def download_model(input_graph): + pwd = os.getcwd() + os.chdir(os.path.join(pwd, MODEL_DIR)) + if input_graph == 'NONE': + print("Downloading TF model from Zenodo") + if not os.path.isfile(TF_MODEL): + os.system('wget -O 224_224_160.pb https://zenodo.org/record/3928991/files/224_224_160.pb?download=1;') + os.chdir(os.path.join(pwd, RESULT_DIR)) + if not os.path.isfile(OTHER_FILES): + os.system('wget -O fold_1.zip https://zenodo.org/record/3904106/files/fold_1.zip?download=1;') + zip_file = "fold_1.zip" + #legacy bitmap issue https://bugzilla.redhat.com/show_bug.cgi?id=1802689 + if (not os.path.isfile(OTHER_FILES)): + os.system('curl -O --output fold_1.zip https://zenodo.org/record/3904106/files/fold_1.zip') + try: + with zipfile.ZipFile(zip_file) as z: + z.extractall() + print("Extracted all") + except: + print("Could not extract fold_1.zip") + os.chdir(pwd) + +def setup(downloaded_data_dir, input_graph='NONE'): + create_directories() + download_model(input_graph) + task_setup(downloaded_data_dir) + preprocess_setup(PREPROCESSED_DATA_DIR) diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/requirements.txt new file mode 100644 index 00000000000..d5069f8038f --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/requirements.txt @@ -0,0 +1,2 @@ +nnunet +tensorflow \ No newline at end of file diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh new file mode 100644 index 00000000000..36f8d8502f0 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh @@ -0,0 +1,65 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + export BUILD_DIR=${dataset_location} + export nnUNet_preprocessed=${BUILD_DIR}/preprocessed_data + export nnUNet_raw_data_base=${BUILD_DIR}/raw_data + export RESULTS_FOLDER=${BUILD_DIR}/result + run_benchmark + +} + +# init params +function init_params { + iters=100 + batch_size=1 + for var in "$@" + do + case $var in + --mode=*) + mode=$(echo $var |cut -f2 -d=) + ;; + --dataset_location=*) + 
dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --batch_size=*) + batch_size=$(echo $var |cut -f2 -d=) + ;; + --iters=*) + iters=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + + +# run_benchmark +function run_benchmark { + if [[ ${bfloat16} == "true" ]]; then + extra_cmd="--bfloat16" + else + extra_cmd="" + fi + + python main.py \ + --input-model=${input_model} \ + --data-location=${dataset_location} \ + --calib-preprocess=${BUILD_DIR}/calib_preprocess \ + --batch-size=${batch_size} \ + --mode=${mode} \ + --iters=${iters} \ + ${extra_cmd} +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_quant.sh new file mode 100644 index 00000000000..79256545613 --- /dev/null +++ b/examples/3.x_api/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_quant.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -x + +function main { + + init_params "$@" + export BUILD_DIR=${dataset_location} + export nnUNet_preprocessed=${BUILD_DIR}/preprocessed_data + export nnUNet_raw_data_base=${BUILD_DIR}/raw_data + export RESULTS_FOLDER=${BUILD_DIR}/result + run_tuning + +} + +# init params +function init_params { + for var in "$@" + do + case $var in + --dataset_location=*) + dataset_location=$(echo $var |cut -f2 -d=) + ;; + --input_model=*) + input_model=$(echo $var |cut -f2 -d=) + ;; + --output_model=*) + output_model=$(echo $var |cut -f2 -d=) + ;; + *) + echo "Error: No such parameter: ${var}" + exit 1 + ;; + esac + done + +} + +# run_tuning +function run_tuning { + python main.py \ + --input-model=${input_model} \ + --output-model=${output_model} \ + --data-location=${dataset_location} \ + --calib-preprocess=${BUILD_DIR}/calib_preprocess \ + --mode=tune +} + +main "$@" diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md new file mode 100644 index 00000000000..6fa291d0b36 --- /dev/null +++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md @@ -0,0 +1,117 @@ +Step-by-Step +============ + +This document is used to list steps of reproducing TensorFlow style transfer Intel® Neural Compressor tuning zoo result. +This example can run on Intel CPUs and GPUs. + +# Prerequisite + +## Prerequisite + +### Installation +```shell +# Install Intel® Neural Compressor +pip install neural-compressor +``` +### Install Intel Tensorflow +```shell +pip install intel-tensorflow +``` +> Note: Supported Tensorflow [Version](../../../../../../README.md#supported-frameworks). + +### Install Additional Dependency packages +```shell +cd examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq +pip install -r requirements.txt +``` + +### Install Intel Extension for Tensorflow +#### Quantizing the model on Intel GPU(Mandatory to install ITEX) +Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. 
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[xpu]
+```
+For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).
+
+#### Quantizing the model on Intel CPU(Optional to install ITEX)
+Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs.
+
+```shell
+pip install --upgrade intel-extension-for-tensorflow[cpu]
+```
+
+> **Note**:
+> The version compatibility of stock Tensorflow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible Tensorflow and ITEX.
+
+### 2. Prepare Pretrained model
+
+#### Automated approach
+Run the `prepare_model.py` script located in `./examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq`.
+
+```
+usage: prepare_model.py [-h] [--model_path MODEL_PATH]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --model_path MODEL_PATH  directory to put models, default is ./model
+```
+
+#### Manual approach
+
+```shell
+wget https://storage.googleapis.com/download.magenta.tensorflow.org/models/arbitrary_style_transfer.tar.gz
+tar -xvzf arbitrary_style_transfer.tar.gz
+```
+
+### 3. Prepare Dataset
+There are two folders, `style_images` and `content_images`, in the current directory. Use these two folders to generate stylized images for testing, or prepare your own style and content images.
+
+
+# Run Command
+  ```shell
+  python main.py --output_dir=./result --style_images_paths=./style_images --content_images_paths=./content_images --input_model=./model/model.ckpt
+  ```
+
+## Quantization
+  ```shell
+  bash run_quant.sh --dataset_location=style_images/,content_images/ --input_model=./model/model.ckpt --output_model=saved_model
+  ```
+## Benchmark
+  ```shell
+  bash run_benchmark.sh --dataset_location=style_images/,content_images/ --input_model=saved_model.pb --batch_size=1
+  ```
+
+Details of enabling Intel® Neural Compressor on style transfer for Tensorflow
+=========================
+
+This is a tutorial of how to enable the style_transfer model with Intel® Neural Compressor.
+## User Code Analysis
+1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in the tuning.metric field of a model-specific yaml config file.
+
+2. User specifies fp32 *model*, calibration dataset *q_dataloader* and a custom *eval_func* which encapsulates the evaluation dataset and metric by itself.
+
+For style_transfer we apply the latter approach, because there is no accuracy metric for a style transfer model. The first step is to implement the *q_dataloader* and a fake *eval_func*. Since Neural Compressor already provides a style transfer dataset, only the *eval_func* needs to be prepared after the graph is loaded.
+
+### Evaluation Part Adaption
+Because style transfer has no metric to measure accuracy, we only implement a fake eval_func:
+```python
+def eval_func(model):
+    return 1.
+```
+
+Here we set the input and output tensor names in the *inputs* and *outputs* fields. In this case we only calibrate and quantize the model without tuning for accuracy.
+
+### Code update
+
+After the preparation step is done, we just need to add a few lines to get the quantized model.
+```python +from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + +quant_config = StaticQuantConfig() +q_model = quantize_model(graph, quant_config, calib_dataloader) +q_model.save(FLAGS.output_model) +``` diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg new file mode 100644 index 00000000000..5f6c5a6beb5 Binary files /dev/null and b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg differ diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg new file mode 100644 index 00000000000..248d9fd31f9 Binary files /dev/null and b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg differ diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/data_process.py b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/data_process.py new file mode 100644 index 00000000000..d49c262bbec --- /dev/null +++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/data_process.py @@ -0,0 +1,362 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import glob +import collections + +import numpy as np +import tensorflow as tf + +from abc import abstractmethod +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils.data import default_collate + + +class StyleTransferDataset(object): + """Dataset used for style transfer task on tensorflow/inteltensorflow/tensorflow_itex backend. + + This Dataset is to construct a dataset from two specific image holders representing + content image folder and style image folder. + """ + + def __init__( + self, + content_folder, + style_folder, + crop_ratio=0.1, + resize_shape=(256, 256), + image_format="jpg", + transform=None, + filter=None, + ): + """Initialize `StyleTransferDataset` class. + + Args: + content_folder (str): Root directory of content images. + style_folder (str): Root directory of style images. + crop_ratio (float, default=0.1): Cropped ratio to each side. + resize_shape (tuple, default=(256, 256)): Target size of image. + image_format (str, default='jpg'): Target image format. + transform (transform object, default=None): Transform to process input data. + filter (Filter objects, default=None): Filter out examples according to specific conditions. 
+ """ + self.transform = transform + self.content_folder = content_folder + self.style_folder = style_folder + self.resize_shape = resize_shape + self.crop_ratio = crop_ratio + self.content_images = glob.glob(os.path.join(content_folder, "*" + image_format)) + self.style_images = glob.glob(os.path.join(style_folder, "*" + image_format)) + self.image_list = [] + for content in self.content_images: + for style in self.style_images: + self.image_list.append((content, style)) + + def __len__(self): + """Return the length of dataset.""" + return len(self.image_list) + + def __getitem__(self, index): + """Return the item of dataset according to the given index.""" + from PIL import Image + + content_image, style_image = self.image_list[index] + content_image = Image.open(content_image) + style_image = Image.open(style_image) + width, height = style_image.size + crop_ratio = self.crop_ratio + crop_box = (crop_ratio * height, crop_ratio * width, (1 - crop_ratio) * height, (1 - crop_ratio) * width) + content_image = np.asarray(content_image.resize(self.resize_shape)) + style_image = np.asarray(style_image.resize(self.resize_shape)) + if content_image.max() > 1.0: + content_image = content_image / 255.0 + if style_image.max() > 1.0: + style_image = style_image / 255.0 + + return (content_image, style_image), 0 + + +class ComposeTransform(object): + """Composes several transforms together. + + Args: + transform_list (list of Transform objects): list of transforms to compose + + Returns: + sample (tuple): tuple of processed image and label + """ + + def __init__(self, transform_list): + """Initialize `ComposeTransform` class.""" + self.transform_list = transform_list + + def __call__(self, sample): + """Call transforms in transform_list.""" + for transform in self.transform_list: + sample = transform(sample) + return sample + +class ParseDecodeVocTransform(): + """Parse features in Example proto. + + Returns: + tuple of parsed image and labels + """ + + def __call__(self, sample): + """Parse decode voc.""" + + # Currently only supports jpeg and png. + # Need to use this logic because the shape is not known for + # tf.image.decode_image and we rely on this info to + # extend label if necessary. 
+ def _decode_image(content, channels): + """Decode the image with content.""" + return tf.cond( + tf.image.is_jpeg(content), + lambda: tf.image.decode_jpeg(content, channels), + lambda: tf.image.decode_png(content, channels), + ) + + features = { + "image/encoded": tf.compat.v1.FixedLenFeature((), tf.string, default_value=""), + "image/filename": tf.compat.v1.FixedLenFeature((), tf.string, default_value=""), + "image/format": tf.compat.v1.FixedLenFeature((), tf.string, default_value="jpeg"), + "image/height": tf.compat.v1.FixedLenFeature((), tf.int64, default_value=0), + "image/width": tf.compat.v1.FixedLenFeature((), tf.int64, default_value=0), + "image/segmentation/class/encoded": tf.compat.v1.FixedLenFeature((), tf.string, default_value=""), + "image/segmentation/class/format": tf.compat.v1.FixedLenFeature((), tf.string, default_value="png"), + } + + parsed_features = tf.compat.v1.parse_single_example(sample, features) + + image = _decode_image(parsed_features["image/encoded"], channels=3) + + label = None + label = _decode_image(parsed_features["image/segmentation/class/encoded"], channels=1) + + sample = { + "image": image, + } + + label.set_shape([None, None, 1]) + + sample["labels_class"] = label + + return sample["image"], sample["labels_class"] + + +class BaseMetric(object): + """The base class of Metric.""" + + def __init__(self, metric, single_output=False, hvd=None): + """Initialize the basic metric. + + Args: + metric: The metric class. + single_output: Whether the output is single or not, defaults to False. + hvd: The Horovod class for distributed training, defaults to None. + """ + self._metric_cls = metric + self._single_output = single_output + self._hvd = hvd + + def __call__(self, *args, **kwargs): + """Evaluate the model predictions, and the reference. + + Returns: + The class itself. + """ + self._metric = self._metric_cls(*args, **kwargs) + return self + + @abstractmethod + def update(self, preds, labels=None, sample_weight=None): + """Update the state that need to be evaluated. + + Args: + preds: The prediction result. + labels: The reference. Defaults to None. + sample_weight: The sampling weight. Defaults to None. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def reset(self): + """Clear the predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @abstractmethod + def result(self): + """Evaluate the difference between predictions and labels. + + Raises: + NotImplementedError: The method should be implemented by subclass. + """ + raise NotImplementedError + + @property + def metric(self): + """Return its metric class. + + Returns: + The metric class. + """ + return self._metric_cls + + @property + def hvd(self): + """Return its hvd class. + + Returns: + The hvd class. + """ + return self._hvd + + @hvd.setter + def hvd(self, hvd): + """Set its hvd. + + Args: + hvd: The Horovod class for distributed training. + """ + self._hvd = hvd + + +class TopKMetric(BaseMetric): + """Compute Top-k Accuracy classification score for Tensorflow model. + + This metric computes the number of times where the correct label is among + the top k labels predicted. + + Attributes: + k (int): The number of most likely outcomes considered to find the correct label. + num_correct: The number of predictions that were correct classified. + num_sample: The total number of predictions. 
+ """ + + def __init__(self, k=1): + """Initialize the k, number of samples and correct predictions. + + Args: + k: The number of most likely outcomes considered to find the correct label. + """ + self.k = k + self.num_correct = 0 + self.num_sample = 0 + + def update(self, preds, labels, sample_weight=None): + """Add the predictions and labels. + + Args: + preds: The predictions. + labels: The labels corresponding to the predictions. + sample_weight: The sample weight. + """ + preds, labels = TopKMetric._topk_shape_validate(preds, labels) + + labels = labels.reshape([len(labels)]) + with tf.Graph().as_default() as acc_graph: + topk = tf.nn.in_top_k( + predictions=tf.constant(preds, dtype=tf.float32), targets=tf.constant(labels, dtype=tf.int32), k=self.k + ) + fp32_topk = tf.cast(topk, tf.float32) + correct_tensor = tf.reduce_sum(input_tensor=fp32_topk) + + with tf.compat.v1.Session() as acc_sess: + correct = acc_sess.run(correct_tensor) + + self.num_sample += len(labels) + self.num_correct += correct + + def reset(self): + """Reset the number of samples and correct predictions.""" + self.num_correct = 0 + self.num_sample = 0 + + def result(self): + """Compute the top-k score. + + Returns: + The top-k score. + """ + if self.num_sample == 0: + logger.warning("Sample num during evaluation is 0.") + return 0 + elif getattr(self, "_hvd", None) is not None: # pragma: no cover + allgather_num_correct = sum(self._hvd.allgather_object(self.num_correct)) + allgather_num_sample = sum(self._hvd.allgather_object(self.num_sample)) + return allgather_num_correct / allgather_num_sample + return self.num_correct / self.num_sample + + @staticmethod + def _topk_shape_validate(preds, labels): + # preds shape can be Nxclass_num or class_num(N=1 by default) + # it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax + if isinstance(preds, int): + preds = [preds] + preds = np.array(preds) + elif isinstance(preds, np.ndarray): + preds = np.array(preds) + elif isinstance(preds, list): + preds = np.array(preds) + preds = preds.reshape((-1, preds.shape[-1])) + + # consider labels just int value 1x1 + if isinstance(labels, int): + labels = [labels] + labels = np.array(labels) + elif isinstance(labels, tuple): + labels = np.array([labels]) + labels = labels.reshape((labels.shape[-1], -1)) + elif isinstance(labels, list): + if isinstance(labels[0], int): + labels = np.array(labels) + labels = labels.reshape((labels.shape[0], 1)) + elif isinstance(labels[0], tuple): + labels = np.array(labels) + labels = labels.reshape((labels.shape[-1], -1)) + else: + labels = np.array(labels) + # labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot) + # only support 2 dimension one-shot labels + # or 1 dimension one-hot class_num will confuse with N + + if len(preds.shape) == 1: + N = 1 + class_num = preds.shape[0] + preds = preds.reshape([-1, class_num]) + elif len(preds.shape) >= 2: + N = preds.shape[0] + preds = preds.reshape([N, -1]) + class_num = preds.shape[1] + + label_N = labels.shape[0] + assert label_N == N, "labels batch size should same with preds" + labels = labels.reshape([N, -1]) + # one-hot labels will have 2 dimension not equal 1 + if labels.shape[1] != 1: + labels = labels.argsort()[..., -1:] + return preds, labels diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/main.py b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/main.py new file mode 100644 index 00000000000..440b0cee4af --- 
/dev/null
+++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/main.py
@@ -0,0 +1,207 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+import os
+import io
+import skimage.io
+import glob
+import numpy as np
+import tensorflow.compat.v1 as tf
+from PIL import Image
+import time
+
+from neural_compressor.tensorflow.utils import BaseDataLoader, DummyDatasetV2
+from data_process import (
+    StyleTransferDataset,
+    ComposeTransform,
+    ParseDecodeVocTransform,
+)
+
+flags = tf.flags
+flags.DEFINE_string('style_images_paths', None, 'Paths to the style images '
+                    'for evaluation.')
+flags.DEFINE_string('content_images_paths', None, 'Paths to the content images '
+                    'for evaluation.')
+flags.DEFINE_string('output_dir', './result', 'Output stylized image directory.')
+
+flags.DEFINE_string('output_model', None, 'Output model directory.')
+
+flags.DEFINE_string('input_model', None, 'Input model path.')
+
+flags.DEFINE_integer('batch_size', 1, 'Batch size.')
+
+flags.DEFINE_bool('tune', False, 'Whether to run quantization tuning.')
+
+FLAGS = flags.FLAGS
+
+def load_img(path, resize_shape=(256, 256), crop_ratio=0.1):
+    img = Image.open(path)
+    width, height = img.size
+    crop_box = (crop_ratio*height, crop_ratio*width, (1-crop_ratio)*height, (1-crop_ratio)*width)
+    img = np.asarray(img.crop(crop_box).resize(resize_shape))
+    if img.max() > 1.0:
+        img = img / 255.
+    img = img.astype(np.float32)[np.newaxis, ...]
+    return img
+
+def save_image(image, output_file, save_format='jpeg'):
+    image = np.uint8(image * 255.0)
+    buf = io.BytesIO()
+    skimage.io.imsave(buf, np.squeeze(image, 0), format=save_format)
+    buf.seek(0)
+    f = tf.gfile.GFile(output_file, 'w')
+    f.write(buf.getvalue())
+    f.close()
+
+def image_style_transfer(sess, content_img_path, style_img_path):
+    stylized_images = sess.graph.get_tensor_by_name('import/import/transformer/expand/conv3/conv/Sigmoid:0')
+    style_img_np = load_img(style_img_path, crop_ratio=0)
+    content_img_np = load_img(content_img_path, crop_ratio=0)
+    stylized_image_res = sess.run(
+        stylized_images,
+        feed_dict={
+            'import/import/style_input:0': style_img_np,
+            'import/import/content_input:0': content_img_np})
+    # save the stylized image.
+ save_image(stylized_image_res, os.path.join(FLAGS.output_dir, 'stylized_image.jpg')) + +def main(args=None): + tf.logging.set_verbosity(tf.logging.INFO) + if not tf.gfile.Exists(FLAGS.output_dir): + tf.gfile.MkDir(FLAGS.output_dir) + + with tf.Session() as sess: + if FLAGS.input_model.rsplit('.', 1)[-1] == 'ckpt': + style_img_ph = tf.placeholder(tf.float32, shape=[None, 256, 256, 3], name='style_input') + content_img_ph = tf.placeholder(tf.float32, shape=[None, 256, 256, 3], name='content_input') + # import meta_graph + meta_data_path = FLAGS.input_model + '.meta' + saver = tf.train.import_meta_graph(meta_data_path, clear_devices=True) + + sess.run(tf.global_variables_initializer()) + saver.restore(sess, FLAGS.input_model) + graph_def = sess.graph.as_graph_def() + + replace_style = 'style_image_processing/ResizeBilinear_2' + replace_content = 'batch_processing/batch' + for node in graph_def.node: + for idx, input_name in enumerate(node.input): + # replace style input and content input nodes to placeholder + if replace_content == input_name: + node.input[idx] = 'content_input' + if replace_style == input_name: + node.input[idx] = 'style_input' + + if FLAGS.tune: + from neural_compressor.tensorflow.quantization.utils.utility import _parse_ckpt_bn_input + _parse_ckpt_bn_input(graph_def) + output_name = 'transformer/expand/conv3/conv/Sigmoid' + frozen_graph = tf.graph_util.convert_variables_to_constants(sess, graph_def, [output_name]) + # use frozen pb instead + elif FLAGS.input_model.rsplit('.', 1)[-1] == 'pb': + with open(FLAGS.input_model, 'rb') as f: + frozen_graph = tf.GraphDef() + frozen_graph.ParseFromString(f.read()) + else: + print("not supported model format") + exit(-1) + + if FLAGS.tune: + with tf.Graph().as_default() as graph: + tf.import_graph_def(frozen_graph, name='') + from neural_compressor.common import set_random_seed + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + set_random_seed(9527) + dataset = StyleTransferDataset( + content_folder=FLAGS.content_images_paths.strip(), + style_folder=FLAGS.style_images_paths.strip(), + transform=ComposeTransform(transform_list= [ + ParseDecodeVocTransform(), + ] + ) + ) + calib_dataloader = BaseDataLoader(dataset=dataset, batch_size=FLAGS.batch_size) + + quant_config = StaticQuantConfig() + q_model = quantize_model(graph, quant_config, calib_dataloader) + q_model.save(FLAGS.output_model) + frozen_graph= q_model.graph_def + + # validate the quantized model here + with tf.Graph().as_default(), tf.Session() as sess: + if FLAGS.tune: + # create dataloader using default style_transfer dataset + # generate stylized images + dataset = StyleTransferDataset( + content_folder=FLAGS.content_images_paths.strip(), + style_folder=FLAGS.style_images_paths.strip(), + crop_ratio=0.2, + resize_shape=(256, 256) + ) + else: + dataset = DummyDatasetV2(input_shape=[(256, 256, 3), (256, 256, 3)], label_shape=(1, )) + + dataloader = BaseDataLoader(dataset=dataset, batch_size=FLAGS.batch_size) + tf.import_graph_def(frozen_graph, name='') + style_transfer(sess, dataloader) + +def add_import_to_name(sess, name, try_cnt=2): + for i in range(0, try_cnt): + try: + sess.graph.get_tensor_by_name(name) + return name + except: + name = 'import/' + name + + raise ValueError('can not find tensor by name') + +# validate and save the files +def style_transfer(sess, dataloader): + time_list = [] + output_name = add_import_to_name(sess, 'transformer/expand/conv3/conv/Sigmoid:0', 3) + style_name = add_import_to_name(sess, 'style_input:0', 3) + 
content_name = add_import_to_name(sess, 'content_input:0', 3)
+
+    stylized_images = sess.graph.get_tensor_by_name(output_name)
+
+    for idx, ((content_img_np, style_img_np), _) in enumerate(dataloader):
+        start_time = time.time()
+        stylized_image_res = sess.run(
+            stylized_images,
+            feed_dict={
+                style_name: style_img_np,
+                content_name: content_img_np})
+        duration = time.time() - start_time
+        time_list.append(duration)
+        if idx + 1 == 20:
+            break
+    warm_up = 1
+    throughput = (len(time_list) - warm_up) / np.array(time_list[warm_up:]).sum()
+    print('Batch size = {}'.format(FLAGS.batch_size))
+    print('Latency: {:.3f} ms'.format(np.array(time_list[warm_up:]).mean() * 1000))
+    print('Throughput: {:.3f} images/sec'.format(throughput))
+
+
+def run_tuning():
+    tf.disable_v2_behavior()
+    tf.app.run(main)
+
+if __name__ == '__main__':
+    run_tuning()
diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py
new file mode 100644
index 00000000000..74182ad5f37
--- /dev/null
+++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py
@@ -0,0 +1,33 @@
+import os
+import argparse
+import enum
+import tarfile
+import abc
+
+def get_pretrained_model(destination):
+    """
+    Obtains a ready to use style_transfer model file.
+    Args:
+        destination: path to where the file should be stored
+    """
+    url = "https://storage.googleapis.com/download.magenta.tensorflow.org/models/arbitrary_style_transfer.tar.gz"
+
+    os.system("curl -o arbitrary_style_transfer.tar.gz {0}".format(url))
+    with tarfile.open("arbitrary_style_transfer.tar.gz") as tar:
+        if not os.path.exists(destination):
+            os.makedirs(destination)
+        tar.extractall(destination)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Prepare pre-trained model for style transfer model')
+    parser.add_argument('--model_path', type=str, default='./model', help='directory to put models, default is ./model')
+
+    args = parser.parse_args()
+    model_path = args.model_path
+    try:
+        get_pretrained_model(model_path)
+    except AttributeError:
+        print("Failed to fetch the model.")
+
diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt
new file mode 100644
index 00000000000..1e5d462dcd4
--- /dev/null
+++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt
@@ -0,0 +1,2 @@
+scikit-image
+Pillow>=8.2.0
diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh
new file mode 100644
index 00000000000..41fee820958
--- /dev/null
+++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+set -x
+
+function main {
+
+  init_params "$@"
+  run_benchmark
+
+}
+
+# init params
+function init_params {
+  iters=100
+  for var in "$@"
+  do
+    case $var in
+      --topology=*)
+          topology=$(echo $var |cut -f2 -d=)
+          ;;
+      --dataset_location=*)
+          dataset_location=$(echo $var |cut -f2 -d=)
+          ;;
+      --input_model=*)
+          input_model=$(echo $var |cut -f2 -d=)
+          ;;
+      --mode=*)
+          mode=$(echo $var |cut -f2 -d=)
+          ;;
+      --batch_size=*)
+          batch_size=$(echo $var |cut -f2 -d=)
+          ;;
+      --iters=*)
+          iters=$(echo ${var} |cut -f2 -d=)
+          ;;
+      *)
+          echo "Error: No such parameter: ${var}"
+          exit 1
+          ;;
+    esac
+  done
+
+}
+
+
+# run_benchmark
+function run_benchmark {
+    style_images=$(echo ${dataset_location} | awk -F ',' '{print $1}')
+    content_images=$(echo ${dataset_location} | awk -F ',' '{print $2}')
+    echo "$style_images, $content_images"
+
+    python main.py \
+            --input_model "${input_model}" \
+            --style_images_paths "${style_images}" \
+            --content_images_paths "${content_images}" \
+            --batch_size "${batch_size}" \
+            --tune=False \
+            --output_model "${output_model}"
+
+}
+
+main "$@"
diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_quant.sh b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_quant.sh
new file mode 100644
index 00000000000..4fdfdd2e8a5
--- /dev/null
+++ b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_quant.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# set -x
+
+function main {
+
+  init_params "$@"
+
+  run_tuning
+
+}
+
+# init params
+function init_params {
+
+  for var in "$@"
+  do
+    case $var in
+      --topology=*)
+          topology=$(echo $var |cut -f2 -d=)
+          ;;
+      --dataset_location=*)
+          dataset_location=$(echo "$var" |cut -f2 -d=)
+          ;;
+      --input_model=*)
+          input_model=$(echo "$var" |cut -f2 -d=)
+          ;;
+      --output_model=*)
+          output_model=$(echo "$var" |cut -f2 -d=)
+          ;;
+    esac
+  done
+
+}
+
+# run_tuning
+function run_tuning {
+    style_images=$(echo ${dataset_location} | awk -F ',' '{print $1}')
+    content_images=$(echo ${dataset_location} | awk -F ',' '{print $2}')
+    echo "$style_images, $content_images"
+
+    python main.py \
+            --input_model "${input_model}" \
+            --style_images_paths "${style_images}" \
+            --content_images_paths "${content_images}" \
+            --tune=True \
+            --output_model "${output_model}"
+}
+
+main "$@"
diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg
new file mode 100644
index 00000000000..5af5a0eff59
Binary files /dev/null and b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg differ
diff --git a/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg
new file mode 100644
index 00000000000..bb0c46ea1de
Binary files /dev/null and b/examples/3.x_api/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg differ
diff --git a/examples/helloworld/fp8_example/README.md b/examples/helloworld/fp8_example/README.md
new file mode 100644
index 00000000000..b758768ef0f
--- /dev/null
+++ b/examples/helloworld/fp8_example/README.md
@@ -0,0 +1,96 @@
+### Usage demo:
+
+#### Two steps to get a quantized model
+
+```diff
+import torch
++ from neural_compressor.torch.quantization import FP8Config, convert, prepare, finalize_calibration
+import habana_frameworks.torch.core as htcore
+
+class M(torch.nn.Module):
+    def __init__(self) -> None:
+        super().__init__()
+        self.fc1 = torch.nn.Linear(10, 5)
+        self.fc2 = torch.nn.Linear(5, 10)
+
+    def forward(self, inp):
+        x1 = self.fc1(inp)
+        x2 = self.fc2(x1)
+        return x2
+
+model = M().eval()
+
++ config = FP8Config.from_json_file(args.quant_config)  # args.quant_config is the path to the JSON config file
+
++ if config.measure:
++     model = prepare(model, config)
+
++ if config.quantize:
++     htcore.hpu_initialize()
++     model = convert(model, config)
+
+# user code runs here
+with torch.no_grad():
+    model.to("hpu")
+    output = model(torch.randn(1, 10).to("hpu"))
+    print(output)
+
++ if config.measure:
++     finalize_calibration(model)
+```
+
+
+The complete script and configs are available in [sample_two_steps.py](./sample_two_steps.py), [maxabs_measure.json](./maxabs_measure.json) and [maxabs_quant.json](./maxabs_quant.json).
+
+First, measure the tensor quantization statistics:
+```shell
+python sample_two_steps.py --quant_config=maxabs_measure.json
+```
+
+Then quantize the model based on the previous measurements:
+```shell
+python sample_two_steps.py --quant_config=maxabs_quant.json
+```
+
+#### One step to get a quantized model
+
+```diff
+import torch
++ from neural_compressor.torch.quantization import FP8Config, convert, prepare, finalize_calibration
+import habana_frameworks.torch.core as htcore
+
+class M(torch.nn.Module):
+    def __init__(self) -> None:
+        super().__init__()
+        self.fc1 = torch.nn.Linear(10, 5)
+        self.fc2 = torch.nn.Linear(5, 10)
+
+    def forward(self, inp):
+        x1 = self.fc1(inp)
+        x2 = self.fc2(x1)
+        return x2
+
+model = M().to("hpu")
+
++ config = FP8Config.from_json_file(args.quant_config)  # args.quant_config is the path to the JSON config file
++ model = prepare(model, config)
+
+# user code runs here to do calibration
+with torch.no_grad():
+    output = model(torch.randn(1, 10).to("hpu"))
+    print(output)
+
++ finalize_calibration(model)
++ model = convert(model)
+
+# user code to benchmark the quantized model
+with torch.no_grad():
+    output = model(torch.randn(1, 10).to("hpu"))
+    print(output)
+```
+
+The complete script is available in [sample_one_step.py](./sample_one_step.py).
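+If you want a rough latency comparison before and after `convert`, a small timing loop like the sketch below can be added to the benchmark section. It is illustrative only and not part of the sample scripts: it assumes the toy `model` and HPU setup from the snippet above, the iteration counts are arbitrary, and plain wall-clock timing on HPU should be treated as a rough indicator only.
+
+```python
+import time
+
+import torch
+
+
+def rough_latency_ms(model, iters=100, warmup=10):
+    """Return a rough average per-iteration latency (ms) for the toy model above."""
+    inp = torch.randn(1, 10).to("hpu")
+    with torch.no_grad():
+        for _ in range(warmup):  # warm-up iterations are not timed
+            model(inp)
+        start = time.time()
+        for _ in range(iters):
+            model(inp)
+    return (time.time() - start) / iters * 1000
+
+
+print(f"avg latency: {rough_latency_ms(model):.3f} ms")
+```
+
+Run the one-step sample with: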
+ +```shell +python sample_one_step.py --quant_config=quant_config.json +``` diff --git a/examples/helloworld/fp8_example/maxabs_measure.json b/examples/helloworld/fp8_example/maxabs_measure.json new file mode 100644 index 00000000000..8d55f33e57a --- /dev/null +++ b/examples/helloworld/fp8_example/maxabs_measure.json @@ -0,0 +1,7 @@ +{ + "mode": "MEASURE", + "observer": "maxabs", + "allowlist": {"types": [], "names": []}, + "blocklist": {"types": [], "names": []}, + "dump_stats_path": "./hqt_output/measure" +} diff --git a/examples/helloworld/fp8_example/maxabs_quant.json b/examples/helloworld/fp8_example/maxabs_quant.json new file mode 100644 index 00000000000..d1f76f8f630 --- /dev/null +++ b/examples/helloworld/fp8_example/maxabs_quant.json @@ -0,0 +1,8 @@ +{ + "mode": "QUANTIZE", + "observer": "maxabs", + "scale_method": "maxabs_hw", + "allowlist": {"types": [], "names": []}, + "blocklist": {"types": [], "names": []}, + "dump_stats_path": "./hqt_output/measure" +} diff --git a/examples/helloworld/fp8_example/quant_config.json b/examples/helloworld/fp8_example/quant_config.json new file mode 100644 index 00000000000..c139d13bbea --- /dev/null +++ b/examples/helloworld/fp8_example/quant_config.json @@ -0,0 +1,8 @@ +{ + "mode": "AUTO", + "observer": "maxabs", + "scale_method": "maxabs_hw", + "allowlist": {"types": [], "names": []}, + "blocklist": {"types": [], "names": []}, + "dump_stats_path": "./hqt_output/measure" +} diff --git a/examples/helloworld/fp8_example/sample_one_step.py b/examples/helloworld/fp8_example/sample_one_step.py new file mode 100644 index 00000000000..54a4090a833 --- /dev/null +++ b/examples/helloworld/fp8_example/sample_one_step.py @@ -0,0 +1,56 @@ +import argparse +import torch +import habana_frameworks.torch.core as htcore +htcore.hpu_set_env() + +from neural_compressor.torch.quantization import FP8Config, convert, finalize_calibration, prepare + +torch.manual_seed(1) + + +# 1. python sample_one_step.py --quant_config=quant_config.json + + +class M(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = torch.nn.Linear(10, 5) + self.fc2 = torch.nn.Linear(5, 10) + + def forward(self, inp): + x1 = self.fc1(inp) + x2 = self.fc2(x1) + return x2 + + +def eval_func(model): + # user's eval func + input = torch.randn(1, 10) + model(input.to("hpu")) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Habana FP8 sample code.", formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("--quant_config", type=str, help="json file of quantization config") + args = parser.parse_args() + + model = M().eval().to("hpu") + htcore.hpu_initialize() + + config = FP8Config.from_json_file(args.quant_config) + model = prepare(model, config) + + # for calibration + with torch.no_grad(): + # model.to("hpu") + output = model(torch.randn(1, 10).to("hpu")) + + model = convert(model) + print(model) + + # for benchmark + with torch.no_grad(): + output = model(torch.randn(1, 10).to("hpu")) + print(output) diff --git a/examples/helloworld/fp8_example/sample_two_steps.py b/examples/helloworld/fp8_example/sample_two_steps.py new file mode 100644 index 00000000000..9e17748b9b0 --- /dev/null +++ b/examples/helloworld/fp8_example/sample_two_steps.py @@ -0,0 +1,50 @@ +import argparse +import torch +import habana_frameworks.torch.core as htcore +htcore.hpu_set_env() + +from neural_compressor.torch.quantization import FP8Config, convert, finalize_calibration, prepare + +torch.manual_seed(1) + +# 1. 
python sample_two_steps.py --quant_config=maxabs_measure.json +# 2. python sample_two_steps.py --quant_config=maxabs_quant.json + + +class M(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = torch.nn.Linear(10, 5) + self.fc2 = torch.nn.Linear(5, 10) + + def forward(self, inp): + x1 = self.fc1(inp) + x2 = self.fc2(x1) + return x2 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Habana FP8 sample code.", formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("--quant_config", type=str, help="json file of quantization config") + args = parser.parse_args() + + model = M().eval() + config = FP8Config.from_json_file(args.quant_config) + + if config.measure: + model = prepare(model, config) + + if config.quantize: + htcore.hpu_initialize() + model = convert(model, config) + print(model) + + with torch.no_grad(): + model.to("hpu") + output = model(torch.randn(1, 10).to("hpu")) + print(output) + + if config.measure: + finalize_calibration(model) diff --git a/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/README.md b/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/README.md index 9fbd442ce8e..409e8e2a7f4 100644 --- a/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/README.md +++ b/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/README.md @@ -32,28 +32,6 @@ tar -xvzf caffe_ilsvrc12.tar.gz val.txt # Run -## Diagnosis - -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. - -### Quantization diagnosis - -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... -) -``` - -### Benchmark diagnosis - -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1. 
Quantization Quantize model with QLinearOps: diff --git a/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/main.py b/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/main.py index 1fc6e1b3a4b..90c796e312b 100644 --- a/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/main.py +++ b/examples/onnxrt/image_recognition/mobilenet_v3/quantization/ptq_static/main.py @@ -286,9 +286,6 @@ def eval(onnx_model): return eval_func(onnx_model, dataloader, top1) if args.benchmark: - if args.diagnose and args.mode != "performance": - print("[ WARNING ] Diagnosis works only with performance benchmark.") - if args.mode == 'performance': from neural_compressor.benchmark import fit from neural_compressor.config import BenchmarkConfig @@ -297,7 +294,6 @@ def eval(onnx_model): iteration=1000, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, ) fit(model, conf, b_dataloader=dataloader) elif args.mode == 'accuracy': @@ -308,7 +304,6 @@ def eval(onnx_model): from neural_compressor import quantization, PostTrainingQuantConfig config = PostTrainingQuantConfig( quant_format=args.quant_format, - diagnosis=args.diagnose, ) q_model = quantization.fit(model, config, calib_dataloader=dataloader, diff --git a/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/README.md b/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/README.md index 47a4f568238..5ec8f534b46 100644 --- a/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/README.md +++ b/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/README.md @@ -31,29 +31,6 @@ tar -xvzf caffe_ilsvrc12.tar.gz val.txt ``` # Run - -## Diagnosis - -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. - -### Quantization diagnosis - -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... -) -``` - -### Benchmark diagnosis - -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1. 
Quantization Quantize model with QLinearOps: diff --git a/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/main.py b/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/main.py index 91843d01471..aacad83a241 100644 --- a/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/main.py +++ b/examples/onnxrt/image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static/main.py @@ -267,8 +267,6 @@ def eval(onnx_model): return eval_func(onnx_model, dataloader, top1) if args.benchmark: - if args.diagnose and args.mode != "performance": - print("[ WARNING ] Diagnosis works only with performance benchmark.") if args.mode == 'performance': from neural_compressor.benchmark import fit from neural_compressor.config import BenchmarkConfig @@ -277,7 +275,6 @@ def eval(onnx_model): iteration=1000, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, ) fit(model, conf, b_dataloader=dataloader) elif args.mode == 'accuracy': @@ -288,7 +285,6 @@ def eval(onnx_model): from neural_compressor import quantization, PostTrainingQuantConfig config = PostTrainingQuantConfig( quant_format=args.quant_format, - diagnosis=args.diagnose, ) q_model = quantization.fit(model, config, calib_dataloader=dataloader, diff --git a/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/README.md b/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/README.md index 1113e97d6d6..f889c658614 100644 --- a/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/README.md +++ b/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/README.md @@ -32,28 +32,6 @@ tar -xvzf caffe_ilsvrc12.tar.gz val.txt # Run -## Diagnosis - -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. - -### Quantization diagnosis - -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... -) -``` - -### Benchmark diagnosis - -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1. 
Quantization Quantize model with QLinearOps: diff --git a/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/main.py b/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/main.py index b42815a1d3d..802e9b18fef 100644 --- a/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/main.py +++ b/examples/onnxrt/image_recognition/resnet50_torchvision/quantization/ptq_static/main.py @@ -295,9 +295,6 @@ def eval_func(model, dataloader, metric): def eval(onnx_model): return eval_func(onnx_model, dataloader, top1) - if args.benchmark and args.diagnose and args.mode != "performance": - print("[ WARNING ] Diagnosis works only with performance benchmark.") - if args.benchmark: if args.mode == 'performance': from neural_compressor.benchmark import fit @@ -307,7 +304,6 @@ def eval(onnx_model): iteration=1000, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, device=args.device, backend=backend, ) @@ -320,7 +316,6 @@ def eval(onnx_model): from neural_compressor import quantization, PostTrainingQuantConfig config = PostTrainingQuantConfig( quant_format=args.quant_format, - diagnosis=args.diagnose, device=args.device, backend=backend ) diff --git a/examples/onnxrt/nlp/bert/quantization/ptq_dynamic/README.md b/examples/onnxrt/nlp/bert/quantization/ptq_dynamic/README.md index 6e0078f99f3..aaed30c3075 100644 --- a/examples/onnxrt/nlp/bert/quantization/ptq_dynamic/README.md +++ b/examples/onnxrt/nlp/bert/quantization/ptq_dynamic/README.md @@ -32,19 +32,6 @@ python prepare_model.py --input_model='MRPC.zip' --output_model='bert.onnx' # Run -## Diagnosis - -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. - -### Benchmark diagnosis - -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1. Quantization Dynamic quantization: diff --git a/examples/onnxrt/nlp/bert/quantization/ptq_static/README.md b/examples/onnxrt/nlp/bert/quantization/ptq_static/README.md index 3f6d640d947..4fbb4357574 100644 --- a/examples/onnxrt/nlp/bert/quantization/ptq_static/README.md +++ b/examples/onnxrt/nlp/bert/quantization/ptq_static/README.md @@ -31,17 +31,6 @@ python prepare_model.py --input_model='MRPC.zip' --output_model='bert.onnx' # Run -## Diagnosis -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. - -### Benchmark diagnosis -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1. 
Quantization Static quantization with QOperator format: diff --git a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py index 95a49ce37ab..c7cf936270d 100644 --- a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py +++ b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_dynamic/main.py @@ -216,8 +216,6 @@ def _process_dataset(self): self.label = [] self.onnx_inputs = [] for inputs in self.dataset: - # import pdb; - # pdb.set_trace() onnx_inputs = [] has_labels = all(inputs.get(k) is not None for k in self.label_names) if has_labels: @@ -237,8 +235,6 @@ def _process_dataset(self): } """ for key in self.onnx_input_names: - # import pdb; - # pdb.set_trace() if key in inputs: # onnx_inputs[key] = np.array([inputs[key]]) onnx_inputs.append(np.array(inputs[key])) diff --git a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py index b3de22ac766..5540f4c002d 100644 --- a/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py +++ b/examples/onnxrt/nlp/huggingface_model/token_classification/layoutlmv2/quantization/ptq_static/main.py @@ -216,8 +216,6 @@ def _process_dataset(self): self.label = [] self.onnx_inputs = [] for inputs in self.dataset: - # import pdb; - # pdb.set_trace() onnx_inputs = [] has_labels = all(inputs.get(k) is not None for k in self.label_names) if has_labels: @@ -237,8 +235,6 @@ def _process_dataset(self): } """ for key in self.onnx_input_names: - # import pdb; - # pdb.set_trace() if key in inputs: # onnx_inputs[key] = np.array([inputs[key]]) onnx_inputs.append(np.array(inputs[key])) diff --git a/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/README.md b/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/README.md index d3c849b517a..47746bb4394 100644 --- a/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/README.md +++ b/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/README.md @@ -27,28 +27,6 @@ Download [MS COCO 2017 dataset](https://cocodataset.org/#download). # Run -## Diagnosis - -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. - -### Quantization diagnosis - -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... -) -``` - -### Benchmark diagnosis - -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1. 
Quantization Static quantization with QOperator format: diff --git a/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/main.py b/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/main.py index 0e9e0235661..aaffb794e0a 100644 --- a/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/main.py +++ b/examples/onnxrt/object_detection/ssd_mobilenet_v1/quantization/ptq_static/main.py @@ -140,8 +140,6 @@ def eval_func(model): return metric.result() if args.benchmark: - if args.diagnose and args.mode != "performance": - print("[ WARNING ] Diagnosis works only with performance benchmark.") if args.mode == 'performance': from neural_compressor.benchmark import fit from neural_compressor.config import BenchmarkConfig @@ -149,7 +147,6 @@ def eval_func(model): iteration=100, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, device=args.device, backend=backend, ) @@ -169,7 +166,6 @@ def eval_func(model): accuracy_criterion=accuracy_criterion, quant_format=args.quant_format, calibration_sampling_size=[50], - diagnosis=args.diagnose, device=args.device, backend=backend, ) diff --git a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py index 940722075ab..e4d5db6b8ee 100644 --- a/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py +++ b/examples/pytorch/object_detection/ssd_resnet34/quantization/ptq/fx/python/models/utils.py @@ -59,7 +59,6 @@ def _compute_padding(self, input, dim): return additional_padding, total_padding def forward(self, input): - #import pdb; pdb.set_trace() if self.padding == "VALID": return F.conv2d( input, @@ -180,7 +179,6 @@ def decode_boxes(rel_codes, boxes, weights): dh = dh / wh pred_ctr_x = dx * widths + ctr_x - #import pdb; pdb.set_trace() pred_ctr_y = dy * heights + ctr_y pred_w = torch.exp(dw) * widths pred_h = torch.exp(dh) * heights @@ -194,5 +192,4 @@ def decode_boxes(rel_codes, boxes, weights): ], dim=2, ) - #import pdb; pdb.set_trace() return pred_boxes diff --git a/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/README.md b/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/README.md index 9f368bc8bd6..90c307d5021 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/README.md +++ b/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/README.md @@ -76,24 +76,6 @@ config = PostTrainingQuantConfig( ) ``` -## Diagnosis -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. -### Quantization diagnosis -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... -) -``` - -### Benchmark diagnosis -``` -config = BenchmarkConfig( - diagnosis=True, - ... 
-) -``` - ## 1 Quantization ```shell diff --git a/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/main.py index c375fa277ec..8c6229989de 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/mobilenet_v3/quantization/ptq/main.py @@ -249,7 +249,6 @@ def run(self): conf = PostTrainingQuantConfig( calibration_sampling_size=[20, 50], op_name_dict=op_name_dict, - diagnosis=args.diagnose, ) from neural_compressor import Metric top1 = Metric(name="topk", k=1) @@ -283,7 +282,6 @@ def eval(model): iteration=100, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, ) fit(args.input_graph, conf, b_dataloader=dataloader) elif args.mode == 'accuracy': diff --git a/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/README.md b/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/README.md index 520705edf8f..30e494a47f5 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/README.md +++ b/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/README.md @@ -72,24 +72,6 @@ config = PostTrainingQuantConfig( ) ``` -## Diagnosis -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. -### Quantization diagnosis -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... -) -``` - -### Benchmark diagnosis -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1 Quantization ```shell diff --git a/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/main.py index 11f4dcd6e96..cb944f90711 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq/main.py @@ -120,7 +120,6 @@ def run(self): conf = PostTrainingQuantConfig( outputs=['softmax_tensor'], calibration_sampling_size=[50, 100], - diagnosis=args.diagnose, ) from neural_compressor import Metric top1 = Metric(name="topk", k=1) @@ -155,7 +154,6 @@ def eval(model): iteration=100, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, ) fit(args.input_graph, conf, b_dataloader=dataloader) elif args.mode == 'accuracy': diff --git a/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/README.md b/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/README.md index 547a2f6d7c8..b62baeb61cf 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/README.md +++ b/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/README.md @@ -105,24 +105,6 @@ config = PostTrainingQuantConfig( ) ``` -## Diagnosis -Neural Compressor offers quantization and benchmark diagnosis. Adding `diagnosis` parameter to Quantization/Benchmark config will provide additional details useful in diagnostics. -### Quantization diagnosis -``` -config = PostTrainingQuantConfig( - diagnosis=True, - ... 
-) -``` - -### Benchmark diagnosis -``` -config = BenchmarkConfig( - diagnosis=True, - ... -) -``` - ## 1 Quantization ```shell diff --git a/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/main.py b/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/main.py index 73da3fd60a6..95b17af0290 100644 --- a/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/main.py +++ b/examples/tensorflow/image_recognition/tensorflow_models/resnet_v2_50/quantization/ptq/main.py @@ -114,7 +114,6 @@ def run(self): eval_dataloader = create_dataloader('tensorflow', eval_dataloader_args) conf = PostTrainingQuantConfig( calibration_sampling_size=[50, 100], - diagnosis=args.diagnose, ) from neural_compressor import Metric top1 = Metric(name="topk", k=1) @@ -148,7 +147,6 @@ def eval(model): iteration=100, cores_per_instance=4, num_of_instance=1, - diagnosis=args.diagnose, ) fit(args.input_graph, conf, b_dataloader=dataloader) elif args.mode == 'accuracy': diff --git a/examples/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt b/examples/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt index 74b67c4c7b3..186124490e2 100644 --- a/examples/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt +++ b/examples/tensorflow/nlp/large_language_models/quantization/ptq/gpt-j/requirements.txt @@ -1,4 +1,4 @@ -tensorflow==2.12 +tensorflow==2.12.1 transformers datasets numpy \ No newline at end of file diff --git a/examples/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py b/examples/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py index 78c91f446bb..b88cd9f7a09 100644 --- a/examples/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py +++ b/examples/tensorflow/nlp/large_language_models/quantization/ptq/smoothquant/main.py @@ -188,7 +188,6 @@ def eval_func(model): from neural_compressor.config import AccuracyCriterion from neural_compressor import quantization -os.environ["TF_USE_LEGACY_KERAS"]="False" recipes = {} if args.sq: recipes = {"smooth_quant": True, "smooth_quant_args": {'alpha': args.alpha}} diff --git a/neural_coder/README.md b/neural_coder/README.md deleted file mode 100644 index b7c6d0fb63b..00000000000 --- a/neural_coder/README.md +++ /dev/null @@ -1,52 +0,0 @@ -Neural Coder -=========================== -## What do we offer? - -Neural Coder is a novel component under Intel® Neural Compressor to further simplify the deployment of deep learning models via one-click automated code changes for device switch (e.g., CUDA to CPU) and optimization enabling. Subsequently, Neural Coder can also perform automated benchmark on all applicable optimization sets acquired from the automated enabling, and evaluate for the best out-of-box performance. - -Neural Coder leverages static program analysis techniques and heuristic optimization rules to simplify the usage of various Deep Learning optimization APIs for increasing computation efficiency of AI models and improving user experience for general AI customers. We demonstrate great improvement of developer productivity and aim to facilitate enhanced Deep Learning acceleration adoption via this toolkit. - -Neural Coder helps you code Deep Learning optimizations automatically into your scripts. 
For example, to apply -- Automatic Mixed Precision (torch.cpu.amp.autocast) -- JIT Script computation graph transformation (torch.jit.script) -- Channels Last memory format transformation (torch.channels_last) - -simultaneously on below PyTorch evaluation code, we generate the optimized code in one-click by detecting the correct position to insert the correct API code lines: -```diff - import torch - import torchvision.models as models - my_model = models.resnet50(pretrained=True) -+ import torch -+ with torch.no_grad(): -+ my_model = my_model.to(memory_format=torch.channels_last) -+ import torch -+ with torch.no_grad(): -+ my_model.eval() -+ my_model = torch.jit.script(my_model) -+ my_model = torch.jit.freeze(my_model) - my_model.eval() - batch_size = 112 - input = torch.rand(batch_size, 3, 224, 224) - with torch.no_grad(): -+ import torch -+ with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): - my_model(input) -``` - -## Getting Started! - -There are currently 3 ways to use Neural Coder for automatic quantization enabling and benchmark. - -### Jupyter Lab Extension -We offer Neural Coder as an extension plugin in Jupyter Lab. This enables users to utilize Neural Coder while writing their Deep Learning models in Jupyter Lab coding platform. Users can simply search for ```jupyter-lab-neural-compressor``` in the Extension Manager in JupyterLab and install Neural Coder with one click. For more details, please refer to this [guide](extensions/neural_compressor_ext_lab/README.md). - -[AWS Amazon SageMaker](https://aws.amazon.com/sagemaker/) users can also use Neural Coder as an extension following this [guide](docs/AWSSageMakerSupport.md). - -### Python Launcher -Neural Coder can be used as a Python Launcher. Users can run the Python Deep Learning model code as it is with automatic enabling of optimizations by simply adding an inline prefix ```-m neural_coder``` to the Python command line. For more details, please refer to this [guide](docs/PythonLauncher.md). - -### Python API -There are 3 user-facing APIs for Neural Coder: enable, bench and superbench. For more details, please refer to this [guide](docs/PythonAPI.md). We have provided a [list](docs/SupportMatrix.md) of supported Deep Learning optimization features. Specifically for quantization, we provide an auto-quantization API that helps automatically enable quantization on Deep Learning models and automatically evaluates for the best performance on the model with no manual coding needed. Supported features include Post-Training Static Quantization, Post-Training Dynamic Quantization, and Mixed Precision. For more details, please refer to this [guide](docs/Quantization.md). - -## Contact -Please contact us at [inc.maintainers@intel.com](mailto:inc.maintainers@intel.com) for any Neural Coder related question. diff --git a/neural_coder/__init__.py b/neural_coder/__init__.py deleted file mode 100644 index 7bf18cd4956..00000000000 --- a/neural_coder/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from .interface import enable -from .interface import bench -from .interface import superbench - -# from .interface import superreport -from .interface import auto_quant diff --git a/neural_coder/__main__.py b/neural_coder/__main__.py deleted file mode 100644 index 0551b72dc21..00000000000 --- a/neural_coder/__main__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .launcher import Launcher - -args = Launcher.parse_args() -Launcher.execute(args) diff --git a/neural_coder/backends/.yaml b/neural_coder/backends/.yaml deleted file mode 100644 index 1e3b1fa1501..00000000000 --- a/neural_coder/backends/.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - - - - - - content: - - |- - [+] YOUR CODE LINE 1 - [+] YOUR CODE LINE 2 - [+] YOUR CODE LINE 3 - - |- - [+] YOUR CODE LINE 1 - [+] YOUR CODE LINE 2 - [+] YOUR CODE LINE 3 - - |- - [+] YOUR CODE LINE 1 - [+] YOUR CODE LINE 2 - [+] YOUR CODE LINE 3 - order: - - below: - - - - - above: - - - - - - below: - - - - - above: - - - - - - below: - - - - - above: - - - - diff --git a/neural_coder/backends/__init__.py b/neural_coder/backends/__init__.py deleted file mode 100644 index e833188cc78..00000000000 --- a/neural_coder/backends/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
diff --git a/neural_coder/backends/intel_extension_for_transformers.yaml b/neural_coder/backends/intel_extension_for_transformers.yaml deleted file mode 100644 index a1accbbfb4b..00000000000 --- a/neural_coder/backends/intel_extension_for_transformers.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Note: For intel_extension_for_transformers support -# we default apply "PostTrainingDynamic" and "eval_f1" -# support for customization is pending further evaluation - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] metric = metrics.Metric(name="eval_f1", is_relative=True, criterion=0.01) - [+] objective = objectives.performance - [+] q_config = QuantizationConfig(approach="PostTrainingDynamic", metrics=[metric], objectives=[objective]) - [+] MODEL_NAME = trainer.quantize(quant_config=q_config) - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/keras_inc.yaml b/neural_coder/backends/keras_inc.yaml deleted file mode 100644 index 0731a4bbbc9..00000000000 --- a/neural_coder/backends/keras_inc.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] from neural_compressor.quantization import fit - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] config = PostTrainingQuantConfig(backend='itex', quant_level=1) - [+] quantized_model = fit(MODEL_NAME, conf=config, calib_dataloader=DATALOADER_NAME, eval_func=eval_func) - [+] quantized_model.save("./quantized_model") - order: - - below: - above: diff --git a/neural_coder/backends/nano_bf16.yaml b/neural_coder/backends/nano_bf16.yaml deleted file mode 100644 index afecf6908f1..00000000000 --- a/neural_coder/backends/nano_bf16.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, precision="bf16", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_bf16_channels_last.yaml b/neural_coder/backends/nano_bf16_channels_last.yaml deleted file mode 100644 index d0f3987ef29..00000000000 --- a/neural_coder/backends/nano_bf16_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, precision="bf16", channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_bf16_ipex.yaml b/neural_coder/backends/nano_bf16_ipex.yaml deleted file mode 100644 index 6e8db6f8719..00000000000 --- a/neural_coder/backends/nano_bf16_ipex.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, precision="bf16", use_ipex=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_bf16_ipex_channels_last.yaml b/neural_coder/backends/nano_bf16_ipex_channels_last.yaml deleted file mode 100644 index 3b99ef12f35..00000000000 --- a/neural_coder/backends/nano_bf16_ipex_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, precision="bf16", use_ipex=True, channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_fp32_channels_last.yaml b/neural_coder/backends/nano_fp32_channels_last.yaml deleted file mode 100644 index 9516ba4d50d..00000000000 --- a/neural_coder/backends/nano_fp32_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_fp32_ipex.yaml b/neural_coder/backends/nano_fp32_ipex.yaml deleted file mode 100644 index c6fc4329b38..00000000000 --- a/neural_coder/backends/nano_fp32_ipex.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, use_ipex=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_fp32_ipex_channels_last.yaml b/neural_coder/backends/nano_fp32_ipex_channels_last.yaml deleted file mode 100644 index aea74db737a..00000000000 --- a/neural_coder/backends/nano_fp32_ipex_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, use_ipex=True, channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_gpu_to_cpu.yaml b/neural_coder/backends/nano_gpu_to_cpu.yaml deleted file mode 100644 index 426f72ce980..00000000000 --- a/neural_coder/backends/nano_gpu_to_cpu.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_above_model_definition_line", "insert_above_input_definition_line"] - content: - - |- - [+] from bigdl.nano.pytorch import patch_torch - [+] patch_torch() - order: - - below: - above: diff --git a/neural_coder/backends/nano_int8.yaml b/neural_coder/backends/nano_int8.yaml deleted file mode 100644 index cb846256435..00000000000 --- a/neural_coder/backends/nano_int8.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, precision="int8", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_bf16.yaml b/neural_coder/backends/nano_jit_bf16.yaml deleted file mode 100644 index 122d93ca717..00000000000 --- a/neural_coder/backends/nano_jit_bf16.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, accelerator="jit", precision="bf16", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_bf16_channels_last.yaml b/neural_coder/backends/nano_jit_bf16_channels_last.yaml deleted file mode 100644 index cf7e1437c4a..00000000000 --- a/neural_coder/backends/nano_jit_bf16_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, accelerator="jit", precision="bf16", channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_bf16_ipex.yaml b/neural_coder/backends/nano_jit_bf16_ipex.yaml deleted file mode 100644 index 1a237c6edf8..00000000000 --- a/neural_coder/backends/nano_jit_bf16_ipex.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, accelerator="jit", precision="bf16", use_ipex=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_bf16_ipex_channels_last.yaml b/neural_coder/backends/nano_jit_bf16_ipex_channels_last.yaml deleted file mode 100644 index 603db6942f8..00000000000 --- a/neural_coder/backends/nano_jit_bf16_ipex_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, accelerator="jit", precision="bf16", use_ipex=True, channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_fp32.yaml b/neural_coder/backends/nano_jit_fp32.yaml deleted file mode 100644 index 71e7d4ede95..00000000000 --- a/neural_coder/backends/nano_jit_fp32.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, accelerator="jit", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_fp32_channels_last.yaml b/neural_coder/backends/nano_jit_fp32_channels_last.yaml deleted file mode 100644 index c30a1767175..00000000000 --- a/neural_coder/backends/nano_jit_fp32_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, accelerator="jit", channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_fp32_ipex.yaml b/neural_coder/backends/nano_jit_fp32_ipex.yaml deleted file mode 100644 index f673b076a20..00000000000 --- a/neural_coder/backends/nano_jit_fp32_ipex.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, accelerator="jit", use_ipex=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_jit_fp32_ipex_channels_last.yaml b/neural_coder/backends/nano_jit_fp32_ipex_channels_last.yaml deleted file mode 100644 index 54514000ac5..00000000000 --- a/neural_coder/backends/nano_jit_fp32_ipex_channels_last.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, accelerator="jit", use_ipex=True, channels_last=True, input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_onnxruntime_fp32.yaml b/neural_coder/backends/nano_onnxruntime_fp32.yaml deleted file mode 100644 index c29e3410bc4..00000000000 --- a/neural_coder/backends/nano_onnxruntime_fp32.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, accelerator="onnxruntime", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_onnxruntime_int8_qlinear.yaml b/neural_coder/backends/nano_onnxruntime_int8_qlinear.yaml deleted file mode 100644 index bb5b35557da..00000000000 --- a/neural_coder/backends/nano_onnxruntime_int8_qlinear.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, accelerator="onnxruntime", precision="int8", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_openvino_fp32.yaml b/neural_coder/backends/nano_openvino_fp32.yaml deleted file mode 100644 index 6b9324cec39..00000000000 --- a/neural_coder/backends/nano_openvino_fp32.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.trace(MODEL_NAME, accelerator="openvino", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/nano_openvino_int8.yaml b/neural_coder/backends/nano_openvino_int8.yaml deleted file mode 100644 index 9f1b70814e7..00000000000 --- a/neural_coder/backends/nano_openvino_int8.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - - "indent_inference_line" - content: - - |- - [+] from bigdl.nano.pytorch import InferenceOptimizer - [+] MODEL_NAME = InferenceOptimizer.quantize(MODEL_NAME, accelerator="openvino", precision="int8", input_sample=INPUT_NAME) - [+] with InferenceOptimizer.get_context(MODEL_NAME): - - 1 - order: - - below: - above: diff --git a/neural_coder/backends/onnx_inc_dynamic_quant.yaml b/neural_coder/backends/onnx_inc_dynamic_quant.yaml deleted file mode 100644 index 98a98d941ca..00000000000 --- a/neural_coder/backends/onnx_inc_dynamic_quant.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] from neural_compressor.quantization import fit - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] config = PostTrainingQuantConfig(approach='dynamic', quant_level=1) - [+] MODEL_NAME = fit(MODEL_NAME, conf=config, calib_dataloader=DATALOADER_NAME, eval_func=EVAL_FUNCTION_NAME) - [+] MODEL_NAME.save("./quantized_model.onnx") - order: - - below: - above: diff --git a/neural_coder/backends/onnx_inc_static_quant_qdq.yaml b/neural_coder/backends/onnx_inc_static_quant_qdq.yaml deleted file mode 100644 index 6df51eefb54..00000000000 --- a/neural_coder/backends/onnx_inc_static_quant_qdq.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] from neural_compressor.quantization import fit - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] config = PostTrainingQuantConfig(quant_format='QDQ', quant_level=1) - [+] MODEL_NAME = fit(MODEL_NAME, conf=config, calib_dataloader=DATALOADER_NAME, eval_func=EVAL_FUNCTION_NAME) - [+] MODEL_NAME.save("./quantized_model.onnx") - order: - - below: - above: diff --git a/neural_coder/backends/onnx_inc_static_quant_qlinear.yaml b/neural_coder/backends/onnx_inc_static_quant_qlinear.yaml deleted file mode 100644 index 0c7f5e500f7..00000000000 --- a/neural_coder/backends/onnx_inc_static_quant_qlinear.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] from neural_compressor.quantization import fit - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] config = PostTrainingQuantConfig(quant_format='default', quant_level=1) - [+] MODEL_NAME = fit(MODEL_NAME, conf=config, calib_dataloader=DATALOADER_NAME, eval_func=EVAL_FUNCTION_NAME) - [+] MODEL_NAME.save("./quantized_model.onnx") - order: - - below: - above: diff --git a/neural_coder/backends/pytorch_aliblade.yaml b/neural_coder/backends/pytorch_aliblade.yaml deleted file mode 100644 index ba29ac86548..00000000000 --- a/neural_coder/backends/pytorch_aliblade.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - content: - - |- - [+] import torch_blade - [+] with torch.no_grad(): - [+] MODEL_NAME = torch_blade.optimize(MODEL_NAME, allow_tracing=True, model_inputs=tuple(INPUT_NAME)) - order: - - below: - above: diff --git a/neural_coder/backends/pytorch_benchmark.yaml b/neural_coder/backends/pytorch_benchmark.yaml deleted file mode 100644 index 1a153637d57..00000000000 --- a/neural_coder/backends/pytorch_benchmark.yaml +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_above_inference_line - - insert_below_inference_line - - indent_inference_line - content: - - |- - [+] if not ACCURACY_MODE: - [+] try: - [+] time - [+] time_nc = time.time - [+] except: - [+] from time import time as time_nc - [+] count_iter_ = 0 - [+] total_time_ = 0 - [+] num_iter_ = NUM_BENCHMARK_ITERATION - [+] num_warmup_iter_ = 10 - [+] list_batch_time_ = [] - [+] for i_ in range(num_iter_): - [+] count_iter_ = count_iter_ + 1 - [+] if count_iter_ > num_warmup_iter_: - [+] t1_ = time_nc() - [+] try: - [+] torch - [+] no_grad = torch.no_grad - [+] except: - [+] from torch import no_grad - [+] with no_grad(): - - |- - [+] if count_iter_ > num_warmup_iter_: - [+] t2_ = time_nc() - [+] batch_time_ = t2_ - t1_ - [+] list_batch_time_.append(batch_time_) - [+] total_time_ = total_time_ + batch_time_ - [+] print("Neural_Coder_Bench_IPS: ", round((num_iter_ - num_warmup_iter_) / total_time_, 3)) - [+] print("Neural_Coder_Bench_MSPI: ", round(total_time_ / (num_iter_ - num_warmup_iter_) * 1000, 3)) - [+] list_batch_time_.sort() - [+] p50_latency_ = list_batch_time_[int(len(list_batch_time_) * 0.50) - 1] * 1000 - [+] p90_latency_ = list_batch_time_[int(len(list_batch_time_) * 0.90) - 1] * 1000 - [+] p99_latency_ = list_batch_time_[int(len(list_batch_time_) * 0.99) - 1] * 1000 - [+] print("Neural_Coder_Bench_P50: ", round(p50_latency_, 3)) - [+] print("Neural_Coder_Bench_P90: ", round(p90_latency_, 3)) - [+] print("Neural_Coder_Bench_P99: ", round(p99_latency_, 3)) - [+] quit() - [+] else: - [+] INFERENCE_LINE - - 3 - order: - - below: - above: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_channels_last.yaml b/neural_coder/backends/pytorch_channels_last.yaml deleted file mode 100644 index 75343032b02..00000000000 --- a/neural_coder/backends/pytorch_channels_last.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] import torch - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = MODEL_NAME.to(memory_format=torch.channels_last) - order: - - below: - - pytorch_inc_static_quant_fx - - pytorch_inc_static_quant_ipex - - pytorch_inc_dynamic_quant - above: - - pytorch_ipex_fp32 - - pytorch_ipex_bf16 - - pytorch_ipex_int8_static_quant - - pytorch_ipex_int8_dynamic_quant - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi diff --git a/neural_coder/backends/pytorch_inc_bf16.yaml b/neural_coder/backends/pytorch_inc_bf16.yaml deleted file mode 100644 index bdbf3b07272..00000000000 --- a/neural_coder/backends/pytorch_inc_bf16.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] import torch - [+] torch.backends.quantized.engine = 'onednn' - [+] from neural_compressor.config import MixedPrecisionConfig - [+] from neural_compressor import mix_precision - [+] config = MixedPrecisionConfig() - [+] MODEL_NAME = mix_precision.fit(model=MODEL_NAME, config=config) - [+] try: - [+] with torch.no_grad(): - [+] MODEL_NAME = torch.jit.script(MODEL_NAME) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - [+] except: - [+] pass - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_dynamic_quant.yaml b/neural_coder/backends/pytorch_inc_dynamic_quant.yaml deleted file mode 100644 index edb27b4d188..00000000000 --- a/neural_coder/backends/pytorch_inc_dynamic_quant.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] def eval_func(model): - [+] EVAL_FUNC_LINES - [+] try: - [+] torch.backends.quantized.engine = 'onednn' - [+] except: - [+] from torch.backends.quantized import engine; engine = 'onednn' - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from neural_compressor.quantization import fit - [+] conf = PostTrainingQuantConfig(approach="dynamic", quant_level=1) - [+] MODEL_NAME = fit(model=MODEL_NAME, conf=conf, eval_func=eval_func) - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - [+] try: - [+] with torch.no_grad(): - [+] MODEL_NAME = torch.jit.script(MODEL_NAME) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - [+] except: - [+] pass - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_dynamic_quant_fp8.yaml b/neural_coder/backends/pytorch_inc_dynamic_quant_fp8.yaml deleted file mode 100644 index f32b91389bb..00000000000 --- a/neural_coder/backends/pytorch_inc_dynamic_quant_fp8.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] def eval_func(model): - [+] EVAL_FUNC_LINES - [+] try: - [+] torch.backends.quantized.engine = 'onednn' - [+] except: - [+] from torch.backends.quantized import engine; engine = 'onednn' - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from neural_compressor.quantization import fit - [+] conf = PostTrainingQuantConfig(approach="dynamic", precision = FP8_DATA_FORMAT, quant_level=1) - [+] MODEL_NAME = fit(model=MODEL_NAME, conf=conf, calib_dataloader=DATALOADER_NAME, eval_func=eval_func) - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - [+] try: - [+] with torch.no_grad(): - [+] MODEL_NAME = torch.jit.script(MODEL_NAME) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - [+] except: - [+] pass - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_huggingface_optimum_dynamic.yaml b/neural_coder/backends/pytorch_inc_huggingface_optimum_dynamic.yaml deleted file mode 100644 index 890ff34d0a2..00000000000 --- a/neural_coder/backends/pytorch_inc_huggingface_optimum_dynamic.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] def eval_func(model): - [+] EVAL_FUNC_LINES - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from optimum.intel.neural_compressor import INCQuantizer - [+] quantization_config = PostTrainingQuantConfig(approach="dynamic", quant_level=1) - [+] quantizer = INCQuantizer.from_pretrained(MODEL_NAME) - [+] quantizer.quantize(quantization_config=quantization_config, save_directory="quantized_model", save_onnx_model=False) - [+] MODEL_NAME = quantizer._quantized_model - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_huggingface_optimum_static.yaml b/neural_coder/backends/pytorch_inc_huggingface_optimum_static.yaml deleted file mode 100644 index 08df89f24fd..00000000000 --- a/neural_coder/backends/pytorch_inc_huggingface_optimum_static.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] def eval_func(model): - [+] EVAL_FUNC_LINES - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from optimum.intel.neural_compressor import INCQuantizer - [+] quantization_config = PostTrainingQuantConfig(approach="static", quant_level=1) - [+] quantizer = INCQuantizer.from_pretrained(MODEL_NAME) - [+] quantizer.quantize(quantization_config=quantization_config, calibration_dataset=eval_dataset, save_directory="quantized_model", save_onnx_model=False) - [+] MODEL_NAME = quantizer._quantized_model - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_static_quant_fx.yaml b/neural_coder/backends/pytorch_inc_static_quant_fx.yaml deleted file mode 100644 index f77801ebc51..00000000000 --- a/neural_coder/backends/pytorch_inc_static_quant_fx.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] def eval_func(model): - [+] EVAL_FUNC_LINES - [+] try: - [+] torch.backends.quantized.engine = 'onednn' - [+] except: - [+] from torch.backends.quantized import engine; engine = 'onednn' - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from neural_compressor.quantization import fit - [+] conf = PostTrainingQuantConfig(quant_level=1) - [+] MODEL_NAME = fit(model=MODEL_NAME, conf=conf, calib_dataloader=DATALOADER_NAME, eval_func=eval_func) - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - [+] try: - [+] with torch.no_grad(): - [+] MODEL_NAME = torch.jit.script(MODEL_NAME) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - [+] except: - [+] pass - - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_static_quant_fx_fp8.yaml b/neural_coder/backends/pytorch_inc_static_quant_fx_fp8.yaml deleted file mode 100644 index 419c5256588..00000000000 --- a/neural_coder/backends/pytorch_inc_static_quant_fx_fp8.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] def eval_func(model): - [+] EVAL_FUNC_LINES - [+] try: - [+] torch.backends.quantized.engine = 'onednn' - [+] except: - [+] from torch.backends.quantized import engine; engine = 'onednn' - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from neural_compressor.quantization import fit - [+] conf = PostTrainingQuantConfig(precision=FP8_DATA_FORMAT, quant_level=1) - [+] MODEL_NAME = fit(model=MODEL_NAME, conf=conf, calib_dataloader=DATALOADER_NAME, eval_func=eval_func) - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - [+] try: - [+] with torch.no_grad(): - [+] MODEL_NAME = torch.jit.script(MODEL_NAME) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - [+] except: - [+] pass - - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_static_quant_ipex.yaml b/neural_coder/backends/pytorch_inc_static_quant_ipex.yaml deleted file mode 100644 index 9cde95fa197..00000000000 --- a/neural_coder/backends/pytorch_inc_static_quant_ipex.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from neural_compressor.quantization import fit - [+] conf = PostTrainingQuantConfig(backend='ipex', quant_level=1) - [+] MODEL_NAME = fit(model=MODEL_NAME, conf=conf, calib_dataloader=DATALOADER_NAME) - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_inc_static_quant_ipex_xpu.yaml b/neural_coder/backends/pytorch_inc_static_quant_ipex_xpu.yaml deleted file mode 100644 index f4835516cdf..00000000000 --- a/neural_coder/backends/pytorch_inc_static_quant_ipex_xpu.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_dataloader_definition_line", "insert_below_model_definition_line"] - content: - - |- - [+] from neural_compressor.config import PostTrainingQuantConfig - [+] from neural_compressor.quantization import fit - [+] MODEL_NAME = MODEL_NAME.to("xpu") - [+] conf = PostTrainingQuantConfig(backend='ipex', quant_level=1, device="xpu") - [+] MODEL_NAME = fit(model=MODEL_NAME, conf=conf, calib_dataloader=DATALOADER_NAME) - [+] MODEL_NAME.save("./quantized_model") - [+] MODEL_NAME.eval() - order: - - below: - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - pytorch_channels_last diff --git a/neural_coder/backends/pytorch_ipex_bf16.yaml b/neural_coder/backends/pytorch_ipex_bf16.yaml deleted file mode 100644 index d27f7c6c58b..00000000000 --- a/neural_coder/backends/pytorch_ipex_bf16.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] import torch - [+] import intel_extension_for_pytorch as ipex - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = ipex.optimize(MODEL_NAME, dtype=torch.bfloat16) - - |- - [+] import torch - [+] with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): - - 1 - order: - - below: - - pytorch_channels_last - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_ipex_fp32.yaml b/neural_coder/backends/pytorch_ipex_fp32.yaml deleted file mode 100644 index 0256429948d..00000000000 --- a/neural_coder/backends/pytorch_ipex_fp32.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] import torch - [+] import intel_extension_for_pytorch as ipex - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = ipex.optimize(MODEL_NAME, dtype=torch.float32) - order: - - below: - - pytorch_channels_last - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi diff --git a/neural_coder/backends/pytorch_ipex_int8_dynamic_quant.yaml b/neural_coder/backends/pytorch_ipex_int8_dynamic_quant.yaml deleted file mode 100644 index 06d35d57ef8..00000000000 --- a/neural_coder/backends/pytorch_ipex_int8_dynamic_quant.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - content: - - |- - [+] if "quantize" not in str(type(MODEL_NAME)) and "jit" not in str(type(MODEL_NAME)): - [+] import torch - [+] import intel_extension_for_pytorch as ipex - [+] qconfig = ipex.quantization.default_dynamic_qconfig - [+] MODEL_NAME = ipex.quantization.prepare(MODEL_NAME, qconfig, example_inputs=INPUT_NAME, inplace=False) - [+] with torch.no_grad(): - [+] for i in range(10): - [+] INFERENCE_LINE - [+] MODEL_NAME = ipex.quantization.convert(MODEL_NAME) - [+] with torch.no_grad(): - [+] INFERENCE_LINE - [+] MODEL_NAME.eval() - order: - - below: - - pytorch_channels_last - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi diff --git a/neural_coder/backends/pytorch_ipex_int8_static_quant.yaml b/neural_coder/backends/pytorch_ipex_int8_static_quant.yaml deleted file mode 100644 index 9de931ee9fe..00000000000 --- a/neural_coder/backends/pytorch_ipex_int8_static_quant.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - content: - - |- - [+] if "quantize" not in str(type(MODEL_NAME)) and "jit" not in str(type(MODEL_NAME)): - [+] import torch - [+] import intel_extension_for_pytorch as ipex - [+] qconfig = ipex.quantization.default_static_qconfig - [+] MODEL_NAME = ipex.quantization.prepare(MODEL_NAME, qconfig, example_inputs=INPUT_NAME, inplace=False) - [+] with torch.no_grad(): - [+] for i in range(10): - [+] INFERENCE_LINE - [+] MODEL_NAME = ipex.quantization.convert(MODEL_NAME) - [+] with torch.no_grad(): - [+] INFERENCE_LINE - [+] MODEL_NAME.eval() - order: - - below: - - pytorch_channels_last - above: - - pytorch_jit_script - - pytorch_jit_script_ofi - - pytorch_jit_trace - - pytorch_jit_trace_ofi diff --git a/neural_coder/backends/pytorch_jit_script.yaml b/neural_coder/backends/pytorch_jit_script.yaml deleted file mode 100644 index 014cc5177f1..00000000000 --- a/neural_coder/backends/pytorch_jit_script.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] if "jit" not in str(type(MODEL_NAME)): - [+] import torch - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = torch.jit.script(MODEL_NAME) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - order: - - below: - - pytorch_inc_static_quant_fx - - pytorch_inc_static_quant_ipex - - pytorch_inc_dynamic_quant - - pytorch_ipex_fp32 - - pytorch_ipex_bf16 - - pytorch_ipex_int8_static_quant - - pytorch_ipex_int8_dynamic_quant - - pytorch_channels_last - above: diff --git a/neural_coder/backends/pytorch_jit_script_ofi.yaml b/neural_coder/backends/pytorch_jit_script_ofi.yaml deleted file mode 100644 index 2e9c5868d86..00000000000 --- a/neural_coder/backends/pytorch_jit_script_ofi.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - content: - - |- - [+] if "jit" not in str(type(MODEL_NAME)): - [+] import torch - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = torch.jit.optimize_for_inference(torch.jit.script(MODEL_NAME)) - order: - - below: - - pytorch_inc_static_quant_fx - - pytorch_inc_static_quant_ipex - - pytorch_inc_dynamic_quant - - pytorch_ipex_fp32 - - pytorch_ipex_bf16 - - pytorch_ipex_int8_static_quant - - pytorch_ipex_int8_dynamic_quant - - pytorch_channels_last - above: diff --git a/neural_coder/backends/pytorch_jit_trace.yaml b/neural_coder/backends/pytorch_jit_trace.yaml deleted file mode 100644 index e52fb7f62ab..00000000000 --- a/neural_coder/backends/pytorch_jit_trace.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - content: - - |- - [+] if "jit" not in str(type(MODEL_NAME)): - [+] import torch - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = torch.jit.trace(MODEL_NAME, INPUT_NAME, strict=False, check_trace=False) - [+] MODEL_NAME = torch.jit.freeze(MODEL_NAME) - order: - - below: - - pytorch_inc_static_quant_fx - - pytorch_inc_static_quant_ipex - - pytorch_inc_dynamic_quant - - pytorch_ipex_fp32 - - pytorch_ipex_bf16 - - pytorch_ipex_int8_static_quant - - pytorch_ipex_int8_dynamic_quant - - pytorch_channels_last - above: diff --git a/neural_coder/backends/pytorch_jit_trace_ofi.yaml b/neural_coder/backends/pytorch_jit_trace_ofi.yaml deleted file mode 100644 index 3b52bdbd5b1..00000000000 --- a/neural_coder/backends/pytorch_jit_trace_ofi.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - ["insert_below_model_definition_line", "insert_below_input_definition_line"] - content: - - |- - [+] if "jit" not in str(type(MODEL_NAME)): - [+] import torch - [+] with torch.no_grad(): - [+] MODEL_NAME.eval() - [+] MODEL_NAME = torch.jit.optimize_for_inference(torch.jit.trace(MODEL_NAME, INPUT_NAME, strict=False, check_trace=False)) - order: - - below: - - pytorch_inc_static_quant_fx - - pytorch_inc_static_quant_ipex - - pytorch_inc_dynamic_quant - - pytorch_ipex_fp32 - - pytorch_ipex_bf16 - - pytorch_ipex_int8_static_quant - - pytorch_ipex_int8_dynamic_quant - - pytorch_channels_last - above: diff --git a/neural_coder/backends/pytorch_mixed_precision_cpu.yaml b/neural_coder/backends/pytorch_mixed_precision_cpu.yaml deleted file mode 100644 index ad4337f5e57..00000000000 --- a/neural_coder/backends/pytorch_mixed_precision_cpu.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] import torch - [+] with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16): - - 1 - order: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_mixed_precision_cuda.yaml b/neural_coder/backends/pytorch_mixed_precision_cuda.yaml deleted file mode 100644 index 60c597cedd8..00000000000 --- a/neural_coder/backends/pytorch_mixed_precision_cuda.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] import torch - [+] with torch.cuda.amp.autocast(enabled=True, dtype=torch.float16): - - 1 - order: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_mixed_precision_intel_gpu.yaml b/neural_coder/backends/pytorch_mixed_precision_intel_gpu.yaml deleted file mode 100644 index 3b5c86ae977..00000000000 --- a/neural_coder/backends/pytorch_mixed_precision_intel_gpu.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] import torch - [+] with torch.xpu.amp.autocast(dtype=torch.half): - - 1 - order: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_torchdynamo_jit_script.yaml b/neural_coder/backends/pytorch_torchdynamo_jit_script.yaml deleted file mode 100644 index 48281956ca7..00000000000 --- a/neural_coder/backends/pytorch_torchdynamo_jit_script.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] from typing import List - [+] import torch - [+] import torchdynamo - [+] torchdynamo.config.raise_on_backend_error = False - [+] def dynamo_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): - [+] return torch.jit.script(gm) - - |- - [+] import torchdynamo - [+] with torchdynamo.optimize(dynamo_backend): - - 1 - order: - - below: - above: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_torchdynamo_jit_script_ofi.yaml b/neural_coder/backends/pytorch_torchdynamo_jit_script_ofi.yaml deleted file mode 100644 index dcee3603d33..00000000000 --- a/neural_coder/backends/pytorch_torchdynamo_jit_script_ofi.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] from typing import List - [+] import torch - [+] import torchdynamo - [+] torchdynamo.config.raise_on_backend_error = False - [+] def dynamo_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): - [+] return torch.jit.optimize_for_inference(torch.jit.script(gm)) - - |- - [+] import torchdynamo - [+] with torchdynamo.optimize(dynamo_backend): - - 1 - order: - - below: - above: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_torchdynamo_jit_trace.yaml b/neural_coder/backends/pytorch_torchdynamo_jit_trace.yaml deleted file mode 100644 index d3673153986..00000000000 --- a/neural_coder/backends/pytorch_torchdynamo_jit_trace.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - insert_below_model_definition_line - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] from typing import List - [+] import torch - [+] import torchdynamo - [+] torchdynamo.config.raise_on_backend_error = False - [+] def dynamo_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): - [+] return torch.jit.trace(gm, example_inputs) - - |- - [+] import torchdynamo - [+] with torchdynamo.optimize(dynamo_backend): - - 1 - order: - - below: - above: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/pytorch_torchdynamo_jit_trace_ofi.yaml b/neural_coder/backends/pytorch_torchdynamo_jit_trace_ofi.yaml deleted file mode 100644 index 9f9d1eafce5..00000000000 --- a/neural_coder/backends/pytorch_torchdynamo_jit_trace_ofi.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -transformation: - location: - - insert_below_model_definition_line - - insert_above_inference_line - - indent_inference_line - content: - - |- - [+] from typing import List - [+] import torch - [+] import torchdynamo - [+] torchdynamo.config.raise_on_backend_error = False - [+] def dynamo_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): - [+] return torch.jit.optimize_for_inference(torch.jit.trace(gm, example_inputs)) - - |- - [+] import torchdynamo - [+] with torchdynamo.optimize(dynamo_backend): - - 1 - order: - - below: - above: - - below: - above: - - below: - above: diff --git a/neural_coder/backends/template.yaml b/neural_coder/backends/template.yaml deleted file mode 100644 index 1e3b1fa1501..00000000000 --- a/neural_coder/backends/template.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -transformation: - location: - - - - - - - content: - - |- - [+] YOUR CODE LINE 1 - [+] YOUR CODE LINE 2 - [+] YOUR CODE LINE 3 - - |- - [+] YOUR CODE LINE 1 - [+] YOUR CODE LINE 2 - [+] YOUR CODE LINE 3 - - |- - [+] YOUR CODE LINE 1 - [+] YOUR CODE LINE 2 - [+] YOUR CODE LINE 3 - order: - - below: - - - - - above: - - - - - - below: - - - - - above: - - - - - - below: - - - - - above: - - - - diff --git a/neural_coder/coders/__init__.py b/neural_coder/coders/__init__.py deleted file mode 100644 index e833188cc78..00000000000 --- a/neural_coder/coders/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/neural_coder/coders/autoinc/__init__.py b/neural_coder/coders/autoinc/__init__.py deleted file mode 100644 index e833188cc78..00000000000 --- a/neural_coder/coders/autoinc/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
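For reference, the template.yaml removed above documented the schema that every neural_coder backend config followed: a top-level "transformation" mapping whose parallel "location", "content", and "order" lists describe where code is injected, what gets injected (lines prefixed with "[+] "), and how features must be ordered relative to one another. Below is a minimal, illustrative sketch of how such a config could be loaded and its "[+] " prefix expanded into real indentation; it mirrors the yaml.load(..., Loader=yaml.BaseLoader) and "[+] " replacement logic in the harness modules removed below, but the file name "example_backend.yaml" is hypothetical and the snippet is not part of the deleted sources.

import yaml

# Illustrative sketch only: load a backend config shaped like the removed
# template.yaml. "example_backend.yaml" is a hypothetical example file.
with open("example_backend.yaml") as backend_file:
    backend_dict = yaml.load(backend_file, Loader=yaml.BaseLoader)

locations = backend_dict["transformation"]["location"]  # where each block is inserted
contents = backend_dict["transformation"]["content"]    # "[+] "-prefixed code lines

# Expand the "[+] " marker into the indentation of the insertion point,
# as the removed harness.py / autoinc_harness.py did before registering
# the lines to insert.
insert_indent_level = 4
for loc, content in zip(locations, contents):
    lines_to_insert = content.replace("[+] ", " " * insert_indent_level)
    print(f"# {loc}\n{lines_to_insert}\n")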
diff --git a/neural_coder/coders/autoinc/autoinc_harness.py b/neural_coder/coders/autoinc/autoinc_harness.py deleted file mode 100644 index e21b9bfdf8e..00000000000 --- a/neural_coder/coders/autoinc/autoinc_harness.py +++ /dev/null @@ -1,476 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import re -import sys - -import yaml - -from ... import globals -from ...utils.line_operation import ( - get_line_indent_level, - get_line_left_hand_side, - get_line_wo_comment, - is_eval_func_model_name, - single_line_comment_or_empty_line_detection, -) - -logging.basicConfig( - level=globals.logging_level, format="%(asctime)s %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S +0000" -) -logger = logging.getLogger(__name__) - - -class AutoInc_Harness(object): - def __init__(self, backend): - self.backend = backend - - def print_info(self): - for i in globals.list_model_def_instance: - logger.debug(f"i.print_info(): {i.print_info()}") - - # collect file transformation info and register in globals - # (i.e. which file to add which lines at which location) - def register_transformation(self): - backend_file = open(os.path.dirname(__file__) + "/../../backends/" + self.backend + ".yaml") - backend_dict = yaml.load(backend_file, Loader=yaml.BaseLoader) - logger.debug(f"backend_dict: {backend_dict}") - bk_trans_location = backend_dict["transformation"]["location"] # string - bk_trans_content = backend_dict["transformation"]["content"] # string - bk_trans_order = backend_dict["transformation"]["order"] # list - - # modular design - if globals.use_modular: - content = globals.modular_item - bk_trans_content = ["[+] " + content.replace("\n", "\n[+] ")[:-5]] - - list_code = [] - history = set() - for i in globals.list_code_path: - list_code.append(open(i, "r").read()) - for loc in bk_trans_location: - # PART 1 - "model_definition_line" - if "insert_below_model_definition_line" in loc: - for ins in globals.list_model_def_instance: - model_name = ins.model_name - if model_name in history and globals.code_domain == "torchvision": - continue - else: - history.add(model_name) - file_path = ins.file_path - model_def_line_idx = ins.model_def_line_idx - - file_path_idx = globals.list_code_path.index(file_path) - lines = list_code[file_path_idx].split("\n") - line_idx = 0 - - # to check if this model has an inference line is in the file - # if not, skip this model - to_transform = False - for i in range(len(lines)): - line = lines[i] - if model_name + "(" in line or ( - model_name + "." 
in line and line.find(model_name) < line.find(".") and "(" in line - ): - to_transform = True - if not to_transform and globals.code_domain == "onnx": - pass - elif not to_transform: - continue - ### information - - # search DataLoader definition in this file - dataloader_name = "" - for i in range(len(lines)): - line = lines[i] - if not single_line_comment_or_empty_line_detection(line): - if ("DataLoader(" in line and "=" in line and line.find("=") < line.find("DataLoader")) or ( - "dataloader" in line and "=" in line and line.find("=") > line.find("dataloader") - ): - dataloader_def_line_indent_level = get_line_indent_level(line) - dataloader_name = get_line_left_hand_side(line) - dataloader_def_line_idx = i - - # search inference line in this file, and also input_name - inference_line = "" - input_name = "" - for i in range(len(lines)): - line = lines[i] - is_eval_func, eval_func_type = is_eval_func_model_name(model_name, line) - if not single_line_comment_or_empty_line_detection(line): - if is_eval_func and "[coder-enabled]" not in line: - inference_line = line - input_name = line[line.find("(") + 1 : line.find(")")].replace("*", "") - # get "c" in "a = b(**c)" - - # search input definition in this file (if any) - if input_name != "": - for i in range(len(lines)): - line = lines[i] - if not single_line_comment_or_empty_line_detection(line): - if input_name in line and "=" in line and line.find("=") > line.find(input_name): - input_def_line_indent_level = get_line_indent_level(line) - input_def_line_idx = i - - # search model definition line and its end line index - # (only has 1 model definition line, because it's in loop of globals.list_model_def_instance) - for i in range(len(lines)): - line = lines[i] - if line_idx == model_def_line_idx and "[coder-enabled]" not in line: - model_def_line_indent_level = get_line_indent_level(line) - if ")" in line and line.count(")") == line.count("("): # e.g. model = Net(xxx) - model_definition_end_line_idx = line_idx + 1 - else: # e.g. model = Net(xxx, \n xxx, \n xxx) - do_search = True - i_search = 1 - while do_search: - following_line = lines[line_idx + i_search] - if ")" in following_line and following_line.count(")") > following_line.count("("): - do_search = False - i_search += 1 - model_definition_end_line_idx = line_idx + i_search - line_idx += 1 - - ### check - - bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)] - if file_path_idx == 0 and (globals.code_domain in ["transformers_trainer", "torchvision", "onnx"]): - pass - elif ( - ("INPUT_NAME" in bk_trans_content_this and input_name == "") - or ("DATALOADER_NAME" in bk_trans_content_this and dataloader_name == "") - or ("INFERENCE_LINE" in bk_trans_content_this and inference_line == "") - ): - logger.info( - f"Skipped due to not having enough information required by " - "the transformation content specified in the config file " - "(e.g. INPUT_NAME, DATALOADER_NAME, INFERENCE_LINE). 
" - f"File path: {file_path}" - ) - continue - - ### location - - # search for features to put below them - """ - Example (psuedo-code): - model = Net() - # jit script begin mark - model = torch.jit.script(model) - # jit script end mark (feature name + model name to handle multi-model situation) - model = ipex.optimize(model, "fp32") # "ipex fp32" must be put below "jit script" - """ - put_below_idx = 0 - for i in range(len(lines)): - for item in bk_trans_order[0]["below"]: - line = lines[i] - if item in line and model_name in line: - put_below_idx = max(put_below_idx, i + 1) - - # search for features to put above them - put_above_idx = sys.maxsize - for i in range(len(lines)): - for item in bk_trans_order[0]["above"]: - line = lines[i] - if item in line and model_name in line: - put_above_idx = min(put_above_idx, i) - - # location assignment (below model def / dataloader def / input def) - torchvision_indent = -1 - if file_path_idx == 0 and globals.code_domain == "transformers_trainer": - for i in range(len(lines)): - line = lines[i] - if re.findall("trainer = .*Trainer", line): - if "(" in line and line.count(")") == line.count("("): - trans_insert_location = i + 1 - else: - do_search = True - i_search = 1 - while do_search: - following_line = lines[i + i_search] - if ")" in following_line and following_line.count(")") > following_line.count( - "(" - ): - do_search = False - i_search += 1 - trans_insert_location = i + i_search - trans_insert_location = min(max(trans_insert_location, put_below_idx), put_above_idx) - elif file_path_idx == 0 and globals.code_domain == "torchvision": - trans_insert_location = 1 - for i in range(len(lines)): - line = lines[i] - if ( - "val_loader" in line - and "aux_val_loader" not in line - and ( - "torch.utils.data.DataLoader" in line - or "utils.data.DataLoader" in line - or "DataLoader" in line - ) - ): - torchvision_indent = get_line_indent_level(line) - if "(" in line and line.count(")") == line.count("("): - trans_insert_location = i + 1 - else: - do_search = True - i_search = 1 - while do_search: - following_line = lines[i + i_search] - if ")" in following_line and following_line.count(")") > following_line.count( - "(" - ): - do_search = False - i_search += 1 - trans_insert_location = i + i_search - trans_insert_location = min(max(trans_insert_location, put_below_idx), put_above_idx) - else: - if "insert_below_model_definition_line" in loc: - trans_insert_location = min( - max(model_definition_end_line_idx, put_below_idx), put_above_idx - ) - if "insert_below_dataloader_definition_line" in loc: - try: - dataloader_def_line_idx - except: - logger.warning( - f"Skipped due to not having dataloader definition required by " - "the transformation content specified in the config file. " - f"File path: {file_path}" - ) - continue - trans_insert_location = max( - trans_insert_location, - min(max(dataloader_def_line_idx + 1, put_below_idx), put_above_idx), - ) - if "insert_below_input_definition_line" in loc: - try: - input_def_line_idx - except: - logger.warning( - f"Skipped due to not having input definition required by " - "the transformation content specified in the config file. 
" - f"File path: {file_path}" - ) - continue - trans_insert_location = max( - trans_insert_location, min(max(input_def_line_idx + 1, put_below_idx), put_above_idx) - ) - - insert_indent_level = ( - get_line_indent_level(lines[trans_insert_location - 1]) - if torchvision_indent == -1 - else torchvision_indent - ) - ### content - # lines to insert - lines_to_insert = bk_trans_content_this - if globals.code_domain == "transformers_trainer": - lines_to_insert = lines_to_insert.replace( - "EVAL_FUNC_LINES", globals.list_eval_func_lines[0] - ).replace("DATALOADER_NAME", globals.list_calib_dataloader_name[0]) - elif globals.code_domain == "transformers_no_trainer": - pass - elif globals.code_domain == "torchvision": - lines_to_insert = lines_to_insert.replace( - "EVAL_FUNC_LINES", globals.list_eval_func_lines[0] - ).replace("DATALOADER_NAME", globals.list_calib_dataloader_name[0]) - elif globals.code_domain == "onnx": - lines_to_insert = lines_to_insert.replace( - "EVAL_FUNCTION_NAME", globals.list_eval_func_name[0] - ).replace("DATALOADER_NAME", globals.list_calib_dataloader_name[0]) - else: - lines_to_insert = lines_to_insert.replace("DATALOADER_NAME", dataloader_name).replace( - "def eval_func", "# def eval_func" - ) - - optimum_quant_config_line = ( - 'IncQuantizationConfig.from_pretrained("' + globals.optimum_quant_config + '")' - ) - - # replace [+] indication with empty - lines_to_insert = lines_to_insert.replace("[+] ", " " * insert_indent_level) - # add begin indicator - lines_to_insert = ( - " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " for " - + model_name - + " [Beginning Line]\n" - + lines_to_insert - ) - # replace INDICATIONS with real stuff - lines_to_insert = ( - lines_to_insert.replace("MODEL_NAME", model_name) - .replace("INPUT_NAME", input_name) - .replace("EVAL_FUNC_LINES", "# return 1") - .replace("OPTIMUM_QUANT_CONFIG", optimum_quant_config_line) - .replace("\n", " # [coder-enabled]\n") - ) - # add end indicator - lines_to_insert += ( - " # [coder-enabled]\n" - + " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " for " - + model_name - + " [Ending Line] # [coder-enabled]" - ) - - ### register - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append([lines_to_insert.count("\n") + 1]) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append(lines_to_insert.count("\n") + 1) - globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - # PART 2 - "inference line" - if ( - "indent_inference_line" in loc - or "insert_above_inference_line" in loc - or "insert_below_inference_line" in loc - ): - for file_path in globals.list_code_path: - code = open(file_path, "r").read() - lines = code.split("\n") - line_idx = 0 - for i in range(len(lines)): - line = lines[i] - for model_name in globals.list_model_name: - is_eval_func, eval_func_type = is_eval_func_model_name(model_name, line) - if is_eval_func and "[coder-enabled]" not in line: - if eval_func_type == "non-forward": - pass # do something - - inference_line_indent_level = get_line_indent_level(line) - - if "indent_inference_line" in loc: - 
bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)] - add_indent_level = int(bk_trans_content_this) - - trans_indent_location = [] - # indent can have multiple location, so is a list of numbers - trans_indent_level = [] - - if ")" in line: # e.g. model = Net(xxx) - trans_indent_location.append(line_idx) - trans_indent_level.append(add_indent_level) - else: # e.g. model = Net(xxx, \n xxx, \n xxx) - trans_indent_location.append(line_idx) - trans_indent_level.append(add_indent_level) - do_search = True - i_search = 1 - while do_search: - trans_indent_location.append(line_idx + i_search) - trans_indent_level.append(add_indent_level) - following_line = lines[line_idx + i_search] - if ")" in following_line: - do_search = False - i_search += 1 - - ### register - - if file_path not in globals.list_trans_indent_modified_file: - globals.list_trans_indent_modified_file.append(file_path) - globals.list_trans_indent_location_idxs.append(trans_indent_location) - globals.list_trans_indent_level.append(trans_indent_level) - else: - idx = globals.list_trans_indent_modified_file.index(file_path) - for i in trans_indent_location: - globals.list_trans_indent_location_idxs[idx].append(i) - for i in trans_indent_level: - globals.list_trans_indent_level[idx].append(i) - - if "insert_above_inference_line" in loc: - idx_offset = 0 - elif "insert_below_inference_line" in loc: - idx_offset = 1 - - if "insert_above_inference_line" in loc or "insert_below_inference_line" in loc: - bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)] - - trans_insert_location = line_idx + idx_offset - - insert_indent_level = inference_line_indent_level - - ### content - - # lines to insert - lines_to_insert = bk_trans_content_this - # replace [+] indication with empty - lines_to_insert = lines_to_insert.replace("[+] ", " " * insert_indent_level) - # add begin indicator - lines_to_insert = ( - " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " [Beginning Line] \n" - + lines_to_insert - ) - # replace INDICATIONS with real stuff - # (for now, inference_line related transformations ) - # (have nothing to do with input, dataloader etc, ) - # (so no need to put replaces here.) 
- lines_to_insert = lines_to_insert.replace("\n", " # [coder-enabled]\n") - # add end indicator - lines_to_insert += ( - " # [coder-enabled]\n" - + " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " [Ending Line] # [coder-enabled]" - ) - - # customized argument - if self.backend == "pytorch_benchmark": - lines_to_insert = lines_to_insert.replace( - "NUM_BENCHMARK_ITERATION", globals.num_benchmark_iteration - ) - lines_to_insert = lines_to_insert.replace("ACCURACY_MODE", str(False)) - lines_to_insert = lines_to_insert.replace("EVAL_FUNC_LINES", line.strip()) - - ### register - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append( - [lines_to_insert.count("\n") + 1] - ) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append( - lines_to_insert.count("\n") + 1 - ) - globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - line_idx += 1 - - # PART 3 - for customized location - - logger.debug(f"globals.list_trans_insert_modified_file: {globals.list_trans_insert_modified_file}") - logger.debug(f"globals.list_trans_insert_location_idxs: {globals.list_trans_insert_location_idxs}") - logger.debug(f"globals.list_trans_insert_number_insert_lines: {globals.list_trans_insert_number_insert_lines}") - logger.debug(f"globals.list_trans_insert_lines_to_insert: {globals.list_trans_insert_lines_to_insert}") diff --git a/neural_coder/coders/autoinc/calib_dataloader.py b/neural_coder/coders/autoinc/calib_dataloader.py deleted file mode 100644 index 8d287837676..00000000000 --- a/neural_coder/coders/autoinc/calib_dataloader.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ... 
import globals - - -class Calib_Dataloader(object): - def __init__(self): - pass - - def register_transformation(self): - if globals.code_domain == "transformers_trainer": - globals.list_calib_dataloader_name.append("trainer.get_eval_dataloader()") - elif globals.code_domain == "transformers_no_trainer": - pass - elif globals.code_domain == "torchvision": - globals.list_calib_dataloader_name.append("val_loader") - elif globals.code_domain == "onnx": - codes = open(globals.list_code_path[0], "r").read().split("\n") - for line in codes: - line = line.strip() - if "loader" in line and "=" in line: - end = 0 - for i in range(len(line)): - if line[i] == "=": - end = i - if line[end - 1] == " ": - globals.list_calib_dataloader_name.append(line[: end - 1]) - else: - globals.list_calib_dataloader_name.append(line[:end]) - else: # random model - pass diff --git a/neural_coder/coders/autoinc/domain.py b/neural_coder/coders/autoinc/domain.py deleted file mode 100644 index ed4437d7455..00000000000 --- a/neural_coder/coders/autoinc/domain.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - - -def determine_domain(path) -> str: - codes = open(path, "r").read() - if ("import torchvision.models" in codes or "from torchvision.models" in codes) and "val_loader" in codes: - return "torchvision" - elif re.search(r"from (.*)transformers import", codes) and re.search(r"(.*)Model(.*)", codes): - if "Trainer" in codes or "trainer" in codes: - return "transformers_trainer" - else: - return "transformers_no_trainer" - elif "onnx.load(" in codes: - return "onnx" - elif "keras.Sequential" in codes: - return "keras_script" - elif "from tensorflow import" in codes or "import tensorflow" in codes: - return "tensorflow_keras_model" - else: - return "random model" diff --git a/neural_coder/coders/autoinc/eval_func.py b/neural_coder/coders/autoinc/eval_func.py deleted file mode 100644 index 9ea65d36315..00000000000 --- a/neural_coder/coders/autoinc/eval_func.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ... 
import globals -from ...utils.line_operation import get_line_indent_level - - -class Eval_Func(object): - def __init__(self): - pass - - def register_transformation(self): - if globals.code_domain == "transformers_trainer": - lines = [ - "trainer.model = model", - "metrics = trainer.evaluate() # check if all tasks do not have parameters in evaluate()", - "keys = [", - ' "eval_accuracy",', - ' "eval_bleu",', - ' "eval_matthews_correlation",', - ' "eval_pearsonr",', - ' "eval_precision",', - ' "eval_recall",', - ' "eval_rouge",', - ' "eval_sacrebleu",', - ' "eval_spearmanr",', - ' "eval_mcc",', - ' "eval_acc",', - ' "eval_acc_and_f1",', - ' "eval_corr",', - ' "eval_mnli/acc",', - ' "eval_mnli-mm/acc",', - ' "eval_exact_match",', - ' "eval_f1",', - "] # METRIC_TAGS in transformers", - "for key in keys:", - " if key in metrics.keys():", - " return metrics[key]", - 'assert False, "No metric returned, Please check inference metric!"', - ] - for index, line in enumerate(lines): - if index != 0: - lines[index] = "[+] " + " " * 4 + line - lines = "\n".join(lines) - globals.list_eval_func_lines.append(lines) - elif globals.code_domain == "transformers_no_trainer": - pass - elif globals.code_domain == "torchvision": - # search for 'validate()' - codes = open(globals.list_code_path[0], "r").read().split("\n") - lines = [] - for index, line in enumerate(codes): - if "def validate(" in line: - start = index - start_indent = get_line_indent_level(codes[start]) - for i in range(start + 1, len(codes)): - if codes[i] == "": - continue - line_indent = get_line_indent_level(codes[i]) - if line_indent > start_indent: - change_indent = line_indent - 4 - lines.append(" " * change_indent + codes[i].lstrip()) - # no 'print' - else: - break - break - else: - pass - for index, line in enumerate(lines): - if "return" in line: - indent = get_line_indent_level(line) - line_list = line.split() - line_list[1] = "float(" + line_list[1] + ")" - lines[index] = " " * indent + " ".join(line_list) - for index, line in enumerate(lines): - if index != 0: - lines[index] = "[+] " + " " * 8 + line - lines = "\n".join(lines) - globals.list_eval_func_lines.append(lines) - elif globals.code_domain == "onnx": - # look for sess = onnxruntime.InferenceSession(MODEL_NAME.SerializeToString(), None) - codes = open(globals.list_code_path[0], "r").read().split("\n") - start = 0 - for idx, line in enumerate(codes): - if "onnxruntime.InferenceSession(" in line: - start = idx - break - line_indent = get_line_indent_level(codes[start]) - target = None - for i in range(start, -1, -1): - if "def" in codes[i] and (line_indent - get_line_indent_level(codes[i])) == 4: - target = codes[i].split(" ")[1] - break - func_name = None - for i in range(len(target)): - if target[i] == "(": - globals.list_eval_func_name.append(target[:i]) - break - else: # random model - pass diff --git a/neural_coder/coders/pytorch/__init__.py b/neural_coder/coders/pytorch/__init__.py deleted file mode 100644 index e833188cc78..00000000000 --- a/neural_coder/coders/pytorch/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/neural_coder/coders/pytorch/batch_size.py b/neural_coder/coders/pytorch/batch_size.py deleted file mode 100644 index ec61e19d70f..00000000000 --- a/neural_coder/coders/pytorch/batch_size.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ... import globals - - -class BatchSizeCoder(object): - def __init__(self, file) -> None: - self.file = file - self.result = [] - - def transform(self): - lines = self.file.split("\n") - for line in lines: - if self.not_modify(line): - new_line = self.modify(line) - self.result.append(new_line) - else: - self.result.append(line) - for index, line in enumerate(self.result): - if index != len(self.result) - 1: - self.result[index] += "\n" - return "".join(self.result) - - def not_modify(self, s): - if "batch_size" in s and "=" in s: - return True - return False - - def modify(self, s): - idx = s.find("batch_size") - s_right = s[idx:] - if " = " in s_right: - index = s.find(" = ") - s_left = s[:index] - if "batch_size" in s_left: - if "," in s_left: - index1 = s_left.find(",") - index2 = s_left.find("batch_size") - if index1 > index2: - slice1 = s_left[:index1] - else: - s_left1 = s_left[:index2] - s_right = s_left[index2:] - index3 = s_left1.rfind(",") - if "," in s_right: - index4 = s_right.find(",") + len(s_left1) - slice1 = s_left[index3 + 2 : index4] - else: - slice1 = s_left[index3 + 2 : index] - s1 = slice1 + " = " + globals.target_batch_size - s = s[:] + "\n" + s1 - else: - s_right = s[index + 3 :] - s_right = s_right.replace(s_right, globals.target_batch_size) - s = s_left + " = " + s_right - elif "batch_size=" in s: - idx = s.find("batch_size=") - s_right = s[idx:] - idx2 = s_right.find("batch_size") - if "," in s_right: - index2 = s_right.find(",") - old = s_right[idx2:index2] - s = s.replace(old, "batch_size=" + globals.target_batch_size) - elif ")" in s_right: - index2 = s_right.find(")") - old = s_right[idx2:index2] - s = s.replace(old, "batch_size=" + globals.target_batch_size) - else: - old = s_right[idx2:] - s = s.replace(old, "batch_size=" + globals.target_batch_size) - return s diff --git a/neural_coder/coders/pytorch/change_trainer_to_nlptrainer.py b/neural_coder/coders/pytorch/change_trainer_to_nlptrainer.py deleted file mode 100644 index 7c0d71ce534..00000000000 --- a/neural_coder/coders/pytorch/change_trainer_to_nlptrainer.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ...utils.line_operation import get_line_indent_level - - -class TrainerToNLPTrainer(object): - def __init__(self, file) -> None: - self.file = file - self.result = [] - - def transform(self): - lines = self.file.split("\n") - - for line in lines: - if self.is_modify(line): - new_line = self.modify(line) - self.result.append(new_line) - else: - self.result.append(line) - for index, line in enumerate(self.result): - if index != len(self.result) - 1: - self.result[index] += "\n" - return "".join(self.result) - - def is_modify(self, s): - if "trainer = Trainer(" in s: - return True - else: - return False - - def modify(self, s): - old = "Trainer" - s = s.replace(old, "NLPTrainer") - return s diff --git a/neural_coder/coders/pytorch/cuda_to_cpu.py b/neural_coder/coders/pytorch/cuda_to_cpu.py deleted file mode 100644 index 2d6c9c8a13e..00000000000 --- a/neural_coder/coders/pytorch/cuda_to_cpu.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ...utils.line_operation import get_line_indent_level - - -class CudaToCpu(object): - def __init__(self, file) -> None: - self.file = file - self.result = [] - - def transform(self): - lines = self.file.split("\n") - # determine if jump the whole file (in cases where: args.device, args.cuda etc) - to_jump = False - for line in lines: - if self.is_jump_file(line): - to_jump = True - break - - if to_jump: # this file do not need transformation - for line in lines: - self.result.append(line) - else: # this file might need transformation - for line in lines: - if self.is_delete(line): - indent_level = get_line_indent_level(line) - new_line = " " * indent_level + "pass" - self.result.append(new_line) - elif self.is_modify(line): - new_line = self.change_to_cpu(line) - self.result.append(new_line) - else: - self.result.append(line) - for index, line in enumerate(self.result): - if index != len(self.result) - 1: - self.result[index] += "\n" - return "".join(self.result) - - def is_jump_file(self, s): - if ( - "args.device" in s - or "args.cpu" in s - or "args.gpu" in s - or "args.cuda" in s - or "torch.cuda.is_available()" in s - ): - return True - else: - return False - - def is_delete(self, s): - if "cuda." 
in s and "=" not in s and "if" not in s: - return True - else: - return False - - def is_modify(self, s): - if "'cuda'" in s or '"cuda"' in s or "'cuda:0'" in s or '"cuda:0"' in s or "cuda()" in s: - return True - else: - return False - - def change_to_cpu(self, s): - if "'cuda'" in s or "'cuda:0'" in s: - old = "'cuda'" if "'cuda'" in s else "'cuda:0'" - s = s.replace(old, "'cpu'") - elif '"cuda"' in s or '"cuda:0"' in s: - old = '"cuda"' if '"cuda"' in s else '"cuda:0"' - s = s.replace(old, '"cpu"') - elif "cuda()" in s: - old = "cuda" - s = s.replace(old, "cpu") - return s diff --git a/neural_coder/coders/pytorch/dummy_dataloader.py b/neural_coder/coders/pytorch/dummy_dataloader.py deleted file mode 100644 index 64e745cdacf..00000000000 --- a/neural_coder/coders/pytorch/dummy_dataloader.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ... import globals -from ...utils.line_operation import get_line_indent_level, get_line_left_hand_side, is_eval_func_model_name - -logging.basicConfig( - level=globals.logging_level, format="%(asctime)s %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S +0000" -) -logger = logging.getLogger(__name__) - - -class DummyDataLoader(object): - def __init__(self, list_model_def_instance): - self.list_model_def_instance = list_model_def_instance - - def print_info(self): - for i in self.list_model_def_instance: - logger.debug(f"i.print_info(): {i.print_info()}") - - # collect file transformation info and register (store) in globals - # (i.e. 
which file to add which lines at which location) - def register_transformation(self): - list_code = [] - for i in globals.list_code_path: - list_code.append(open(i, "r").read()) - - for ins in self.list_model_def_instance: - model_name = ins.model_name - file_path = ins.file_path - model_def_line_idx = ins.model_def_line_idx - function_def_line_idx = ins.function_def_line_idx - class_name = ins.class_name - - # transformation - file_path_idx = globals.list_code_path.index(file_path) - lines = list_code[file_path_idx].split("\n") - line_idx = 0 - - # search DataLoader - dataloader_name = "" - for i in range(len(lines)): # each item is a str of this code line - line = lines[i] - if "DataLoader(" in line and "=" in line and line.find("=") < line.find("DataLoader"): - dataloader_name = get_line_left_hand_side(line) - dataloader_def_line_idx = i - - if dataloader_name != "": - return - else: - input_dimension_str = "3, 224, 224)" - for i in range(len(lines)): - line = lines[i] - if ("input" in line and "=" in line and line.find("=") > line.find("input")) or ( - "image" in line and "=" in line and line.find("=") > line.find("image") - ): - input_dimension_str = line[line.find(",") + 2 :] - - for i in range(len(lines)): - line = lines[i] - if line_idx == model_def_line_idx: - indent_level = get_line_indent_level(line) - lines_to_insert = "" - lines_to_insert += " " * indent_level + "import torch" + "\n" - lines_to_insert += " " * indent_level + "from torch.utils.data import Dataset" + "\n" - lines_to_insert += " " * indent_level + "class DummyDataset(Dataset):" + "\n" - lines_to_insert += ( - " " * indent_level + " def __init__(self, *shapes, num_samples: int = 10000):" + "\n" - ) - lines_to_insert += " " * indent_level + " super().__init__()" + "\n" - lines_to_insert += " " * indent_level + " self.shapes = shapes" + "\n" - lines_to_insert += " " * indent_level + " self.num_samples = num_samples" + "\n" - lines_to_insert += " " * indent_level + " def __len__(self):" + "\n" - lines_to_insert += " " * indent_level + " return self.num_samples" + "\n" - lines_to_insert += " " * indent_level + " def __getitem__(self, idx: int):" + "\n" - lines_to_insert += " " * indent_level + " sample = []" + "\n" - lines_to_insert += " " * indent_level + " for shape in self.shapes:" + "\n" - lines_to_insert += " " * indent_level + " spl = torch.rand(*shape)" + "\n" - lines_to_insert += " " * indent_level + " sample.append(spl)" + "\n" - lines_to_insert += " " * indent_level + " return sample" + "\n" - lines_to_insert += " " * indent_level + "from torch.utils.data import DataLoader" + "\n" - lines_to_insert += ( - " " * indent_level - + "dummy_dataset = DummyDataset((" - + input_dimension_str - + ", (1, ))" - + "\n" - ) - lines_to_insert += ( - " " * indent_level + "dummy_dataloader = DataLoader(dummy_dataset, batch_size=1)" - ) - - trans_insert_location = 0 - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append([lines_to_insert.count("\n") + 1]) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append(lines_to_insert.count("\n") + 1) - 
globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - line_idx += 1 - - logger.debug(f"globals.list_trans_insert_modified_file: {globals.list_trans_insert_modified_file}") - logger.debug(f"globals.list_trans_insert_location_idxs: {globals.list_trans_insert_location_idxs}") - logger.debug(f"globals.list_trans_insert_number_insert_lines: {globals.list_trans_insert_number_insert_lines}") - logger.debug(f"globals.list_trans_insert_lines_to_insert: {globals.list_trans_insert_lines_to_insert}") diff --git a/neural_coder/coders/pytorch/harness.py b/neural_coder/coders/pytorch/harness.py deleted file mode 100644 index 3921383791e..00000000000 --- a/neural_coder/coders/pytorch/harness.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import sys - -import yaml - -from ... import globals -from ...utils.line_operation import ( - get_line_indent_level, - get_line_left_hand_side, - get_line_wo_comment, - is_eval_func_model_name, - single_line_comment_or_empty_line_detection, -) - -logging.basicConfig( - level=globals.logging_level, format="%(asctime)s %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S +0000" -) -logger = logging.getLogger(__name__) - - -class Harness(object): - def __init__(self, backend): - self.backend = backend - - def print_info(self): - for i in globals.list_model_def_instance: - logger.debug(f"i.print_info(): {i.print_info()}") - - # collect file transformation info and register in globals - # (i.e. which file to add which lines at which location) - def register_transformation(self): - backend_file = open(os.path.dirname(__file__) + "/../../backends/" + self.backend + ".yaml") - backend_dict = yaml.load(backend_file, Loader=yaml.BaseLoader) - logger.debug(f"backend_dict: {backend_dict}") - - bk_trans_location = backend_dict["transformation"]["location"] # string - bk_trans_content = backend_dict["transformation"]["content"] # string - bk_trans_order = backend_dict["transformation"]["order"] # list - - list_code = [] - for i in globals.list_code_path: - list_code.append(open(i, "r").read()) - - for loc in bk_trans_location: - # PART 1 - "model_definition_line" - if "insert_below_model_definition_line" in loc: - for ins in globals.list_model_def_instance: - model_name = ins.model_name - file_path = ins.file_path - model_def_line_idx = ins.model_def_line_idx - - file_path_idx = globals.list_code_path.index(file_path) - lines = list_code[file_path_idx].split("\n") - line_idx = 0 - - # to check if this model has an inference line is in the file - # if not, skip this model - to_transform = False - for i in range(len(lines)): - line = lines[i] - if model_name + "(" in line or ( - model_name + "." 
in line and line.find(model_name) < line.find(".") and "(" in line - ): - to_transform = True - if not to_transform: - continue - - ### information - - # search DataLoader definition in this file - dataloader_name = "" - for i in range(len(lines)): - line = lines[i] - if not single_line_comment_or_empty_line_detection(line): - if ("DataLoader(" in line and "=" in line and line.find("=") < line.find("DataLoader")) or ( - "dataloader" in line and "=" in line and line.find("=") > line.find("dataloader") - ): - dataloader_def_line_indent_level = get_line_indent_level(line) - dataloader_name = get_line_left_hand_side(line) - dataloader_def_line_idx = i - - # search inference line in this file, and also input_name - inference_line = "" - input_name = "" - for i in range(len(lines)): - line = lines[i] - is_eval_func, eval_func_type = is_eval_func_model_name(model_name, line) - if not single_line_comment_or_empty_line_detection(line): - if is_eval_func and "[coder-enabled]" not in line: - inference_line = line - input_name = line[line.find("(") + 1 : line.find(")")].replace("*", "") - # get "c" in "a = b(**c)" - - # search input definition in this file (if any) - if input_name != "": - for i in range(len(lines)): - line = lines[i] - if not single_line_comment_or_empty_line_detection(line): - if input_name in line and "=" in line and line.find("=") > line.find(input_name): - input_def_line_indent_level = get_line_indent_level(line) - input_def_line_idx = i - - # search trainer definition in this file (for transformers trainer only) - trainer_def_line_idx = -1 - for i in range(len(lines)): - line = lines[i] - if not single_line_comment_or_empty_line_detection(line): - if "trainer = Trainer(" in line: - trainer_def_line_indent_level = get_line_indent_level(line) - trainer_def_line_idx = i - - # search model definition line and its end line index - # (only has 1 model definition line, because it's in loop of globals.list_model_def_instance) - for i in range(len(lines)): - line = lines[i] - if line_idx == model_def_line_idx and "[coder-enabled]" not in line: - model_def_line_indent_level = get_line_indent_level(line) - if ")" in line and line.count(")") == line.count("("): # e.g. model = Net(xxx) - model_definition_end_line_idx = line_idx + 1 - else: # e.g. model = Net(xxx, \n xxx, \n xxx) - do_search = True - i_search = 1 - while do_search: - following_line = lines[line_idx + i_search] - if ")" in following_line and following_line.count(")") > following_line.count("("): - do_search = False - i_search += 1 - model_definition_end_line_idx = line_idx + i_search - line_idx += 1 - - ### check - - bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)] - - if ( - ("INPUT_NAME" in bk_trans_content_this and input_name == "") - or ("DATALOADER_NAME" in bk_trans_content_this and dataloader_name == "") - or ("INFERENCE_LINE" in bk_trans_content_this and inference_line == "") - ): - logger.info( - f"Skipped due to not having enough information required by " - "the transformation content specified in the config file " - "(e.g. INPUT_NAME, DATALOADER_NAME, INFERENCE_LINE). 
" - f"File path: {file_path}" - ) - continue - - ### location - - # search for features to put below them - """ - Example (psuedo-code): - model = Net() - # jit script begin mark - model = torch.jit.script(model) - # jit script end mark (feature name + model name to handle multi-model situation) - model = ipex.optimize(model, "fp32") # "ipex fp32" must be put below "jit script" - """ - put_below_idx = 0 - for i in range(len(lines)): - for item in bk_trans_order[0]["below"]: - line = lines[i] - if item in line and model_name in line: - put_below_idx = max(put_below_idx, i + 1) - - # search for features to put above them - put_above_idx = sys.maxsize - for i in range(len(lines)): - for item in bk_trans_order[0]["above"]: - line = lines[i] - if item in line and model_name in line: - put_above_idx = min(put_above_idx, i) - - # location assignment (below model def / dataloader def / input def) - if "insert_below_model_definition_line" in loc: - trans_insert_location = min(max(model_definition_end_line_idx, put_below_idx), put_above_idx) - if trainer_def_line_idx > 0: - trans_insert_location = trainer_def_line_idx - 1 - # for transformers trainer to put right above trainer def - if "insert_below_dataloader_definition_line" in loc: - try: - dataloader_def_line_idx - except: - logger.warning( - f"Skipped due to not having dataloader definition required by " - "the transformation content specified in the config file. " - f"File path: {file_path}" - ) - continue - trans_insert_location = max( - trans_insert_location, min(max(dataloader_def_line_idx + 1, put_below_idx), put_above_idx) - ) - if "insert_below_input_definition_line" in loc: - try: - input_def_line_idx - except: - logger.warning( - f"Skipped due to not having input definition required by " - "the transformation content specified in the config file. 
" - f"File path: {file_path}" - ) - continue - trans_insert_location = max( - trans_insert_location, min(max(input_def_line_idx + 1, put_below_idx), put_above_idx) - ) - - insert_indent_level = get_line_indent_level(lines[trans_insert_location - 1]) - if trainer_def_line_idx > 0: # for transformers trainer to put right above trainer def - insert_indent_level = get_line_indent_level(lines[trans_insert_location]) - ### content - - # lines to insert - lines_to_insert = bk_trans_content_this - # replace [+] indication with empty - lines_to_insert = lines_to_insert.replace("[+] ", " " * insert_indent_level) - # add begin indicator - lines_to_insert = ( - " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " for " - + model_name - + " [Beginning Line]\n" - + lines_to_insert - ) - # replace INDICATIONS with real stuff - lines_to_insert = ( - lines_to_insert.replace("MODEL_NAME", model_name) - .replace("INPUT_NAME", input_name) - .replace("DATALOADER_NAME", dataloader_name) - .replace("INFERENCE_LINE", inference_line.strip()) - .replace("\n", " # [coder-enabled]\n") - ) - # add end indicator - lines_to_insert += ( - " # [coder-enabled]\n" - + " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " for " - + model_name - + " [Ending Line] # [coder-enabled]" - ) - - ### register - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append([lines_to_insert.count("\n") + 1]) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append(lines_to_insert.count("\n") + 1) - globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - # PART 2 - "inference line" - if ( - "indent_inference_line" in loc - or "insert_above_inference_line" in loc - or "insert_below_inference_line" in loc - ): - for file_path in globals.list_code_path: - code = open(file_path, "r").read() - lines = code.split("\n") - line_idx = 0 - for i in range(len(lines)): - line = lines[i] - for model_name in globals.list_model_name: - is_eval_func, eval_func_type = is_eval_func_model_name(model_name, line) - if is_eval_func and "[coder-enabled]" not in line: - if eval_func_type == "non-forward": - pass # do something - inference_line = line - inference_line_indent_level = get_line_indent_level(line) - - if "indent_inference_line" in loc: - bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)] - add_indent_level = int(bk_trans_content_this) - - trans_indent_location = [] - # indent can have multiple location, so is a list of numbers - trans_indent_level = [] - - if ")" in line: # e.g. model = Net(xxx) - trans_indent_location.append(line_idx) - trans_indent_level.append(add_indent_level) - else: # e.g. 
model = Net(xxx, \n xxx, \n xxx) - trans_indent_location.append(line_idx) - trans_indent_level.append(add_indent_level) - do_search = True - i_search = 1 - while do_search: - trans_indent_location.append(line_idx + i_search) - trans_indent_level.append(add_indent_level) - following_line = lines[line_idx + i_search] - if ")" in following_line: - do_search = False - i_search += 1 - - ### register - - if file_path not in globals.list_trans_indent_modified_file: - globals.list_trans_indent_modified_file.append(file_path) - globals.list_trans_indent_location_idxs.append(trans_indent_location) - globals.list_trans_indent_level.append(trans_indent_level) - else: - idx = globals.list_trans_indent_modified_file.index(file_path) - for i in trans_indent_location: - globals.list_trans_indent_location_idxs[idx].append(i) - for i in trans_indent_level: - globals.list_trans_indent_level[idx].append(i) - - if "insert_above_inference_line" in loc: - idx_offset = 0 - elif "insert_below_inference_line" in loc: - if ")" in line: # e.g. model = Net(xxx) - idx_offset = 1 - else: # e.g. model = Net(xxx, \n xxx, \n xxx) - do_search = True - i_search = 1 - while do_search: - following_line = lines[line_idx + i_search] - if ")" in following_line: - do_search = False - i_search += 1 - inference_line = ( - inference_line - + "\n" - + " " * (get_line_indent_level(line) + 4) - + following_line - ) - idx_offset = i_search - - if "insert_above_inference_line" in loc or "insert_below_inference_line" in loc: - bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)] - - trans_insert_location = line_idx + idx_offset - - insert_indent_level = inference_line_indent_level - - ### content - - # lines to insert - lines_to_insert = bk_trans_content_this - # replace [+] indication with empty - lines_to_insert = lines_to_insert.replace("[+] ", " " * insert_indent_level) - # add begin indicator - lines_to_insert = ( - " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " [Beginning Line] \n" - + lines_to_insert - ) - # replace INDICATIONS with real stuff - # (for now, inference_line related transformations ) - # (have nothing to do with input, dataloader etc, ) - # (so no need to put replaces here.) 
- lines_to_insert = lines_to_insert.replace("\n", " # [coder-enabled]\n") - # add end indicator - lines_to_insert += ( - " # [coder-enabled]\n" - + " " * insert_indent_level - + "# [NeuralCoder] " - + self.backend - + " [Ending Line] # [coder-enabled]" - ) - - # customized argument - if self.backend == "pytorch_benchmark": - lines_to_insert = lines_to_insert.replace( - "NUM_BENCHMARK_ITERATION", globals.num_benchmark_iteration - ) - lines_to_insert = lines_to_insert.replace("ACCURACY_MODE", str(False)) - lines_to_insert = lines_to_insert.replace( - "INFERENCE_LINE", inference_line.strip() - ) - - ### register - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append( - [lines_to_insert.count("\n") + 1] - ) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append( - lines_to_insert.count("\n") + 1 - ) - globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - break # already transformed this line, so skip any further model_name search - line_idx += 1 - - # PART 3 - for customized location - - logger.debug(f"globals.list_trans_insert_modified_file: {globals.list_trans_insert_modified_file}") - logger.debug(f"globals.list_trans_insert_location_idxs: {globals.list_trans_insert_location_idxs}") - logger.debug(f"globals.list_trans_insert_number_insert_lines: {globals.list_trans_insert_number_insert_lines}") - logger.debug(f"globals.list_trans_insert_lines_to_insert: {globals.list_trans_insert_lines_to_insert}") diff --git a/neural_coder/coders/pytorch/lightning.py b/neural_coder/coders/pytorch/lightning.py deleted file mode 100644 index 383432e2c3f..00000000000 --- a/neural_coder/coders/pytorch/lightning.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -class Lightning(object): - def __init__(self, file) -> None: - self.file = file - self.result = [] - - def transform(self): - lines = self.file.split("\n") - for line in lines: - if self.not_add_accelerator(line) or self.not_add_precision(line): - new_line = self.add(line) - if self.not_modify(new_line): - new_line = self.modify(new_line) - self.result.append(new_line) - elif self.not_modify(line): - new_line = self.modify(line) - self.result.append(new_line) - if not self.not_add_accelerator(line) and not self.not_add_precision(line) and not self.not_modify(line): - if line == "" and self.result[-1] == "": - continue - self.result.append(line) - - for index, line in enumerate(self.result): - if index != len(self.result) - 1: - self.result[index] += "\n" - return "".join(self.result) - - def not_add_precision(self, s): - if "Trainer" in s: - if "precision" not in s: - return True - else: - return False - return False - - def not_add_accelerator(self, s): - if "Trainer" in s: - if "accelerator" not in s: - return True - else: - return False - return False - - def add(self, s): - if "Trainer" in s: - if "precision" not in s: - s_index = s.find(")") - s = s[:s_index] + ', precision="bf16"' + s[s_index:] - if "accelerator" not in s: - s_index = s.find(")") - s = s[:s_index] + ', accelerator="cpu"' + s[s_index:] - return s - - def not_modify(self, s): - if "bf16" in s and "cpu" in s: - return False - return True - - def modify(self, s): - if "16" in s: - old = "16" - s = s.replace(old, '"bf16"') - if "32" in s: - old = "32" - s = s.replace(old, '"bf16"') - if '"gpu"' in s: - old = '"gpu"' - s = s.replace(old, '"cpu"') - if '"tpu"' in s: - old = '"tpu"' - s = s.replace(old, '"cpu"') - return s diff --git a/neural_coder/coders/pytorch/reclaim_inference_transformers_trainer.py b/neural_coder/coders/pytorch/reclaim_inference_transformers_trainer.py deleted file mode 100644 index 82f324e26b3..00000000000 --- a/neural_coder/coders/pytorch/reclaim_inference_transformers_trainer.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ... import globals -from ...utils.line_operation import ( - get_line_indent_level, - get_line_left_hand_side, - is_eval_func_model_name, - single_line_comment_or_empty_line_detection, -) - -logging.basicConfig( - level=globals.logging_level, format="%(asctime)s %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S +0000" -) -logger = logging.getLogger(__name__) - - -class ReclaimInferenceTransformersTrainer(object): - def __init__(self, list_model_def_instance): - self.list_model_def_instance = list_model_def_instance - - def print_info(self): - for i in self.list_model_def_instance: - logger.debug(f"i.print_info(): {i.print_info()}") - - # collect file transformation info and register (store) in globals - # (i.e. 
which file to add which lines at which location) - def register_transformation(self): - file_path = globals.list_code_path[0] - lines = open(file_path, "r").read().split("\n") - line_idx = 0 - - for i in range(len(lines)): - line = lines[i] - - if "# Evaluation" in line: - indent_level = get_line_indent_level(line) - trans_insert_location = i - lines_to_insert = "" - lines_to_insert += " " * indent_level + "eval_dataloader = trainer.get_eval_dataloader()" + "\n" - lines_to_insert += " " * indent_level + "import torch" + "\n" - lines_to_insert += " " * indent_level + "for step, inputs in enumerate(eval_dataloader):" + "\n" - lines_to_insert += " " * indent_level + " with torch.no_grad():" + "\n" - lines_to_insert += " " * indent_level + " model(**inputs)" - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append([lines_to_insert.count("\n") + 1]) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append(lines_to_insert.count("\n") + 1) - globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - line_idx += 1 - - logger.debug(f"globals.list_trans_insert_modified_file: {globals.list_trans_insert_modified_file}") - logger.debug(f"globals.list_trans_insert_location_idxs: {globals.list_trans_insert_location_idxs}") - logger.debug(f"globals.list_trans_insert_number_insert_lines: {globals.list_trans_insert_number_insert_lines}") - logger.debug(f"globals.list_trans_insert_lines_to_insert: {globals.list_trans_insert_lines_to_insert}") diff --git a/neural_coder/coders/pytorch/reclaim_inputs.py b/neural_coder/coders/pytorch/reclaim_inputs.py deleted file mode 100644 index 16397012e37..00000000000 --- a/neural_coder/coders/pytorch/reclaim_inputs.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ... import globals -from ...utils.line_operation import ( - get_line_indent_level, - get_line_left_hand_side, - is_eval_func_model_name, - single_line_comment_or_empty_line_detection, -) - -logging.basicConfig( - level=globals.logging_level, format="%(asctime)s %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S +0000" -) -logger = logging.getLogger(__name__) - - -class ReclaimInputs(object): - def __init__(self, list_model_def_instance): - self.list_model_def_instance = list_model_def_instance - - def print_info(self): - for i in self.list_model_def_instance: - logger.debug(f"i.print_info(): {i.print_info()}") - - # collect file transformation info and register (store) in globals - # (i.e. 
which file to add which lines at which location) - def register_transformation(self): - list_code = [] - for i in globals.list_code_path: - list_code.append(open(i, "r").read()) - - for ins in self.list_model_def_instance: - model_name = ins.model_name - file_path = ins.file_path - model_def_line_idx = ins.model_def_line_idx - function_def_line_idx = ins.function_def_line_idx - class_name = ins.class_name - - # transformation - file_path_idx = globals.list_code_path.index(file_path) - lines = list_code[file_path_idx].split("\n") - line_idx = 0 - - # search inference line in this file, and also input_name - inference_line = "" - input_name = "" - for i in range(len(lines)): - line = lines[i] - is_eval_func, eval_func_type = is_eval_func_model_name(model_name, line) - if is_eval_func and "[coder-enabled]" not in line: - inference_line = line - input_name = line[line.find("(") + 1 : line.find(")")].replace("*", "") # get "c" in "a = b(**c)" - - # if there is already a "input = xxx", then quit this function - if input_name != "": - for i in range(len(lines)): - line = lines[i] - if not single_line_comment_or_empty_line_detection(line): - if input_name in line and "=" in line and line.find(input_name) < line.find("="): - return - - # add the created lines for inputs - if inference_line != "" and input_name != "": - for i in range(len(lines)): - line = lines[i] - is_eval_func, eval_func_type = is_eval_func_model_name(model_name, line) - if is_eval_func and "[coder-enabled]" not in line: - indent_level = get_line_indent_level(line) - trans_insert_location = i - lines_to_insert = "" - lines_to_insert += " " * indent_level + "try:" + "\n" - lines_to_insert += " " * indent_level + " " + input_name + " = " + input_name + "\n" - lines_to_insert += " " * indent_level + "except:" + "\n" - lines_to_insert += " " * indent_level + " pass" - - if file_path not in globals.list_trans_insert_modified_file: - globals.list_trans_insert_modified_file.append(file_path) - globals.list_trans_insert_location_idxs.append([trans_insert_location]) - globals.list_trans_insert_number_insert_lines.append([lines_to_insert.count("\n") + 1]) - globals.list_trans_insert_lines_to_insert.append([lines_to_insert]) - else: - idx = globals.list_trans_insert_modified_file.index(file_path) - globals.list_trans_insert_location_idxs[idx].append(trans_insert_location) - globals.list_trans_insert_number_insert_lines[idx].append(lines_to_insert.count("\n") + 1) - globals.list_trans_insert_lines_to_insert[idx].append(lines_to_insert) - - line_idx += 1 - - logger.debug(f"globals.list_trans_insert_modified_file: {globals.list_trans_insert_modified_file}") - logger.debug(f"globals.list_trans_insert_location_idxs: {globals.list_trans_insert_location_idxs}") - logger.debug(f"globals.list_trans_insert_number_insert_lines: {globals.list_trans_insert_number_insert_lines}") - logger.debug(f"globals.list_trans_insert_lines_to_insert: {globals.list_trans_insert_lines_to_insert}") diff --git a/neural_coder/coders/tensorflow/__init__.py b/neural_coder/coders/tensorflow/__init__.py deleted file mode 100644 index e833188cc78..00000000000 --- a/neural_coder/coders/tensorflow/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/neural_coder/coders/tensorflow/amp.py b/neural_coder/coders/tensorflow/amp.py deleted file mode 100644 index 70302d78d4a..00000000000 --- a/neural_coder/coders/tensorflow/amp.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ...utils.line_operation import get_line_left_hand_side - - -class TensorFlowKerasAMP(object): - def __init__(self, file) -> None: - self.file = file - self.result = [] - self.keras_edited_flag = False - - def transform(self): - # import pdb - # pdb.set_trace() - lines = self.file.split("\n") - for line in lines: - if self.is_modify(line): - if ".ConfigProto()" in line: # TF AMP - config_name = get_line_left_hand_side(line) - new_line_1 = "from tensorflow.core.protobuf import rewriter_config_pb2" - new_line_2 = ( - config_name - + ".graph_options.rewrite_options.auto_mixed_precision_mkl = " - + "rewriter_config_pb2.RewriterConfig.ON" - ) - self.result.append(line) - self.result.append(new_line_1) - self.result.append(new_line_2) - elif "keras" in line and "import" in line: # Keras AMP - if not self.keras_edited_flag: - new_line_1 = "from tensorflow.keras.mixed_precision import experimental as mixed_precision" - new_line_2 = "policy = mixed_precision.Policy('mixed_bfloat16')" - new_line_3 = "mixed_precision.set_policy(policy)" - self.result.append(line) - self.result.append(new_line_1) - self.result.append(new_line_2) - self.result.append(new_line_3) - self.keras_edited_flag = True - else: - self.result.append(line) - else: - self.result.append(line) - for index, line in enumerate(self.result): - if index != len(self.result) - 1: - self.result[index] += "\n" - return "".join(self.result) - - def is_modify(self, s): - if ".ConfigProto()" in s or ("keras" in s and "import" in s): - return True - else: - return False diff --git a/neural_coder/coders/tensorflow/inc.py b/neural_coder/coders/tensorflow/inc.py deleted file mode 100644 index 837dff143fb..00000000000 --- a/neural_coder/coders/tensorflow/inc.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from ...utils.line_operation import get_line_indent_level, get_line_left_hand_side - - -class TensorFlowKerasINC(object): - def __init__(self, file) -> None: - self.file = file - self.result = [] - - def transform(self): - # import pdb - # pdb.set_trace() - lines = self.file.split("\n") - for line in lines: - if self.is_modify(line): - model_name = "model" - indent_level = get_line_indent_level(line) - self.result.append(line) - self.result.append(" " * indent_level + "from neural_compressor.quantization import fit") - self.result.append(" " * indent_level + "from neural_compressor.config import PostTrainingQuantConfig") - self.result.append(" " * indent_level + "from neural_compressor import common") - self.result.append(" " * indent_level + "config = PostTrainingQuantConfig(quant_level=1)") - self.result.append(" " * indent_level + model_name + " = fit(" + model_name + ", conf=config)") - self.result.append(" " * indent_level + model_name + '.save("./quantized_model")') - else: - self.result.append(line) - for index, line in enumerate(self.result): - if index != len(self.result) - 1: - self.result[index] += "\n" - return "".join(self.result) - - def is_modify(self, s): - if "model = tf." in s or "model = load_model(" in s: - if "self.model" not in s: - return True - else: - return False diff --git a/neural_coder/coders/transform.py b/neural_coder/coders/transform.py deleted file mode 100644 index c553cbbb87b..00000000000 --- a/neural_coder/coders/transform.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from .. 
import globals - -logging.basicConfig( - level=globals.logging_level, format="%(asctime)s %(levelname)s %(message)s", datefmt="%a, %d %b %Y %H:%M:%S +0000" -) -logger = logging.getLogger(__name__) - - -def execute_insert_transformation(list_transformed_code): - """Insert code lines into file.""" - for index, file_path in enumerate(globals.list_trans_insert_modified_file): - trans_location_idxs = globals.list_trans_insert_location_idxs[index] - trans_number_insert_lines = globals.list_trans_insert_number_insert_lines[index] - trans_lines_to_insert = globals.list_trans_insert_lines_to_insert[index] - - # sort trans_location_idxs and sort the other lists accordingly - trans_number_insert_lines = [i for _, i in sorted(zip(trans_location_idxs, trans_number_insert_lines))] - trans_lines_to_insert = [i for _, i in sorted(zip(trans_location_idxs, trans_lines_to_insert))] - trans_location_idxs = sorted(trans_location_idxs) - - file_path_idx = globals.list_code_path.index(file_path) - lines_transformed = list_transformed_code[file_path_idx].split("\n") - - # math - t = [0] - u = 0 - for n in trans_number_insert_lines: - u = u + n - t.append(u) - t = t[:-1] - - logger.debug(f"t: {t}") - trans_location_idxs = [sum(i) for i in zip(trans_location_idxs, t)] - logger.debug(f"trans_location_idxs after adjustment: {trans_location_idxs}") - - for idx in trans_location_idxs: # actual transformation (insertion) - additions = trans_lines_to_insert[trans_location_idxs.index(idx)].split("\n") - additions = additions[::-1] # reverse - for i in range(len(additions)): - lines_transformed.insert(idx, additions[i]) - - # transfer lines_transformed to code format ("\n" save write) - code_transformed = "".join([i + "\n" for i in lines_transformed])[0:-1] - - list_transformed_code[file_path_idx] = code_transformed - - return list_transformed_code - - -def execute_indent_transformation(list_transformed_code): - """Indent code lines with spaces at the beginning.""" - for index, file_path in enumerate(globals.list_trans_indent_modified_file): - trans_location_idxs = globals.list_trans_indent_location_idxs[index] - trans_indent_level = globals.list_trans_indent_level[index] - - file_path_idx = globals.list_code_path.index(file_path) - lines_transformed = list_transformed_code[file_path_idx].split("\n") - - for idx in trans_location_idxs: # actual transformation (indent) - this_indent_level = trans_indent_level[trans_location_idxs.index(idx)] - lines_transformed[idx] = " " * 4 * this_indent_level + lines_transformed[idx] - - # transfer lines_transformed to code format ("\n" save write) - code_transformed = "".join([i + "\n" for i in lines_transformed])[0:-1] - - list_transformed_code[file_path_idx] = code_transformed - - return list_transformed_code diff --git a/neural_coder/docs/AWSSageMakerSupport.md b/neural_coder/docs/AWSSageMakerSupport.md deleted file mode 100644 index eb8926c12ee..00000000000 --- a/neural_coder/docs/AWSSageMakerSupport.md +++ /dev/null @@ -1,32 +0,0 @@ -AWS Amazon SageMaker Support -===== - -[AWS Amazon SageMaker](https://aws.amazon.com/sagemaker/) users can easily enjoy the productivity boost brought by Neural Coder by one-click installing [Neural Coder Jupyter Lab extension](https://www.npmjs.com/package/jupyter-lab-neural-compressor) in either **SageMaker Studio** or **SageMaker Notebook instance**. - -## Start Jupyter Lab 3 -[Neural Coder Jupyter extension](https://www.npmjs.com/package/jupyter-lab-neural-compressor) requires Jupyter Lab 3. Using Jupyter Lab 1 will cause installation error. 
To start Jupyter Lab 3, please check the following: - -#### For SageMaker Studio -SageMaker Studio Jupyter Lab 3 - -#### For SageMaker Notebook instance -SageMaker Notebook instance Jupyter Lab 3 - -## Installation Guide -For both cases, the installation process is exactly the same, which is by **searching ```neural-compressor``` in the Extension Manager**. - -1. Search and Install - -SageMaker Notebook instance Jupyter Lab 3 - -2. Rebuild - -SageMaker Notebook instance Jupyter Lab 3 - -3. Save and Reload - -SageMaker Notebook instance Jupyter Lab 3 - -4. Done! - -SageMaker Notebook instance Jupyter Lab 3 diff --git a/neural_coder/docs/BigDLNanoSupport.md b/neural_coder/docs/BigDLNanoSupport.md deleted file mode 100644 index 0ea0e4dc849..00000000000 --- a/neural_coder/docs/BigDLNanoSupport.md +++ /dev/null @@ -1,37 +0,0 @@ -BigDL Nano Support -=========================== - -Neural Coder collaborates with [BigDL-Nano](https://bigdl.readthedocs.io/en/latest/doc/Nano/Overview/nano.html), a Python library that automatically applies modern CPU optimizations, to further democratize ease-of-use BigDL-Nano APIs as a **no-code** solution for PyTorch Deep Learning programmers. - -## Example -For instance, to perform BF16 + Channels Last optimizations with BigDL-Nano API using Neural Coder on the [example code](../examples/nano/resnet18.py) and run this code with the enabled optimizations, users can simply execute this command: -``` -python -m neural_coder -o nano_bf16_channels_last ../examples/nano/resnet18.py -``` -The alias for each optimization set is documented in the below Support Matrix. Note that you need to ```pip install bigdl``` first following [BigDL-Nano documentation](https://github.com/intel-analytics/BigDL#installing). - -## Support Matrix - -| Optimization Set | API Alias | -| ------------- | ------------- | -| BF16 + Channels Last | `nano_bf16_channels_last` | -| BF16 + IPEX + Channels Last | `nano_bf16_ipex_channels_last` | -| BF16 + IPEX | `nano_bf16_ipex` | -| BF16 | `nano_bf16` | -| Channels Last | `nano_fp32_channels_last` | -| IPEX + Channels Last | `nano_fp32_ipex_channels_last` | -| IPEX | `nano_fp32_ipex` | -| Convert CUDA TO GPU | `nano_gpu_to_cpu` | -| INT8 | `nano_int8` | -| JIT + BF16 + Channels Last | `nano_jit_bf16_channels_last` | -| JIT + BF16 + IPEX + Channels Last | `nano_jit_bf16_ipex_channels_last` | -| JIT + BF16 + IPEX | `nano_jit_bf16_ipex` | -| JIT + BF16 | `nano_jit_bf16` | -| JIT + Channels Last | `nano_jit_fp32_channels_last` | -| JIT + IPEX + Channels Last | `nano_jit_fp32_ipex_channels_last` | -| JIT + IPEX | `nano_jit_fp32_ipex` | -| JIT | `nano_jit_fp32` | -| ONNX Runtime | `nano_onnxruntime_fp32` | -| ONNX Runtime + INT8 | `nano_onnxruntime_int8_qlinear` | -| OpenVINO | `nano_openvino_fp32` | -| OpenVINO + INT8 | `nano_openvino_int8` | diff --git a/neural_coder/docs/IntelCPU_PerformanceSetting.md b/neural_coder/docs/IntelCPU_PerformanceSetting.md deleted file mode 100644 index a112fd3516f..00000000000 --- a/neural_coder/docs/IntelCPU_PerformanceSetting.md +++ /dev/null @@ -1,51 +0,0 @@ -## Intel CPU Platforms: Best Performance Setting -### Install MKL, OpenMP and JEMALLOC -The simplest way for installation is through ```conda install```: -```bash -conda install -y mkl mkl-include jemalloc -``` - -### Install NUMA Controller -```bash -apt-get update && apt-get install bc numactl -``` - -### Environment Variables -Check if your ```CONDA_PREFIX``` has a value by: -```bash -echo ${CONDA_PREFIX} -``` -If it is empty, it means that you are not in a 
traditional CONDA environment, you need to find the location of the ```.so``` files by: -```bash -find / -name "libjemalloc.so" -find / -name "libiomp5.so" -``` -It will show the path these files were installed into. For example: -```bash -/home/name/lib/libjemalloc.so -/home/name/lib/libiomp5.so -``` -And then you should ```export``` this path as ```CONDA_PREFIX```: -```bash -export CONDA_PREFIX="/home/name" -``` -Finally: -```bash -export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libjemalloc.so -export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so -export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000" -export KMP_AFFINITY="granularity=fine,compact,1,0" -export KMP_BLOCKTIME=1 -export DNNL_PRIMITIVE_CACHE_CAPACITY=1024 -``` - -### Frequency Governers -Check the frequency governor state on your machine: -```bash -cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor -``` -If it shows ```powersave``` instead of ```performance```, execute: -```bash -echo "performance" | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor -cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor -``` diff --git a/neural_coder/docs/PythonAPI.md b/neural_coder/docs/PythonAPI.md deleted file mode 100644 index dfc7567fe43..00000000000 --- a/neural_coder/docs/PythonAPI.md +++ /dev/null @@ -1,58 +0,0 @@ -Neural Coder as Python API -=========================== - -Neural Coder can be used as Python APIs. We currently provide 3 main user-facing APIs for Neural Coder: enable, bench and superbench. - -#### Enable -Users can use ```enable()``` to enable specific features into DL scripts: -``` -from neural_coder import enable -enable( - code="neural_coder/examples/vision/resnet50.py", - features=[ - "pytorch_jit_script", - "pytorch_channels_last", - ], -) -``` -To run benchmark directly on the optimization together with the enabling: -``` -from neural_coder import enable -enable( - code="neural_coder/examples/vision/resnet50.py", - features=[ - "pytorch_jit_script", - "pytorch_channels_last" - ], - run_bench=True, -) -``` - -#### Bench -To run benchmark on your code with an existing patch: -``` -from neural_coder import bench -bench( - code="neural_coder/examples/vision/resnet50.py", - patch_path="${your_patch_path}", -) -``` - -#### SuperBench -To sweep on optimization sets with a fixed benchmark configuration: -``` -from neural_coder import superbench -superbench(code="neural_coder/examples/vision/resnet50.py") -``` -To sweep on benchmark configurations for a fixed optimization set: -``` -from neural_coder import superbench -superbench( - code="neural_coder/examples/vision/resnet50.py", - sweep_objective="bench_config", - bench_feature=[ - "pytorch_jit_script", - "pytorch_channels_last", - ], -) -``` diff --git a/neural_coder/docs/PythonLauncher.md b/neural_coder/docs/PythonLauncher.md deleted file mode 100644 index 74ad230e1f9..00000000000 --- a/neural_coder/docs/PythonLauncher.md +++ /dev/null @@ -1,40 +0,0 @@ -Python Launcher -=========================== - -Neural Coder can be used as a Python **Launcher**. Users can run the Python model code as it is with automatic enabling of Deep Learning optimizations by using Neural Coder's inline Python **Launcher** design. - -## Quick-Start - -Example: Let's say you are running an NLP model using ```run_glue.py``` from HuggingFace transformers [examples](https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py). 
- -Pre-requisites: -```bash -pip install transformers==4.21.0 torch datasets -``` - -Generally we run this code with a Python command line like this: -```bash -python run_glue.py --model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result -``` - -With Neural Coder's **Launcher**, users can easily enjoy Deep Learning optimizations (e.g. default - INT8 dynamic quantization by Intel® Neural Compressor for PyTorch models) by simply adding an inline prefix -```bash --m neural_coder -``` -to the Python command line, and everything else remains the same: -```bash -python -m neural_coder run_glue.py --model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result -``` - -This will run ```run_glue.py``` with the Deep Learning optimization automatically enabled, while everything else (e.g. your input arguments for the code itself) remains the same as the original code. You can also check out the optimized code ```run_glue_optimized.py``` auto-generated by the **Launcher** under the same folder if you want to learn the code enabling. - -Note: Any modification on the optimized code ```run_glue_optimized.py``` will be overwritten every time you run Neural Coder **Launcher** on ```run_glue.py```, so please make any modification on the original code ```run_glue.py``` instead of the optimized one. The optimized code is only saved for your reference. - -## Launcher Arguments (Optional) - -Users can specify which Deep Learning optimization they want to conduct using ```--opt``` argument. The list of supported Deep Learning optimization features can be found [here](SupportMatrix.md). - -Note that if specifically optimizing with INT8 quantization by Intel® Neural Compressor, to choose a quantization approach (strategy), ```--approach``` argument can be specified with either ```static```, ```static_ipex``` or ```dynamic```. For example, to run INT8 static quantization by Intel® Neural Compressor: -```bash -python -m neural_coder --approach static run_glue.py --model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result -``` diff --git a/neural_coder/docs/Quantization.md b/neural_coder/docs/Quantization.md deleted file mode 100644 index 555834c74a8..00000000000 --- a/neural_coder/docs/Quantization.md +++ /dev/null @@ -1,39 +0,0 @@ -Neural Coder for Quantization -=========================== -This feature helps automatically enable quantization on Deep Learning models and automatically evaluates for the best performance on the model. It is a code-free solution that can help users enable quantization algorithms on a model with no manual coding needed. Supported features include Post-Training Static Quantization, Post-Training Dynamic Quantization, and Mixed Precision. 
- - -## Features Supported -- Post-Training Static Quantization for [Stock PyTorch](https://pytorch.org/tutorials/prototype/fx_graph_mode_ptq_static.html) (with FX backend) -- Post-Training Static Quantization for [IPEX](https://github.com/intel/intel-extension-for-pytorch/blob/v1.12.0/docs/tutorials/features/int8.md) -- Post-Training Dynamic Quantization for [Stock PyTorch](https://pytorch.org/tutorials/recipes/recipes/dynamic_quantization.html) -- Mixed Precision for [Stock PyTorch](https://pytorch.org/tutorials/recipes/recipes/amp_recipe.html) - -## Models Supported -- HuggingFace [Transformers](https://github.com/huggingface/transformers) models -- [torchvision](https://pytorch.org/vision/stable/index.html) models -- Broad models (under development) - -## Usage -- PyPI distribution with a one-line API call -- [JupyterLab extension](../extensions/neural_compressor_ext_lab/README.md) - -## Example -### PyPI distribution: -HuggingFace [Transformers](https://github.com/huggingface/transformers) models: [text-classification/run_glue.py](https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py) -``` -from neural_coder import auto_quant -auto_quant( - code="https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - args="--model_name_or_path albert-base-v2 --task_name sst2 --do_eval --output_dir result", -) -``` - -[torchvision](https://pytorch.org/vision/stable/index.html) models: [imagenet/main.py](https://github.com/pytorch/examples/blob/main/imagenet/main.py) -``` -from neural_coder import auto_quant -auto_quant( - code="https://github.com/pytorch/examples/blob/main/imagenet/main.py", - args="-a alexnet --pretrained -e /path/to/imagenet/", -) -``` diff --git a/neural_coder/docs/SupportMatrix.md b/neural_coder/docs/SupportMatrix.md deleted file mode 100644 index be2a7fea308..00000000000 --- a/neural_coder/docs/SupportMatrix.md +++ /dev/null @@ -1,26 +0,0 @@ -Supported Optimization Features -=========================== - -| Category | Optimization | API Alias | -| ------------- | ------------- | ------------- | -| PyTorch | [Mixed Precision](https://pytorch.org/docs/stable/amp.html) | `pytorch_amp` | -| PyTorch | [Channels Last](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) | `pytorch_channels_last` | -| PyTorch | [JIT (Just-In-Time) Script/Trace](https://pytorch.org/docs/stable/jit.html) & [optimize_for_inference](https://pytorch.org/docs/stable/generated/torch.jit.optimize_for_inference.html) | `pytorch_jit_script`, `pytorch_jit_trace`, `pytorch_jit_script_ofi`, `pytorch_jit_trace_ofi` | -| PyTorch | JIT with [TorchDynamo](https://github.com/pytorch/torchdynamo) | `pytorch_torchdynamo_jit_script`, `pytorch_torchdynamo_jit_trace`, `pytorch_torchdynamo_jit_script_ofi`, `pytorch_torchdynamo_jit_trace_ofi` | -| PyTorch | [Intel Neural Compressor (INC) Mixed Precision](https://github.com/intel/neural-compressor/blob/master/docs/source/mixed_precision.md) | `pytorch_inc_bf16` | -| PyTorch | [INC INT8 Static Quantization (FX/IPEX)](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#supported-feature-matrix) | `pytorch_inc_static_quant_fx`, `pytorch_inc_static_quant_ipex`, `pytorch_inc_static_quant_ipex_xpu` | -| PyTorch | [INC INT8 Dynamic Quantization](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#supported-feature-matrix) | `pytorch_inc_dynamic_quant` | -| PyTorch | [Intel Extension for 
PyTorch (FP32, BF16, INT8 Static/Dynamic Quantization)](https://github.com/intel/intel-extension-for-pytorch) | `pytorch_ipex_fp32`, `pytorch_ipex_bf16`, `pytorch_ipex_int8_static_quant`, `pytorch_ipex_int8_dynamic_quant` | -| PyTorch | [Alibaba Blade-DISC](https://github.com/alibaba/BladeDISC) | `pytorch_aliblade` | -| PyTorch Lightning | [Mixed Precision](https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html) | `pytorch_lightning_bf16_cpu` | -| TensorFlow | [Mixed Precision](https://www.intel.com/content/www/us/en/developer/articles/guide/getting-started-with-automixedprecisionmkl.html) | `tensorflow_amp` | -| Keras | [Mixed Precision](https://www.tensorflow.org/guide/mixed_precision) | `keras_amp` | -| TensorFlow/Keras Model | [INC Quantization](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#supported-feature-matrix) | `tensorflow_inc` | -| Keras Script | [INC Quantization](https://github.com/intel/neural-compressor/tree/master/examples/keras/mnist) | `keras_inc` | -| ONNX Runtime | [INC Static Quantization (QLinear)](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#supported-feature-matrix) | `onnx_inc_static_quant_qlinear` | -| ONNX Runtime | [INC Static Quantization (QDQ)](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#supported-feature-matrix) | `onnx_inc_static_quant_qdq` | -| ONNX Runtime | [INC Dynamic Quantization](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#supported-feature-matrix) | `onnx_inc_dynamic_quant` | -| [HuggingFace Optimum-Intel](https://huggingface.co/docs/optimum/intel/index) | INC Quantization | `pytorch_inc_huggingface_optimum_static`, `pytorch_inc_huggingface_optimum_dynamic` | -| [Intel Extension for Transformers](https://github.com/intel/intel-extension-for-transformers/) | INC Quantization | `intel_extension_for_transformers` | -| [BigDL Nano](https://bigdl.readthedocs.io/en/latest/doc/PythonAPI/Nano/pytorch.html#bigdl-nano-pytorch-inferenceoptimizer) | [Optimization List](./BigDLNanoSupport.md) | `nano_` + [specific alias](./BigDLNanoSupport.md) | -| Auto-Detect | [INC Quantization](https://github.com/intel/neural-compressor) | `inc_auto` | diff --git a/neural_coder/docs/cloud_autobench/CloudAutobench.MD b/neural_coder/docs/cloud_autobench/CloudAutobench.MD deleted file mode 100644 index d906a80292f..00000000000 --- a/neural_coder/docs/cloud_autobench/CloudAutobench.MD +++ /dev/null @@ -1,95 +0,0 @@ -# Cloud Auto-bench -This is a user guide for the automated bash script for creating a cloud instance, configuring the environment, running the benchmark code, and terminating the instance. The script supports AWS and Ali Yun for now and will support more cloud vendors in the future. - -## Prerequisite -## AWS -#### Install AWS CLI -Install the latest AWS CLI according to https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html. -Example: install AWS CLI on Linux x86 (64-bit) by the following commands. -``` -curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" -unzip awscliv2.zip -./aws/install -i ~/.Awscli2 -b ~/.Awscli2/bin -export PATH=/.Awscli2/bin:$PATH -``` -Confirm the installation with the following command. -``` -export PATH=/.Awscli2/bin:$PATH -``` - -#### Configure IAM role -Configure IAM role to access AWS according to https://us-east-2.console.aws.amazon.com/iamv2/home#/users. 
- -#### Configure AWS clients -Configure AWS clients via "aws configure" which fetch from step 2. -``` -aws --configure -AWS Access Key ID [None]: x -AWS Secret Access Key [None]: x -Default region name [None]: us-east-2 -Default output format [None]: text -``` - -#### Create a key pair -Create a key pair x.pem via https://us-east-2.console.aws.amazon.com/ec2/v2/home?region=us-east-2#KeyPairs:, add x.pem to the current folder, and modify its permission by -``` -chmod 400 x.pem -``` - -## Ali Yun -#### Install Alibaba Cloud CLI -Download the installation package for Linux from https://www.alibabacloud.com/help/en/alibaba-cloud-cli/latest/linux - -Decompress the downloaded file to obtain the executable file named aliyun -``` -tar xzvf (aliyun-cli-linux-latest-amd64.tgz) -``` -In the "()" is the file name of the installation file you have downloaded - -Set environment variables -add this line into the ~/.bash_profile file -``` -export PATH=/home/your_directory/.Awscli2/bin:$PATH -``` - -Your directory is the directory of bin in the ALi Yun installation directory) -source environment variables - -#### Configure Ali Yun clients -run -``` -aliyun configure -``` - -Input the key ID, key secret, region ID and default language of your Ali Yun account -``` -Access Key Id [************************]: -Access Key Secret [******************************]: -Default Region Id [**-****]: -Default Output Format [json]: json (Only support json) -Default Language [zh|en] en -``` - -### Create a key pair of Ali Yun according to page https://ecs.console.aliyun.com/#/keyPair/ -add x.pem to the current folder, and modify its permission by -``` -chmod 400 x.pem -``` - -## Launch the cloud benchmark script -### Modify the permission of bench.sh -``` -chmod 755 bench.sh -``` - -### Input your task config information in the config.conf file -You need to input the information of your task in the config.conf file -You can choose the cloud vendor(AWS or Ali Yun), Instance number, type and other information with the comments as a reference - -### Launch the script -``` -./bench.sh -``` - -### Output -Example Cloud auto-bench report: ```superbench_report_aws_icx.pdf``` diff --git a/neural_coder/docs/cloud_autobench/bench.sh b/neural_coder/docs/cloud_autobench/bench.sh deleted file mode 100644 index b28b2de9ed2..00000000000 --- a/neural_coder/docs/cloud_autobench/bench.sh +++ /dev/null @@ -1,497 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -#!/bin/bash - -# Read the user input - -check_input() -{ -if [ $whether_launch_new -eq "1" ] -then - if [ $vendor -eq "1" ] - then - if [ -z $security_id_aws ] - then - echo "[ERROR] There is no security group ID, you must specify security ID in config file when creating a new instance" - fi - - if [ -z $subnet_id_aws ] - then - echo "[ERROR] There is no subnet ID, you must specify subnet ID in config file when creating a new instance" - fi - elif [ $vendor -eq "2" ] - then - if [ -z $security_id_ali ] - then - echo "[ERROR] There is no security group ID, you must specify security ID in config file when creating a new instance" - fi - - if [ -z $region_id_ali ] - then - echo "[ERROR] There is no region ID, you must specify region ID in config file when creating a new instance" - fi - else - echo "[ERROR] There is no this vendor" - fi -else - if [ -z $instance_id ] - then - echo "[ERROR] There is no instance ID, you must specify instance ID in config file when using an existed instance" - fi -fi - -} - -create_AWS_instance() -{ -if [ $os -eq "1" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ami-02f3416038bdb17fb" - else - ami_ID="ami-0ff596d41505819fd" - fi -elif [ $os -eq "2" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ami-02d1e544b84bf7502" - else - ami_ID="ami-03e57de632660544c" - fi -elif [ $os -eq "3" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ami-092b43193629811af" - else - ami_ID="ami-0082f8c86a7132597" - fi -elif [ $os -eq "4" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ami-0f7cb53c916a75006" - else - ami_ID="ami-075a486be6269029f" - fi -else - echo "[ERROR] The operating system is invalid" - exit 0 -fi - -echo "[INFO] Starting creating AMS instance ..." - -instance_id=$(aws ec2 run-instances --image-id $ami_ID --count $count --instance-type $i_type --key-name $key_name --security-group-ids $security_id_aws --subnet-id $subnet_id_aws --block-device-mappings 'DeviceName=/dev/sda1, Ebs={VolumeSize=30}' --query "Instances[0].InstanceId") -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] Create AWS Instance failed" - exit 0 -else - echo "[INFO] Create AWS instance success" - echo "[INFO] Your Instance Id is $instance_id" - echo "[INFO] Waiting for instance to initialize ..." - echo "[INFO] 15s left ..." - sleep 5s - echo "[INFO] 10s left ..." - sleep 5s - echo "[INFO] 5s left ..." - sleep 5s -fi -} - -create_Ali_Yun_instance() -{ -if [ $os -eq "1" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - else - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - fi -elif [ $os -eq "2" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - else - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - fi -elif [ $os -eq "3" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - else - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - fi -elif [ $os -eq "4" ] -then - if [ $arch -eq "1" ] - then - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - else - ami_ID="ubuntu_20_04_x64_20G_alibase_20220524.vhd" - fi -else - echo "[ERROR] The operating system is invalid" - exit 0 -fi - -i_type="ecs.$i_type_family.$i_type_size" - -echo "[INFO] Starting creating Ali Yun instance ..." 
- -instance_id=$(aliyun ecs RunInstances --RegionId $region_id_ali --InstanceType $i_type --InstanceChargeType PostPaid --ImageId $ami_ID --KeyPairName $key_name --SecurityGroupId $security_id_ali --VSwitchId vsw-m5ethlhigvonp2kuyzhjw --InternetMaxBandwidthIn 1 --InternetMaxBandwidthOut 1 |grep "i-") -result=$? - -instance_id="${instance_id:4:22}" -if [ $result -ne '0' ] -then - echo "[ERROR] Create Ali Yun Instance failed" - exit 0 -else - echo "[INFO] Create Ali Yun instance successfully" - echo "[INFO] The Ali Yun instance id is: $instance_id" - echo "[INFO] Waiting for instance to initialize ..." - echo "[INFO] 35s left ..." - sleep 5s - echo "[INFO] 30s left ..." - sleep 5s - echo "[INFO] 25s left ..." - sleep 5s - echo "[INFO] 20s left ..." - sleep 5s - echo "[INFO] 15s left ..." - sleep 5s - echo "[INFO] 10s left ..." - sleep 5s - echo "[INFO] 5s left ..." - sleep 5s -fi -} - -connect_AWS() -{ -dns_name=$(aws ec2 describe-instances --instance-ids $instance_id --query "Reservations[0].Instances[0].PublicDnsName") -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] Can not find this instance, please check" - exit 0 -fi - -host_name=$dns_name - -if [ $os -eq "1" ] -then - host_name="ubuntu@$dns_name" -else - host_name="ec2-user@$dns_name" -fi - -key_name="$key_name.pem" -echo "[INFO] Your instance host name is: $host_name" -echo "[INFO] Connecting to AWS Instance ..." -ssh -i $key_name $host_name -o "StrictHostKeyChecking no" "uname -a ; exit" -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] SSH connection failed" - echo "[INFO] Start terminating the Instance" - aws ec2 terminate-instances --instance-ids $instance_id - result=$? - if [ $result -ne '0' ] - then - echo "[ERROR] Instance termination failed" - else - echo "[INFO] Instance termination success" - fi - exit 0 -else - echo "[INFO] Connect to AWS Instance success" -fi - -echo "[INFO] Start to transferring benchmark files" -scp -i $key_name -r ./code/ $host_name:/tmp -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] SSH connection failed" - exit 0 -else - echo "[INFO] File transferring success" -fi - -if [ $whether_launch_new -eq "1" ] -then - ssh -i $key_name $host_name "cd /tmp/code; chmod +x ./config.sh; ./config.sh; exit" - echo "[INFO] Install dependencies finished" -else - echo "[INFO] Configured environment" -fi - -echo "[INFO] Start launching the task ..." - -ssh -i $key_name $host_name "cd /tmp/code; chmod +x ./launch.sh; ./launch.sh; exit" - -echo "[INFO] Benchmark Execution finished" -} - -connect_Ali_Yun() -{ -public_ip=$(aliyun ecs DescribeInstances --output cols=InstanceId,PublicIpAddress.IpAddress rows=Instances.Instance[] |grep $instance_id) -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] Can not find this instance, please check" - exit 0 -fi - -public_ip="${public_ip:25}" -length=${#public_ip} -public_ip="${public_ip:1:$length-2}" -host_name="root@$public_ip" -key_name="$key_name.pem" -echo "[INFO] Your instance host name is: $host_name" - -echo "[INFO] Start to connecting Ali Yun instance" -ssh -i $key_name $host_name -o "StrictHostKeyChecking no" "uname -a ; exit" -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] SSH connection failed" - echo "[INFO] Start to delete instance $instance_id" - sleep 60s - aliyun ecs DeleteInstance --InstanceId $instance_id --Force true - result=$? 
- if [ $result -ne '0' ] - then - echo "[ERROR] Instance termination failed" - exit 0 - else - echo "[INFO] Instance termination success" - fi - exit 0 -else - echo "[INFO] Connect to Ali Yun Instance success" -fi - -echo "[INFO] Start to transferring benchmark files" -scp -i $key_name -r ./code/ $host_name:/tmp -result=$? -if [ $result -ne '0' ] -then - echo "[ERROR] SSH connection failed" - exit 0 -else - echo "[INFO] File transferring success" -fi - -if [ $whether_launch_new -eq "1" ] -then - ssh -i $key_name $host_name "cd /tmp/code; chmod +x ./config.sh; ./config.sh; exit" - echo "[INFO] Install dependencies finished" -else - echo "[INFO] Configured environment" -fi - -echo "[INFO] Start launching the task ..." - -ssh -i $key_name $host_name "cd /tmp/code; chmod +x ./launch.sh; ./launch.sh; exit" - -echo "[INFO] Benchmark Execution finished" -} - -close_AWS() -{ - -if [ $whether_retain -eq "1" ] -then - echo "[INFO] Start stopping the Instance" - - aws ec2 stop-instances --instance-ids $instance_id - result=$? - if [ $result -ne '0' ] - then - echo "[ERROR] Instance stop failed" - exit 0 - else - echo "[INFO] Instance stop success" - echo "[INFO] The instance id is $instance_id, Please record this $instance_id for next use" - fi -else - echo "[INFO] Start terminating the Instance" - - aws ec2 terminate-instances --instance-ids $instance_id - result=$? - if [ $result -ne '0' ] - then - echo "[ERROR] Instance termination failed" - exit 0 - else - echo "[INFO] Instance termination success" - fi -fi -} - -close_Ali_Yun() -{ - -if [ $whether_retain -eq "1" ] -then - echo "[INFO] Start stopping the Instance" - - aliyun ecs StopInstance --InstanceId $instance_id - result=$? - if [ $result -ne '0' ] - then - echo "[ERROR] Instance stop failed" - exit 0 - else - echo "[INFO] Instance stop success" - echo "[INFO] The instance id is $instance_id, Please record this $instance_id for next use" - fi -elif [ $whether_retain -eq "2" ] -then - echo "[INFO] Start terminating the Instance" - - aliyun ecs DeleteInstance --InstanceId $instance_id --Force true - result=$? - if [ $result -ne '0' ] - then - echo "[ERROR] Instance termination failed" - exit 0 - else - echo "[INFO] Instance termination success" - fi -fi -} - - -main() -{ -vendor=$(sed '/^cloud_vendor=/!d; s/.*=//' config.conf) -os=$(sed '/^OS=/!d; s/.*=//' config.conf) -arch=$(sed '/^arch=/!d; s/.*=//' config.conf) -count=$(sed '/^count=/!d; s/.*=//' config.conf) -i_type_family=$(sed '/^i_type_family=/!d; s/.*=//' config.conf) -i_type_size=$(sed '/^i_type_size=/!d; s/.*=//' config.conf) -key_name=$(sed '/^key_name=/!d; s/.*=//' config.conf) -instance_id=$(sed '/^instance_id=/!d; s/.*=//' config.conf) -security_id_aws=$(sed '/^security_id_aws=/!d; s/.*=//' config.conf) -subnet_id_aws=$(sed '/^subnet_id_aws=/!d; s/.*=//' config.conf) -security_id_ali=$(sed '/^security_id_ali=/!d; s/.*=//' config.conf) -region_id_ali=$(sed '/^region_id_ali=/!d; s/.*=//' config.conf) - -whether_retain=$(sed '/^whether_retain=/!d; s/.*=//' config.conf) -whether_launch_new=$(sed '/^whether_launch_new=/!d; s/.*=//' config.conf) - -i_type="$i_type_family.$i_type_size" - -check_input - -if [ ! -f "$key_name.pem" ]; then - echo "[ERROR] Can not find the key pair file $key_name.pem, please put the $key_name.pem file in this folder" - exit 0 -else - chmod 400 ./"$key_name.pem" -fi - -if [ ! 
-f "./code/benchmark.py" ]; then - echo "[ERROR] Can not find the benchmark file, please put the benchmark file in code folder" - exit 0 -fi - - -if [ $whether_launch_new -eq "1" ] -then - echo "[INFO] Your instance info:" - echo "[INFO] Instance key name: $key_name" - echo "[INFO] Instance count: $count" - echo "[INFO] Instance_type: $i_type" -else - echo "[INFO] The existed instance you choose: $instance_id" -fi - -if [ $whether_launch_new -eq "1" ] -then - if [ $vendor -eq "1" ] - then - create_AWS_instance - elif [ $vendor -eq "2" ] - then - create_Ali_Yun_instance - else - echo "Tencent Cloud" - fi -else - if [ $vendor -eq "1" ] - then - aws ec2 start-instances --instance-ids $instance_id - echo "[INFO] Waiting for instance to Start ..." - echo "[INFO] 15s left ..." - sleep 5s - echo "[INFO] 10s left ..." - sleep 5s - echo "[INFO] 5s left ..." - sleep 5s - elif [ $vendor -eq "2" ] - then - aliyun ecs StartInstance --InstanceId $instance_id - echo "[INFO] Waiting for instance to Start ..." - echo "[INFO] 45s left ..." - sleep 15s - echo "[INFO] 30s left ..." - sleep 15s - echo "[INFO] 15s left ..." - sleep 15s - else - echo "Tencent Cloud" - fi -fi - -if [ $vendor -eq "1" ] -then - connect_AWS -elif [ $vendor -eq "2" ] -then - connect_Ali_Yun -else - echo "Tencent Cloud" -fi - -if [ $vendor -eq "1" ] -then - close_AWS -elif [ $vendor -eq "2" ] -then - close_Ali_Yun -else - echo "Tencent Cloud" -fi - -exit 0 - -} - -main - - - - diff --git a/neural_coder/docs/cloud_autobench/code/config.sh b/neural_coder/docs/cloud_autobench/code/config.sh deleted file mode 100644 index 10e9449a12f..00000000000 --- a/neural_coder/docs/cloud_autobench/code/config.sh +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -x - -echo "[INFO] Start installing software packages and dependencies" - -# install machine packages -sudo apt-get -y update -sudo apt-get install -y wget -sudo apt-get install -y git -sudo apt-get install -y build-essential -sudo apt-get install -y htop aha html2text numactl bc -sudo apt-get install -y ffmpeg libsm6 libxext6 -sudo apt-get install -y automake libtool -sudo apt-get install -y python3 pip - -# install conda -wget https://repo.continuum.io/archive/Anaconda3-5.0.0-Linux-x86_64.sh -O anaconda3.sh -chmod +x anaconda3.sh -sudo ./anaconda3.sh -b -p /home/anaconda3 -export PATH=/home/anaconda3/bin:$PATH -conda create -yn test python=3.9 -source activate test - -# install pip modules -pip install numpy -pip install pyyaml -pip install typing_extensions -pip install psutil -pip install neural_compressor intel_extension_for_pytorch - -# install torch -pip3 install torch torchvision torchaudio -pip3 install torchdynamo -pip3 install transformers diff --git a/neural_coder/docs/cloud_autobench/code/launch.sh b/neural_coder/docs/cloud_autobench/code/launch.sh deleted file mode 100644 index 1521301e1c3..00000000000 --- a/neural_coder/docs/cloud_autobench/code/launch.sh +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -x - -export PATH=/home/anaconda3/bin:$PATH -source activate test -echo "[INFO] Start running auto benchmark..." -python -c "from neural_coder import superreport; superreport(code='resnet50.py')" -# Note: you need to uncomment superreport in neural_coder/interface.py and neural_coder/__init__.py to use this API. diff --git a/neural_coder/docs/cloud_autobench/code/resnet50.py b/neural_coder/docs/cloud_autobench/code/resnet50.py deleted file mode 100644 index 13caa61334a..00000000000 --- a/neural_coder/docs/cloud_autobench/code/resnet50.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import torch -import torchvision.models as models - -model = models.resnet50(pretrained=True) -model.eval() -batch_size = 1 -input = torch.rand(batch_size, 3, 224, 224) -with torch.no_grad(): - model(input) diff --git a/neural_coder/docs/cloud_autobench/config.conf b/neural_coder/docs/cloud_autobench/config.conf deleted file mode 100644 index d598178debb..00000000000 --- a/neural_coder/docs/cloud_autobench/config.conf +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is an example of providing Cloud configs for cloud benchmark - -# Choose Cloud Vendor -# 1 AWS 2 Ali Yun 3 Tencent Cloud -cloud_vendor=1 - -# Choose Instance Operating System -# 1 Ubuntu 2 Amazon Linux 3 Red Hat 4 SUSE Linux -OS=1 - -# Choose Instance CPU architecture -# 1 x86 2 ARM -arch=1 - -# Number of Instances need to be launched -count=1 - -# Family of instance type -i_type_family=c6i - -# Size of instance type -i_type_size=2xlarge - -# Private key name -key_name=huangyu - -# Instance id (Required if use an existing instance) -instance_id=i-0ecdba84ca1ea5d68 - -# Security Group id AWS (Required) -security_id_aws=sg-00139ed15a90e83e0 - -# Subnet id AWS (Required) -subnet_id_aws=subnet-010cad6a296e44f8b - -# Security Group id Ali Yun (Required) -security_id_ali=sg-m5e61j9zh9hivx3k9iwp - -# RegionId Ali Yun (Required) -region_id_ali=cn-qingdao - -# Whether retain the instance after this task -# 1 retain 2 Not retain -whether_retain=2 - -# Create a new instance or use an existing instance -# 1 Creat new 2 Use an existing one -whether_launch_new=2 diff --git a/neural_coder/docs/cloud_autobench/superbench_report_aws_icx.pdf b/neural_coder/docs/cloud_autobench/superbench_report_aws_icx.pdf deleted file mode 100644 index cdb6d01608c..00000000000 Binary files a/neural_coder/docs/cloud_autobench/superbench_report_aws_icx.pdf and /dev/null differ diff --git a/neural_coder/docs/release_notes/v0.4.md b/neural_coder/docs/release_notes/v0.4.md deleted file mode 100644 index 933c02900a3..00000000000 --- a/neural_coder/docs/release_notes/v0.4.md +++ /dev/null @@ -1,25 +0,0 @@ -v0.4 -===== - -## Highlights -- **Visual Studio Code extension**: We are delighted to announce the release of Neural Coder's [Visual Studio Code extension](https://marketplace.visualstudio.com/items?itemName=IntelNeuralCompressor.neural-coder-ext-vscode). VS Code programmers can enjoy one-click automatic enabling of Deep Learning optimization API and accelerate their Deep Learning models without manual coding. - -- **HuggingFace Transformers**: - - We supported **all** HuggingFace Transformers [examples](https://github.com/huggingface/transformers/tree/main/examples/pytorch) that calls ```Trainer``` class, and validated over **500** models from HuggingFace Transformers [model hub](https://huggingface.co/models). The models are able to be accelerated automatically with Neural Coder with minimum loss of prediction accuracy. 
- - We enabled the support of [HuggingFace Optimum-Intel](https://huggingface.co/docs/optimum/intel/index). User scripts of HuggingFace Transformers models will by default be optimized with Optimum-Intel API to enjoy performance speed-up brought by INT8 quantization. - - We enabled the support of [Intel® Extension for Transformers](https://github.com/intel/intel-extension-for-transformers), an innovative toolkit to accelerate Transformer-based models on Intel platforms. For more details, please refer to the updated [support matrix](../SupportMatrix.md). - -- **Support of BigDL Nano**: We are delighted to announce the collaboration between Neural Coder and [BigDL Nano](https://bigdl.readthedocs.io/en/latest/doc/Nano/index.html). Users can now one-click enable BigDL Nano optimizations for PyTorch in Neural Coder. For detailed support matrix for BigDL Nano features, please refer to this [guide](../BigDLNanoSupport.md). - -- **Amazon AWS SageMaker**: We provided a user [tutorial](../AWSSageMakerSupport.md) for installing Neural Coder's JupyterLab extension in AWS SageMaker platform. Users are able to one-click install the extension in Amazon AWS SageMaker with Jupyter 3 and enjoy Neural Coder's functionalities. - -- **Python Launcher**: We added the implementation of [Python Launcher](../PythonLauncher.md) usage for Neural Coder, which will be one of the recommended user interfaces in the future as a replacement of Python API. Users can run the Python model code as it is with automatic enabling of Deep Learning optimizations by using Neural Coder's inline Python Launcher design: ```-m neural_coder```. - -- **Device Detection**: We enabled the capability of detecting running device and its ISA automatically and adjusting applied optimization features accordingly. For instance, when running Neural Coder on Intel GPU instead of Intel CPU, the PyTorch Mixed Precision optimization feature will adapt ```xpu``` instead of ```cpu```, and ```torch.half``` instead of ```torch.bfloat16```. - -## Others -- **INT8 Accuracy Evaluation**: We enabled accuracy evaluation for INT8 quantizations in Neural Coder. Users are able to view the accuracy delta for each quantization optimization in Neural Coder's auto-benchmark output log. The calculation is ```acc_delta = (int8_acc - fp32_acc)/(fp32_acc)```. - -- **Auto-quantize TensorFlow/Keras scripts**: We enabled the support of auto-quantizing TensorFlow/Keras script-based models with Intel® Neural Compressor. The default quantization scheme will be applied. For more details, please refer to the updated [support matrix](../SupportMatrix.md). - -- **Auto-quantize ONNX Runtime scripts**: We enabled the support of auto-quantizing ONNX Runtime script-based models with Intel® Neural Compressor. We support [dynamic quantization](https://github.com/intel/neural-compressor/tree/master/examples/onnxrt#dynamic-quantization), static quantization ([QDQ](https://github.com/intel/neural-compressor/tree/master/examples/onnxrt#tensor-oriented-qdq-format)), and static quantization ([QLinearOps](https://github.com/intel/neural-compressor/tree/master/examples/onnxrt#operator-oriented-with-qlinearops)). For more details, please refer to the updated [support matrix](../SupportMatrix.md). 
diff --git a/neural_coder/examples/keras/mnist.py b/neural_coder/examples/keras/mnist.py deleted file mode 100644 index 22ba9d66876..00000000000 --- a/neural_coder/examples/keras/mnist.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import tensorflow as tf -import numpy as np -from tensorflow import keras -from tensorflow.keras import layers -import time -from torch.utils.data import DataLoader - -num_classes = 10 - -def build_dataset(): - # Load the data and split it between train and test sets - (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() - - # Scale images to the [0, 1] range - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - # Make sure images have shape (28, 28, 1) - x_train = np.expand_dims(x_train, -1) - x_test = np.expand_dims(x_test, -1) - - # convert class vectors to binary class matrices - y_train = keras.utils.to_categorical(y_train, num_classes) - y_test = keras.utils.to_categorical(y_test, num_classes) - return x_train, y_train, x_test, y_test - -class Dataset(): - def __init__(self, ): - _, _ , self.inputs, self.labels = build_dataset() - - def __getitem__(self, idx): - return self.inputs[idx], self.labels[idx] - - def __len__(self): - assert len(self.inputs) == len(self.labels), 'inputs should have equal len with labels' - return len(self.inputs) - -def build_model(x_train, y_train, x_test, y_test): - if os.path.exists('fp32_model'): - model = keras.models.load_model('fp32_model') - return model - # Model / data parameters - input_shape = (28, 28, 1) - model = keras.Sequential( - [ - keras.Input(shape=input_shape), - layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), - layers.MaxPooling2D(pool_size=(2, 2)), - layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - layers.MaxPooling2D(pool_size=(2, 2)), - layers.Flatten(), - layers.Dropout(0.5), - layers.Dense(num_classes, activation="softmax"), - ] - ) - - batch_size = 128 - epochs = 1 - - model.compile(loss="categorical_crossentropy", optimizer="adam", - metrics=["accuracy"], run_eagerly=True) - model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) - model.summary() - if not os.path.exists('fp32_model'): - model.save('fp32_model') - return model - -def eval_func(model): - x_train, y_train, x_test, y_test = build_dataset() - model.compile(metrics=["accuracy"], run_eagerly=False) - score = model.evaluate(x_test, y_test) - return score[1] - -def main(): - x_train, y_train, x_test, y_test = build_dataset() - model = build_model(x_train, y_train, x_test, y_test) - calib_dataloader = DataLoader(Dataset(), batch_size=10) - -if __name__ == '__main__': - main() diff --git a/neural_coder/examples/nano/resnet18.py b/neural_coder/examples/nano/resnet18.py deleted file mode 100644 index 8d189bf75b2..00000000000 --- a/neural_coder/examples/nano/resnet18.py +++ /dev/null @@ -1,26 +0,0 @@ -# 
Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import torch -from torchvision.models import resnet18 - -if __name__ == "__main__": - - model_ft = resnet18(pretrained=True) - - x = torch.rand(2, 3, 224, 224) - y_hat = model_ft(x) - predictions = y_hat.argmax(dim=1) - print(predictions) diff --git a/neural_coder/examples/nlp/distilbert.py b/neural_coder/examples/nlp/distilbert.py deleted file mode 100644 index eab5513a51a..00000000000 --- a/neural_coder/examples/nlp/distilbert.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from transformers import ( - AutoModelForSequenceClassification, - AutoTokenizer -) - -finetuned_model = "distilbert-base-uncased-finetuned-sst-2-english" - - -class MyDataLoader(object): - def __init__(self): - self.tokenizer = AutoTokenizer.from_pretrained(finetuned_model) - self.sequence = "Shanghai is a beautiful city!" - self.encoded_input = self.tokenizer( - self.sequence, - return_tensors='pt' - ) - self.label = 1 # negative sentence: 0; positive sentence: 1 - self.batch_size = 1 - - def __iter__(self): - yield self.encoded_input, self.label - - -my_nlp_model = AutoModelForSequenceClassification.from_pretrained( - finetuned_model, -) - -my_nlp_dataloader = MyDataLoader() - -output = my_nlp_model(**my_nlp_dataloader.encoded_input) diff --git a/neural_coder/examples/nlp/run_glue.py b/neural_coder/examples/nlp/run_glue.py deleted file mode 100644 index f3a8ccfd75c..00000000000 --- a/neural_coder/examples/nlp/run_glue.py +++ /dev/null @@ -1,617 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -# code source -# https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py - -import logging -import os -import random -import sys -from dataclasses import dataclass, field -from typing import Optional - -import datasets -import numpy as np -from datasets import load_dataset, load_metric - -import transformers -from transformers import ( - AutoConfig, - AutoModelForSequenceClassification, - AutoTokenizer, - DataCollatorWithPadding, - EvalPrediction, - HfArgumentParser, - PretrainedConfig, - Trainer, - TrainingArguments, - default_data_collator, - set_seed, -) -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.21.0") - -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") - -task_to_keys = { - "cola": ("sentence", None), - "mnli": ("premise", "hypothesis"), - "mrpc": ("sentence1", "sentence2"), - "qnli": ("question", "sentence"), - "qqp": ("question1", "question2"), - "rte": ("sentence1", "sentence2"), - "sst2": ("sentence", None), - "stsb": ("sentence1", "sentence2"), - "wnli": ("sentence1", "sentence2"), -} - -logger = logging.getLogger(__name__) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - - Using `HfArgumentParser` we can turn this class - into argparse arguments to be able to specify them on - the command line. - """ - - task_name: Optional[str] = field( - default=None, - metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, - ) - dataset_name: Optional[str] = field( - default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - max_seq_length: int = field( - default=128, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} - ) - pad_to_max_length: bool = field( - default=True, - metadata={ - "help": ( - "Whether to pad all samples to `max_seq_length`. " - "If False, will pad the samples dynamically when batching to the maximum length in the batch." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." 
- ) - }, - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "A csv or a json file containing the training data."} - ) - validation_file: Optional[str] = field( - default=None, metadata={"help": "A csv or a json file containing the validation data."} - ) - test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) - - def __post_init__(self): - if self.task_name is not None: - self.task_name = self.task_name.lower() - if self.task_name not in task_to_keys.keys(): - raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) - elif self.dataset_name is not None: - pass - elif self.train_file is None or self.validation_file is None: - raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") - else: - train_extension = self.train_file.split(".")[-1] - assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." - validation_extension = self.validation_file.split(".")[-1] - assert ( - validation_extension == train_extension - ), "`validation_file` should have the same extension (csv or json) as `train_file`." - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - ignore_mismatched_sizes: bool = field( - default=False, - metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, - ) - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. 
- send_example_telemetry("run_glue", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - datasets.utils.logging.set_verbosity(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). - # - # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the - # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named - # label if at least two columns are provided. - # - # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this - # single column. You can easily tweak this behavior (see below) - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.task_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - "glue", - data_args.task_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - elif data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - raw_datasets = load_dataset( - data_args.dataset_name, - data_args.dataset_config_name, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - # Loading a dataset from your local files. - # CSV/JSON training and evaluation files are needed. - data_files = {"train": data_args.train_file, "validation": data_args.validation_file} - - # Get the test dataset: you can provide your own CSV/JSON test file (see below) - # when you use `do_predict` without specifying a GLUE benchmark task. 
- if training_args.do_predict: - if data_args.test_file is not None: - train_extension = data_args.train_file.split(".")[-1] - test_extension = data_args.test_file.split(".")[-1] - assert ( - test_extension == train_extension - ), "`test_file` should have the same extension (csv or json) as `train_file`." - data_files["test"] = data_args.test_file - else: - raise ValueError("Need either a GLUE task or a test file for `do_predict`.") - - for key in data_files.keys(): - logger.info(f"load a local file for {key}: {data_files[key]}") - - if data_args.train_file.endswith(".csv"): - # Loading a dataset from local csv files - raw_datasets = load_dataset( - "csv", - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - else: - # Loading a dataset from local json files - raw_datasets = load_dataset( - "json", - data_files=data_files, - cache_dir=model_args.cache_dir, - use_auth_token=True if model_args.use_auth_token else None, - ) - # See more about loading any type of standard or custom dataset at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Labels - if data_args.task_name is not None: - is_regression = data_args.task_name == "stsb" - if not is_regression: - label_list = raw_datasets["train"].features["label"].names - num_labels = len(label_list) - else: - num_labels = 1 - else: - # Trying to have good defaults here, don't hesitate to tweak to your needs. - is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] - if is_regression: - num_labels = 1 - else: - # A useful fast method: - # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique - label_list = raw_datasets["train"].unique("label") - label_list.sort() # Let's sort it for determinism - num_labels = len(label_list) - - # Load pretrained model and tokenizer - # - # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - num_labels=num_labels, - finetuning_task=data_args.task_name, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - tokenizer = AutoTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - model = AutoModelForSequenceClassification.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, - ) - - # Preprocessing the raw_datasets - if data_args.task_name is not None: - sentence1_key, sentence2_key = task_to_keys[data_args.task_name] - else: - # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
- non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] - if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: - sentence1_key, sentence2_key = "sentence1", "sentence2" - else: - if len(non_label_column_names) >= 2: - sentence1_key, sentence2_key = non_label_column_names[:2] - else: - sentence1_key, sentence2_key = non_label_column_names[0], None - - # Padding strategy - if data_args.pad_to_max_length: - padding = "max_length" - else: - # We will pad later, dynamically at batch creation, to the max sequence length in each batch - padding = False - - # Some models have set the order of the labels to use, so let's make sure we do use it. - label_to_id = None - if ( - model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id - and data_args.task_name is not None - and not is_regression - ): - # Some have all caps in their config, some don't. - label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} - if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): - label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} - else: - logger.warning( - "Your model seems to have been trained with labels, but they don't match the dataset: ", - f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." - "\nIgnoring the model labels as a result.", - ) - elif data_args.task_name is None and not is_regression: - label_to_id = {v: i for i, v in enumerate(label_list)} - - if label_to_id is not None: - model.config.label2id = label_to_id - model.config.id2label = {id: label for label, id in config.label2id.items()} - elif data_args.task_name is not None and not is_regression: - model.config.label2id = {l: i for i, l in enumerate(label_list)} - model.config.id2label = {id: label for label, id in config.label2id.items()} - - if data_args.max_seq_length > tokenizer.model_max_length: - logger.warning( - f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" - f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
- ) - max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) - - def preprocess_function(examples): - # Tokenize the texts - args = ( - (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) - ) - result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) - - # Map labels to IDs (not necessary for GLUE tasks) - if label_to_id is not None and "label" in examples: - result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] - return result - - with training_args.main_process_first(desc="dataset map pre-processing"): - raw_datasets = raw_datasets.map( - preprocess_function, - batched=True, - load_from_cache_file=not data_args.overwrite_cache, - desc="Running tokenizer on dataset", - ) - if training_args.do_train: - if "train" not in raw_datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = raw_datasets["train"] - if data_args.max_train_samples is not None: - max_train_samples = min(len(train_dataset), data_args.max_train_samples) - train_dataset = train_dataset.select(range(max_train_samples)) - - if training_args.do_eval: - if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] - if data_args.max_eval_samples is not None: - max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) - eval_dataset = eval_dataset.select(range(max_eval_samples)) - - if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: - if "test" not in raw_datasets and "test_matched" not in raw_datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"] - if data_args.max_predict_samples is not None: - max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) - predict_dataset = predict_dataset.select(range(max_predict_samples)) - - # Log a few random samples from the training set: - if training_args.do_train: - for index in random.sample(range(len(train_dataset)), 3): - logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") - - # Get the metric function - if data_args.task_name is not None: - metric = load_metric("glue", data_args.task_name) - else: - metric = load_metric("accuracy") - - # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a - # predictions and label_ids field) and has to return a dictionary string to float. - def compute_metrics(p: EvalPrediction): - preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions - preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) - if data_args.task_name is not None: - result = metric.compute(predictions=preds, references=p.label_ids) - if len(result) > 1: - result["combined_score"] = np.mean(list(result.values())).item() - return result - elif is_regression: - return {"mse": ((preds - p.label_ids) ** 2).mean().item()} - else: - return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} - - # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if - # we already did the padding. 
- if data_args.pad_to_max_length: - data_collator = default_data_collator - elif training_args.fp16: - data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) - else: - data_collator = None - - # Initialize our Trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - compute_metrics=compute_metrics, - tokenizer=tokenizer, - data_collator=data_collator, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.save_model() # Saves the tokenizer too for easy upload - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - logger.info("*** Evaluate ***") - - # Loop to handle MNLI double evaluation (matched, mis-matched) - tasks = [data_args.task_name] - eval_datasets = [eval_dataset] - if data_args.task_name == "mnli": - tasks.append("mnli-mm") - eval_datasets.append(raw_datasets["validation_mismatched"]) - combined = {} - - for eval_dataset, task in zip(eval_datasets, tasks): - metrics = trainer.evaluate(eval_dataset=eval_dataset) - - max_eval_samples = ( - data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - ) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - if task == "mnli-mm": - metrics = {k + "_mm": v for k, v in metrics.items()} - if task is not None and "mnli" in task: - combined.update(metrics) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics) - - if training_args.do_predict: - logger.info("*** Predict ***") - - # Loop to handle MNLI double evaluation (matched, mis-matched) - tasks = [data_args.task_name] - predict_datasets = [predict_dataset] - if data_args.task_name == "mnli": - tasks.append("mnli-mm") - predict_datasets.append(raw_datasets["test_mismatched"]) - - for predict_dataset, task in zip(predict_datasets, tasks): - # Removing the `label` columns because it contains -1 and Trainer won't like that. 
- predict_dataset = predict_dataset.remove_columns("label") - predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions - predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) - - output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") - if trainer.is_world_process_zero(): - with open(output_predict_file, "w") as writer: - logger.info(f"***** Predict results {task} *****") - writer.write("index\tprediction\n") - for index, item in enumerate(predictions): - if is_regression: - writer.write(f"{index}\t{item:3.3f}\n") - else: - item = label_list[item] - writer.write(f"{index}\t{item}\n") - - kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} - if data_args.task_name is not None: - kwargs["language"] = "en" - kwargs["dataset_tags"] = "glue" - kwargs["dataset_args"] = data_args.task_name - kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" - - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/neural_coder/examples/onnx/onnx_model.py b/neural_coder/examples/onnx/onnx_model.py deleted file mode 100644 index 8e2c7b2d928..00000000000 --- a/neural_coder/examples/onnx/onnx_model.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -import argparse - -import onnx -import yaml - -from pycocotools.coco import COCO -from pycocotools.mask import iou, encode -import numpy as np -from torchvision import transforms -from PIL import Image -from onnx import numpy_helper -import os -import onnxruntime - -logger = logging.getLogger(__name__) -logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', - datefmt = '%m/%d/%Y %H:%M:%S', - level = logging.WARN) -logger.info("Evaluating ONNXRuntime full precision accuracy and performance:") -parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter -) -parser.add_argument( - '--model_path', - type=str, - help="Pre-trained model on onnx file" -) -parser.add_argument( - '--label_path', - type=str, - help="Annotation file path" -) -parser.add_argument( - '--data_path', - type=str, - help="Path to val2017 of COCO" -) -parser.add_argument( - '--benchmark', - action='store_true', \ - default=False -) -parser.add_argument( - '--tune', - action='store_true', \ - default=False, - help="whether quantize the model" -) -parser.add_argument( - '--config', - type=str, - help="config yaml path" -) -parser.add_argument( - '--output_model', - type=str, - help="output model path" -) -parser.add_argument( - '--mode', - type=str, - help="benchmark mode of performance or accuracy" -) -args = parser.parse_args() - -# key = COCO id, value = Pascal VOC id -COCO_TO_VOC = { - 1: 15, # person - 2: 2, # bicycle - 3: 7, # car - 4: 14, # motorbike - 5: 1, # airplane - 6: 6, # bus - 7: 19, # train - 9: 4, # boat - 16: 3, # bird - 17: 8, # cat - 18: 12, # dog - 19: 13, # horse - 20: 17, # sheep - 21: 10, # cow - 44: 5, # bottle - 62: 9, # chair - 63: 18, # couch/sofa - 64: 16, # potted plant - 67: 11, # dining table - 72: 20, # tv -} -VOC_CAT_IDS = list(COCO_TO_VOC.keys()) -cocoGt = COCO(str(args.label_path)) - -preprocess = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), -]) - -class Dataset: - def __init__(self): - imgIds = self.getImgIdsUnion(cocoGt, VOC_CAT_IDS) - self.data = [] - for imgId in imgIds: - img_path = os.path.join(args.data_path, cocoGt.imgs[imgId]['file_name']) - if os.path.exists(img_path): - input_tensor = self.load_image(img_path) - - _, height, width = input_tensor.shape - output_tensor = np.zeros((21, height, width), dtype=np.uint8) - - annIds = cocoGt.getAnnIds(imgId, VOC_CAT_IDS) - for ann in cocoGt.loadAnns(annIds): - mask = cocoGt.annToMask(ann) - output_tensor[COCO_TO_VOC[ann['category_id']]] |= mask - - # Set everything not labeled to be background - output_tensor[0] = 1 - np.max(output_tensor, axis=0) - self.data.append((input_tensor, output_tensor)) - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.data[index] - - def getImgIdsUnion(self, gt, catIds): - """ - Returns all the images that have *any* of the categories in `catIds`, - unlike the built-in `gt.getImgIds` which returns all the images containing - *all* of the categories in `catIds`. 
- """ - imgIds = set() - for catId in catIds: - imgIds |= set(gt.catToImgs[catId]) - return list(imgIds) - - def load_image(self, img_path): - input_image = Image.open(img_path).convert('RGB') - input_tensor = preprocess(input_image) - input_tensor = input_tensor.detach().cpu().numpy() - return input_tensor - -def iou(model_tensor, target_tensor): - # Don't include the background when summing - model_tensor = model_tensor[:, 1:, :, :] - target_tensor = target_tensor[:, 1:, :, :] - - intersection = np.sum(np.logical_and(model_tensor, target_tensor)) - union = np.sum(np.logical_or(model_tensor, target_tensor)) - - if union == 0: - # Can only happen if nothing was there and nothing was predicted, - # which is a perfect score - return 1 - else: - return intersection / union - -def evaluate(model, dataloader): - totalIoU = 0 - sess = onnxruntime.InferenceSession(model.SerializeToString(), None) - idx = 1 - for input_tensor, target_tensor in dataloader: - input_tensor = input_tensor[np.newaxis, ...] - target_tensor = target_tensor[np.newaxis, ...] - model_tensor = sess.run(["out"], {"input": input_tensor})[0] - - batch_size, nclasses, height, width = model_tensor.shape - raw_labels = np.argmax(model_tensor, axis=1).astype(np.uint8) - - output_tensor = np.zeros((nclasses, batch_size, height, width), dtype=np.uint8) - for c in range(nclasses): - output_tensor[c][raw_labels==c] = 1 - - output_tensor = np.transpose(output_tensor, [1, 0, 2, 3]) - totalIoU += iou(output_tensor, target_tensor) - idx += 1 - return totalIoU / idx - -if __name__ == "__main__": - from neural_compressor.experimental import common - ds = Dataset() - dataloader = common.DataLoader(ds) - model = onnx.load(args.model_path) - def eval(model): - return evaluate(model, ds) - - if args.benchmark and args.mode == "accuracy": - results = eval(model) - print("Batch size = 1") - print("Accuracy: %.5f" % results) - - if args.benchmark and args.mode == "performance": - from neural_compressor.experimental import Benchmark, common - evaluator = Benchmark(args.config) - evaluator.model = common.Model(model) - evaluator.b_dataloader = common.DataLoader(ds) - evaluator(args.mode) diff --git a/neural_coder/examples/vision/alexnet.py b/neural_coder/examples/vision/alexnet.py deleted file mode 100644 index 10ac6d8b080..00000000000 --- a/neural_coder/examples/vision/alexnet.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import torch -import torchvision.models as models -model = models.alexnet(pretrained=True) -model.eval() -batch_size = 1 -input = torch.rand(batch_size, 3, 224, 224) -with torch.no_grad(): - model(input) diff --git a/neural_coder/examples/vision/main.py b/neural_coder/examples/vision/main.py deleted file mode 100644 index 3b9bfd65298..00000000000 --- a/neural_coder/examples/vision/main.py +++ /dev/null @@ -1,504 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# code source -# https://github.com/pytorch/examples/blob/main/imagenet/main.py - -import argparse -import os -import random -import shutil -import time -import warnings -from enum import Enum - -import torch -import torch.nn as nn -import torch.nn.parallel -import torch.backends.cudnn as cudnn -import torch.distributed as dist -import torch.optim -from torch.optim.lr_scheduler import StepLR -import torch.multiprocessing as mp -import torch.utils.data -import torch.utils.data.distributed -import torchvision.transforms as transforms -import torchvision.datasets as datasets -import torchvision.models as models -from torch.utils.data import Subset - -model_names = models.list_models(module=models) - -parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') -parser.add_argument('data', metavar='DIR', nargs='?', default='imagenet', - help='path to dataset (default: imagenet)') -parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', - choices=model_names, - help='model architecture: ' + - ' | '.join(model_names) + - ' (default: resnet18)') -parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', - help='number of data loading workers (default: 4)') -parser.add_argument('--epochs', default=90, type=int, metavar='N', - help='number of total epochs to run') -parser.add_argument('--start-epoch', default=0, type=int, metavar='N', - help='manual epoch number (useful on restarts)') -parser.add_argument('-b', '--batch-size', default=256, type=int, - metavar='N', - help='mini-batch size (default: 256), this is the total ' - 'batch size of all GPUs on the current node when ' - 'using Data Parallel or Distributed Data Parallel') -parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, - metavar='LR', help='initial learning rate', dest='lr') -parser.add_argument('--momentum', default=0.9, type=float, metavar='M', - help='momentum') -parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, - metavar='W', help='weight decay (default: 1e-4)', - dest='weight_decay') -parser.add_argument('-p', '--print-freq', default=10, type=int, - metavar='N', help='print frequency (default: 10)') -parser.add_argument('--resume', default='', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') -parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', - help='evaluate model on validation set') -parser.add_argument('--pretrained', dest='pretrained', action='store_true', - help='use pre-trained 
model') -parser.add_argument('--world-size', default=-1, type=int, - help='number of nodes for distributed training') -parser.add_argument('--rank', default=-1, type=int, - help='node rank for distributed training') -parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, - help='url used to set up distributed training') -parser.add_argument('--dist-backend', default='nccl', type=str, - help='distributed backend') -parser.add_argument('--seed', default=None, type=int, - help='seed for initializing training. ') -parser.add_argument('--gpu', default=None, type=int, - help='GPU id to use.') -parser.add_argument('--multiprocessing-distributed', action='store_true', - help='Use multi-processing distributed training to launch ' - 'N processes per node, which has N GPUs. This is the ' - 'fastest way to use PyTorch for either single node or ' - 'multi node data parallel training') -parser.add_argument('--dummy', action='store_true', help="use fake data to benchmark") - -best_acc1 = 0 - - -def main(): - args = parser.parse_args() - - if args.seed is not None: - random.seed(args.seed) - torch.manual_seed(args.seed) - cudnn.deterministic = True - warnings.warn('You have chosen to seed training. ' - 'This will turn on the CUDNN deterministic setting, ' - 'which can slow down your training considerably! ' - 'You may see unexpected behavior when restarting ' - 'from checkpoints.') - - if args.gpu is not None: - warnings.warn('You have chosen a specific GPU. This will completely ' - 'disable data parallelism.') - - if args.dist_url == "env://" and args.world_size == -1: - args.world_size = int(os.environ["WORLD_SIZE"]) - - args.distributed = args.world_size > 1 or args.multiprocessing_distributed - - ngpus_per_node = torch.cuda.device_count() - if args.multiprocessing_distributed: - # Since we have ngpus_per_node processes per node, the total world_size - # needs to be adjusted accordingly - args.world_size = ngpus_per_node * args.world_size - # Use torch.multiprocessing.spawn to launch distributed processes: the - # main_worker process function - mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) - else: - # Simply call main_worker function - main_worker(args.gpu, ngpus_per_node, args) - - -def main_worker(gpu, ngpus_per_node, args): - global best_acc1 - args.gpu = gpu - - if args.gpu is not None: - print("Use GPU: {} for training".format(args.gpu)) - - if args.distributed: - if args.dist_url == "env://" and args.rank == -1: - args.rank = int(os.environ["RANK"]) - if args.multiprocessing_distributed: - # For multiprocessing distributed training, rank needs to be the - # global rank among all the processes - args.rank = args.rank * ngpus_per_node + gpu - dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - # create model - if args.pretrained: - print("=> using pre-trained model '{}'".format(args.arch)) - model = models.__dict__[args.arch](pretrained=True) - else: - print("=> creating model '{}'".format(args.arch)) - model = models.__dict__[args.arch]() - - if not torch.cuda.is_available(): - print('using CPU, this will be slow') - elif args.distributed: - # For multiprocessing distributed, DistributedDataParallel constructor - # should always set the single device scope, otherwise, - # DistributedDataParallel will use all available devices. 
- if args.gpu is not None: - torch.cuda.set_device(args.gpu) - model.cuda(args.gpu) - # When using a single GPU per process and per - # DistributedDataParallel, we need to divide the batch size - # ourselves based on the total number of GPUs of the current node. - args.batch_size = int(args.batch_size / ngpus_per_node) - args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node) - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) - else: - model.cuda() - # DistributedDataParallel will divide and allocate batch_size to all - # available GPUs if device_ids are not set - model = torch.nn.parallel.DistributedDataParallel(model) - elif args.gpu is not None: - torch.cuda.set_device(args.gpu) - model = model.cuda(args.gpu) - else: - # DataParallel will divide and allocate batch_size to all available GPUs - if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): - model.features = torch.nn.DataParallel(model.features) - model.cuda() - else: - model = torch.nn.DataParallel(model).cuda() - - # define loss function (criterion), optimizer, and learning rate scheduler - criterion = nn.CrossEntropyLoss().cuda(args.gpu) - - optimizer = torch.optim.SGD(model.parameters(), args.lr, - momentum=args.momentum, - weight_decay=args.weight_decay) - - """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" - scheduler = StepLR(optimizer, step_size=30, gamma=0.1) - - # optionally resume from a checkpoint - if args.resume: - if os.path.isfile(args.resume): - print("=> loading checkpoint '{}'".format(args.resume)) - if args.gpu is None: - checkpoint = torch.load(args.resume) - else: - # Map model to be loaded to specified single gpu. - loc = 'cuda:{}'.format(args.gpu) - checkpoint = torch.load(args.resume, map_location=loc) - args.start_epoch = checkpoint['epoch'] - best_acc1 = checkpoint['best_acc1'] - if args.gpu is not None: - # best_acc1 may be from a checkpoint from a different GPU - best_acc1 = best_acc1.to(args.gpu) - model.load_state_dict(checkpoint['state_dict']) - optimizer.load_state_dict(checkpoint['optimizer']) - scheduler.load_state_dict(checkpoint['scheduler']) - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.resume, checkpoint['epoch'])) - else: - print("=> no checkpoint found at '{}'".format(args.resume)) - - cudnn.benchmark = True - - # Data loading code - if args.dummy: - print("=> Dummy data is used!") - train_dataset = datasets.FakeData(1281167, (3, 224, 224), 1000, transforms.ToTensor()) - val_dataset = datasets.FakeData(50000, (3, 224, 224), 1000, transforms.ToTensor()) - else: - traindir = os.path.join(args.data, 'train') - valdir = os.path.join(args.data, 'val') - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]) - - train_dataset = datasets.ImageFolder( - traindir, - transforms.Compose([ - transforms.RandomResizedCrop(224), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - normalize, - ])) - - val_dataset = datasets.ImageFolder( - valdir, - transforms.Compose([ - transforms.Resize(256), - transforms.CenterCrop(224), - transforms.ToTensor(), - normalize, - ])) - - if args.distributed: - train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) - val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, drop_last=True) - else: - train_sampler = None - val_sampler = None - - train_loader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), - 
num_workers=args.workers, pin_memory=True, sampler=train_sampler) - - val_loader = torch.utils.data.DataLoader( - val_dataset, batch_size=args.batch_size, shuffle=False, - num_workers=args.workers, pin_memory=True, sampler=val_sampler) - - if args.evaluate: - validate(val_loader, model, criterion, args) - return - - for epoch in range(args.start_epoch, args.epochs): - if args.distributed: - train_sampler.set_epoch(epoch) - - # train for one epoch - train(train_loader, model, criterion, optimizer, epoch, args) - - # evaluate on validation set - acc1 = validate(val_loader, model, criterion, args) - - scheduler.step() - - - # remember best acc@1 and save checkpoint - is_best = acc1 > best_acc1 - best_acc1 = max(acc1, best_acc1) - - if not args.multiprocessing_distributed or (args.multiprocessing_distributed - and args.rank % ngpus_per_node == 0): - save_checkpoint({ - 'epoch': epoch + 1, - 'arch': args.arch, - 'state_dict': model.state_dict(), - 'best_acc1': best_acc1, - 'optimizer' : optimizer.state_dict(), - 'scheduler' : scheduler.state_dict() - }, is_best) - - -def train(train_loader, model, criterion, optimizer, epoch, args): - batch_time = AverageMeter('Time', ':6.3f') - data_time = AverageMeter('Data', ':6.3f') - losses = AverageMeter('Loss', ':.4e') - top1 = AverageMeter('Acc@1', ':6.2f') - top5 = AverageMeter('Acc@5', ':6.2f') - progress = ProgressMeter( - len(train_loader), - [batch_time, data_time, losses, top1, top5], - prefix="Epoch: [{}]".format(epoch)) - - # switch to train mode - model.train() - - end = time.time() - for i, (images, target) in enumerate(train_loader): - # measure data loading time - data_time.update(time.time() - end) - - if args.gpu is not None: - images = images.cuda(args.gpu, non_blocking=True) - if torch.cuda.is_available(): - target = target.cuda(args.gpu, non_blocking=True) - - # compute output - output = model(images) - loss = criterion(output, target) - - # measure accuracy and record loss - acc1, acc5 = accuracy(output, target, topk=(1, 5)) - losses.update(loss.item(), images.size(0)) - top1.update(acc1[0], images.size(0)) - top5.update(acc5[0], images.size(0)) - - # compute gradient and do SGD step - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # measure elapsed time - batch_time.update(time.time() - end) - end = time.time() - - if i % args.print_freq == 0: - progress.display(i + 1) - - -def validate(val_loader, model, criterion, args): - - def run_validate(loader, base_progress=0): - with torch.no_grad(): - end = time.time() - for i, (images, target) in enumerate(loader): - i = base_progress + i - if args.gpu is not None: - images = images.cuda(args.gpu, non_blocking=True) - if torch.cuda.is_available(): - target = target.cuda(args.gpu, non_blocking=True) - - # compute output - output = model(images) - loss = criterion(output, target) - - # measure accuracy and record loss - acc1, acc5 = accuracy(output, target, topk=(1, 5)) - losses.update(loss.item(), images.size(0)) - top1.update(acc1[0], images.size(0)) - top5.update(acc5[0], images.size(0)) - - # measure elapsed time - batch_time.update(time.time() - end) - end = time.time() - - if i % args.print_freq == 0: - progress.display(i + 1) - - batch_time = AverageMeter('Time', ':6.3f', Summary.NONE) - losses = AverageMeter('Loss', ':.4e', Summary.NONE) - top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE) - top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE) - progress = ProgressMeter( - len(val_loader) + (args.distributed and (len(val_loader.sampler) * args.world_size < 
len(val_loader.dataset))), - [batch_time, losses, top1, top5], - prefix='Test: ') - - # switch to evaluate mode - model.eval() - - run_validate(val_loader) - if args.distributed: - top1.all_reduce() - top5.all_reduce() - - if args.distributed and (len(val_loader.sampler) * args.world_size < len(val_loader.dataset)): - aux_val_dataset = Subset(val_loader.dataset, - range(len(val_loader.sampler) * args.world_size, len(val_loader.dataset))) - aux_val_loader = torch.utils.data.DataLoader( - aux_val_dataset, batch_size=args.batch_size, shuffle=False, - num_workers=args.workers, pin_memory=True) - run_validate(aux_val_loader, len(val_loader)) - - progress.display_summary() - - return top1.avg - - -def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): - torch.save(state, filename) - if is_best: - shutil.copyfile(filename, 'model_best.pth.tar') - -class Summary(Enum): - NONE = 0 - AVERAGE = 1 - SUM = 2 - COUNT = 3 - -class AverageMeter(object): - """Computes and stores the average and current value""" - def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE): - self.name = name - self.fmt = fmt - self.summary_type = summary_type - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - self.avg = self.sum / self.count - - def all_reduce(self): - total = torch.FloatTensor([self.sum, self.count]) - dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False) - self.sum, self.count = total.tolist() - self.avg = self.sum / self.count - - def __str__(self): - fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' - return fmtstr.format(**self.__dict__) - - def summary(self): - fmtstr = '' - if self.summary_type is Summary.NONE: - fmtstr = '' - elif self.summary_type is Summary.AVERAGE: - fmtstr = '{name} {avg:.3f}' - elif self.summary_type is Summary.SUM: - fmtstr = '{name} {sum:.3f}' - elif self.summary_type is Summary.COUNT: - fmtstr = '{name} {count:.3f}' - else: - raise ValueError('invalid summary type %r' % self.summary_type) - - return fmtstr.format(**self.__dict__) - - -class ProgressMeter(object): - def __init__(self, num_batches, meters, prefix=""): - self.batch_fmtstr = self._get_batch_fmtstr(num_batches) - self.meters = meters - self.prefix = prefix - - def display(self, batch): - entries = [self.prefix + self.batch_fmtstr.format(batch)] - entries += [str(meter) for meter in self.meters] - print('\t'.join(entries)) - - def display_summary(self): - entries = [" *"] - entries += [meter.summary() for meter in self.meters] - print(' '.join(entries)) - - def _get_batch_fmtstr(self, num_batches): - num_digits = len(str(num_batches // 1)) - fmt = '{:' + str(num_digits) + 'd}' - return '[' + fmt + '/' + fmt.format(num_batches) + ']' - -def accuracy(output, target, topk=(1,)): - """Computes the accuracy over the k top predictions for the specified values of k""" - with torch.no_grad(): - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -if __name__ == '__main__': - main() diff --git a/neural_coder/examples/vision/resnet18.py b/neural_coder/examples/vision/resnet18.py deleted file mode 100644 index a7fadf9c70b..00000000000 --- 
a/neural_coder/examples/vision/resnet18.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import torch -import torchvision.models as models -model = models.resnet18(pretrained=True) -model.eval() -batch_size = 1 -input = torch.rand(batch_size, 3, 224, 224) -with torch.no_grad(): - model(input) diff --git a/neural_coder/examples/vision/resnet50.py b/neural_coder/examples/vision/resnet50.py deleted file mode 100644 index c7091e2e7bc..00000000000 --- a/neural_coder/examples/vision/resnet50.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import torch -import torchvision.models as models -model = models.resnet50(pretrained=True) -model.eval() -batch_size = 1 -input = torch.rand(batch_size, 3, 224, 224) -with torch.no_grad(): - model(input) diff --git a/neural_coder/extensions/neural_compressor_ext_lab/.eslintignore b/neural_coder/extensions/neural_compressor_ext_lab/.eslintignore deleted file mode 100644 index fffa32fdf63..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/.eslintignore +++ /dev/null @@ -1,8 +0,0 @@ -node_modules -dist -coverage -**/*.d.ts -tests - -**/__tests__ -ui-tests diff --git a/neural_coder/extensions/neural_compressor_ext_lab/.eslintrc.js b/neural_coder/extensions/neural_compressor_ext_lab/.eslintrc.js deleted file mode 100644 index c64b3721828..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/.eslintrc.js +++ /dev/null @@ -1,44 +0,0 @@ -module.exports = { - extends: [ - 'eslint:recommended', - 'plugin:@typescript-eslint/eslint-recommended', - 'plugin:@typescript-eslint/recommended', - 'plugin:prettier/recommended' - ], - parser: '@typescript-eslint/parser', - parserOptions: { - project: 'tsconfig.json', - sourceType: 'module' - }, - plugins: ['@typescript-eslint'], - rules: { - '@typescript-eslint/naming-convention': [ - 'error', - { - selector: 'interface', - format: ['PascalCase'], - custom: { - regex: '^I[A-Z]', - match: true - } - } - ], - '@typescript-eslint/no-unused-vars': ['warn', { args: 'none' }], - '@typescript-eslint/no-explicit-any': 'off', - '@typescript-eslint/no-namespace': 'off', - '@typescript-eslint/no-use-before-define': 'off', - '@typescript-eslint/quotes': [ - 'error', - 'single', - { avoidEscape: true, allowTemplateLiterals: false } - ], - node: { - fs: 'empty', - net:'empty', - tls:'empty', -}, - curly: ['error', 'all'], - eqeqeq: 'error', - 
'prefer-arrow-callback': 'error' - } -}; diff --git a/neural_coder/extensions/neural_compressor_ext_lab/.prettierignore b/neural_coder/extensions/neural_compressor_ext_lab/.prettierignore deleted file mode 100644 index 0de58a6f50b..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/.prettierignore +++ /dev/null @@ -1,5 +0,0 @@ -node_modules -**/node_modules -**/lib -**/package.json -neural_compressor_ext_lab diff --git a/neural_coder/extensions/neural_compressor_ext_lab/.prettierrc b/neural_coder/extensions/neural_compressor_ext_lab/.prettierrc deleted file mode 100644 index d0824a69c14..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/.prettierrc +++ /dev/null @@ -1,6 +0,0 @@ -{ - "singleQuote": true, - "trailingComma": "none", - "arrowParens": "avoid", - "endOfLine": "auto" -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/.stylelintrc b/neural_coder/extensions/neural_compressor_ext_lab/.stylelintrc deleted file mode 100644 index 0e1ff30327c..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/.stylelintrc +++ /dev/null @@ -1,12 +0,0 @@ -{ - "extends": [ - "stylelint-config-recommended", - "stylelint-config-standard", - "stylelint-prettier/recommended" - ], - "rules": { - "property-no-vendor-prefix": null, - "selector-no-vendor-prefix": null, - "value-no-vendor-prefix": null - } -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/CHANGELOG.md b/neural_coder/extensions/neural_compressor_ext_lab/CHANGELOG.md deleted file mode 100644 index 2d352af421a..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -# Changelog - - - - diff --git a/neural_coder/extensions/neural_compressor_ext_lab/DEVELOP.md b/neural_coder/extensions/neural_compressor_ext_lab/DEVELOP.md deleted file mode 100644 index 7141373e783..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/DEVELOP.md +++ /dev/null @@ -1,75 +0,0 @@ -# neural_compressor_ext_lab - -A JupyterLab extension. - -## Requirements - -- JupyterLab >= 3.0 - -## Install - -To install the extension, execute: - -```bash -pip install neural_compressor_ext_lab -``` - -## Uninstall - -To remove the extension, execute: - -```bash -pip uninstall neural_compressor_ext_lab -``` - -## Contributing - -### Development install - -Note: You will need NodeJS to build the extension package. - -The `jlpm` command is JupyterLab's pinned version of -[yarn](https://yarnpkg.com/) that is installed with JupyterLab. You may use -`yarn` or `npm` in lieu of `jlpm` below. - -```bash -# Clone the repo to your local environment -# Change directory to the neural_compressor_ext_lab directory -# Install package in development mode -pip install -e . -# Link your development version of the extension with JupyterLab -jupyter labextension develop . --overwrite -# Rebuild extension Typescript source after making changes -jlpm build -``` - -You can watch the source directory and run JupyterLab at the same time in different terminals to watch for changes in the extension's source and automatically rebuild the extension. - -```bash -# Watch the source directory in one terminal, automatically rebuilding when needed -jlpm watch -# Run JupyterLab in another terminal -jupyter lab -``` - -With the watch command running, every saved change will immediately be built locally and available in your running JupyterLab. Refresh JupyterLab to load the change in your browser (you may need to wait several seconds for the extension to be rebuilt). 
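The `binder/postBuild` helper removed further down in this diff scripts these same development-install steps from Python. A minimal sketch of that pattern (run from the extension's source checkout, assuming `pip` and `jupyter` are available in the active environment):

```python
import subprocess
import sys


def run(*args: str) -> None:
    """Echo a command and fail fast on a non-zero exit code."""
    print(" ".join(args))
    subprocess.check_call(args)


# Editable install of the Python package.
run(sys.executable, "-m", "pip", "install", "-e", ".")
# Link the in-development labextension into JupyterLab.
run(sys.executable, "-m", "jupyter", "labextension", "develop", "--overwrite", ".")
# Confirm the extension is picked up.
run("jupyter", "labextension", "list")
```

After source changes, `jlpm build` (or `jlpm watch`) still has to rebuild the TypeScript sources as described above.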
- -By default, the `jlpm build` command generates the source maps for this extension to make it easier to debug using the browser dev tools. To also generate source maps for the JupyterLab core extensions, you can run the following command: - -```bash -jupyter lab build --minimize=False -``` - -### Development uninstall - -```bash -pip uninstall neural_compressor_ext_lab -``` - -In development mode, you will also need to remove the symlink created by `jupyter labextension develop` -command. To find its location, you can run `jupyter labextension list` to figure out where the `labextensions` -folder is located. Then you can remove the symlink named `neural_compressor_ext_lab` within that folder. - -### Packaging the extension - -See [RELEASE](RELEASE.md) diff --git a/neural_coder/extensions/neural_compressor_ext_lab/LICENSE b/neural_coder/extensions/neural_compressor_ext_lab/LICENSE deleted file mode 100644 index d3f814da892..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2022, neural_coder -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
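For context before the extension sources below are removed: the deleted `deepcoder.js` drives Intel Neural Compressor through the `neural_coder.enable` API, writing the notebook's code cells to a temporary file and then either patching that file in place or benchmarking it. A rough sketch of the calls the extension generates (the file path and benchmark arguments here are illustrative):

```python
from neural_coder import enable

# Patch a script in place with one optimization from the extension's
# drop-down list (INT8 static / INT8 dynamic / BF16).
enable(code="tmp.py", features=["pytorch_inc_static_quant_fx"], overwrite=True)

# "Auto Enable & Benchmark" mode: enable a feature, run the script as a
# benchmark, and let the caller read the throughput out of bench.log.
performance, mode, path = enable(
    code="tmp.py",
    features=["pytorch_inc_bf16"],
    run_bench=True,
    args="",  # extra command-line arguments for the user's script, if any
)
```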
diff --git a/neural_coder/extensions/neural_compressor_ext_lab/MANIFEST.in b/neural_coder/extensions/neural_compressor_ext_lab/MANIFEST.in deleted file mode 100644 index 99115494fae..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/MANIFEST.in +++ /dev/null @@ -1,24 +0,0 @@ -include LICENSE -include *.md -include pyproject.toml - -include package.json -include install.json -include ts*.json -include yarn.lock - -graft neural_compressor_ext_lab/labextension - -# Javascript files -graft src -graft style -prune **/node_modules -prune lib -prune binder - -# Patterns to exclude from any directory -global-exclude *~ -global-exclude *.pyc -global-exclude *.pyo -global-exclude .git -global-exclude .ipynb_checkpoints diff --git a/neural_coder/extensions/neural_compressor_ext_lab/README.md b/neural_coder/extensions/neural_compressor_ext_lab/README.md deleted file mode 100644 index edd9e1e53a6..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/README.md +++ /dev/null @@ -1,45 +0,0 @@ -Intel® Neural Compressor as JupyterLab Extension -=========================== -A JupyterLab Extension library supporting Neural Coder, a novel feature powered by Intel® Neural Compressor providing automatic quantization to further simplify computing performance optimizations of Deep Learning models. - -## Installation -**By Extension Manager in JupyterLab (Recommended)** - -Search for ```jupyter-lab-neural-compressor``` in the Extension Manager in JupyterLab. - -**By Linux Terminal** -```bash -npm i jupyter-lab-neural-compressor -jupyter labextension install jupyter-lab-neural-compressor -``` - -## Getting Started! - -As shown in the drop-down list, the supported features include "INT8 (Static Quantization)", "INT8 (Dynamic Quantization)", "BF16", and "Auto Enable & Benchmark". Each of the first three options enables a specific quantization feature into your Deep Learning scripts. The last option automatically enables all quantization features on a Deep Learning script and automatically evaluates the best performance on the model. It is a code-free solution that can help users enable quantization algorithms on a Deep Learning model with no manual coding needed. - -Architecture - -### Auto-enable a feature -Click the run button on the left side of the drop-down list to start. After finishing, you can see the code changes for the specific optimization enabling as shown in the figure below: - -Architecture - -### Or let us help you auto-select the best feature -The last option automatically enables each quantization feature on your Deep Learning script and automatically evaluates for the best performance among all features on your Deep Learning model. Since it will automatically run the Python script for benchmark, it requires you to enter additional parameters needed to run your Python script. 
If there is no additional parameter needed, you can just leave it blank: - -Architecture - -In the new cell box appeared below your Code cell boxes, you can see the execution progress, and at the end you can see which one turns out to be the best optimization and how much performance gain can it bring to your Deep Learning model: - -Architecture - -When it is finished, you can also see that the code changes for the best optimization are automatically enabled into your script: - -Architecture - -## Pre-requisites -```bash -apt-get update && apt-get install bc numactl -conda install mkl mkl-include jemalloc -pip3 install neural-compressor opencv-python-headless -``` \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/RELEASE.md b/neural_coder/extensions/neural_compressor_ext_lab/RELEASE.md deleted file mode 100644 index bd51b356f28..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/RELEASE.md +++ /dev/null @@ -1,61 +0,0 @@ -# Making a new release of neural_compressor_ext_lab - -The extension can be published to `PyPI` and `npm` manually or using the [Jupyter Releaser](https://github.com/jupyter-server/jupyter_releaser). - -## Manual release - -### Python package - -This extension can be distributed as Python -packages. All of the Python -packaging instructions in the `pyproject.toml` file to wrap your extension in a -Python package. Before generating a package, we first need to install `build`. - -```bash -pip install build twine -``` - -To create a Python source package (`.tar.gz`) and the binary package (`.whl`) in the `dist/` directory, do: - -```bash -python -m build -``` - -> `python setup.py sdist bdist_wheel` is deprecated and will not work for this package. - -Then to upload the package to PyPI, do: - -```bash -twine upload dist/* -``` - -### NPM package - -To publish the frontend part of the extension as a NPM package, do: - -```bash -npm login -npm publish --access public -``` - -## Automated releases with the Jupyter Releaser - -The extension repository should already be compatible with the Jupyter Releaser. - -Check out the [workflow documentation](https://github.com/jupyter-server/jupyter_releaser#typical-workflow) for more information. - -Here is a summary of the steps to cut a new release: - -- Fork the [`jupyter-releaser` repo](https://github.com/jupyter-server/jupyter_releaser) -- Add `ADMIN_GITHUB_TOKEN`, `PYPI_TOKEN` and `NPM_TOKEN` to the Github Secrets in the fork -- Go to the Actions panel -- Run the "Draft Changelog" workflow -- Merge the Changelog PR -- Run the "Draft Release" workflow -- Run the "Publish Release" workflow - -## Publishing to `conda-forge` - -If the package is not on conda forge yet, check the documentation to learn how to add it: https://conda-forge.org/docs/maintainer/adding_pkgs.html - -Otherwise a bot should pick up the new version publish to PyPI, and open a new PR on the feedstock repository automatically. diff --git a/neural_coder/extensions/neural_compressor_ext_lab/binder/environment.yml b/neural_coder/extensions/neural_compressor_ext_lab/binder/environment.yml deleted file mode 100644 index 23bed128c24..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/binder/environment.yml +++ /dev/null @@ -1,21 +0,0 @@ -# a mybinder.org-ready environment for demoing neural_compressor_ext_lab -# this environment may also be used locally on Linux/MacOS/Windows, e.g. 
-# -# conda env update --file binder/environment.yml -# conda activate neural-compressor-ext-lab-demo -# -name: neural-compressor-ext-lab-demo - -channels: - - conda-forge - -dependencies: - # runtime dependencies - - python >=3.8,<3.9.0a0 - - jupyterlab >=3,<4.0.0a0 - # labextension build dependencies - - nodejs >=14,<15 - - pip - - wheel - # additional packages for demos - # - ipywidgets diff --git a/neural_coder/extensions/neural_compressor_ext_lab/binder/postBuild b/neural_coder/extensions/neural_compressor_ext_lab/binder/postBuild deleted file mode 100644 index 95eabd91874..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/binder/postBuild +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -""" perform a development install of neural_compressor_ext_lab - - On Binder, this will run _after_ the environment has been fully created from - the environment.yml in this directory. - - This script should also run locally on Linux/MacOS/Windows: - - python3 binder/postBuild -""" -import subprocess -import sys -from pathlib import Path - - -ROOT = Path.cwd() - -def _(*args, **kwargs): - """ Run a command, echoing the args - - fails hard if something goes wrong - """ - print("\n\t", " ".join(args), "\n") - return_code = subprocess.call(args, **kwargs) - if return_code != 0: - print("\nERROR", return_code, " ".join(args)) - sys.exit(return_code) - -# verify the environment is self-consistent before even starting -_(sys.executable, "-m", "pip", "check") - -# install the labextension -_(sys.executable, "-m", "pip", "install", "-e", ".") -_(sys.executable, "-m", "jupyter", "labextension", "develop", "--overwrite", ".") - -# verify the environment the extension didn't break anything -_(sys.executable, "-m", "pip", "check") - -# list the extensions -_("jupyter", "server", "extension", "list") - -# initially list installed extensions to determine if there are any surprises -_("jupyter", "labextension", "list") - - -print("JupyterLab with neural_compressor_ext_lab is ready to run with:\n") -print("\tjupyter lab\n") diff --git a/neural_coder/extensions/neural_compressor_ext_lab/install.json b/neural_coder/extensions/neural_compressor_ext_lab/install.json deleted file mode 100644 index 24f9a58e094..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/install.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "packageManager": "python", - "packageName": "neural_compressor_ext_lab", - "uninstallInstructions": "Use your Python package manager (pip, conda, etc.) 
to uninstall the package neural_compressor_ext_lab" -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/constants.d.ts b/neural_coder/extensions/neural_compressor_ext_lab/lib/constants.d.ts deleted file mode 100644 index 071e86ae2c1..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/constants.d.ts +++ /dev/null @@ -1,11 +0,0 @@ -export declare namespace Constants { - const SHORT_PLUGIN_NAME = "neural_compressor_ext_lab"; - const WORK_PATH = "neural_coder_workspace/"; - const ICON_FORMAT_ALL_SVG = ""; - const ICON_RUN = ""; - const SVG = ""; - const LONG_PLUGIN_NAME: string; - const SETTINGS_SECTION: string; - const COMMAND_SECTION_NAME = "Jupyterlab Code Optimizer"; - const PLUGIN_VERSION = "0.1.0"; -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/constants.js b/neural_coder/extensions/neural_compressor_ext_lab/lib/constants.js deleted file mode 100644 index 13acd1a7ad3..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/constants.js +++ /dev/null @@ -1,12 +0,0 @@ -export var Constants; -(function (Constants) { - Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab'; - Constants.WORK_PATH = "neural_coder_workspace/"; - Constants.ICON_FORMAT_ALL_SVG = ''; - Constants.ICON_RUN = ''; - Constants.SVG = ''; - Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`; - Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`; - Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer'; - Constants.PLUGIN_VERSION = '0.1.0'; -})(Constants || (Constants = {})); diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/deepcoder.d.ts b/neural_coder/extensions/neural_compressor_ext_lab/lib/deepcoder.d.ts deleted file mode 100644 index dcf8ddfba2a..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/deepcoder.d.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { Cell, CodeCell } from '@jupyterlab/cells'; -import { ToolbarButton } from '@jupyterlab/apputils'; -import { Widget } from '@lumino/widgets'; -import { INotebookTracker, NotebookPanel, Notebook } from '@jupyterlab/notebook'; -declare class JupyterlabCodeOptimizer { - protected working: boolean; - protected panel: NotebookPanel; - private tmp_path; - log_path: string; - tmp_log_path: string; - rand: number; - markdown: Cell | undefined; - cells: CodeCell[]; - constructor(panel: NotebookPanel); - optimizeCode(code: string[], formatter: string, name: string, next: string, options: string | undefined, notebook: boolean, panel: NotebookPanel, cell: CodeCell, run?: ToolbarButton | undefined): Promise; -} -export declare class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer { - protected notebookname: string; - protected notebookTracker: INotebookTracker; - constructor(notebookTracker: INotebookTracker, panel: NotebookPanel); - optimizeAction(config: any, formatter?: string): Promise; - optimizeAllCodeCells(config?: string, formatter?: string, notebook?: Notebook, run?: ToolbarButton): Promise; - private getCodeCells; - private optimizeCells; - applicable(formatter: string, currentWidget: Widget): boolean | null; -} -export {}; diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/deepcoder.js b/neural_coder/extensions/neural_compressor_ext_lab/lib/deepcoder.js deleted file mode 100644 index 744d7a7a8e3..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/deepcoder.js +++ /dev/null @@ -1,295 +0,0 @@ -import { NotebookActions } from '@jupyterlab/notebook'; -import 
NotebookUtilities from "./utils"; -import { Constants } from './constants'; -class JupyterlabCodeOptimizer { - constructor(panel) { - this.working = false; - this.panel = panel; - this.tmp_path = "tmp.py"; - this.rand = NotebookUtilities.GetRandomNum(0, 200); - this.log_path = Constants.WORK_PATH + "NeuralCoder" + this.rand + ".log"; - this.tmp_log_path = Constants.WORK_PATH + "NeuralCoder_tmp" + ".log"; - this.cells = []; - } - async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) { - let codes = []; - code.forEach(function (value) { - value = value.replace(/('\\n')/g, '^^^'); - value = value.replace(/\\n"/g, '###'); - value = value.replace(/\\n'/g, '###'); - value = value.replace(/"\\n/g, '@@'); - value = value.replace(/'\\n/g, '@@'); - value = value.replace(/\n/g, '\\n'); - value = value.replace(/"/g, '+++'); - value = value.replace(/,/g, '$'); - codes.push(value); - }); - let gen_code = `code = "${codes}"\ncodes = code.split(',')\nwith open( '${this.tmp_path}', 'w+' ) as f:\n for i in range(0,len(codes)):\n f.write('# this is the beginning of a single code snippet\\n')\n code_list = codes[i].replace('$',',').replace('+++','\"').split('\\n')\n for line in code_list:\n if('split(^^^)' in line):\n line=line.replace('split(^^^)', 'split(\\'\\\\n\\')')\n if('###' in line):\n line=line.replace('###', '\\\\n\"')\n if('@@' in line):\n line=line.replace('@@', '\"\\\\n')\n f.write(line+'\\n')`; - const expr = { code_list: `code_list` }; - NotebookUtilities.sendKernelRequestFromNotebook(panel, gen_code, expr, false); - if (options === 'normal') { - let runcode = `from neural_coder import enable\nenable(code="${this.tmp_path}",features=["${formatter}"], overwrite=True)`; - let expr = { sum: ` ` }; - NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false); - let run_code1 = `with open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let expr1 = { optimizedCode: "optimized_code" }; - let result2 = NotebookUtilities.sendKernelRequestFromNotebook(panel, run_code1, expr1, false); - result2.then(value => { - var _a, _b, _c, _d; - let optimizedTexts = Object.values(value.optimizedCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - const run_svg = document.createElement("svg"); - run_svg.innerHTML = Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? 
void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg); - } - }); - } - else { - if (formatter === '') { - if (this.markdown) { - this.markdown.model.value.text += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \n"; - } - // cell.outputArea.node.innerText += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\n" - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n")`; - let expr1 = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}",features=[], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr1 = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode2 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr2 = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - }); - } - else { - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}", features=["${formatter}"], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (next !== '') { - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode2 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr2 = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - } - let runcode3 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr3 = { path: "" }; - let res_tmp = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - res_tmp.then(value => { - if (formatter === 'pytorch_inc_bf16') { - let read_log = `import re\nwith open("${this.tmp_log_path}", 'r') as f:\n logs = f.readlines()\n fps_list=[]\n for log_line in logs[-4:]:\n pat = re.compile(r\'\\d+\\.?\\d+')\n fps = re.findall(pat,log_line)[-1]\n fps_list.append(float(fps))\nmaxi = max(fps_list)\nindex = fps_list.index(maxi)\nboost = round(maxi/fps_list[0],1)\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\nbest_feature = features[index]\nbest_name = feature_name[index]\nfeature_l = []\nfeature_l.append(best_feature)\nfrom neural_coder import enable\nenable(code="${this.tmp_path}",features=feature_l, overwrite=True)\nwith open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let read_expr = { boost: "boost", best_feature: "best_feature", best_name: "best_name", optimizeCode: "optimized_code", feature_l: "fps_list", maxi: "maxi", index: "index" }; - let read_result = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false); - read_result.then(value => { - var _a, _b, _c, _d; - console.log("resres", value); - let boost = Object.values(value.boost.data)[0]; - let best_name = Object.values(value.best_name.data)[0]; - let optimizedTexts = Object.values(value.optimizeCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\n`; - } - // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\n` - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - } - // if(this.markdown){ - // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \n` - // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n` - // } - let command = "lscpu | grep 'Model name'"; - let get_hardware = `import subprocess\nsubp = subprocess.Popen("${command}",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")\nsubp.wait(2)\nhardware = subp.communicate()[0].replace("Model name:","").strip()`; - let expr_hardware = { hardware: "hardware" }; - let hard_res = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware, false); - hard_res.then(value => { - let hard = Object.values(value.hardware.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\n` - }); - // cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log\n` - const run_svg = document.createElement("svg"); - run_svg.innerHTML = Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg); - }); - } - }); - }); - } - } - } -} -export class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer { - constructor(notebookTracker, panel) { - super(panel); - this.notebookTracker = notebookTracker; - this.notebookname = ''; - } - async optimizeAction(config, formatter) { - return this.optimizeCells(true, config, formatter); - } - async optimizeAllCodeCells(config, formatter, notebook, run) { - return this.optimizeCells(false, config, formatter, notebook, run); - } - getCodeCells(ifmarkdown = true, notebook) { - if (!this.notebookTracker.currentWidget) { - return []; - } - const codeCells = []; - notebook = notebook || this.notebookTracker.currentWidget.content; - this.notebookname = notebook.title.label; - let count = 0; - notebook.widgets.forEach((cell) => { - if (cell.model.type === 'code') { - count += 1; - codeCells.push(cell); - } - }); - if (ifmarkdown) { - NotebookActions.insertBelow(notebook); - this.notebookTracker.currentWidget.content.activeCellIndex = count + 1; - NotebookActions.changeCellType(notebook, 'markdown'); - const activeCell = notebook.activeCell; - if (activeCell) { - this.markdown = activeCell; - } - } - this.cells = codeCells; - return codeCells; - } - async optimizeCells(selectedOnly, config, formatter, notebook, run) { - if (this.working) { - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - console.log("arrive here 333"); - this.working = true; - const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu'; - if (optimize_type === 'auto-quant') { - selectedOnly = true; - } - else { - selectedOnly = false; - } - const selectedCells = this.getCodeCells(selectedOnly, notebook); - let cell = selectedCells[selectedCells.length - 1]; - if (selectedCells.length === 0) { - this.working = false; - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - const currentTexts = selectedCells.map(cell => cell.model.value.text); - if (optimize_type === 'auto-quant') { - console.log("arrive here 444-111"); - if (this.markdown) { - this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}" \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \n`; - } - // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}"\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\n` - let runcode = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Auto-Quant Started ......\\n")`; - let expr = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - let runcode2 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\n")`; - let expr2 = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - let runcode3 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Benchmark Mode: Throughput\\n")`; - let expr3 = { path: "" }; - NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - // cell.outputArea.node.setAttribute("class","pad") - await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run); - } - else { - console.log("arrive here 444-222"); - await this.optimizeCode(currentTexts, optimize_type, "", "", "normal", true, this.panel, cell, run); - } - this.working = false; - console.log("arrive here 555"); - return new Promise((resolve, reject) => { - resolve("success!"); - }); - } - applicable(formatter, currentWidget) { - const currentNotebookWidget = this.notebookTracker.currentWidget; - return currentNotebookWidget && currentWidget === currentNotebookWidget; - } -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/index.d.ts b/neural_coder/extensions/neural_compressor_ext_lab/lib/index.d.ts deleted file mode 100644 index f256eada9ba..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/index.d.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { JupyterFrontEndPlugin } from '@jupyterlab/application'; -/** - * Initialization data for the neural_compressor_ext_lab extension. 
- */ -declare const plugin: JupyterFrontEndPlugin; -export default plugin; diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/index.js b/neural_coder/extensions/neural_compressor_ext_lab/lib/index.js deleted file mode 100644 index 190b66e0840..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/index.js +++ /dev/null @@ -1,105 +0,0 @@ -import { INotebookTracker } from '@jupyterlab/notebook'; -import { ToolbarButton, showDialog, Dialog } from '@jupyterlab/apputils'; -import { ISettingRegistry } from '@jupyterlab/settingregistry'; -import { IMainMenu } from '@jupyterlab/mainmenu'; -import { LabIcon } from '@jupyterlab/ui-components'; -import { Widget } from '@lumino/widgets'; -import { JupyterlabNotebookCodeOptimizer } from './deepcoder'; -import { Constants } from './constants'; -class neural_compressor_ext_lab { - constructor(app, tracker, notebookpanel) { - this.app = app; - this.tracker = tracker; - this.notebookpanel = notebookpanel; - this.setupWidgetExtension(); - this.config = ''; - } - createNew(nb) { - this.notebookpanel = nb; - this.notebookCodeOptimizer = new JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel); - const svg = document.createElement("svg"); - svg.innerHTML = Constants.ICON_FORMAT_ALL_SVG; - const run_svg = document.createElement("svg"); - run_svg.innerHTML = Constants.ICON_RUN; - const div = document.createElement("div"); - div.setAttribute("class", "wrapper"); - const span = document.createElement("span"); - span.setAttribute("class", "f1ozlkqi"); - span.innerHTML = Constants.SVG; - const selector = document.createElement("select"); - selector.setAttribute("class", "aselector"); - selector.id = "NeuralCoder"; - const option1 = document.createElement("option"); - option1.value = "pytorch_inc_static_quant_fx"; - option1.innerText = "INC Enable INT8 (Static)"; - option1.selected = true; - const option2 = document.createElement("option"); - option2.value = "pytorch_inc_dynamic_quant"; - option2.innerText = "INC Enable INT8 (Dynamic)"; - const option3 = document.createElement("option"); - option3.value = "pytorch_inc_bf16"; - option3.innerText = "INC Enable BF16"; - const option4 = document.createElement("option"); - option4.value = "auto-quant"; - option4.innerText = "INC Auto Enable & Benchmark"; - selector.options.add(option1); - selector.options.add(option2); - selector.options.add(option3); - selector.options.add(option4); - div.appendChild(selector); - div.appendChild(span); - const selector_widget = new Widget(); - selector_widget.node.appendChild(div); - selector_widget.addClass("aselector"); - let notebookCodeOptimizer = this.notebookCodeOptimizer; - let config = this.config; - const dia_input = document.createElement("input"); - const dia_widget = new Widget(); - dia_widget.node.appendChild(dia_input); - dia_widget.addClass("dialog"); - const run_button = new ToolbarButton({ - tooltip: 'NeuralCoder', - icon: new LabIcon({ - name: "run", - svgstr: Constants.ICON_RUN - }), - onClick: async function () { - var _a, _b, _c, _d; - console.log("arrive here 111"); - (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg); - if (selector.options[selector.selectedIndex].value === 'auto-quant') { - await showDialog({ - title: 'Please input execute parameters:', - body: dia_widget, - buttons: [Dialog.okButton({ label: 'Confirm' })] - }).then(result => { - if (result.button.accept) { - config = dia_input.value; - } - }); - } - console.log("arrive here 222"); - await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button); - } - }); - nb.toolbar.insertItem(11, "nc", run_button); - nb.toolbar.insertItem(12, "selector", selector_widget); - } - setupWidgetExtension() { - this.app.docRegistry.addWidgetExtension('Notebook', this); - } -} -/** - * Initialization data for the neural_compressor_ext_lab extension. - */ -const plugin = { - id: 'neural_compressor_ext_lab:plugin', - autoStart: true, - requires: [INotebookTracker, IMainMenu], - optional: [ISettingRegistry], - activate: (app, tracker, notebookpanel) => { - new neural_compressor_ext_lab(app, tracker, notebookpanel); - console.log('JupyterLab extension neural_compressor_ext_lab is activated!'); - } -}; -export default plugin; diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/test.d.ts b/neural_coder/extensions/neural_compressor_ext_lab/lib/test.d.ts deleted file mode 100644 index 2c28e08c632..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/test.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -declare let str: string; -declare let str1: number; diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/test.js b/neural_coder/extensions/neural_compressor_ext_lab/lib/test.js deleted file mode 100644 index c679ea06c74..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/test.js +++ /dev/null @@ -1,4 +0,0 @@ -"use strict"; -let str = '# this is the beginning of a single code snippet\nimport glob\nimport torch\nimport os\nimport sys\nfrom tqdm import tqdm\nfrom dalle_pytorch import VQGanVAE, DALLE, DiscreteVAE\nfrom dalle_pytorch.tokenizer import tokenizer\nfrom einops import repeat\nfrom dalle_nc import DALLE, DiscreteVAE\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\n\n# model\nvae = DiscreteVAE(\n image_size = 8,\n num_layers = 3,\n num_tokens = 8192,\n codebook_dim = 1024,\n hidden_dim = 64,\n num_resnet_blocks = 1,\n temperature = 0.9\n)\n\ndalle = DALLE(\n dim = 1024,\n vae = vae, # automatically infer (1) image sequence length and (2) number of image tokens\n num_text_tokens = 100000, # vocab size for text\n text_seq_len = 256, # text sequence length\n depth = 12, # should aim to be 64\n heads = 16, # attention heads\n dim_head = 64, # attention head dimension\n attn_dropout = 0.1, # attention dropout\n ff_dropout = 0.1 # feedforward dropout\n)\n# [NeuralCoder] pytorch_inc_dynamic_quant for dalle [Beginning Line]\nif "GraphModule" not in str(type(dalle)):\n from neural_compressor.quantization import fit\n from neural_compressor.config import PostTrainingQuantConfig\n config = PostTrainingQuantConfig(approach="dynamic")\n dalle = fit(dalle, conf=config)\n dalle = dalle.model\n dalle.eval()\n# [NeuralCoder] pytorch_inc_dynamic_quant for dalle [Ending Line]\n\ndalle.eval()\n\n# real data for DALLE image generation\nfiles = glob.glob(\'/home2/longxin/neural_compressor_ext_lab/real_text.txt\')\n\n# create dataloader\ninput_list = []\nwith torch.no_grad():\n count = 0\n for file in files:\n texts = open(file, \'r\').read().split(\'\\n\')\n for text in texts:\n print(text)\n\n 
num_images = 1\n\n top_k = 0.9\n\n image_size = vae.image_size\n\n texts = text.split(\'|\')\n\n for j, text in tqdm(enumerate(texts)):\n text_tokens = tokenizer.tokenize([text], 256).to(\'cpu\')\n\n text_tokens = repeat(text_tokens, \'() n -> b n\', b=num_images)\n\n for text_chunk in tqdm(text_tokens):\n d = {}\n d["text"] = text_chunk\n d["filter_thres"] = top_k\n input_list.append(d)\n\nclass MyDataset(Dataset):\n def __init__(self):\n self.samples = input_list\n\n def __getitem__(self, idx):\n return self.samples[idx], 1\n\n def __len__(self):\n return len(self.samples)\ndataset = MyDataset()\ndataloader = DataLoader(dataset)\n\n# inference\nwith torch.no_grad():\n for step, (inputs, labels) in enumerate(dataloader):\n print("running inference ...")\n output = dalle(**inputs)\n\n'; -let str1 = str.split('# this is the beginning of a single code snippet\\n').length; -console.log(__filename); diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/utils.d.ts b/neural_coder/extensions/neural_compressor_ext_lab/lib/utils.d.ts deleted file mode 100644 index 8b7a2173767..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/utils.d.ts +++ /dev/null @@ -1,120 +0,0 @@ -import { NotebookPanel } from '@jupyterlab/notebook'; -import { Kernel } from '@jupyterlab/services'; -import { CommandRegistry } from '@phosphor/commands'; -export default class NotebookUtilities { - /** - * generate random number - * @Min - * @Max - */ - static GetRandomNum(Min: number, Max: number): number; - /** - * Builds an HTML container by sanitizing a list of strings and converting - * them in valid HTML - * @param msg A list of string with HTML formatting - * @returns a HTMLDivElement composed of a list of spans with formatted text - */ - private static buildDialogBody; - /** - * Opens a pop-up dialog in JupyterLab to display a simple message. - * @param title The title for the message popup - * @param msg The message as an array of strings - * @param buttonLabel The label to use for the button. Default is 'OK' - * @param buttonClassName The classname to give to the 'ok' button - * @returns Promise - A promise once the message is closed. - */ - static showMessage(title: string, msg: string[], buttonLabel?: string, buttonClassName?: string): Promise; - /** - * Opens a pop-up dialog in JupyterLab to display a yes/no dialog. - * @param title The title for the message popup - * @param msg The message - * @param acceptLabel The label to use for the accept button. Default is 'YES' - * @param rejectLabel The label to use for the reject button. Default is 'NO' - * @param yesButtonClassName The classname to give to the accept button. - * @param noButtonClassName The classname to give to the cancel button. - * @returns Promise - A promise once the message is closed. - */ - static showYesNoDialog(title: string, msg: string[], acceptLabel?: string, rejectLabel?: string, yesButtonClassName?: string, noButtonClassName?: string): Promise; - /** - * Opens a pop-up dialog in JupyterLab with various information and button - * triggering reloading the page. - * @param title The title for the message popup - * @param msg The message - * @param buttonLabel The label to use for the button. Default is 'Refresh' - * @param buttonClassName The classname to give to the 'refresh' button. - * @returns Promise - A promise once the message is closed. 
- */ - static showRefreshDialog(title: string, msg: string[], buttonLabel?: string, buttonClassName?: string): Promise; - /** - * @description Creates a new JupyterLab notebook for use by the application - * @param command The command registry - * @returns Promise - A promise containing the notebook panel object that was created (if successful). - */ - static createNewNotebook(command: CommandRegistry): Promise; - /** - * Safely saves the Jupyter notebook document contents to disk - * @param notebookPanel The notebook panel containing the notebook to save - */ - static saveNotebook(notebookPanel: NotebookPanel): Promise; - /** - * Convert the notebook contents to JSON - * @param notebookPanel The notebook panel containing the notebook to serialize - */ - static notebookToJSON(notebookPanel: NotebookPanel): any; - /** - * @description Gets the value of a key from specified notebook's metadata. - * @param notebookPanel The notebook to get meta data from. - * @param key The key of the value. - * @returns any -The value of the metadata. Returns null if the key doesn't exist. - */ - static getMetaData(notebookPanel: NotebookPanel, key: string): any; - /** - * @description Sets the key value pair in the notebook's metadata. - * If the key doesn't exists it will add one. - * @param notebookPanel The notebook to set meta data in. - * @param key The key of the value to create. - * @param value The value to set. - * @param save Default is false. Whether the notebook should be saved after the meta data is set. - * Note: This function will not wait for the save to complete, it only sends a save request. - * @returns The old value for the key, or undefined if it did not exist. - */ - static setMetaData(notebookPanel: NotebookPanel, key: string, value: any, save?: boolean): any; - /** - * @description This function runs code directly in the notebook's kernel and then evaluates the - * result and returns it as a promise. - * @param kernel The kernel to run the code in. - * @param runCode The code to run in the kernel. - * @param userExpressions The expressions used to capture the desired info from the executed code. - * @param runSilent Default is false. If true, kernel will execute as quietly as possible. - * store_history will be set to false, and no broadcast on IOPUB channel will be made. - * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history - * and the counter which is shown in the cells will be incremented to reflect code was run. - * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using - * an input_request message. - * @param stopOnError Default is false. If True, does not abort the execution queue, if an exception is encountered. - * This allows the queued execution of multiple execute_requests, even if they generate exceptions. - * @returns Promise - A promise containing the execution results of the code as an object with - * keys based on the user_expressions. 
- * @example - * //The code - * const code = "a=123\nb=456\nsum=a+b"; - * //The user expressions - * const expr = {sum: "sum",prod: "a*b",args:"[a,b,sum]"}; - * //Async function call (returns a promise) - * sendKernelRequest(notebookPanel, code, expr,false); - * //Result when promise resolves: - * { - * sum:{status:"ok",data:{"text/plain":"579"},metadata:{}}, - * prod:{status:"ok",data:{"text/plain":"56088"},metadata:{}}, - * args:{status:"ok",data:{"text/plain":"[123, 456, 579]"}} - * } - * @see For more information on JupyterLab messages: - * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results - */ - static sendKernelRequest(kernel: Kernel.IKernelConnection | null | undefined, runCode: string, userExpressions: any, runSilent?: boolean, storeHistory?: boolean, allowStdIn?: boolean, stopOnError?: boolean): Promise; - /** - * Same as method sendKernelRequest but passing - * a NotebookPanel instead of a Kernel - */ - static sendKernelRequestFromNotebook(notebookPanel: NotebookPanel, runCode: string, userExpressions: any, runSilent?: boolean, storeHistory?: boolean, allowStdIn?: boolean, stopOnError?: boolean): Promise; -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/lib/utils.js b/neural_coder/extensions/neural_compressor_ext_lab/lib/utils.js deleted file mode 100644 index 5ac72df1071..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/lib/utils.js +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright 2019-2020 The Kale Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -import { Dialog, showDialog } from '@jupyterlab/apputils'; -// @ts-ignore -import SanitizedHTML from 'react-sanitized-html'; -import * as React from 'react'; -export default class NotebookUtilities { - /** - * generate random number - * @Min - * @Max - */ - static GetRandomNum(Min, Max) { - let Range; - Range = Max - Min; - var Rand = Math.random(); - return (Min + Math.round(Rand * Range)); - } - /** - * Builds an HTML container by sanitizing a list of strings and converting - * them in valid HTML - * @param msg A list of string with HTML formatting - * @returns a HTMLDivElement composed of a list of spans with formatted text - */ - static buildDialogBody(msg) { - return (React.createElement("div", null, msg.map((s, i) => { - return (React.createElement(React.Fragment, { key: `msg-${i}` }, - React.createElement(SanitizedHTML, { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }), - React.createElement("br", null))); - }))); - } - /** - * Opens a pop-up dialog in JupyterLab to display a simple message. - * @param title The title for the message popup - * @param msg The message as an array of strings - * @param buttonLabel The label to use for the button. Default is 'OK' - * @param buttonClassName The classname to give to the 'ok' button - * @returns Promise - A promise once the message is closed. 
- */ - static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') { - const buttons = [ - Dialog.okButton({ label: buttonLabel, className: buttonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - await showDialog({ title, buttons, body: messageBody }); - } - /** - * Opens a pop-up dialog in JupyterLab to display a yes/no dialog. - * @param title The title for the message popup - * @param msg The message - * @param acceptLabel The label to use for the accept button. Default is 'YES' - * @param rejectLabel The label to use for the reject button. Default is 'NO' - * @param yesButtonClassName The classname to give to the accept button. - * @param noButtonClassName The classname to give to the cancel button. - * @returns Promise - A promise once the message is closed. - */ - static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') { - const buttons = [ - Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }), - Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - const result = await showDialog({ title, buttons, body: messageBody }); - return result.button.label === acceptLabel; - } - /** - * Opens a pop-up dialog in JupyterLab with various information and button - * triggering reloading the page. - * @param title The title for the message popup - * @param msg The message - * @param buttonLabel The label to use for the button. Default is 'Refresh' - * @param buttonClassName The classname to give to the 'refresh' button. - * @returns Promise - A promise once the message is closed. - */ - static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') { - await this.showMessage(title, msg, buttonLabel, buttonClassName); - location.reload(); - } - /** - * @description Creates a new JupyterLab notebook for use by the application - * @param command The command registry - * @returns Promise - A promise containing the notebook panel object that was created (if successful). - */ - static async createNewNotebook(command) { - const notebook = await command.execute('notebook:create-new', { - activate: true, - path: '', - preferredLanguage: '', - }); - await notebook.session.ready; - return notebook; - } - /** - * Safely saves the Jupyter notebook document contents to disk - * @param notebookPanel The notebook panel containing the notebook to save - */ - static async saveNotebook(notebookPanel) { - if (notebookPanel) { - await notebookPanel.context.ready; - notebookPanel.context.save(); - return true; - } - return false; - } - /** - * Convert the notebook contents to JSON - * @param notebookPanel The notebook panel containing the notebook to serialize - */ - static notebookToJSON(notebookPanel) { - if (notebookPanel.content.model) { - return notebookPanel.content.model.toJSON(); - } - return null; - } - /** - * @description Gets the value of a key from specified notebook's metadata. - * @param notebookPanel The notebook to get meta data from. - * @param key The key of the value. - * @returns any -The value of the metadata. Returns null if the key doesn't exist. - */ - static getMetaData(notebookPanel, key) { - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. 
No meta data available.'); - } - if (notebookPanel.model && notebookPanel.model.metadata.has(key)) { - return notebookPanel.model.metadata.get(key); - } - return null; - } - /** - * @description Sets the key value pair in the notebook's metadata. - * If the key doesn't exists it will add one. - * @param notebookPanel The notebook to set meta data in. - * @param key The key of the value to create. - * @param value The value to set. - * @param save Default is false. Whether the notebook should be saved after the meta data is set. - * Note: This function will not wait for the save to complete, it only sends a save request. - * @returns The old value for the key, or undefined if it did not exist. - */ - static setMetaData(notebookPanel, key, value, save = false) { - var _a; - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value); - if (save) { - this.saveNotebook(notebookPanel); - } - return oldVal; - } - // /** - // * Get a new Kernel, not tied to a Notebook - // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services - // */ - // public static async createNewKernel() { - // // Get info about the available kernels and start a new one. - // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => { - // // console.log('Default spec:', kernelSpecs.default); - // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs)); - // // use the default name - // return { name: kernelSpecs.default }; - // }); - // return await Kernel.startNew(options).then(_kernel => { - // return _kernel; - // }); - // } - // // TODO: We can use this context manager to execute commands inside a new kernel - // // and be sure that it will be disposed of at the end. - // // Another approach could be to create a kale_rpc Kernel, as a singleton, - // // created at startup. The only (possible) drawback is that we can not name - // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would - // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls. - // public static async executeWithNewKernel(action: Function, args: any[] = []) { - // // create brand new kernel - // const _k = await this.createNewKernel(); - // // execute action inside kernel - // const res = await action(_k, ...args); - // // close kernel - // _k.shutdown(); - // // return result - // return res; - // } - /** - * @description This function runs code directly in the notebook's kernel and then evaluates the - * result and returns it as a promise. - * @param kernel The kernel to run the code in. - * @param runCode The code to run in the kernel. - * @param userExpressions The expressions used to capture the desired info from the executed code. - * @param runSilent Default is false. If true, kernel will execute as quietly as possible. - * store_history will be set to false, and no broadcast on IOPUB channel will be made. - * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history - * and the counter which is shown in the cells will be incremented to reflect code was run. - * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using - * an input_request message. - * @param stopOnError Default is false. 
If True, does not abort the execution queue, if an exception is encountered. - * This allows the queued execution of multiple execute_requests, even if they generate exceptions. - * @returns Promise - A promise containing the execution results of the code as an object with - * keys based on the user_expressions. - * @example - * //The code - * const code = "a=123\nb=456\nsum=a+b"; - * //The user expressions - * const expr = {sum: "sum",prod: "a*b",args:"[a,b,sum]"}; - * //Async function call (returns a promise) - * sendKernelRequest(notebookPanel, code, expr,false); - * //Result when promise resolves: - * { - * sum:{status:"ok",data:{"text/plain":"579"},metadata:{}}, - * prod:{status:"ok",data:{"text/plain":"56088"},metadata:{}}, - * args:{status:"ok",data:{"text/plain":"[123, 456, 579]"}} - * } - * @see For more information on JupyterLab messages: - * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results - */ - static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - if (!kernel) { - throw new Error('Kernel is null or undefined.'); - } - // Wait for kernel to be ready before sending request - // await kernel.status; - const message = await kernel.requestExecute({ - allow_stdin: allowStdIn, - code: runCode, - silent: runSilent, - stop_on_error: stopOnError, - store_history: storeHistory, - user_expressions: userExpressions, - }).done; - const content = message.content; - if (content.status !== 'ok') { - // If response is not 'ok', throw contents as error, log code - const msg = `Code caused an error:\n${runCode}`; - console.error(msg); - if (content.traceback) { - content.traceback.forEach((line) => console.log(line.replace(/[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, ''))); - } - throw content; - } - // Return user_expressions of the content - return content.user_expressions; - } - /** - * Same as method sendKernelRequest but passing - * a NotebookPanel instead of a Kernel - */ - static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - var _a, _b, _c, _d; - if (!notebookPanel) { - throw new Error('Notebook is null or undefined.'); - } - // Wait for notebook panel to be ready - await notebookPanel.activate; - await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready); - console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel); - return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError); - } -} diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/__init__.py b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/__init__.py deleted file mode 100644 index 74c4e9f68fc..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json -from pathlib import Path - -from ._version import __version__ - - -HERE = Path(__file__).parent.resolve() - - -with (HERE / "labextension" / "package.json").open() as fid: - data = json.load(fid) - - -def _jupyter_labextension_paths(): - return [{ - "src": "labextension", - "dest": data["name"] - }] - diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/_version.py b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/_version.py deleted file mode 100644 index 30cfd6b1021..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/_version.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2022 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json -from pathlib import Path - -__all__ = ["__version__"] - -def _fetchVersion(): - HERE = Path(__file__).parent.resolve() - - for settings in HERE.rglob("package.json"): - try: - with settings.open() as f: - version = json.load(f)["version"] - return ( - version.replace("-alpha.", "a") - .replace("-beta.", "b") - .replace("-rc.", "rc") - ) - except FileNotFoundError: - pass - - raise FileNotFoundError(f"Could not find package.json under dir {HERE!s}") - -__version__ = _fetchVersion() diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/build_log.json b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/build_log.json deleted file mode 100644 index 30f46e6eba0..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/build_log.json +++ /dev/null @@ -1,651 +0,0 @@ -[ - { - "bail": false, - "module": { - "rules": [ - { - "test": {}, - "use": [ - "style-loader", - "css-loader" - ] - }, - { - "test": {}, - "use": "raw-loader" - }, - { - "test": {}, - "use": "raw-loader" - }, - { - "test": {}, - "use": "file-loader" - }, - { - "test": {}, - "use": "file-loader" - }, - { - "test": {}, - "use": "url-loader?limit=10000&mimetype=application/font-woff" - }, - { - "test": {}, - "use": "url-loader?limit=10000&mimetype=application/font-woff" - }, - { - "test": {}, - "use": "url-loader?limit=10000&mimetype=application/octet-stream" - }, - { - "test": {}, - "use": "file-loader" - }, - { - "test": {}, - "issuer": {}, - "use": { - "loader": "svg-url-loader", - "options": { - "encoding": "none", - "limit": 10000 - } - } - }, - { - "test": {}, - "issuer": {}, - "use": { - "loader": "raw-loader" - } - }, - { - "test": {}, - "type": "javascript/auto" - }, - { - "test": {}, - "resolve": { - 
"fullySpecified": false - } - }, - { - "test": {}, - "resolve": { - "fullySpecified": false - } - }, - { - "test": {}, - "use": "file-loader" - } - ] - }, - "resolve": { - "alias": {}, - "fallback": { - "url": false, - "buffer": false, - "crypto": false, - "path": "/home/demo/longxin/neural_compressor_ext_lab/node_modules/path-browserify/index.js", - "process": "/home/demo/longxin/neural_compressor_ext_lab/node_modules/process/browser.js" - } - }, - "watchOptions": { - "poll": 500, - "aggregateTimeout": 1000 - }, - "output": { - "hashFunction": "sha256", - "filename": "[name].[contenthash].js", - "path": "/home/demo/longxin/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static", - "publicPath": "auto" - }, - "plugins": [ - { - "definitions": { - "process": "process/browser" - } - }, - { - "_options": { - "name": "neural_compressor_ext_lab", - "library": { - "type": "var", - "name": [ - "_JUPYTERLAB", - "neural_compressor_ext_lab" - ] - }, - "filename": "remoteEntry.[contenthash].js", - "exposes": { - "./index": "/home/demo/longxin/neural_compressor_ext_lab/lib/index.js", - "./extension": "/home/demo/longxin/neural_compressor_ext_lab/lib/index.js", - "./style": "/home/demo/longxin/neural_compressor_ext_lab/style/index.js" - }, - "shared": { - "@jupyterlab/application": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/application-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/apputils-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/cell-toolbar-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/celltags-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/codemirror-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/completer-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/console-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/coreutils": { - "requiredVersion": "^5.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/csvviewer-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/debugger-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/docmanager-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/docprovider-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/documentsearch-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/extensionmanager-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/filebrowser-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/fileeditor-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/help-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/htmlviewer-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/hub-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/imageviewer-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/inspector-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/javascript-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/json-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - 
"@jupyterlab/launcher-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/logconsole-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/mainmenu-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/markdownviewer-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/mathjax2-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/notebook-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/pdf-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/rendermime-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/running-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/settingeditor-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/shortcuts-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/statusbar-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/terminal-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/theme-dark-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/theme-light-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/toc-extension": { - "requiredVersion": "^5.4.7", - "import": false - }, - "@jupyterlab/tooltip-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/translation-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/ui-components-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/vdom-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/vega5-extension": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/apputils": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/attachments": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/cell-toolbar": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/cells": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/celltags": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/codeeditor": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/codemirror": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/completer": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/console": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/csvviewer": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/debugger": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/docmanager": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/docprovider": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/docregistry": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/documentsearch": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/extensionmanager": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/filebrowser": { - 
"requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/fileeditor": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/htmlviewer": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/imageviewer": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/inspector": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/launcher": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/logconsole": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/mainmenu": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/markdownviewer": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/mathjax2": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/metapackage": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/nbconvert-css": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/nbformat": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/notebook": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/observables": { - "requiredVersion": "^4.4.7", - "import": false - }, - "@jupyterlab/outputarea": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/property-inspector": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/rendermime": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/rendermime-interfaces": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/running": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@jupyterlab/services": { - "requiredVersion": "^6.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/settingeditor": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/settingregistry": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/shared-models": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/statedb": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/statusbar": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/terminal": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/toc": { - "requiredVersion": "^5.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/tooltip": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/translation": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/ui-components": { - "requiredVersion": "^3.4.7", - "import": false, - "singleton": true - }, - "@jupyterlab/vdom": { - "requiredVersion": "^3.4.7", - "import": false - }, - "@lumino/algorithm": { - "requiredVersion": "^1.9.0", - "import": false, - "singleton": true - }, - "@lumino/application": { - "requiredVersion": "^1.27.0", - "import": false, - "singleton": true - }, - "@lumino/commands": { - "requiredVersion": "^1.19.0", - "import": false, - "singleton": true - }, - "@lumino/coreutils": { - "requiredVersion": "^1.11.0", - "import": false, - "singleton": true - }, - "@lumino/disposable": { 
- "requiredVersion": "^1.10.0", - "import": false, - "singleton": true - }, - "@lumino/domutils": { - "requiredVersion": "^1.8.0", - "import": false, - "singleton": true - }, - "@lumino/dragdrop": { - "requiredVersion": "^1.13.0", - "import": false, - "singleton": true - }, - "@lumino/messaging": { - "requiredVersion": "^1.10.0", - "import": false, - "singleton": true - }, - "@lumino/properties": { - "requiredVersion": "^1.8.0", - "import": false, - "singleton": true - }, - "@lumino/signaling": { - "requiredVersion": "^1.10.0", - "import": false, - "singleton": true - }, - "@lumino/virtualdom": { - "requiredVersion": "^1.14.0", - "import": false, - "singleton": true - }, - "@lumino/widgets": { - "requiredVersion": "^1.33.0", - "import": false, - "singleton": true - }, - "react": { - "requiredVersion": "^17.0.1", - "import": false, - "singleton": true - }, - "react-dom": { - "requiredVersion": "^17.0.1", - "import": false, - "singleton": true - }, - "yjs": { - "requiredVersion": "^13.5.17", - "import": false, - "singleton": true - }, - "@phosphor/commands": {}, - "@types/lerna__child-process": {}, - "@types/shelljs": {}, - "ajv": {}, - "ajv-keywords": {}, - "assert": {}, - "browserify-fs": {}, - "brython": {}, - "buffer": {}, - "cacheable-request": {}, - "child_process": {}, - "clone-response": {}, - "constants": {}, - "css-loader": {}, - "duplicate-package-checker-webpack-plugin": {}, - "enhanced-resolve": {}, - "es-abstract": {}, - "es-to-primitive": {}, - "fs": {}, - "got": {}, - "has": {}, - "icss-utils": {}, - "react-sanitized-html": {}, - "sanitize-html": {}, - "shelljs": {}, - "stream": {}, - "util": {}, - "neural_compressor_ext_lab": { - "version": "0.1.0", - "singleton": true, - "import": "/home/demo/longxin/neural_compressor_ext_lab/lib/index.js" - } - } - } - }, - {} - ], - "mode": "development", - "devtool": "source-map", - "entry": {} - } -] \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/package.json b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/package.json deleted file mode 100644 index b5d8d15ca60..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/package.json +++ /dev/null @@ -1,164 +0,0 @@ -{ - "name": "jupyter-lab-neural-compressor", - "version": "1.0.2", - "description": "Intel® Neural Compressor auto-quantization plugin.", - "keywords": [ - "jupyter", - "jupyterlab", - "jupyterlab-extension" - ], - "homepage": "https://github.com/intel/neural-compressor/tree/master/neural_coder/extensions/neural_compressor_ext_lab", - "bugs": { - "url": "https://github.com/intel/neural-compressor/tree/master/neural_coder/extensions/neural_compressor_ext_lab" - }, - "license": "BSD-3-Clause", - "author": { - "name": "neural_compressor_ext_lab", - "email": "inc.maintainers@intel.com" - }, - "files": [ - "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf}", - "style/**/*.{css,js,eot,gif,html,jpg,json,png,svg,woff2,ttf}" - ], - "main": "lib/index.js", - "types": "lib/index.d.ts", - "style": "style/index.css", - "repository": { - "type": "git", - "url": "https://github.com/intel/neural-compressor.git" - }, - "scripts": { - "preinstall": "npx npm-force-resolutions", - "build": "jlpm build:lib && jlpm build:labextension:dev", - "build:prod": "jlpm clean && jlpm build:lib && jlpm build:labextension", - "build:labextension": "jupyter labextension build .", - "build:labextension:dev": "jupyter 
labextension build --development True .", - "build:lib": "tsc", - "clean": "jlpm clean:lib", - "clean:lib": "rimraf lib tsconfig.tsbuildinfo", - "clean:lintcache": "rimraf .eslintcache .stylelintcache", - "clean:labextension": "rimraf neural_compressor_ext_lab/labextension", - "clean:all": "jlpm clean:lib && jlpm clean:labextension && jlpm clean:lintcache", - "eslint": "jlpm eslint:check --fix", - "eslint:check": "eslint . --cache --ext .ts,.tsx", - "install:extension": "jlpm build", - "lint": "jlpm stylelint && jlpm prettier && jlpm eslint", - "lint:check": "jlpm stylelint:check && jlpm prettier:check && jlpm eslint:check", - "prettier": "jlpm prettier:base --write --list-different", - "prettier:base": "prettier \"**/*{.ts,.tsx,.js,.jsx,.css,.json,.md}\"", - "prettier:check": "jlpm prettier:base --check", - "stylelint": "jlpm stylelint:check --fix", - "stylelint:check": "stylelint --cache \"style/**/*.css\"", - "watch": "run-p watch:src watch:labextension", - "watch:src": "tsc -w", - "watch:labextension": "jupyter labextension watch ." - }, - "dependencies": { - "@jupyterlab/application": "^3.4.7", - "@jupyterlab/apputils": "^3.4.7", - "@jupyterlab/cells": "^3.4.7", - "@jupyterlab/coreutils": "^5.4.7", - "@jupyterlab/docregistry": "^3.4.7", - "@jupyterlab/fileeditor": "^3.4.6", - "@jupyterlab/mainmenu": "^3.4.6", - "@jupyterlab/notebook": "^3.4.7", - "@jupyterlab/services": "^6.4.7", - "@jupyterlab/settingregistry": "^3.4.7", - "@jupyterlab/statedb": "^3.4.7", - "@lumino/coreutils": "^1.12.1", - "@lumino/messaging": "^1.10.2", - "@phosphor/commands": "^1.7.2", - "@types/lerna__child-process": "^5.1.0", - "@types/shelljs": "^0.8.11", - "ajv": "^8.11.0", - "ajv-keywords": "^5.1.0", - "assert": "^2.0.0", - "browserify-fs": "^1.0.0", - "brython": "^3.10.6", - "buffer": "^6.0.3", - "cacheable-request": "^10.1.2", - "child_process": "^1.0.2", - "clone-response": "^2.0.0", - "constants": "^0.0.2", - "css-loader": "^6.7.1", - "duplicate-package-checker-webpack-plugin": "^3.0.0", - "enhanced-resolve": "^5.10.0", - "es-abstract": "^1.20.2", - "es-to-primitive": "^1.2.1", - "fs": "^0.0.1-security", - "has": "^1.0.3", - "icss-utils": "^5.1.0", - "loader-utils": "^2.0.3", - "react": "^17.0.2", - "react-sanitized-html": "^2.0.0", - "sanitize-html": "^2.7.2", - "shelljs": "^0.8.5", - "stream": "^0.0.2", - "util": "^0.12.4" - }, - "devDependencies": { - "@jupyterlab/builder": "^3.1.0", - "@types/fs-extra": "^9.0.13", - "@types/node": "^18.7.15", - "@typescript-eslint/eslint-plugin": "^4.8.1", - "@typescript-eslint/parser": "^4.8.1", - "bl": "^1.2.3", - "eslint": "^7.14.0", - "eslint-config-prettier": "^6.15.0", - "eslint-plugin-prettier": "^3.1.4", - "got": "^12.1.0", - "npm-run-all": "^4.1.5", - "prettier": "^2.1.1", - "rimraf": "^3.0.2", - "semver": "^5.7.2", - "stylelint": "^15.6.0", - "stylelint-config-prettier": "^9.0.3", - "stylelint-config-recommended": "^12.0.0", - "stylelint-config-standard": "~33.0.0", - "stylelint-prettier": "^2.0.0", - "typescript": "~4.1.3" - }, - "sideEffects": [ - "style/*.css", - "style/index.js" - ], - "styleModule": "style/index.js", - "publishConfig": { - "access": "public" - }, - "jupyterlab": { - "extension": true, - "outputDir": "neural_compressor_ext_lab/labextension", - "_build": { - "load": "static/remoteEntry.34f9ad20791fd484f052.js", - "extension": "./extension", - "style": "./style" - } - }, - "browser": { - "child_process": false, - "lerna__child_process": false, - "fs": false, - "path": false, - "os": false - }, - "jupyter-releaser": { - "hooks": { - 
"before-build-npm": [ - "python -m pip install jupyterlab~=3.1", - "jlpm" - ], - "before-build-python": [ - "jlpm clean:all" - ] - } - }, - "resolutions": { - "got": "^12.1.0", - "semver": "^5.7.2", - "bl": "^1.2.3", - "loader-utils": "^2.0.3", - "json5": "^2.2.2", - "jsonwebtoken": "^9.0.0" - } -} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.0c0187df9df8bc38b9c5.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.0c0187df9df8bc38b9c5.js deleted file mode 100644 index 644b1b8bff1..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.0c0187df9df8bc38b9c5.js +++ /dev/null @@ -1,767 +0,0 @@ -"use strict"; -(self["webpackChunkneural_compressor_ext_lab"] = self["webpackChunkneural_compressor_ext_lab"] || []).push([["lib_index_js"],{ - -/***/ "./lib/constants.js": -/*!**************************!*\ - !*** ./lib/constants.js ***! - \**************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "Constants": () => (/* binding */ Constants) -/* harmony export */ }); -var Constants; -(function (Constants) { - Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab'; - Constants.WORK_PATH = "neural_coder_workspace/"; - Constants.ICON_FORMAT_ALL_SVG = ''; - Constants.ICON_RUN = ''; - Constants.SVG = ''; - Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`; - Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`; - Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer'; - Constants.PLUGIN_VERSION = '0.1.0'; -})(Constants || (Constants = {})); - - -/***/ }), - -/***/ "./lib/deepcoder.js": -/*!**************************!*\ - !*** ./lib/deepcoder.js ***! - \**************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "JupyterlabNotebookCodeOptimizer": () => (/* binding */ JupyterlabNotebookCodeOptimizer) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/notebook */ "webpack/sharing/consume/default/@jupyterlab/notebook"); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _utils__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./utils */ "./lib/utils.js"); -/* harmony import */ var _constants__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! 
./constants */ "./lib/constants.js"); - - - -class JupyterlabCodeOptimizer { - constructor(panel) { - this.working = false; - this.panel = panel; - this.tmp_path = "tmp.py"; - this.rand = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].GetRandomNum(0, 200); - this.log_path = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.WORK_PATH + "NeuralCoder" + this.rand + ".log"; - this.tmp_log_path = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.WORK_PATH + "NeuralCoder_tmp" + ".log"; - this.cells = []; - } - async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) { - let codes = []; - code.forEach(function (value) { - value = value.replace(/('\\n')/g, '^^^'); - value = value.replace(/\\n"/g, '###'); - value = value.replace(/\\n'/g, '###'); - value = value.replace(/"\\n/g, '@@'); - value = value.replace(/'\\n/g, '@@'); - value = value.replace(/\n/g, '\\n'); - value = value.replace(/"/g, '+++'); - value = value.replace(/,/g, '$'); - codes.push(value); - }); - let gen_code = `code = "${codes}"\ncodes = code.split(',')\nwith open( '${this.tmp_path}', 'w+' ) as f:\n for i in range(0,len(codes)):\n f.write('# this is the beginning of a single code snippet\\n')\n code_list = codes[i].replace('$',',').replace('+++','\"').split('\\n')\n for line in code_list:\n if('split(^^^)' in line):\n line=line.replace('split(^^^)', 'split(\\'\\\\n\\')')\n if('###' in line):\n line=line.replace('###', '\\\\n\"')\n if('@@' in line):\n line=line.replace('@@', '\"\\\\n')\n f.write(line+'\\n')`; - const expr = { code_list: `code_list` }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, gen_code, expr, false); - if (options === 'normal') { - let runcode = `from neural_coder import enable\nenable(code="${this.tmp_path}",features=["${formatter}"], overwrite=True)`; - let expr = { sum: ` ` }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let run_code1 = `with open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let expr1 = { optimizedCode: "optimized_code" }; - let result2 = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, run_code1, expr1, false); - result2.then(value => { - var _a, _b, _c, _d; - let optimizedTexts = Object.values(value.optimizedCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - const run_svg = document.createElement("svg"); - run_svg.innerHTML = 
_constants__WEBPACK_IMPORTED_MODULE_2__.Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg); - } - }); - } - else { - if (formatter === '') { - if (this.markdown) { - this.markdown.model.value.text += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \n"; - } - // cell.outputArea.node.innerText += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\n" - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n")`; - let expr1 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}",features=[], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr1 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode2 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - }); - } - else { - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}", features=["${formatter}"], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (next !== '') { - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode2 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - } - let runcode3 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr3 = { path: "" }; - let res_tmp = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - res_tmp.then(value => { - if (formatter === 'pytorch_inc_bf16') { - let read_log = `import re\nwith open("${this.tmp_log_path}", 'r') as f:\n logs = f.readlines()\n fps_list=[]\n for log_line in logs[-4:]:\n pat = re.compile(r\'\\d+\\.?\\d+')\n fps = re.findall(pat,log_line)[-1]\n fps_list.append(float(fps))\nmaxi = max(fps_list)\nindex = fps_list.index(maxi)\nboost = round(maxi/fps_list[0],1)\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\nbest_feature = features[index]\nbest_name = feature_name[index]\nfeature_l = []\nfeature_l.append(best_feature)\nfrom neural_coder import enable\nenable(code="${this.tmp_path}",features=feature_l, overwrite=True)\nwith open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let read_expr = { boost: "boost", best_feature: "best_feature", best_name: "best_name", optimizeCode: "optimized_code", feature_l: "fps_list", maxi: "maxi", index: "index" }; - let read_result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false); - read_result.then(value => { - var _a, _b, _c, _d; - console.log("resres", value); - let boost = Object.values(value.boost.data)[0]; - let best_name = Object.values(value.best_name.data)[0]; - let optimizedTexts = Object.values(value.optimizeCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\n`; - } - // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\n` - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - } - // if(this.markdown){ - // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \n` - // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n` - // } - let command = "lscpu | grep 'Model name'"; - let get_hardware = `import subprocess\nsubp = subprocess.Popen("${command}",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")\nsubp.wait(2)\nhardware = subp.communicate()[0].replace("Model name:","").strip()`; - let expr_hardware = { hardware: "hardware" }; - let hard_res = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware, false); - hard_res.then(value => { - let hard = Object.values(value.hardware.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\n` - }); - // cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log\n` - const run_svg = document.createElement("svg"); - run_svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg); - }); - } - }); - }); - } - } - } -} -class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer { - constructor(notebookTracker, panel) { - super(panel); - this.notebookTracker = notebookTracker; - this.notebookname = ''; - } - async optimizeAction(config, formatter) { - return this.optimizeCells(true, config, formatter); - } - async optimizeAllCodeCells(config, formatter, notebook, run) { - return this.optimizeCells(false, config, formatter, notebook, run); - } - getCodeCells(ifmarkdown = true, notebook) { - if (!this.notebookTracker.currentWidget) { - return []; - } - const codeCells = []; - notebook = notebook || this.notebookTracker.currentWidget.content; - this.notebookname = notebook.title.label; - let count = 0; - notebook.widgets.forEach((cell) => { - if (cell.model.type === 'code') { - count += 1; - codeCells.push(cell); - } - }); - if (ifmarkdown) { - _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.NotebookActions.insertBelow(notebook); - this.notebookTracker.currentWidget.content.activeCellIndex = count + 1; - _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.NotebookActions.changeCellType(notebook, 'markdown'); - const activeCell = notebook.activeCell; - if (activeCell) { - this.markdown = activeCell; - } - } - this.cells = codeCells; - return codeCells; - } - async optimizeCells(selectedOnly, config, formatter, notebook, run) { - if (this.working) { - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - console.log("arrive here 333"); - this.working = true; - const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu'; - if (optimize_type === 'auto-quant') { - selectedOnly = true; - } - else { - selectedOnly = false; - } - const selectedCells = this.getCodeCells(selectedOnly, notebook); - let cell = selectedCells[selectedCells.length - 1]; - if (selectedCells.length === 0) { - this.working = false; - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - const currentTexts = selectedCells.map(cell => cell.model.value.text); - if (optimize_type === 'auto-quant') { - console.log("arrive here 444-111"); - if (this.markdown) { - this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}" \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \n`; - } - // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}"\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\n` - let runcode = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Auto-Quant Started ......\\n")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - let runcode2 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\n")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - let runcode3 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Benchmark Mode: Throughput\\n")`; - let expr3 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - // cell.outputArea.node.setAttribute("class","pad") - await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run); - } - else { - console.log("arrive here 444-222"); - await this.optimizeCode(currentTexts, optimize_type, "", "", "normal", true, this.panel, cell, run); - } - this.working = false; - console.log("arrive here 555"); - return new Promise((resolve, reject) => { - resolve("success!"); - }); - } - applicable(formatter, currentWidget) { - const currentNotebookWidget = this.notebookTracker.currentWidget; - return currentNotebookWidget && currentWidget === currentNotebookWidget; - } -} - - -/***/ }), - -/***/ "./lib/index.js": -/*!**********************!*\ - !*** ./lib/index.js ***! - \**********************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/notebook */ "webpack/sharing/consume/default/@jupyterlab/notebook"); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! 
@jupyterlab/apputils */ "webpack/sharing/consume/default/@jupyterlab/apputils"); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var _jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! @jupyterlab/settingregistry */ "webpack/sharing/consume/default/@jupyterlab/settingregistry"); -/* harmony import */ var _jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__); -/* harmony import */ var _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! @jupyterlab/mainmenu */ "webpack/sharing/consume/default/@jupyterlab/mainmenu"); -/* harmony import */ var _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__); -/* harmony import */ var _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! @jupyterlab/ui-components */ "webpack/sharing/consume/default/@jupyterlab/ui-components"); -/* harmony import */ var _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__); -/* harmony import */ var _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! @lumino/widgets */ "webpack/sharing/consume/default/@lumino/widgets"); -/* harmony import */ var _lumino_widgets__WEBPACK_IMPORTED_MODULE_5___default = /*#__PURE__*/__webpack_require__.n(_lumino_widgets__WEBPACK_IMPORTED_MODULE_5__); -/* harmony import */ var _deepcoder__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./deepcoder */ "./lib/deepcoder.js"); -/* harmony import */ var _constants__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! 
./constants */ "./lib/constants.js"); - - - - - - - - -class neural_compressor_ext_lab { - constructor(app, tracker, notebookpanel) { - this.app = app; - this.tracker = tracker; - this.notebookpanel = notebookpanel; - this.setupWidgetExtension(); - this.config = ''; - } - createNew(nb) { - this.notebookpanel = nb; - this.notebookCodeOptimizer = new _deepcoder__WEBPACK_IMPORTED_MODULE_6__.JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel); - const svg = document.createElement("svg"); - svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_FORMAT_ALL_SVG; - const run_svg = document.createElement("svg"); - run_svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_RUN; - const div = document.createElement("div"); - div.setAttribute("class", "wrapper"); - const span = document.createElement("span"); - span.setAttribute("class", "f1ozlkqi"); - span.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.SVG; - const selector = document.createElement("select"); - selector.setAttribute("class", "aselector"); - selector.id = "NeuralCoder"; - const option1 = document.createElement("option"); - option1.value = "pytorch_inc_static_quant_fx"; - option1.innerText = "INC Enable INT8 (Static)"; - option1.selected = true; - const option2 = document.createElement("option"); - option2.value = "pytorch_inc_dynamic_quant"; - option2.innerText = "INC Enable INT8 (Dynamic)"; - const option3 = document.createElement("option"); - option3.value = "pytorch_inc_bf16"; - option3.innerText = "INC Enable BF16"; - const option4 = document.createElement("option"); - option4.value = "auto-quant"; - option4.innerText = "INC Auto Enable & Benchmark"; - selector.options.add(option1); - selector.options.add(option2); - selector.options.add(option3); - selector.options.add(option4); - div.appendChild(selector); - div.appendChild(span); - const selector_widget = new _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__.Widget(); - selector_widget.node.appendChild(div); - selector_widget.addClass("aselector"); - let notebookCodeOptimizer = this.notebookCodeOptimizer; - let config = this.config; - const dia_input = document.createElement("input"); - const dia_widget = new _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__.Widget(); - dia_widget.node.appendChild(dia_input); - dia_widget.addClass("dialog"); - const run_button = new _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.ToolbarButton({ - tooltip: 'NeuralCoder', - icon: new _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__.LabIcon({ - name: "run", - svgstr: _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_RUN - }), - onClick: async function () { - var _a, _b, _c, _d; - console.log("arrive here 111"); - (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg); - if (selector.options[selector.selectedIndex].value === 'auto-quant') { - await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.showDialog)({ - title: 'Please input execute parameters:', - body: dia_widget, - buttons: [_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.Dialog.okButton({ label: 'Confirm' })] - }).then(result => { - if (result.button.accept) { - config = dia_input.value; - } - }); - } - console.log("arrive here 222"); - await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button); - } - }); - nb.toolbar.insertItem(11, "nc", run_button); - nb.toolbar.insertItem(12, "selector", selector_widget); - } - setupWidgetExtension() { - this.app.docRegistry.addWidgetExtension('Notebook', this); - } -} -/** - * Initialization data for the neural_compressor_ext_lab extension. - */ -const plugin = { - id: 'neural_compressor_ext_lab:plugin', - autoStart: true, - requires: [_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.INotebookTracker, _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__.IMainMenu], - optional: [_jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__.ISettingRegistry], - activate: (app, tracker, notebookpanel) => { - new neural_compressor_ext_lab(app, tracker, notebookpanel); - console.log('JupyterLab extension neural_compressor_ext_lab is activated!'); - } -}; -/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (plugin); - - -/***/ }), - -/***/ "./lib/utils.js": -/*!**********************!*\ - !*** ./lib/utils.js ***! - \**********************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (/* binding */ NotebookUtilities) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/apputils */ "webpack/sharing/consume/default/@jupyterlab/apputils"); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var react_sanitized_html__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! react-sanitized-html */ "webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html"); -/* harmony import */ var react_sanitized_html__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(react_sanitized_html__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react"); -/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_2__); -/* - * Copyright 2019-2020 The Kale Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -// @ts-ignore - - -class NotebookUtilities { - /** - * generate random number - * @Min - * @Max - */ - static GetRandomNum(Min, Max) { - let Range; - Range = Max - Min; - var Rand = Math.random(); - return (Min + Math.round(Rand * Range)); - } - /** - * Builds an HTML container by sanitizing a list of strings and converting - * them in valid HTML - * @param msg A list of string with HTML formatting - * @returns a HTMLDivElement composed of a list of spans with formatted text - */ - static buildDialogBody(msg) { - return (react__WEBPACK_IMPORTED_MODULE_2__.createElement("div", null, msg.map((s, i) => { - return (react__WEBPACK_IMPORTED_MODULE_2__.createElement(react__WEBPACK_IMPORTED_MODULE_2__.Fragment, { key: `msg-${i}` }, - react__WEBPACK_IMPORTED_MODULE_2__.createElement((react_sanitized_html__WEBPACK_IMPORTED_MODULE_1___default()), { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }), - react__WEBPACK_IMPORTED_MODULE_2__.createElement("br", null))); - }))); - } - /** - * Opens a pop-up dialog in JupyterLab to display a simple message. - * @param title The title for the message popup - * @param msg The message as an array of strings - * @param buttonLabel The label to use for the button. Default is 'OK' - * @param buttonClassName The classname to give to the 'ok' button - * @returns Promise - A promise once the message is closed. - */ - static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') { - const buttons = [ - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.okButton({ label: buttonLabel, className: buttonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.showDialog)({ title, buttons, body: messageBody }); - } - /** - * Opens a pop-up dialog in JupyterLab to display a yes/no dialog. - * @param title The title for the message popup - * @param msg The message - * @param acceptLabel The label to use for the accept button. Default is 'YES' - * @param rejectLabel The label to use for the reject button. Default is 'NO' - * @param yesButtonClassName The classname to give to the accept button. - * @param noButtonClassName The classname to give to the cancel button. - * @returns Promise - A promise once the message is closed. - */ - static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') { - const buttons = [ - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }), - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - const result = await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.showDialog)({ title, buttons, body: messageBody }); - return result.button.label === acceptLabel; - } - /** - * Opens a pop-up dialog in JupyterLab with various information and button - * triggering reloading the page. - * @param title The title for the message popup - * @param msg The message - * @param buttonLabel The label to use for the button. Default is 'Refresh' - * @param buttonClassName The classname to give to the 'refresh' button. - * @returns Promise - A promise once the message is closed. 
- */ - static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') { - await this.showMessage(title, msg, buttonLabel, buttonClassName); - location.reload(); - } - /** - * @description Creates a new JupyterLab notebook for use by the application - * @param command The command registry - * @returns Promise - A promise containing the notebook panel object that was created (if successful). - */ - static async createNewNotebook(command) { - const notebook = await command.execute('notebook:create-new', { - activate: true, - path: '', - preferredLanguage: '', - }); - await notebook.session.ready; - return notebook; - } - /** - * Safely saves the Jupyter notebook document contents to disk - * @param notebookPanel The notebook panel containing the notebook to save - */ - static async saveNotebook(notebookPanel) { - if (notebookPanel) { - await notebookPanel.context.ready; - notebookPanel.context.save(); - return true; - } - return false; - } - /** - * Convert the notebook contents to JSON - * @param notebookPanel The notebook panel containing the notebook to serialize - */ - static notebookToJSON(notebookPanel) { - if (notebookPanel.content.model) { - return notebookPanel.content.model.toJSON(); - } - return null; - } - /** - * @description Gets the value of a key from specified notebook's metadata. - * @param notebookPanel The notebook to get meta data from. - * @param key The key of the value. - * @returns any -The value of the metadata. Returns null if the key doesn't exist. - */ - static getMetaData(notebookPanel, key) { - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - if (notebookPanel.model && notebookPanel.model.metadata.has(key)) { - return notebookPanel.model.metadata.get(key); - } - return null; - } - /** - * @description Sets the key value pair in the notebook's metadata. - * If the key doesn't exists it will add one. - * @param notebookPanel The notebook to set meta data in. - * @param key The key of the value to create. - * @param value The value to set. - * @param save Default is false. Whether the notebook should be saved after the meta data is set. - * Note: This function will not wait for the save to complete, it only sends a save request. - * @returns The old value for the key, or undefined if it did not exist. - */ - static setMetaData(notebookPanel, key, value, save = false) { - var _a; - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value); - if (save) { - this.saveNotebook(notebookPanel); - } - return oldVal; - } - // /** - // * Get a new Kernel, not tied to a Notebook - // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services - // */ - // public static async createNewKernel() { - // // Get info about the available kernels and start a new one. 
- // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => { - // // console.log('Default spec:', kernelSpecs.default); - // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs)); - // // use the default name - // return { name: kernelSpecs.default }; - // }); - // return await Kernel.startNew(options).then(_kernel => { - // return _kernel; - // }); - // } - // // TODO: We can use this context manager to execute commands inside a new kernel - // // and be sure that it will be disposed of at the end. - // // Another approach could be to create a kale_rpc Kernel, as a singleton, - // // created at startup. The only (possible) drawback is that we can not name - // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would - // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls. - // public static async executeWithNewKernel(action: Function, args: any[] = []) { - // // create brand new kernel - // const _k = await this.createNewKernel(); - // // execute action inside kernel - // const res = await action(_k, ...args); - // // close kernel - // _k.shutdown(); - // // return result - // return res; - // } - /** - * @description This function runs code directly in the notebook's kernel and then evaluates the - * result and returns it as a promise. - * @param kernel The kernel to run the code in. - * @param runCode The code to run in the kernel. - * @param userExpressions The expressions used to capture the desired info from the executed code. - * @param runSilent Default is false. If true, kernel will execute as quietly as possible. - * store_history will be set to false, and no broadcast on IOPUB channel will be made. - * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history - * and the counter which is shown in the cells will be incremented to reflect code was run. - * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using - * an input_request message. - * @param stopOnError Default is false. If True, does not abort the execution queue, if an exception is encountered. - * This allows the queued execution of multiple execute_requests, even if they generate exceptions. - * @returns Promise - A promise containing the execution results of the code as an object with - * keys based on the user_expressions. 
- * @example - * //The code - * const code = "a=123\nb=456\nsum=a+b"; - * //The user expressions - * const expr = {sum: "sum",prod: "a*b",args:"[a,b,sum]"}; - * //Async function call (returns a promise) - * sendKernelRequest(notebookPanel, code, expr,false); - * //Result when promise resolves: - * { - * sum:{status:"ok",data:{"text/plain":"579"},metadata:{}}, - * prod:{status:"ok",data:{"text/plain":"56088"},metadata:{}}, - * args:{status:"ok",data:{"text/plain":"[123, 456, 579]"}} - * } - * @see For more information on JupyterLab messages: - * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results - */ - static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - if (!kernel) { - throw new Error('Kernel is null or undefined.'); - } - // Wait for kernel to be ready before sending request - // await kernel.status; - const message = await kernel.requestExecute({ - allow_stdin: allowStdIn, - code: runCode, - silent: runSilent, - stop_on_error: stopOnError, - store_history: storeHistory, - user_expressions: userExpressions, - }).done; - const content = message.content; - if (content.status !== 'ok') { - // If response is not 'ok', throw contents as error, log code - const msg = `Code caused an error:\n${runCode}`; - console.error(msg); - if (content.traceback) { - content.traceback.forEach((line) => console.log(line.replace(/[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, ''))); - } - throw content; - } - // Return user_expressions of the content - return content.user_expressions; - } - /** - * Same as method sendKernelRequest but passing - * a NotebookPanel instead of a Kernel - */ - static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - var _a, _b, _c, _d; - if (!notebookPanel) { - throw new Error('Notebook is null or undefined.'); - } - // Wait for notebook panel to be ready - await notebookPanel.activate; - await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready); - console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel); - return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? 
void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError); - } -} - - -/***/ }) - -}]); -//# sourceMappingURL=lib_index_js.0c0187df9df8bc38b9c5.js.map \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.0c0187df9df8bc38b9c5.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.0c0187df9df8bc38b9c5.js.map deleted file mode 100644 index a0dd93ddd68..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.0c0187df9df8bc38b9c5.js.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"file":"lib_index_js.0c0187df9df8bc38b9c5.js","mappings":";;;;;;;;;;;;;AAAO;AACP;AACA;AACA;AACA,6IAA6I,gCAAgC,gBAAgB,sBAAsB,qVAAqV,mBAAmB,gVAAgV,mBAAmB;AAC95B;AACA;AACA,yCAAyC,4BAA4B;AACrE,oCAAoC,2BAA2B;AAC/D;AACA;AACA,CAAC,8BAA8B;;;;;;;;;;;;;;;;;;;ACXwB;AACf;AACA;AACxC;AACA;AACA;AACA;AACA;AACA,oBAAoB,2DAA8B;AAClD,wBAAwB,2DAAmB;AAC3C,4BAA4B,2DAAmB;AAC/C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT,kCAAkC,MAAM,0CAA0C,cAAc;AAChG,uBAAuB;AACvB,QAAQ,4EAA+C;AACvD;AACA,2EAA2E,cAAc,eAAe,UAAU;AAClH,yBAAyB;AACzB,YAAY,4EAA+C;AAC3D,0CAA0C,cAAc;AACxD,0BAA0B;AAC1B,0BAA0B,4EAA+C;AACzE;AACA;AACA;AACA;AACA;AACA,gCAAgC,0BAA0B;AAC1D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wCAAwC,0DAAkB;AAC1D;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA;AACA,6CAA6C,cAAc;AAC3D,8BAA8B;AAC9B,gBAAgB,4EAA+C;AAC/D,wGAAwG,cAAc,uCAAuC,QAAQ;AACrK,6BAA6B;AAC7B,6BAA6B,4EAA+C;AAC5E;AACA;AACA;AACA;AACA,wIAAwI,KAAK;AAC7I;AACA,uIAAuI,KAAK;AAC5I,6GAA6G,KAAK;AAClH,gDAAgD,cAAc,8BAA8B,KAAK;AACjG,iCAAiC;AACjC,oBAAoB,4EAA+C;AACnE;AACA,8GAA8G,MAAM;AACpH;AACA,6GAA6G,MAAM;AACnH,iDAAiD,cAAc,mFAAmF,MAAM;AACxJ,kCAAkC;AAClC,oBAAoB,4EAA+C;AACnE,iDAAiD,kBAAkB,kCAAkC,KAAK;AAC1G,kCAAkC;AAClC,oBAAoB,4EAA+C;AACnE,iBAAiB;AACjB;AACA;AACA,wGAAwG,cAAc,gBAAgB,UAAU,4BAA4B,QAAQ;AACpL,6BAA6B;AAC7B,6BAA6B,4EAA+C;AAC5E;AACA;AACA;AACA;AACA,kHAAkH,MAAM,KAAK,KAAK;AAClI;AACA,iHAAiH,MAAM,KAAK,KAAK;AACjI,uFAAuF,MAAM,KAAK,KAAK;AACvG,gDAAgD,cAAc,kCAAkC,KAAK;AACrG,iCAAiC;AACjC,oBAAoB,4EAA+C;AACnE;AACA;AACA,kHAAkH,MAAM;AACxH;AACA,iHAAiH,MAAM;AACvH,qDAAqD,cAAc,mFAAmF,MAAM;AAC5J,sCAAsC;AACtC,wBAAwB,4EAA+C;AACvE;AACA,iDAAiD,kBAAkB,kCAAkC,KAAK;AAC1G,kCAAkC;AAClC,kCAAkC,4EAA+C;AACjF;AACA;AACA,oEAAoE,kBAAkB,qpBAAqpB,cAAc,oDAAoD,cAAc;AAC3zB,8CAA8C;AAC9C,8CAA8C,4EAA+C;AAC7F;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yHAAyH,YAAY;AACrI,8GAA8G,MAAM;AACpH;AACA,uHAAuH,UAAU;AACjI,6GAA6G,MAAM;AACnH;AACA,gDAAgD,0BAA0B;AAC1E;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yJAAyJ,UAAU;AACnK;AACA;AACA,kGAAkG,QAAQ;AAC1G,sDAAsD;AACtD,+CAA+C,4EAA+C;AAC9F;AACA;AACA;AACA,0GAA0G,OAAO;AACjH,wJAAwJ,UAAU;AAClK;AACA,yGAAyG,KAAK;AAC9G,iCAAiC;AACjC,oJAAoJ,UAAU;AAC9J;AACA,oDAAoD,0DAAkB;AACtE;AACA,6BAA6B;AAC7B;AACA,qBAAqB;AACrB,iBAAiB;AACjB;AACA;AACA;AACA;AACO;AACP;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA,YAAY,6EAA2B;AACvC;AACA,YAAY,gFAA8B;AAC1C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA,mHAAmH,kBAAkB;AACrI;AACA;AACA;AACA,kHAAkH,kBAAkB;AACpI;AACA,wCAAwC,cAAc;AACtD,yBAAyB;AACzB,YAAY,4EAA+C;AAC3D,yCAAyC,cAAc,
gGAAgG,kBAAkB;AACzK,0BAA0B;AAC1B,YAAY,4EAA+C;AAC3D,yCAAyC,cAAc;AACvD,0BAA0B;AAC1B,YAAY,4EAA+C;AAC3D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACtSwD;AACiB;AACV;AACd;AACG;AACX;AACqB;AACtB;AACxC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yCAAyC,uEAA+B;AACxE;AACA,wBAAwB,qEAA6B;AACrD;AACA,4BAA4B,0DAAkB;AAC9C;AACA;AACA;AACA;AACA,yBAAyB,qDAAa;AACtC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oCAAoC,mDAAM;AAC1C;AACA;AACA;AACA;AACA;AACA,+BAA+B,mDAAM;AACrC;AACA;AACA,+BAA+B,+DAAa;AAC5C;AACA,sBAAsB,8DAAO;AAC7B;AACA,wBAAwB,0DAAkB;AAC1C,aAAa;AACb;AACA;AACA;AACA;AACA;AACA,0BAA0B,gEAAU;AACpC;AACA;AACA,kCAAkC,iEAAe,GAAG,kBAAkB;AACtE,qBAAqB;AACrB;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,eAAe,kEAAgB,EAAE,2DAAS;AAC1C,eAAe,yEAAgB;AAC/B;AACA;AACA;AACA;AACA;AACA,iEAAe,MAAM,EAAC;;;;;;;;;;;;;;;;;;;;;ACxGtB;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAC0D;AAC1D;AACiD;AAClB;AAChB;AACf;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,gBAAgB,gDAAmB;AACnC,oBAAoB,gDAAmB,CAAC,2CAAc,IAAI,YAAY,EAAE,GAAG;AAC3E,gBAAgB,gDAAmB,CAAC,6DAAa,IAAI,qBAAqB,aAAa,gEAAgE;AACvJ,gBAAgB,gDAAmB;AACnC,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,YAAY,iEAAe,GAAG,gDAAgD;AAC9E;AACA;AACA,cAAc,gEAAU,GAAG,mCAAmC;AAC9D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,YAAY,iEAAe,GAAG,mDAAmD;AACjF,YAAY,qEAAmB,GAAG,kDAAkD;AACpF;AACA;AACA,6BAA6B,gEAAU,GAAG,mCAAmC;AAC7E;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oBAAoB;AACpB,UAAU;AACV;AACA;AACA,UAAU;AACV;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,aAAa,kBAAkB,mBAAmB,aAAa;AAC/D,cAAc,kBAAkB,qBAAqB,aAAa;AAClE,cAAc,kBAAkB;AAChC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA,kDAAkD,QAAQ;AAC1D;AACA;AACA,kGAAkG,YAAY,IAAI,IAAI,MAAM,IAAI;AAChI;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA","sources":["webpack://neural_compressor_ext_lab/./lib/constants.js","webpack://neural_compressor_ext_lab/./lib/deepcoder.js","webpack://neural_compressor_ext_lab/./lib/index.js","webpack://neural_compressor_ext_lab/./lib/utils.js"],"sourcesContent":["export var Constants;\n(function (Constants) {\n Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab';\n Constants.WORK_PATH = \"neural_coder_workspace/\";\n Constants.ICON_FORMAT_ALL_SVG = '';\n Constants.ICON_RUN = '';\n Constants.SVG = '';\n Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`;\n Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`;\n Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer';\n Constants.PLUGIN_VERSION = '0.1.0';\n})(Constants || 
(Constants = {}));\n","import { NotebookActions } from '@jupyterlab/notebook';\nimport NotebookUtilities from \"./utils\";\nimport { Constants } from './constants';\nclass JupyterlabCodeOptimizer {\n constructor(panel) {\n this.working = false;\n this.panel = panel;\n this.tmp_path = \"tmp.py\";\n this.rand = NotebookUtilities.GetRandomNum(0, 200);\n this.log_path = Constants.WORK_PATH + \"NeuralCoder\" + this.rand + \".log\";\n this.tmp_log_path = Constants.WORK_PATH + \"NeuralCoder_tmp\" + \".log\";\n this.cells = [];\n }\n async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) {\n let codes = [];\n code.forEach(function (value) {\n value = value.replace(/('\\\\n')/g, '^^^');\n value = value.replace(/\\\\n\"/g, '###');\n value = value.replace(/\\\\n'/g, '###');\n value = value.replace(/\"\\\\n/g, '@@');\n value = value.replace(/'\\\\n/g, '@@');\n value = value.replace(/\\n/g, '\\\\n');\n value = value.replace(/\"/g, '+++');\n value = value.replace(/,/g, '$');\n codes.push(value);\n });\n let gen_code = `code = \"${codes}\"\\ncodes = code.split(',')\\nwith open( '${this.tmp_path}', 'w+' ) as f:\\n for i in range(0,len(codes)):\\n f.write('# this is the beginning of a single code snippet\\\\n')\\n code_list = codes[i].replace('$',',').replace('+++','\\\"').split('\\\\n')\\n for line in code_list:\\n if('split(^^^)' in line):\\n line=line.replace('split(^^^)', 'split(\\\\'\\\\\\\\n\\\\')')\\n if('###' in line):\\n line=line.replace('###', '\\\\\\\\n\\\"')\\n if('@@' in line):\\n line=line.replace('@@', '\\\"\\\\\\\\n')\\n f.write(line+'\\\\n')`;\n const expr = { code_list: `code_list` };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, gen_code, expr, false);\n if (options === 'normal') {\n let runcode = `from neural_coder import enable\\nenable(code=\"${this.tmp_path}\",features=[\"${formatter}\"], overwrite=True)`;\n let expr = { sum: ` ` };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let run_code1 = `with open(\"${this.tmp_path}\", 'r') as f:\\n optimized_code = f.read()\\n`;\n let expr1 = { optimizedCode: \"optimized_code\" };\n let result2 = NotebookUtilities.sendKernelRequestFromNotebook(panel, run_code1, expr1, false);\n result2.then(value => {\n var _a, _b, _c, _d;\n let optimizedTexts = Object.values(value.optimizedCode.data)[0];\n let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\\\n').slice(1);\n optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3);\n for (let i = 0; i < optimizeCodes.length; ++i) {\n const cell = this.cells[i];\n const currentTexts = this.cells.map(cell => cell.model.value.text);\n const currentText = currentTexts[i];\n let optimizedtext = optimizeCodes[i];\n optimizedtext = optimizedtext.replace(/\\\\'\\\\\\\\n\\\\'/g, \"^^^\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n\"/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n'/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\"\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/'\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/\\\\n/g, '\\n');\n optimizedtext = optimizedtext.replace(/\\\\'/g, \"'\");\n optimizedtext = optimizedtext.replace(/\\^\\^\\^/g, \"'\\\\n'\");\n optimizedtext = optimizedtext.replace(/\\+\\+\\+/g, \"\\\\n\\\"\");\n optimizedtext = optimizedtext.replace(/\\@\\@\\@/g, \"\\\"\\\\n\");\n if (cell.model.value.text === currentText) {\n cell.model.value.text = optimizedtext;\n }\n const 
run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg);\n }\n });\n }\n else {\n if (formatter === '') {\n if (this.markdown) {\n this.markdown.model.value.text += \"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \\n\";\n }\n // cell.outputArea.node.innerText += \"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n\"\n let runcode1 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\\\n\")`;\n let expr1 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false);\n let runcode = `from neural_coder import enable\\nperfomance, mode, path = enable(code=\"${this.tmp_path}\",features=[], run_bench=True, args=\"${options}\")\\nwith open(path + '/bench.log', 'r') as f:\\n logs = f.readlines()\\nlog_line = logs[4]\\nlog = log_line.split(\"[\")[1].split(\"]\")[0]`;\n let expr = { path: \"path\", log: \"log\" };\n let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let fps;\n result.then(value => {\n fps = Object.values(value.log.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`\n let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\\\n`;\n let runcode = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n`\n let runcode1 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\\\n\")`;\n let expr1 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false);\n let runcode2 = `with open(\"${this.tmp_log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n });\n }\n else {\n let runcode = `from neural_coder import enable\\nperfomance, mode, path = enable(code=\"${this.tmp_path}\", features=[\"${formatter}\"], run_bench=True, args=\"${options}\")\\nwith open(path + '/bench.log', 'r') as f:\\n logs = f.readlines()\\nlog_line = logs[4]\\nlog = log_line.split(\"[\")[1].split(\"]\")[0]`;\n let expr = { path: \"path\", log: \"log\" };\n let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let fps;\n result.then(value => {\n fps = Object.values(value.log.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\\n`\n let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\\\n`;\n let runcode = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n if (next !== '') {\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n`\n let runcode2 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\\\n\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n }\n let runcode3 = `with open(\"${this.tmp_log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr3 = { path: \"\" };\n let res_tmp = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false);\n res_tmp.then(value => {\n if (formatter === 'pytorch_inc_bf16') {\n let read_log = `import re\\nwith open(\"${this.tmp_log_path}\", 'r') as f:\\n logs = f.readlines()\\n fps_list=[]\\n for log_line in logs[-4:]:\\n pat = re.compile(r\\'\\\\d+\\\\.?\\\\d+')\\n fps = re.findall(pat,log_line)[-1]\\n fps_list.append(float(fps))\\nmaxi = max(fps_list)\\nindex = fps_list.index(maxi)\\nboost = round(maxi/fps_list[0],1)\\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\\nbest_feature = features[index]\\nbest_name = feature_name[index]\\nfeature_l = []\\nfeature_l.append(best_feature)\\nfrom neural_coder import enable\\nenable(code=\"${this.tmp_path}\",features=feature_l, overwrite=True)\\nwith open(\"${this.tmp_path}\", 'r') as f:\\n optimized_code = f.read()\\n`;\n let read_expr = { boost: \"boost\", best_feature: \"best_feature\", best_name: \"best_name\", optimizeCode: \"optimized_code\", feature_l: \"fps_list\", maxi: \"maxi\", index: \"index\" };\n let read_result = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false);\n read_result.then(value => {\n var _a, _b, _c, _d;\n console.log(\"resres\", value);\n let boost = Object.values(value.boost.data)[0];\n let best_name = Object.values(value.best_name.data)[0];\n let optimizedTexts = Object.values(value.optimizeCode.data)[0];\n let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\\\n').slice(1);\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\\n`;\n }\n // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\\n`\n optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3);\n for (let i = 0; i < optimizeCodes.length; ++i) {\n const cell = this.cells[i];\n const currentTexts = this.cells.map(cell => cell.model.value.text);\n const currentText = currentTexts[i];\n let optimizedtext = optimizeCodes[i];\n optimizedtext = optimizedtext.replace(/\\\\'\\\\\\\\n\\\\'/g, \"^^^\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n\"/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n'/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\"\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/'\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/\\\\n/g, '\\n');\n optimizedtext = optimizedtext.replace(/\\\\'/g, \"'\");\n optimizedtext = optimizedtext.replace(/\\^\\^\\^/g, \"'\\\\n'\");\n optimizedtext = optimizedtext.replace(/\\+\\+\\+/g, \"\\\\n\\\"\");\n optimizedtext = optimizedtext.replace(/\\@\\@\\@/g, \"\\\"\\\\n\");\n if (cell.model.value.text === currentText) {\n cell.model.value.text = optimizedtext;\n }\n }\n // if(this.markdown){\n // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \\n`\n // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log \\n`\n // }\n let command = \"lscpu | grep 'Model name'\";\n let get_hardware = `import subprocess\\nsubp = subprocess.Popen(\"${command}\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding=\"utf-8\")\\nsubp.wait(2)\\nhardware = subp.communicate()[0].replace(\"Model name:\",\"\").strip()`;\n let expr_hardware = { hardware: \"hardware\" };\n let hard_res = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware, false);\n hard_res.then(value => {\n let hard = Object.values(value.hardware.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\\n`\n });\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log\\n`\n const run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg);\n });\n }\n });\n });\n }\n }\n }\n}\nexport class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer {\n constructor(notebookTracker, panel) {\n super(panel);\n this.notebookTracker = notebookTracker;\n this.notebookname = '';\n }\n async optimizeAction(config, formatter) {\n return this.optimizeCells(true, config, formatter);\n }\n async optimizeAllCodeCells(config, formatter, notebook, run) {\n return this.optimizeCells(false, config, formatter, notebook, run);\n }\n getCodeCells(ifmarkdown = true, notebook) {\n if (!this.notebookTracker.currentWidget) {\n return [];\n }\n const codeCells = [];\n notebook = notebook || this.notebookTracker.currentWidget.content;\n this.notebookname = notebook.title.label;\n let count = 0;\n notebook.widgets.forEach((cell) => {\n if (cell.model.type === 'code') {\n count += 1;\n codeCells.push(cell);\n }\n });\n if (ifmarkdown) {\n NotebookActions.insertBelow(notebook);\n this.notebookTracker.currentWidget.content.activeCellIndex = count + 1;\n NotebookActions.changeCellType(notebook, 'markdown');\n const activeCell = notebook.activeCell;\n if (activeCell) {\n this.markdown = activeCell;\n }\n }\n this.cells = codeCells;\n return codeCells;\n }\n async optimizeCells(selectedOnly, config, formatter, notebook, run) {\n if (this.working) {\n return new Promise((resolve, reject) => {\n resolve(\"false!\");\n });\n }\n console.log(\"arrive here 333\");\n this.working = true;\n const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu';\n if (optimize_type === 'auto-quant') {\n selectedOnly = true;\n }\n else {\n selectedOnly = false;\n }\n const selectedCells = this.getCodeCells(selectedOnly, notebook);\n let cell = selectedCells[selectedCells.length - 1];\n if (selectedCells.length === 0) {\n this.working = false;\n return new Promise((resolve, reject) => {\n resolve(\"false!\");\n });\n }\n const currentTexts = selectedCells.map(cell => cell.model.value.text);\n if (optimize_type === 'auto-quant') {\n console.log(\"arrive here 444-111\");\n if (this.markdown) {\n this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook \"${this.notebookname}\" \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \\n`;\n }\n // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook \"${this.notebookname}\"\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\\n`\n let runcode = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Auto-Quant Started ......\\\\n\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n let runcode2 = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\\\n\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n let runcode3 = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Benchmark Mode: Throughput\\\\n\")`;\n let expr3 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false);\n // cell.outputArea.node.setAttribute(\"class\",\"pad\")\n await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run);\n }\n else {\n console.log(\"arrive here 444-222\");\n await this.optimizeCode(currentTexts, optimize_type, \"\", \"\", \"normal\", true, this.panel, cell, run);\n }\n this.working = false;\n console.log(\"arrive here 555\");\n return new Promise((resolve, reject) => {\n resolve(\"success!\");\n });\n }\n applicable(formatter, currentWidget) {\n const currentNotebookWidget = this.notebookTracker.currentWidget;\n return currentNotebookWidget && currentWidget === currentNotebookWidget;\n }\n}\n","import { INotebookTracker } from '@jupyterlab/notebook';\nimport { ToolbarButton, showDialog, Dialog } from '@jupyterlab/apputils';\nimport { ISettingRegistry } from '@jupyterlab/settingregistry';\nimport { IMainMenu } from '@jupyterlab/mainmenu';\nimport { LabIcon } from '@jupyterlab/ui-components';\nimport { Widget } from '@lumino/widgets';\nimport { JupyterlabNotebookCodeOptimizer } from './deepcoder';\nimport { Constants } from './constants';\nclass neural_compressor_ext_lab {\n constructor(app, tracker, notebookpanel) {\n this.app = app;\n this.tracker = tracker;\n this.notebookpanel = notebookpanel;\n this.setupWidgetExtension();\n this.config = '';\n }\n createNew(nb) {\n this.notebookpanel = nb;\n this.notebookCodeOptimizer = new JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel);\n const svg = document.createElement(\"svg\");\n svg.innerHTML = Constants.ICON_FORMAT_ALL_SVG;\n const run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n const div = document.createElement(\"div\");\n div.setAttribute(\"class\", \"wrapper\");\n const span = 
document.createElement(\"span\");\n span.setAttribute(\"class\", \"f1ozlkqi\");\n span.innerHTML = Constants.SVG;\n const selector = document.createElement(\"select\");\n selector.setAttribute(\"class\", \"aselector\");\n selector.id = \"NeuralCoder\";\n const option1 = document.createElement(\"option\");\n option1.value = \"pytorch_inc_static_quant_fx\";\n option1.innerText = \"INC Enable INT8 (Static)\";\n option1.selected = true;\n const option2 = document.createElement(\"option\");\n option2.value = \"pytorch_inc_dynamic_quant\";\n option2.innerText = \"INC Enable INT8 (Dynamic)\";\n const option3 = document.createElement(\"option\");\n option3.value = \"pytorch_inc_bf16\";\n option3.innerText = \"INC Enable BF16\";\n const option4 = document.createElement(\"option\");\n option4.value = \"auto-quant\";\n option4.innerText = \"INC Auto Enable & Benchmark\";\n selector.options.add(option1);\n selector.options.add(option2);\n selector.options.add(option3);\n selector.options.add(option4);\n div.appendChild(selector);\n div.appendChild(span);\n const selector_widget = new Widget();\n selector_widget.node.appendChild(div);\n selector_widget.addClass(\"aselector\");\n let notebookCodeOptimizer = this.notebookCodeOptimizer;\n let config = this.config;\n const dia_input = document.createElement(\"input\");\n const dia_widget = new Widget();\n dia_widget.node.appendChild(dia_input);\n dia_widget.addClass(\"dialog\");\n const run_button = new ToolbarButton({\n tooltip: 'NeuralCoder',\n icon: new LabIcon({\n name: \"run\",\n svgstr: Constants.ICON_RUN\n }),\n onClick: async function () {\n var _a, _b, _c, _d;\n console.log(\"arrive here 111\");\n (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg);\n if (selector.options[selector.selectedIndex].value === 'auto-quant') {\n await showDialog({\n title: 'Please input execute parameters:',\n body: dia_widget,\n buttons: [Dialog.okButton({ label: 'Confirm' })]\n }).then(result => {\n if (result.button.accept) {\n config = dia_input.value;\n }\n });\n }\n console.log(\"arrive here 222\");\n await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button);\n }\n });\n nb.toolbar.insertItem(11, \"nc\", run_button);\n nb.toolbar.insertItem(12, \"selector\", selector_widget);\n }\n setupWidgetExtension() {\n this.app.docRegistry.addWidgetExtension('Notebook', this);\n }\n}\n/**\n * Initialization data for the neural_compressor_ext_lab extension.\n */\nconst plugin = {\n id: 'neural_compressor_ext_lab:plugin',\n autoStart: true,\n requires: [INotebookTracker, IMainMenu],\n optional: [ISettingRegistry],\n activate: (app, tracker, notebookpanel) => {\n new neural_compressor_ext_lab(app, tracker, notebookpanel);\n console.log('JupyterLab extension neural_compressor_ext_lab is activated!');\n }\n};\nexport default plugin;\n","/*\n * Copyright 2019-2020 The Kale Authors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport { Dialog, showDialog } from '@jupyterlab/apputils';\n// @ts-ignore\nimport SanitizedHTML from 'react-sanitized-html';\nimport * as React from 'react';\nexport default class NotebookUtilities {\n /**\n * generate random number\n * @Min\n * @Max\n */\n static GetRandomNum(Min, Max) {\n let Range;\n Range = Max - Min;\n var Rand = Math.random();\n return (Min + Math.round(Rand * Range));\n }\n /**\n * Builds an HTML container by sanitizing a list of strings and converting\n * them in valid HTML\n * @param msg A list of string with HTML formatting\n * @returns a HTMLDivElement composed of a list of spans with formatted text\n */\n static buildDialogBody(msg) {\n return (React.createElement(\"div\", null, msg.map((s, i) => {\n return (React.createElement(React.Fragment, { key: `msg-${i}` },\n React.createElement(SanitizedHTML, { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }),\n React.createElement(\"br\", null)));\n })));\n }\n /**\n * Opens a pop-up dialog in JupyterLab to display a simple message.\n * @param title The title for the message popup\n * @param msg The message as an array of strings\n * @param buttonLabel The label to use for the button. 
Default is 'OK'\n * @param buttonClassName The classname to give to the 'ok' button\n * @returns Promise - A promise once the message is closed.\n */\n static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') {\n const buttons = [\n Dialog.okButton({ label: buttonLabel, className: buttonClassName }),\n ];\n const messageBody = this.buildDialogBody(msg);\n await showDialog({ title, buttons, body: messageBody });\n }\n /**\n * Opens a pop-up dialog in JupyterLab to display a yes/no dialog.\n * @param title The title for the message popup\n * @param msg The message\n * @param acceptLabel The label to use for the accept button. Default is 'YES'\n * @param rejectLabel The label to use for the reject button. Default is 'NO'\n * @param yesButtonClassName The classname to give to the accept button.\n * @param noButtonClassName The classname to give to the cancel button.\n * @returns Promise - A promise once the message is closed.\n */\n static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') {\n const buttons = [\n Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }),\n Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }),\n ];\n const messageBody = this.buildDialogBody(msg);\n const result = await showDialog({ title, buttons, body: messageBody });\n return result.button.label === acceptLabel;\n }\n /**\n * Opens a pop-up dialog in JupyterLab with various information and button\n * triggering reloading the page.\n * @param title The title for the message popup\n * @param msg The message\n * @param buttonLabel The label to use for the button. Default is 'Refresh'\n * @param buttonClassName The classname to give to the 'refresh' button.\n * @returns Promise - A promise once the message is closed.\n */\n static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') {\n await this.showMessage(title, msg, buttonLabel, buttonClassName);\n location.reload();\n }\n /**\n * @description Creates a new JupyterLab notebook for use by the application\n * @param command The command registry\n * @returns Promise - A promise containing the notebook panel object that was created (if successful).\n */\n static async createNewNotebook(command) {\n const notebook = await command.execute('notebook:create-new', {\n activate: true,\n path: '',\n preferredLanguage: '',\n });\n await notebook.session.ready;\n return notebook;\n }\n /**\n * Safely saves the Jupyter notebook document contents to disk\n * @param notebookPanel The notebook panel containing the notebook to save\n */\n static async saveNotebook(notebookPanel) {\n if (notebookPanel) {\n await notebookPanel.context.ready;\n notebookPanel.context.save();\n return true;\n }\n return false;\n }\n /**\n * Convert the notebook contents to JSON\n * @param notebookPanel The notebook panel containing the notebook to serialize\n */\n static notebookToJSON(notebookPanel) {\n if (notebookPanel.content.model) {\n return notebookPanel.content.model.toJSON();\n }\n return null;\n }\n /**\n * @description Gets the value of a key from specified notebook's metadata.\n * @param notebookPanel The notebook to get meta data from.\n * @param key The key of the value.\n * @returns any -The value of the metadata. Returns null if the key doesn't exist.\n */\n static getMetaData(notebookPanel, key) {\n if (!notebookPanel) {\n throw new Error('The notebook is null or undefined. 
No meta data available.');\n }\n if (notebookPanel.model && notebookPanel.model.metadata.has(key)) {\n return notebookPanel.model.metadata.get(key);\n }\n return null;\n }\n /**\n * @description Sets the key value pair in the notebook's metadata.\n * If the key doesn't exists it will add one.\n * @param notebookPanel The notebook to set meta data in.\n * @param key The key of the value to create.\n * @param value The value to set.\n * @param save Default is false. Whether the notebook should be saved after the meta data is set.\n * Note: This function will not wait for the save to complete, it only sends a save request.\n * @returns The old value for the key, or undefined if it did not exist.\n */\n static setMetaData(notebookPanel, key, value, save = false) {\n var _a;\n if (!notebookPanel) {\n throw new Error('The notebook is null or undefined. No meta data available.');\n }\n const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value);\n if (save) {\n this.saveNotebook(notebookPanel);\n }\n return oldVal;\n }\n // /**\n // * Get a new Kernel, not tied to a Notebook\n // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services\n // */\n // public static async createNewKernel() {\n // // Get info about the available kernels and start a new one.\n // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => {\n // // console.log('Default spec:', kernelSpecs.default);\n // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs));\n // // use the default name\n // return { name: kernelSpecs.default };\n // });\n // return await Kernel.startNew(options).then(_kernel => {\n // return _kernel;\n // });\n // }\n // // TODO: We can use this context manager to execute commands inside a new kernel\n // // and be sure that it will be disposed of at the end.\n // // Another approach could be to create a kale_rpc Kernel, as a singleton,\n // // created at startup. The only (possible) drawback is that we can not name\n // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would\n // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls.\n // public static async executeWithNewKernel(action: Function, args: any[] = []) {\n // // create brand new kernel\n // const _k = await this.createNewKernel();\n // // execute action inside kernel\n // const res = await action(_k, ...args);\n // // close kernel\n // _k.shutdown();\n // // return result\n // return res;\n // }\n /**\n * @description This function runs code directly in the notebook's kernel and then evaluates the\n * result and returns it as a promise.\n * @param kernel The kernel to run the code in.\n * @param runCode The code to run in the kernel.\n * @param userExpressions The expressions used to capture the desired info from the executed code.\n * @param runSilent Default is false. If true, kernel will execute as quietly as possible.\n * store_history will be set to false, and no broadcast on IOPUB channel will be made.\n * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history\n * and the counter which is shown in the cells will be incremented to reflect code was run.\n * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using\n * an input_request message.\n * @param stopOnError Default is false. 
If True, does not abort the execution queue, if an exception is encountered.\n * This allows the queued execution of multiple execute_requests, even if they generate exceptions.\n * @returns Promise - A promise containing the execution results of the code as an object with\n * keys based on the user_expressions.\n * @example\n * //The code\n * const code = \"a=123\\nb=456\\nsum=a+b\";\n * //The user expressions\n * const expr = {sum: \"sum\",prod: \"a*b\",args:\"[a,b,sum]\"};\n * //Async function call (returns a promise)\n * sendKernelRequest(notebookPanel, code, expr,false);\n * //Result when promise resolves:\n * {\n * sum:{status:\"ok\",data:{\"text/plain\":\"579\"},metadata:{}},\n * prod:{status:\"ok\",data:{\"text/plain\":\"56088\"},metadata:{}},\n * args:{status:\"ok\",data:{\"text/plain\":\"[123, 456, 579]\"}}\n * }\n * @see For more information on JupyterLab messages:\n * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results\n */\n static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) {\n if (!kernel) {\n throw new Error('Kernel is null or undefined.');\n }\n // Wait for kernel to be ready before sending request\n // await kernel.status;\n const message = await kernel.requestExecute({\n allow_stdin: allowStdIn,\n code: runCode,\n silent: runSilent,\n stop_on_error: stopOnError,\n store_history: storeHistory,\n user_expressions: userExpressions,\n }).done;\n const content = message.content;\n if (content.status !== 'ok') {\n // If response is not 'ok', throw contents as error, log code\n const msg = `Code caused an error:\\n${runCode}`;\n console.error(msg);\n if (content.traceback) {\n content.traceback.forEach((line) => console.log(line.replace(/[\\u001b\\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, '')));\n }\n throw content;\n }\n // Return user_expressions of the content\n return content.user_expressions;\n }\n /**\n * Same as method sendKernelRequest but passing\n * a NotebookPanel instead of a Kernel\n */\n static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) {\n var _a, _b, _c, _d;\n if (!notebookPanel) {\n throw new Error('Notebook is null or undefined.');\n }\n // Wait for notebook panel to be ready\n await notebookPanel.activate;\n await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready);\n console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel);\n return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? 
void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError);\n }\n}\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c3b18119886a0a82200.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c3b18119886a0a82200.js deleted file mode 100644 index 246f4ba64ae..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c3b18119886a0a82200.js +++ /dev/null @@ -1,767 +0,0 @@ -"use strict"; -(self["webpackChunkneural_compressor_ext_lab"] = self["webpackChunkneural_compressor_ext_lab"] || []).push([["lib_index_js"],{ - -/***/ "./lib/constants.js": -/*!**************************!*\ - !*** ./lib/constants.js ***! - \**************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "Constants": () => (/* binding */ Constants) -/* harmony export */ }); -var Constants; -(function (Constants) { - Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab'; - Constants.WORK_PATH = "neural_coder_workspace/"; - Constants.ICON_FORMAT_ALL_SVG = ''; - Constants.ICON_RUN = ''; - Constants.SVG = ''; - Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`; - Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`; - Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer'; - Constants.PLUGIN_VERSION = '0.1.0'; -})(Constants || (Constants = {})); - - -/***/ }), - -/***/ "./lib/deepcoder.js": -/*!**************************!*\ - !*** ./lib/deepcoder.js ***! - \**************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "JupyterlabNotebookCodeOptimizer": () => (/* binding */ JupyterlabNotebookCodeOptimizer) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/notebook */ "webpack/sharing/consume/default/@jupyterlab/notebook"); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _utils__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./utils */ "./lib/utils.js"); -/* harmony import */ var _constants__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! 
./constants */ "./lib/constants.js"); - - - -class JupyterlabCodeOptimizer { - constructor(panel) { - this.working = false; - this.panel = panel; - this.tmp_path = "tmp.py"; - this.rand = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].GetRandomNum(0, 200); - this.log_path = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.WORK_PATH + "NeuralCoder" + this.rand + ".log"; - this.tmp_log_path = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.WORK_PATH + "NeuralCoder_tmp" + ".log"; - this.cells = []; - } - async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) { - let codes = []; - code.forEach(function (value) { - value = value.replace(/('\\n')/g, '^^^'); - value = value.replace(/\\n"/g, '###'); - value = value.replace(/\\n'/g, '###'); - value = value.replace(/"\\n/g, '@@'); - value = value.replace(/'\\n/g, '@@'); - value = value.replace(/\n/g, '\\n'); - value = value.replace(/"/g, '+++'); - value = value.replace(/,/g, '$'); - codes.push(value); - }); - let gen_code = `code = "${codes}"\ncodes = code.split(',')\nwith open( '${this.tmp_path}', 'w+' ) as f:\n for i in range(0,len(codes)):\n f.write('# this is the beginning of a single code snippet\\n')\n code_list = codes[i].replace('$',',').replace('+++','\"').split('\\n')\n for line in code_list:\n if('split(^^^)' in line):\n line=line.replace('split(^^^)', 'split(\\'\\\\n\\')')\n if('###' in line):\n line=line.replace('###', '\\\\n\"')\n if('@@' in line):\n line=line.replace('@@', '\"\\\\n')\n f.write(line+'\\n')`; - const expr = { code_list: `code_list` }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, gen_code, expr, false); - if (options === 'normal') { - let runcode = `from neural_coder import enable\nenable(code="${this.tmp_path}",features=["${formatter}"], overwrite=True)`; - let expr = { sum: ` ` }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let run_code1 = `with open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let expr1 = { optimizedCode: "optimized_code" }; - let result2 = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, run_code1, expr1, false); - result2.then(value => { - var _a, _b, _c, _d; - let optimizedTexts = Object.values(value.optimizedCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - const run_svg = document.createElement("svg"); - run_svg.innerHTML = 
_constants__WEBPACK_IMPORTED_MODULE_2__.Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg); - } - }); - } - else { - if (formatter === '') { - if (this.markdown) { - this.markdown.model.value.text += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \n"; - } - // cell.outputArea.node.innerText += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\n" - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n")`; - let expr1 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}",features=[], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr1 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode2 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - }); - } - else { - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}", features=["${formatter}"], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (next !== '') { - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode2 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - } - let runcode3 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr3 = { path: "" }; - let res_tmp = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - res_tmp.then(value => { - if (formatter === 'pytorch_inc_bf16') { - let read_log = `import re\nwith open("${this.tmp_log_path}", 'r') as f:\n logs = f.readlines()\n fps_list=[]\n for log_line in logs[-4:]:\n pat = re.compile(r\'\\d+\\.?\\d+')\n fps = re.findall(pat,log_line)[-1]\n fps_list.append(float(fps))\nmaxi = max(fps_list)\nindex = fps_list.index(maxi)\nboost = round(maxi/fps_list[0],1)\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\nbest_feature = features[index]\nbest_name = feature_name[index]\nfeature_l = []\nfeature_l.append(best_feature)\nfrom neural_coder import enable\nenable(code="${this.tmp_path}",features=feature_l, overwrite=True)\nwith open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let read_expr = { boost: "boost", best_feature: "best_feature", best_name: "best_name", optimizeCode: "optimized_code", feature_l: "fps_list", maxi: "maxi", index: "index" }; - let read_result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false); - read_result.then(value => { - var _a, _b, _c, _d; - console.log("resres", value); - let boost = Object.values(value.boost.data)[0]; - let best_name = Object.values(value.best_name.data)[0]; - let optimizedTexts = Object.values(value.optimizeCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\n`; - } - // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\n` - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - } - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n`; - } - // let command = "lscpu | grep 'Model name'" - // let get_hardware = `import subprocess\nsubp = subprocess.Popen("${command}",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")\nsubp.wait(2)\nhardware = subp.communicate()[0].replace("Model name:","").strip()` - // let expr_hardware = {hardware: "hardware"} - // let hard_res = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware,false); - // hard_res.then(value =>{ - // let hard = Object.values(value.hardware.data)[0] as string; - // if(this.markdown){ - // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \n` - // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n` - // } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\n` - // }) - // cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to lab_workspace\\NeuralCoder${this.rand}.log\n` - const run_svg = document.createElement("svg"); - run_svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg); - }); - } - }); - }); - } - } - } -} -class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer { - constructor(notebookTracker, panel) { - super(panel); - this.notebookTracker = notebookTracker; - this.notebookname = ''; - } - async optimizeAction(config, formatter) { - return this.optimizeCells(true, config, formatter); - } - async optimizeAllCodeCells(config, formatter, notebook, run) { - return this.optimizeCells(false, config, formatter, notebook, run); - } - getCodeCells(ifmarkdown = true, notebook) { - if (!this.notebookTracker.currentWidget) { - return []; - } - const codeCells = []; - notebook = notebook || this.notebookTracker.currentWidget.content; - this.notebookname = notebook.title.label; - let count = 0; - notebook.widgets.forEach((cell) => { - if (cell.model.type === 'code') { - count += 1; - codeCells.push(cell); - } - }); - if (ifmarkdown) { - _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.NotebookActions.insertBelow(notebook); - this.notebookTracker.currentWidget.content.activeCellIndex = count + 1; - _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.NotebookActions.changeCellType(notebook, 'markdown'); - const activeCell = notebook.activeCell; - if (activeCell) { - this.markdown = activeCell; - } - } - this.cells = codeCells; - return codeCells; - } - async optimizeCells(selectedOnly, config, formatter, notebook, run) { - if (this.working) { - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - console.log("arrive here 333"); - this.working = true; - const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu'; - if (optimize_type === 'auto-quant') { - selectedOnly = true; - } - else { - selectedOnly = false; - } - const selectedCells = this.getCodeCells(selectedOnly, notebook); - let cell = selectedCells[selectedCells.length - 1]; - if (selectedCells.length === 0) { - this.working = false; - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - const currentTexts = selectedCells.map(cell => cell.model.value.text); - if (optimize_type === 'auto-quant') { - console.log("arrive here 444-111"); - if (this.markdown) { - this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}" \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \n`; - } - // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}"\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\n` - let runcode = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Auto-Quant Started ......\\n")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - let runcode2 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\n")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - let runcode3 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Benchmark Mode: Throughput\\n")`; - let expr3 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - // cell.outputArea.node.setAttribute("class","pad") - await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run); - } - else { - console.log("arrive here 444-222"); - await this.optimizeCode(currentTexts, optimize_type, "", "", "normal", true, this.panel, cell, run); - } - this.working = false; - console.log("arrive here 555"); - return new Promise((resolve, reject) => { - resolve("success!"); - }); - } - applicable(formatter, currentWidget) { - const currentNotebookWidget = this.notebookTracker.currentWidget; - return currentNotebookWidget && currentWidget === currentNotebookWidget; - } -} - - -/***/ }), - -/***/ "./lib/index.js": -/*!**********************!*\ - !*** ./lib/index.js ***! - \**********************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/notebook */ "webpack/sharing/consume/default/@jupyterlab/notebook"); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! 
@jupyterlab/apputils */ "webpack/sharing/consume/default/@jupyterlab/apputils"); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var _jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! @jupyterlab/settingregistry */ "webpack/sharing/consume/default/@jupyterlab/settingregistry"); -/* harmony import */ var _jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__); -/* harmony import */ var _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! @jupyterlab/mainmenu */ "webpack/sharing/consume/default/@jupyterlab/mainmenu"); -/* harmony import */ var _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__); -/* harmony import */ var _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! @jupyterlab/ui-components */ "webpack/sharing/consume/default/@jupyterlab/ui-components"); -/* harmony import */ var _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__); -/* harmony import */ var _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! @lumino/widgets */ "webpack/sharing/consume/default/@lumino/widgets"); -/* harmony import */ var _lumino_widgets__WEBPACK_IMPORTED_MODULE_5___default = /*#__PURE__*/__webpack_require__.n(_lumino_widgets__WEBPACK_IMPORTED_MODULE_5__); -/* harmony import */ var _deepcoder__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./deepcoder */ "./lib/deepcoder.js"); -/* harmony import */ var _constants__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! 
./constants */ "./lib/constants.js"); - - - - - - - - -class neural_compressor_ext_lab { - constructor(app, tracker, notebookpanel) { - this.app = app; - this.tracker = tracker; - this.notebookpanel = notebookpanel; - this.setupWidgetExtension(); - this.config = ''; - } - createNew(nb) { - this.notebookpanel = nb; - this.notebookCodeOptimizer = new _deepcoder__WEBPACK_IMPORTED_MODULE_6__.JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel); - const svg = document.createElement("svg"); - svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_FORMAT_ALL_SVG; - const run_svg = document.createElement("svg"); - run_svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_RUN; - const div = document.createElement("div"); - div.setAttribute("class", "wrapper"); - const span = document.createElement("span"); - span.setAttribute("class", "f1ozlkqi"); - span.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.SVG; - const selector = document.createElement("select"); - selector.setAttribute("class", "aselector"); - selector.id = "NeuralCoder"; - const option1 = document.createElement("option"); - option1.value = "pytorch_inc_static_quant_fx"; - option1.innerText = "INC Enable INT8 (Static)"; - option1.selected = true; - const option2 = document.createElement("option"); - option2.value = "pytorch_inc_dynamic_quant"; - option2.innerText = "INC Enable INT8 (Dynamic)"; - const option3 = document.createElement("option"); - option3.value = "pytorch_inc_bf16"; - option3.innerText = "INC Enable BF16"; - const option4 = document.createElement("option"); - option4.value = "auto-quant"; - option4.innerText = "INC Auto Enable & Benchmark"; - selector.options.add(option1); - selector.options.add(option2); - selector.options.add(option3); - selector.options.add(option4); - div.appendChild(selector); - div.appendChild(span); - const selector_widget = new _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__.Widget(); - selector_widget.node.appendChild(div); - selector_widget.addClass("aselector"); - let notebookCodeOptimizer = this.notebookCodeOptimizer; - let config = this.config; - const dia_input = document.createElement("input"); - const dia_widget = new _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__.Widget(); - dia_widget.node.appendChild(dia_input); - dia_widget.addClass("dialog"); - const run_button = new _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.ToolbarButton({ - tooltip: 'NeuralCoder', - icon: new _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__.LabIcon({ - name: "run", - svgstr: _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_RUN - }), - onClick: async function () { - var _a, _b, _c, _d; - console.log("arrive here 111"); - (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg); - if (selector.options[selector.selectedIndex].value === 'auto-quant') { - await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.showDialog)({ - title: 'Please input execute parameters:', - body: dia_widget, - buttons: [_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.Dialog.okButton({ label: 'Confirm' })] - }).then(result => { - if (result.button.accept) { - config = dia_input.value; - } - }); - } - console.log("arrive here 222"); - await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button); - } - }); - nb.toolbar.insertItem(11, "nc", run_button); - nb.toolbar.insertItem(12, "selector", selector_widget); - } - setupWidgetExtension() { - this.app.docRegistry.addWidgetExtension('Notebook', this); - } -} -/** - * Initialization data for the neural_compressor_ext_lab extension. - */ -const plugin = { - id: 'neural_compressor_ext_lab:plugin', - autoStart: true, - requires: [_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.INotebookTracker, _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__.IMainMenu], - optional: [_jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__.ISettingRegistry], - activate: (app, tracker, notebookpanel) => { - new neural_compressor_ext_lab(app, tracker, notebookpanel); - console.log('JupyterLab extension neural_compressor_ext_lab is activated!'); - } -}; -/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (plugin); - - -/***/ }), - -/***/ "./lib/utils.js": -/*!**********************!*\ - !*** ./lib/utils.js ***! - \**********************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (/* binding */ NotebookUtilities) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/apputils */ "webpack/sharing/consume/default/@jupyterlab/apputils"); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var react_sanitized_html__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! react-sanitized-html */ "webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html"); -/* harmony import */ var react_sanitized_html__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(react_sanitized_html__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react"); -/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_2__); -/* - * Copyright 2019-2020 The Kale Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -// @ts-ignore - - -class NotebookUtilities { - /** - * generate random number - * @Min - * @Max - */ - static GetRandomNum(Min, Max) { - let Range; - Range = Max - Min; - var Rand = Math.random(); - return (Min + Math.round(Rand * Range)); - } - /** - * Builds an HTML container by sanitizing a list of strings and converting - * them in valid HTML - * @param msg A list of string with HTML formatting - * @returns a HTMLDivElement composed of a list of spans with formatted text - */ - static buildDialogBody(msg) { - return (react__WEBPACK_IMPORTED_MODULE_2__.createElement("div", null, msg.map((s, i) => { - return (react__WEBPACK_IMPORTED_MODULE_2__.createElement(react__WEBPACK_IMPORTED_MODULE_2__.Fragment, { key: `msg-${i}` }, - react__WEBPACK_IMPORTED_MODULE_2__.createElement((react_sanitized_html__WEBPACK_IMPORTED_MODULE_1___default()), { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }), - react__WEBPACK_IMPORTED_MODULE_2__.createElement("br", null))); - }))); - } - /** - * Opens a pop-up dialog in JupyterLab to display a simple message. - * @param title The title for the message popup - * @param msg The message as an array of strings - * @param buttonLabel The label to use for the button. Default is 'OK' - * @param buttonClassName The classname to give to the 'ok' button - * @returns Promise - A promise once the message is closed. - */ - static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') { - const buttons = [ - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.okButton({ label: buttonLabel, className: buttonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.showDialog)({ title, buttons, body: messageBody }); - } - /** - * Opens a pop-up dialog in JupyterLab to display a yes/no dialog. - * @param title The title for the message popup - * @param msg The message - * @param acceptLabel The label to use for the accept button. Default is 'YES' - * @param rejectLabel The label to use for the reject button. Default is 'NO' - * @param yesButtonClassName The classname to give to the accept button. - * @param noButtonClassName The classname to give to the cancel button. - * @returns Promise - A promise once the message is closed. - */ - static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') { - const buttons = [ - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }), - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - const result = await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.showDialog)({ title, buttons, body: messageBody }); - return result.button.label === acceptLabel; - } - /** - * Opens a pop-up dialog in JupyterLab with various information and button - * triggering reloading the page. - * @param title The title for the message popup - * @param msg The message - * @param buttonLabel The label to use for the button. Default is 'Refresh' - * @param buttonClassName The classname to give to the 'refresh' button. - * @returns Promise - A promise once the message is closed. 
- */ - static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') { - await this.showMessage(title, msg, buttonLabel, buttonClassName); - location.reload(); - } - /** - * @description Creates a new JupyterLab notebook for use by the application - * @param command The command registry - * @returns Promise - A promise containing the notebook panel object that was created (if successful). - */ - static async createNewNotebook(command) { - const notebook = await command.execute('notebook:create-new', { - activate: true, - path: '', - preferredLanguage: '', - }); - await notebook.session.ready; - return notebook; - } - /** - * Safely saves the Jupyter notebook document contents to disk - * @param notebookPanel The notebook panel containing the notebook to save - */ - static async saveNotebook(notebookPanel) { - if (notebookPanel) { - await notebookPanel.context.ready; - notebookPanel.context.save(); - return true; - } - return false; - } - /** - * Convert the notebook contents to JSON - * @param notebookPanel The notebook panel containing the notebook to serialize - */ - static notebookToJSON(notebookPanel) { - if (notebookPanel.content.model) { - return notebookPanel.content.model.toJSON(); - } - return null; - } - /** - * @description Gets the value of a key from specified notebook's metadata. - * @param notebookPanel The notebook to get meta data from. - * @param key The key of the value. - * @returns any -The value of the metadata. Returns null if the key doesn't exist. - */ - static getMetaData(notebookPanel, key) { - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - if (notebookPanel.model && notebookPanel.model.metadata.has(key)) { - return notebookPanel.model.metadata.get(key); - } - return null; - } - /** - * @description Sets the key value pair in the notebook's metadata. - * If the key doesn't exists it will add one. - * @param notebookPanel The notebook to set meta data in. - * @param key The key of the value to create. - * @param value The value to set. - * @param save Default is false. Whether the notebook should be saved after the meta data is set. - * Note: This function will not wait for the save to complete, it only sends a save request. - * @returns The old value for the key, or undefined if it did not exist. - */ - static setMetaData(notebookPanel, key, value, save = false) { - var _a; - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value); - if (save) { - this.saveNotebook(notebookPanel); - } - return oldVal; - } - // /** - // * Get a new Kernel, not tied to a Notebook - // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services - // */ - // public static async createNewKernel() { - // // Get info about the available kernels and start a new one. 
- // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => { - // // console.log('Default spec:', kernelSpecs.default); - // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs)); - // // use the default name - // return { name: kernelSpecs.default }; - // }); - // return await Kernel.startNew(options).then(_kernel => { - // return _kernel; - // }); - // } - // // TODO: We can use this context manager to execute commands inside a new kernel - // // and be sure that it will be disposed of at the end. - // // Another approach could be to create a kale_rpc Kernel, as a singleton, - // // created at startup. The only (possible) drawback is that we can not name - // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would - // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls. - // public static async executeWithNewKernel(action: Function, args: any[] = []) { - // // create brand new kernel - // const _k = await this.createNewKernel(); - // // execute action inside kernel - // const res = await action(_k, ...args); - // // close kernel - // _k.shutdown(); - // // return result - // return res; - // } - /** - * @description This function runs code directly in the notebook's kernel and then evaluates the - * result and returns it as a promise. - * @param kernel The kernel to run the code in. - * @param runCode The code to run in the kernel. - * @param userExpressions The expressions used to capture the desired info from the executed code. - * @param runSilent Default is false. If true, kernel will execute as quietly as possible. - * store_history will be set to false, and no broadcast on IOPUB channel will be made. - * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history - * and the counter which is shown in the cells will be incremented to reflect code was run. - * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using - * an input_request message. - * @param stopOnError Default is false. If True, does not abort the execution queue, if an exception is encountered. - * This allows the queued execution of multiple execute_requests, even if they generate exceptions. - * @returns Promise - A promise containing the execution results of the code as an object with - * keys based on the user_expressions. 
- * @example - * //The code - * const code = "a=123\nb=456\nsum=a+b"; - * //The user expressions - * const expr = {sum: "sum",prod: "a*b",args:"[a,b,sum]"}; - * //Async function call (returns a promise) - * sendKernelRequest(notebookPanel, code, expr,false); - * //Result when promise resolves: - * { - * sum:{status:"ok",data:{"text/plain":"579"},metadata:{}}, - * prod:{status:"ok",data:{"text/plain":"56088"},metadata:{}}, - * args:{status:"ok",data:{"text/plain":"[123, 456, 579]"}} - * } - * @see For more information on JupyterLab messages: - * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results - */ - static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - if (!kernel) { - throw new Error('Kernel is null or undefined.'); - } - // Wait for kernel to be ready before sending request - // await kernel.status; - const message = await kernel.requestExecute({ - allow_stdin: allowStdIn, - code: runCode, - silent: runSilent, - stop_on_error: stopOnError, - store_history: storeHistory, - user_expressions: userExpressions, - }).done; - const content = message.content; - if (content.status !== 'ok') { - // If response is not 'ok', throw contents as error, log code - const msg = `Code caused an error:\n${runCode}`; - console.error(msg); - if (content.traceback) { - content.traceback.forEach((line) => console.log(line.replace(/[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, ''))); - } - throw content; - } - // Return user_expressions of the content - return content.user_expressions; - } - /** - * Same as method sendKernelRequest but passing - * a NotebookPanel instead of a Kernel - */ - static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - var _a, _b, _c, _d; - if (!notebookPanel) { - throw new Error('Notebook is null or undefined.'); - } - // Wait for notebook panel to be ready - await notebookPanel.activate; - await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready); - console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel); - return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? 
void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError); - } -} - - -/***/ }) - -}]); -//# sourceMappingURL=lib_index_js.2c3b18119886a0a82200.js.map \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c3b18119886a0a82200.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c3b18119886a0a82200.js.map deleted file mode 100644 index c4bb7ec7d6d..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c3b18119886a0a82200.js.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"file":"lib_index_js.2c3b18119886a0a82200.js","mappings":";;;;;;;;;;;;;AAAO;AACP;AACA;AACA;AACA,6IAA6I,gCAAgC,gBAAgB,sBAAsB,qVAAqV,mBAAmB,gVAAgV,mBAAmB;AAC95B;AACA;AACA,yCAAyC,4BAA4B;AACrE,oCAAoC,2BAA2B;AAC/D;AACA;AACA,CAAC,8BAA8B;;;;;;;;;;;;;;;;;;;ACXwB;AACf;AACA;AACxC;AACA;AACA;AACA;AACA;AACA,oBAAoB,2DAA8B;AAClD,wBAAwB,2DAAmB;AAC3C,4BAA4B,2DAAmB;AAC/C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT,kCAAkC,MAAM,0CAA0C,cAAc;AAChG,uBAAuB;AACvB,QAAQ,4EAA+C;AACvD;AACA,2EAA2E,cAAc,eAAe,UAAU;AAClH,yBAAyB;AACzB,YAAY,4EAA+C;AAC3D,0CAA0C,cAAc;AACxD,0BAA0B;AAC1B,0BAA0B,4EAA+C;AACzE;AACA;AACA;AACA;AACA;AACA,gCAAgC,0BAA0B;AAC1D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wCAAwC,0DAAkB;AAC1D;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA;AACA,6CAA6C,cAAc;AAC3D,8BAA8B;AAC9B,gBAAgB,4EAA+C;AAC/D,wGAAwG,cAAc,uCAAuC,QAAQ;AACrK,6BAA6B;AAC7B,6BAA6B,4EAA+C;AAC5E;AACA;AACA;AACA;AACA,wIAAwI,KAAK;AAC7I;AACA,uIAAuI,KAAK;AAC5I,6GAA6G,KAAK;AAClH,gDAAgD,cAAc,8BAA8B,KAAK;AACjG,iCAAiC;AACjC,oBAAoB,4EAA+C;AACnE;AACA,8GAA8G,MAAM;AACpH;AACA,6GAA6G,MAAM;AACnH,iDAAiD,cAAc,mFAAmF,MAAM;AACxJ,kCAAkC;AAClC,oBAAoB,4EAA+C;AACnE,iDAAiD,kBAAkB,kCAAkC,KAAK;AAC1G,kCAAkC;AAClC,oBAAoB,4EAA+C;AACnE,iBAAiB;AACjB;AACA;AACA,wGAAwG,cAAc,gBAAgB,UAAU,4BAA4B,QAAQ;AACpL,6BAA6B;AAC7B,6BAA6B,4EAA+C;AAC5E;AACA;AACA;AACA;AACA,kHAAkH,MAAM,KAAK,KAAK;AAClI;AACA,iHAAiH,MAAM,KAAK,KAAK;AACjI,uFAAuF,MAAM,KAAK,KAAK;AACvG,gDAAgD,cAAc,kCAAkC,KAAK;AACrG,iCAAiC;AACjC,oBAAoB,4EAA+C;AACnE;AACA;AACA,kHAAkH,MAAM;AACxH;AACA,iHAAiH,MAAM;AACvH,qDAAqD,cAAc,mFAAmF,MAAM;AAC5J,sCAAsC;AACtC,wBAAwB,4EAA+C;AACvE;AACA,iDAAiD,kBAAkB,kCAAkC,KAAK;AAC1G,kCAAkC;AAClC,kCAAkC,4EAA+C;AACjF;AACA;AACA,oEAAoE,kBAAkB,qpBAAqpB,cAAc,oDAAoD,cAAc;AAC3zB,8CAA8C;AAC9C,8CAA8C,4EAA+C;AAC7F;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yHAAyH,YAAY;AACrI,8GAA8G,MAAM;AACpH;AACA,uHAAuH,UAAU;AACjI,6GAA6G,MAAM;AACnH;AACA,gDAAgD,0BAA0B;AAC1E;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oJAAoJ,UAAU;AAC9J;AACA;AACA,qGAAqG,QAAQ;AAC7G,wDAAwD;AACxD;AACA;AACA;AACA;AACA,yGAAyG,OAAO;AAChH,uJAAuJ,UAAU;AACjK;AACA,qGAAqG,KAAK;AAC1G,qCAAqC;AACrC,2IAA2I,UAAU;AACrJ;AACA,oDAAoD,0DAAkB;AACtE;AACA,6BAA6B;AAC7B;AACA,qBAAqB;AACrB,iBAAiB;AACjB;AACA;AACA;AACA;AACO;AACP;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA,YAAY,6EAA2B;AACvC;AACA,YAAY,gFAA8B;AAC1C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA,mHAAmH,kBAAkB;AACrI;AACA;AACA;AACA,kHAAkH,kBAAkB;AACpI;AACA,wCAAwC,cAAc;AACtD,yBAAyB;AACzB,YAAY,4EAA+C;AAC3D,yCAAyC,cAAc,gGAAgG,kBAAkB;A
ACzK,0BAA0B;AAC1B,YAAY,4EAA+C;AAC3D,yCAAyC,cAAc;AACvD,0BAA0B;AAC1B,YAAY,4EAA+C;AAC3D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACtSwD;AACiB;AACV;AACd;AACG;AACX;AACqB;AACtB;AACxC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yCAAyC,uEAA+B;AACxE;AACA,wBAAwB,qEAA6B;AACrD;AACA,4BAA4B,0DAAkB;AAC9C;AACA;AACA;AACA;AACA,yBAAyB,qDAAa;AACtC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oCAAoC,mDAAM;AAC1C;AACA;AACA;AACA;AACA;AACA,+BAA+B,mDAAM;AACrC;AACA;AACA,+BAA+B,+DAAa;AAC5C;AACA,sBAAsB,8DAAO;AAC7B;AACA,wBAAwB,0DAAkB;AAC1C,aAAa;AACb;AACA;AACA;AACA;AACA;AACA,0BAA0B,gEAAU;AACpC;AACA;AACA,kCAAkC,iEAAe,GAAG,kBAAkB;AACtE,qBAAqB;AACrB;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,eAAe,kEAAgB,EAAE,2DAAS;AAC1C,eAAe,yEAAgB;AAC/B;AACA;AACA;AACA;AACA;AACA,iEAAe,MAAM,EAAC;;;;;;;;;;;;;;;;;;;;;ACxGtB;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAC0D;AAC1D;AACiD;AAClB;AAChB;AACf;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,gBAAgB,gDAAmB;AACnC,oBAAoB,gDAAmB,CAAC,2CAAc,IAAI,YAAY,EAAE,GAAG;AAC3E,gBAAgB,gDAAmB,CAAC,6DAAa,IAAI,qBAAqB,aAAa,gEAAgE;AACvJ,gBAAgB,gDAAmB;AACnC,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,YAAY,iEAAe,GAAG,gDAAgD;AAC9E;AACA;AACA,cAAc,gEAAU,GAAG,mCAAmC;AAC9D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,YAAY,iEAAe,GAAG,mDAAmD;AACjF,YAAY,qEAAmB,GAAG,kDAAkD;AACpF;AACA;AACA,6BAA6B,gEAAU,GAAG,mCAAmC;AAC7E;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oBAAoB;AACpB,UAAU;AACV;AACA;AACA,UAAU;AACV;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,aAAa,kBAAkB,mBAAmB,aAAa;AAC/D,cAAc,kBAAkB,qBAAqB,aAAa;AAClE,cAAc,kBAAkB;AAChC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA,kDAAkD,QAAQ;AAC1D;AACA;AACA,kGAAkG,YAAY,IAAI,IAAI,MAAM,IAAI;AAChI;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA","sources":["webpack://neural_compressor_ext_lab/./lib/constants.js","webpack://neural_compressor_ext_lab/./lib/deepcoder.js","webpack://neural_compressor_ext_lab/./lib/index.js","webpack://neural_compressor_ext_lab/./lib/utils.js"],"sourcesContent":["export var Constants;\n(function (Constants) {\n Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab';\n Constants.WORK_PATH = \"neural_coder_workspace/\";\n Constants.ICON_FORMAT_ALL_SVG = '';\n Constants.ICON_RUN = '';\n Constants.SVG = '';\n Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`;\n Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`;\n Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer';\n Constants.PLUGIN_VERSION = '0.1.0';\n})(Constants || (Constants = 
{}));\n","import { NotebookActions } from '@jupyterlab/notebook';\nimport NotebookUtilities from \"./utils\";\nimport { Constants } from './constants';\nclass JupyterlabCodeOptimizer {\n constructor(panel) {\n this.working = false;\n this.panel = panel;\n this.tmp_path = \"tmp.py\";\n this.rand = NotebookUtilities.GetRandomNum(0, 200);\n this.log_path = Constants.WORK_PATH + \"NeuralCoder\" + this.rand + \".log\";\n this.tmp_log_path = Constants.WORK_PATH + \"NeuralCoder_tmp\" + \".log\";\n this.cells = [];\n }\n async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) {\n let codes = [];\n code.forEach(function (value) {\n value = value.replace(/('\\\\n')/g, '^^^');\n value = value.replace(/\\\\n\"/g, '###');\n value = value.replace(/\\\\n'/g, '###');\n value = value.replace(/\"\\\\n/g, '@@');\n value = value.replace(/'\\\\n/g, '@@');\n value = value.replace(/\\n/g, '\\\\n');\n value = value.replace(/\"/g, '+++');\n value = value.replace(/,/g, '$');\n codes.push(value);\n });\n let gen_code = `code = \"${codes}\"\\ncodes = code.split(',')\\nwith open( '${this.tmp_path}', 'w+' ) as f:\\n for i in range(0,len(codes)):\\n f.write('# this is the beginning of a single code snippet\\\\n')\\n code_list = codes[i].replace('$',',').replace('+++','\\\"').split('\\\\n')\\n for line in code_list:\\n if('split(^^^)' in line):\\n line=line.replace('split(^^^)', 'split(\\\\'\\\\\\\\n\\\\')')\\n if('###' in line):\\n line=line.replace('###', '\\\\\\\\n\\\"')\\n if('@@' in line):\\n line=line.replace('@@', '\\\"\\\\\\\\n')\\n f.write(line+'\\\\n')`;\n const expr = { code_list: `code_list` };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, gen_code, expr, false);\n if (options === 'normal') {\n let runcode = `from neural_coder import enable\\nenable(code=\"${this.tmp_path}\",features=[\"${formatter}\"], overwrite=True)`;\n let expr = { sum: ` ` };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let run_code1 = `with open(\"${this.tmp_path}\", 'r') as f:\\n optimized_code = f.read()\\n`;\n let expr1 = { optimizedCode: \"optimized_code\" };\n let result2 = NotebookUtilities.sendKernelRequestFromNotebook(panel, run_code1, expr1, false);\n result2.then(value => {\n var _a, _b, _c, _d;\n let optimizedTexts = Object.values(value.optimizedCode.data)[0];\n let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\\\n').slice(1);\n optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3);\n for (let i = 0; i < optimizeCodes.length; ++i) {\n const cell = this.cells[i];\n const currentTexts = this.cells.map(cell => cell.model.value.text);\n const currentText = currentTexts[i];\n let optimizedtext = optimizeCodes[i];\n optimizedtext = optimizedtext.replace(/\\\\'\\\\\\\\n\\\\'/g, \"^^^\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n\"/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n'/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\"\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/'\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/\\\\n/g, '\\n');\n optimizedtext = optimizedtext.replace(/\\\\'/g, \"'\");\n optimizedtext = optimizedtext.replace(/\\^\\^\\^/g, \"'\\\\n'\");\n optimizedtext = optimizedtext.replace(/\\+\\+\\+/g, \"\\\\n\\\"\");\n optimizedtext = optimizedtext.replace(/\\@\\@\\@/g, \"\\\"\\\\n\");\n if (cell.model.value.text === currentText) {\n cell.model.value.text = optimizedtext;\n }\n const run_svg = 
document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg);\n }\n });\n }\n else {\n if (formatter === '') {\n if (this.markdown) {\n this.markdown.model.value.text += \"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \\n\";\n }\n // cell.outputArea.node.innerText += \"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n\"\n let runcode1 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\\\n\")`;\n let expr1 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false);\n let runcode = `from neural_coder import enable\\nperfomance, mode, path = enable(code=\"${this.tmp_path}\",features=[], run_bench=True, args=\"${options}\")\\nwith open(path + '/bench.log', 'r') as f:\\n logs = f.readlines()\\nlog_line = logs[4]\\nlog = log_line.split(\"[\")[1].split(\"]\")[0]`;\n let expr = { path: \"path\", log: \"log\" };\n let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let fps;\n result.then(value => {\n fps = Object.values(value.log.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`\n let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\\\n`;\n let runcode = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n`\n let runcode1 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\\\n\")`;\n let expr1 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false);\n let runcode2 = `with open(\"${this.tmp_log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n });\n }\n else {\n let runcode = `from neural_coder import enable\\nperfomance, mode, path = enable(code=\"${this.tmp_path}\", features=[\"${formatter}\"], run_bench=True, args=\"${options}\")\\nwith open(path + '/bench.log', 'r') as f:\\n logs = f.readlines()\\nlog_line = logs[4]\\nlog = log_line.split(\"[\")[1].split(\"]\")[0]`;\n let expr = { path: \"path\", log: \"log\" };\n let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let fps;\n result.then(value => {\n fps = Object.values(value.log.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\\n`\n let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\\\n`;\n let runcode = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n if (next !== '') {\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n`\n let runcode2 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\\\n\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n }\n let runcode3 = `with open(\"${this.tmp_log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr3 = { path: \"\" };\n let res_tmp = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false);\n res_tmp.then(value => {\n if (formatter === 'pytorch_inc_bf16') {\n let read_log = `import re\\nwith open(\"${this.tmp_log_path}\", 'r') as f:\\n logs = f.readlines()\\n fps_list=[]\\n for log_line in logs[-4:]:\\n pat = re.compile(r\\'\\\\d+\\\\.?\\\\d+')\\n fps = re.findall(pat,log_line)[-1]\\n fps_list.append(float(fps))\\nmaxi = max(fps_list)\\nindex = fps_list.index(maxi)\\nboost = round(maxi/fps_list[0],1)\\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\\nbest_feature = features[index]\\nbest_name = feature_name[index]\\nfeature_l = []\\nfeature_l.append(best_feature)\\nfrom neural_coder import enable\\nenable(code=\"${this.tmp_path}\",features=feature_l, overwrite=True)\\nwith open(\"${this.tmp_path}\", 'r') as f:\\n optimized_code = f.read()\\n`;\n let read_expr = { boost: \"boost\", best_feature: \"best_feature\", best_name: \"best_name\", optimizeCode: \"optimized_code\", feature_l: \"fps_list\", maxi: \"maxi\", index: \"index\" };\n let read_result = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false);\n read_result.then(value => {\n var _a, _b, _c, _d;\n console.log(\"resres\", value);\n let boost = Object.values(value.boost.data)[0];\n let best_name = Object.values(value.best_name.data)[0];\n let optimizedTexts = Object.values(value.optimizeCode.data)[0];\n let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\\\n').slice(1);\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\\n`;\n }\n // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\\n`\n optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3);\n for (let i = 0; i < optimizeCodes.length; ++i) {\n const cell = this.cells[i];\n const currentTexts = this.cells.map(cell => cell.model.value.text);\n const currentText = currentTexts[i];\n let optimizedtext = optimizeCodes[i];\n optimizedtext = optimizedtext.replace(/\\\\'\\\\\\\\n\\\\'/g, \"^^^\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n\"/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n'/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\"\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/'\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/\\\\n/g, '\\n');\n optimizedtext = optimizedtext.replace(/\\\\'/g, \"'\");\n optimizedtext = optimizedtext.replace(/\\^\\^\\^/g, \"'\\\\n'\");\n optimizedtext = optimizedtext.replace(/\\+\\+\\+/g, \"\\\\n\\\"\");\n optimizedtext = optimizedtext.replace(/\\@\\@\\@/g, \"\\\"\\\\n\");\n if (cell.model.value.text === currentText) {\n cell.model.value.text = optimizedtext;\n }\n }\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log \\n`;\n }\n // let command = \"lscpu | grep 'Model name'\"\n // let get_hardware = `import subprocess\\nsubp = subprocess.Popen(\"${command}\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding=\"utf-8\")\\nsubp.wait(2)\\nhardware = subp.communicate()[0].replace(\"Model name:\",\"\").strip()`\n // let expr_hardware = {hardware: \"hardware\"}\n // let hard_res = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware,false);\n // hard_res.then(value =>{\n // let hard = Object.values(value.hardware.data)[0] as string;\n // if(this.markdown){\n // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \\n`\n // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log \\n`\n // }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\\n`\n // })\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to lab_workspace\\\\NeuralCoder${this.rand}.log\\n`\n const run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg);\n });\n }\n });\n });\n }\n }\n }\n}\nexport class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer {\n constructor(notebookTracker, panel) {\n super(panel);\n this.notebookTracker = notebookTracker;\n this.notebookname = '';\n }\n async optimizeAction(config, formatter) {\n return this.optimizeCells(true, config, formatter);\n }\n async optimizeAllCodeCells(config, formatter, notebook, run) {\n return this.optimizeCells(false, config, formatter, notebook, run);\n }\n getCodeCells(ifmarkdown = true, notebook) {\n if (!this.notebookTracker.currentWidget) {\n return [];\n }\n const codeCells = [];\n notebook = notebook || this.notebookTracker.currentWidget.content;\n this.notebookname = notebook.title.label;\n let count = 0;\n notebook.widgets.forEach((cell) => {\n if (cell.model.type === 'code') {\n count += 1;\n codeCells.push(cell);\n }\n });\n if (ifmarkdown) {\n NotebookActions.insertBelow(notebook);\n this.notebookTracker.currentWidget.content.activeCellIndex = count + 1;\n NotebookActions.changeCellType(notebook, 'markdown');\n const activeCell = notebook.activeCell;\n if (activeCell) {\n this.markdown = activeCell;\n }\n }\n this.cells = codeCells;\n return codeCells;\n }\n async optimizeCells(selectedOnly, config, formatter, notebook, run) {\n if (this.working) {\n return new Promise((resolve, reject) => {\n resolve(\"false!\");\n });\n }\n console.log(\"arrive here 333\");\n this.working = true;\n const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu';\n if (optimize_type === 'auto-quant') {\n selectedOnly = true;\n }\n else {\n selectedOnly = false;\n }\n const selectedCells = this.getCodeCells(selectedOnly, notebook);\n let cell = selectedCells[selectedCells.length - 1];\n if (selectedCells.length === 0) {\n this.working = false;\n return new Promise((resolve, reject) => {\n resolve(\"false!\");\n });\n }\n const currentTexts = selectedCells.map(cell => cell.model.value.text);\n if (optimize_type === 'auto-quant') {\n console.log(\"arrive here 444-111\");\n if (this.markdown) {\n this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook \"${this.notebookname}\" \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \\n`;\n }\n // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook \"${this.notebookname}\"\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\\n`\n let runcode = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Auto-Quant Started ......\\\\n\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n let runcode2 = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\\\n\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n let runcode3 = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Benchmark Mode: Throughput\\\\n\")`;\n let expr3 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false);\n // cell.outputArea.node.setAttribute(\"class\",\"pad\")\n await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run);\n }\n else {\n console.log(\"arrive here 444-222\");\n await this.optimizeCode(currentTexts, optimize_type, \"\", \"\", \"normal\", true, this.panel, cell, run);\n }\n this.working = false;\n console.log(\"arrive here 555\");\n return new Promise((resolve, reject) => {\n resolve(\"success!\");\n });\n }\n applicable(formatter, currentWidget) {\n const currentNotebookWidget = this.notebookTracker.currentWidget;\n return currentNotebookWidget && currentWidget === currentNotebookWidget;\n }\n}\n","import { INotebookTracker } from '@jupyterlab/notebook';\nimport { ToolbarButton, showDialog, Dialog } from '@jupyterlab/apputils';\nimport { ISettingRegistry } from '@jupyterlab/settingregistry';\nimport { IMainMenu } from '@jupyterlab/mainmenu';\nimport { LabIcon } from '@jupyterlab/ui-components';\nimport { Widget } from '@lumino/widgets';\nimport { JupyterlabNotebookCodeOptimizer } from './deepcoder';\nimport { Constants } from './constants';\nclass neural_compressor_ext_lab {\n constructor(app, tracker, notebookpanel) {\n this.app = app;\n this.tracker = tracker;\n this.notebookpanel = notebookpanel;\n this.setupWidgetExtension();\n this.config = '';\n }\n createNew(nb) {\n this.notebookpanel = nb;\n this.notebookCodeOptimizer = new JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel);\n const svg = document.createElement(\"svg\");\n svg.innerHTML = Constants.ICON_FORMAT_ALL_SVG;\n const run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n const div = document.createElement(\"div\");\n div.setAttribute(\"class\", \"wrapper\");\n const span = 
document.createElement(\"span\");\n span.setAttribute(\"class\", \"f1ozlkqi\");\n span.innerHTML = Constants.SVG;\n const selector = document.createElement(\"select\");\n selector.setAttribute(\"class\", \"aselector\");\n selector.id = \"NeuralCoder\";\n const option1 = document.createElement(\"option\");\n option1.value = \"pytorch_inc_static_quant_fx\";\n option1.innerText = \"INC Enable INT8 (Static)\";\n option1.selected = true;\n const option2 = document.createElement(\"option\");\n option2.value = \"pytorch_inc_dynamic_quant\";\n option2.innerText = \"INC Enable INT8 (Dynamic)\";\n const option3 = document.createElement(\"option\");\n option3.value = \"pytorch_inc_bf16\";\n option3.innerText = \"INC Enable BF16\";\n const option4 = document.createElement(\"option\");\n option4.value = \"auto-quant\";\n option4.innerText = \"INC Auto Enable & Benchmark\";\n selector.options.add(option1);\n selector.options.add(option2);\n selector.options.add(option3);\n selector.options.add(option4);\n div.appendChild(selector);\n div.appendChild(span);\n const selector_widget = new Widget();\n selector_widget.node.appendChild(div);\n selector_widget.addClass(\"aselector\");\n let notebookCodeOptimizer = this.notebookCodeOptimizer;\n let config = this.config;\n const dia_input = document.createElement(\"input\");\n const dia_widget = new Widget();\n dia_widget.node.appendChild(dia_input);\n dia_widget.addClass(\"dialog\");\n const run_button = new ToolbarButton({\n tooltip: 'NeuralCoder',\n icon: new LabIcon({\n name: \"run\",\n svgstr: Constants.ICON_RUN\n }),\n onClick: async function () {\n var _a, _b, _c, _d;\n console.log(\"arrive here 111\");\n (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg);\n if (selector.options[selector.selectedIndex].value === 'auto-quant') {\n await showDialog({\n title: 'Please input execute parameters:',\n body: dia_widget,\n buttons: [Dialog.okButton({ label: 'Confirm' })]\n }).then(result => {\n if (result.button.accept) {\n config = dia_input.value;\n }\n });\n }\n console.log(\"arrive here 222\");\n await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button);\n }\n });\n nb.toolbar.insertItem(11, \"nc\", run_button);\n nb.toolbar.insertItem(12, \"selector\", selector_widget);\n }\n setupWidgetExtension() {\n this.app.docRegistry.addWidgetExtension('Notebook', this);\n }\n}\n/**\n * Initialization data for the neural_compressor_ext_lab extension.\n */\nconst plugin = {\n id: 'neural_compressor_ext_lab:plugin',\n autoStart: true,\n requires: [INotebookTracker, IMainMenu],\n optional: [ISettingRegistry],\n activate: (app, tracker, notebookpanel) => {\n new neural_compressor_ext_lab(app, tracker, notebookpanel);\n console.log('JupyterLab extension neural_compressor_ext_lab is activated!');\n }\n};\nexport default plugin;\n","/*\n * Copyright 2019-2020 The Kale Authors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport { Dialog, showDialog } from '@jupyterlab/apputils';\n// @ts-ignore\nimport SanitizedHTML from 'react-sanitized-html';\nimport * as React from 'react';\nexport default class NotebookUtilities {\n /**\n * generate random number\n * @Min\n * @Max\n */\n static GetRandomNum(Min, Max) {\n let Range;\n Range = Max - Min;\n var Rand = Math.random();\n return (Min + Math.round(Rand * Range));\n }\n /**\n * Builds an HTML container by sanitizing a list of strings and converting\n * them in valid HTML\n * @param msg A list of string with HTML formatting\n * @returns a HTMLDivElement composed of a list of spans with formatted text\n */\n static buildDialogBody(msg) {\n return (React.createElement(\"div\", null, msg.map((s, i) => {\n return (React.createElement(React.Fragment, { key: `msg-${i}` },\n React.createElement(SanitizedHTML, { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }),\n React.createElement(\"br\", null)));\n })));\n }\n /**\n * Opens a pop-up dialog in JupyterLab to display a simple message.\n * @param title The title for the message popup\n * @param msg The message as an array of strings\n * @param buttonLabel The label to use for the button. 
Default is 'OK'\n * @param buttonClassName The classname to give to the 'ok' button\n * @returns Promise - A promise once the message is closed.\n */\n static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') {\n const buttons = [\n Dialog.okButton({ label: buttonLabel, className: buttonClassName }),\n ];\n const messageBody = this.buildDialogBody(msg);\n await showDialog({ title, buttons, body: messageBody });\n }\n /**\n * Opens a pop-up dialog in JupyterLab to display a yes/no dialog.\n * @param title The title for the message popup\n * @param msg The message\n * @param acceptLabel The label to use for the accept button. Default is 'YES'\n * @param rejectLabel The label to use for the reject button. Default is 'NO'\n * @param yesButtonClassName The classname to give to the accept button.\n * @param noButtonClassName The classname to give to the cancel button.\n * @returns Promise - A promise once the message is closed.\n */\n static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') {\n const buttons = [\n Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }),\n Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }),\n ];\n const messageBody = this.buildDialogBody(msg);\n const result = await showDialog({ title, buttons, body: messageBody });\n return result.button.label === acceptLabel;\n }\n /**\n * Opens a pop-up dialog in JupyterLab with various information and button\n * triggering reloading the page.\n * @param title The title for the message popup\n * @param msg The message\n * @param buttonLabel The label to use for the button. Default is 'Refresh'\n * @param buttonClassName The classname to give to the 'refresh' button.\n * @returns Promise - A promise once the message is closed.\n */\n static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') {\n await this.showMessage(title, msg, buttonLabel, buttonClassName);\n location.reload();\n }\n /**\n * @description Creates a new JupyterLab notebook for use by the application\n * @param command The command registry\n * @returns Promise - A promise containing the notebook panel object that was created (if successful).\n */\n static async createNewNotebook(command) {\n const notebook = await command.execute('notebook:create-new', {\n activate: true,\n path: '',\n preferredLanguage: '',\n });\n await notebook.session.ready;\n return notebook;\n }\n /**\n * Safely saves the Jupyter notebook document contents to disk\n * @param notebookPanel The notebook panel containing the notebook to save\n */\n static async saveNotebook(notebookPanel) {\n if (notebookPanel) {\n await notebookPanel.context.ready;\n notebookPanel.context.save();\n return true;\n }\n return false;\n }\n /**\n * Convert the notebook contents to JSON\n * @param notebookPanel The notebook panel containing the notebook to serialize\n */\n static notebookToJSON(notebookPanel) {\n if (notebookPanel.content.model) {\n return notebookPanel.content.model.toJSON();\n }\n return null;\n }\n /**\n * @description Gets the value of a key from specified notebook's metadata.\n * @param notebookPanel The notebook to get meta data from.\n * @param key The key of the value.\n * @returns any -The value of the metadata. Returns null if the key doesn't exist.\n */\n static getMetaData(notebookPanel, key) {\n if (!notebookPanel) {\n throw new Error('The notebook is null or undefined. 
No meta data available.');\n }\n if (notebookPanel.model && notebookPanel.model.metadata.has(key)) {\n return notebookPanel.model.metadata.get(key);\n }\n return null;\n }\n /**\n * @description Sets the key value pair in the notebook's metadata.\n * If the key doesn't exists it will add one.\n * @param notebookPanel The notebook to set meta data in.\n * @param key The key of the value to create.\n * @param value The value to set.\n * @param save Default is false. Whether the notebook should be saved after the meta data is set.\n * Note: This function will not wait for the save to complete, it only sends a save request.\n * @returns The old value for the key, or undefined if it did not exist.\n */\n static setMetaData(notebookPanel, key, value, save = false) {\n var _a;\n if (!notebookPanel) {\n throw new Error('The notebook is null or undefined. No meta data available.');\n }\n const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value);\n if (save) {\n this.saveNotebook(notebookPanel);\n }\n return oldVal;\n }\n // /**\n // * Get a new Kernel, not tied to a Notebook\n // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services\n // */\n // public static async createNewKernel() {\n // // Get info about the available kernels and start a new one.\n // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => {\n // // console.log('Default spec:', kernelSpecs.default);\n // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs));\n // // use the default name\n // return { name: kernelSpecs.default };\n // });\n // return await Kernel.startNew(options).then(_kernel => {\n // return _kernel;\n // });\n // }\n // // TODO: We can use this context manager to execute commands inside a new kernel\n // // and be sure that it will be disposed of at the end.\n // // Another approach could be to create a kale_rpc Kernel, as a singleton,\n // // created at startup. The only (possible) drawback is that we can not name\n // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would\n // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls.\n // public static async executeWithNewKernel(action: Function, args: any[] = []) {\n // // create brand new kernel\n // const _k = await this.createNewKernel();\n // // execute action inside kernel\n // const res = await action(_k, ...args);\n // // close kernel\n // _k.shutdown();\n // // return result\n // return res;\n // }\n /**\n * @description This function runs code directly in the notebook's kernel and then evaluates the\n * result and returns it as a promise.\n * @param kernel The kernel to run the code in.\n * @param runCode The code to run in the kernel.\n * @param userExpressions The expressions used to capture the desired info from the executed code.\n * @param runSilent Default is false. If true, kernel will execute as quietly as possible.\n * store_history will be set to false, and no broadcast on IOPUB channel will be made.\n * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history\n * and the counter which is shown in the cells will be incremented to reflect code was run.\n * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using\n * an input_request message.\n * @param stopOnError Default is false. 
If True, does not abort the execution queue, if an exception is encountered.\n * This allows the queued execution of multiple execute_requests, even if they generate exceptions.\n * @returns Promise - A promise containing the execution results of the code as an object with\n * keys based on the user_expressions.\n * @example\n * //The code\n * const code = \"a=123\\nb=456\\nsum=a+b\";\n * //The user expressions\n * const expr = {sum: \"sum\",prod: \"a*b\",args:\"[a,b,sum]\"};\n * //Async function call (returns a promise)\n * sendKernelRequest(notebookPanel, code, expr,false);\n * //Result when promise resolves:\n * {\n * sum:{status:\"ok\",data:{\"text/plain\":\"579\"},metadata:{}},\n * prod:{status:\"ok\",data:{\"text/plain\":\"56088\"},metadata:{}},\n * args:{status:\"ok\",data:{\"text/plain\":\"[123, 456, 579]\"}}\n * }\n * @see For more information on JupyterLab messages:\n * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results\n */\n static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) {\n if (!kernel) {\n throw new Error('Kernel is null or undefined.');\n }\n // Wait for kernel to be ready before sending request\n // await kernel.status;\n const message = await kernel.requestExecute({\n allow_stdin: allowStdIn,\n code: runCode,\n silent: runSilent,\n stop_on_error: stopOnError,\n store_history: storeHistory,\n user_expressions: userExpressions,\n }).done;\n const content = message.content;\n if (content.status !== 'ok') {\n // If response is not 'ok', throw contents as error, log code\n const msg = `Code caused an error:\\n${runCode}`;\n console.error(msg);\n if (content.traceback) {\n content.traceback.forEach((line) => console.log(line.replace(/[\\u001b\\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, '')));\n }\n throw content;\n }\n // Return user_expressions of the content\n return content.user_expressions;\n }\n /**\n * Same as method sendKernelRequest but passing\n * a NotebookPanel instead of a Kernel\n */\n static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) {\n var _a, _b, _c, _d;\n if (!notebookPanel) {\n throw new Error('Notebook is null or undefined.');\n }\n // Wait for notebook panel to be ready\n await notebookPanel.activate;\n await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready);\n console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel);\n return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? 
void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError);\n }\n}\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c52105b83d9cf4290a9.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c52105b83d9cf4290a9.js deleted file mode 100644 index 5291fab139f..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c52105b83d9cf4290a9.js +++ /dev/null @@ -1,767 +0,0 @@ -"use strict"; -(self["webpackChunkneural_compressor_ext_lab"] = self["webpackChunkneural_compressor_ext_lab"] || []).push([["lib_index_js"],{ - -/***/ "./lib/constants.js": -/*!**************************!*\ - !*** ./lib/constants.js ***! - \**************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "Constants": () => (/* binding */ Constants) -/* harmony export */ }); -var Constants; -(function (Constants) { - Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab'; - Constants.WORK_PATH = "neural_coder_workspace/"; - Constants.ICON_FORMAT_ALL_SVG = ''; - Constants.ICON_RUN = ''; - Constants.SVG = ''; - Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`; - Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`; - Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer'; - Constants.PLUGIN_VERSION = '0.1.0'; -})(Constants || (Constants = {})); - - -/***/ }), - -/***/ "./lib/deepcoder.js": -/*!**************************!*\ - !*** ./lib/deepcoder.js ***! - \**************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "JupyterlabNotebookCodeOptimizer": () => (/* binding */ JupyterlabNotebookCodeOptimizer) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/notebook */ "webpack/sharing/consume/default/@jupyterlab/notebook"); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _utils__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./utils */ "./lib/utils.js"); -/* harmony import */ var _constants__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! 
./constants */ "./lib/constants.js"); - - - -class JupyterlabCodeOptimizer { - constructor(panel) { - this.working = false; - this.panel = panel; - this.tmp_path = "tmp.py"; - this.rand = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].GetRandomNum(0, 200); - this.log_path = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.WORK_PATH + "NeuralCoder" + this.rand + ".log"; - this.tmp_log_path = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.WORK_PATH + "NeuralCoder_tmp" + ".log"; - this.cells = []; - } - async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) { - let codes = []; - code.forEach(function (value) { - value = value.replace(/('\\n')/g, '^^^'); - value = value.replace(/\\n"/g, '###'); - value = value.replace(/\\n'/g, '###'); - value = value.replace(/"\\n/g, '@@'); - value = value.replace(/'\\n/g, '@@'); - value = value.replace(/\n/g, '\\n'); - value = value.replace(/"/g, '+++'); - value = value.replace(/,/g, '$'); - codes.push(value); - }); - let gen_code = `code = "${codes}"\ncodes = code.split(',')\nwith open( '${this.tmp_path}', 'w+' ) as f:\n for i in range(0,len(codes)):\n f.write('# this is the beginning of a single code snippet\\n')\n code_list = codes[i].replace('$',',').replace('+++','\"').split('\\n')\n for line in code_list:\n if('split(^^^)' in line):\n line=line.replace('split(^^^)', 'split(\\'\\\\n\\')')\n if('###' in line):\n line=line.replace('###', '\\\\n\"')\n if('@@' in line):\n line=line.replace('@@', '\"\\\\n')\n f.write(line+'\\n')`; - const expr = { code_list: `code_list` }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, gen_code, expr, false); - if (options === 'normal') { - let runcode = `from neural_coder import enable\nenable(code="${this.tmp_path}",features=["${formatter}"], overwrite=True)`; - let expr = { sum: ` ` }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let run_code1 = `with open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let expr1 = { optimizedCode: "optimized_code" }; - let result2 = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, run_code1, expr1, false); - result2.then(value => { - var _a, _b, _c, _d; - let optimizedTexts = Object.values(value.optimizedCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - const run_svg = document.createElement("svg"); - run_svg.innerHTML = 
_constants__WEBPACK_IMPORTED_MODULE_2__.Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg); - } - }); - } - else { - if (formatter === '') { - if (this.markdown) { - this.markdown.model.value.text += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \n"; - } - // cell.outputArea.node.innerText += "[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\n" - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n")`; - let expr1 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}",features=[], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode1 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr1 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode1, expr1, false); - let runcode2 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - }); - } - else { - let runcode = `from neural_coder import enable\nperfomance, mode, path = enable(code="${this.tmp_path}", features=["${formatter}"], run_bench=True, args="${options}")\nwith open(path + '/bench.log', 'r') as f:\n logs = f.readlines()\nlog_line = logs[4]\nlog = log_line.split("[")[1].split("]")[0]`; - let expr = { path: "path", log: "log" }; - let result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(panel, runcode, expr, false); - let fps; - result.then(value => { - fps = Object.values(value.log.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\n` - let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\n`; - let runcode = `with open("${this.log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - if (next !== '') { - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\n`; - } - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\n` - let runcode2 = `with open("${this.log_path}", 'a' ) as f:\n f.write("[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - } - let runcode3 = `with open("${this.tmp_log_path}", 'a' ) as f:\n f.write("${text}")`; - let expr3 = { path: "" }; - let res_tmp = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - res_tmp.then(value => { - if (formatter === 'pytorch_inc_bf16') { - let read_log = `import re\nwith open("${this.tmp_log_path}", 'r') as f:\n logs = f.readlines()\n fps_list=[]\n for log_line in logs[-4:]:\n pat = re.compile(r\'\\d+\\.?\\d+')\n fps = re.findall(pat,log_line)[-1]\n fps_list.append(float(fps))\nmaxi = max(fps_list)\nindex = fps_list.index(maxi)\nboost = round(maxi/fps_list[0],1)\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\nbest_feature = features[index]\nbest_name = feature_name[index]\nfeature_l = []\nfeature_l.append(best_feature)\nfrom neural_coder import enable\nenable(code="${this.tmp_path}",features=feature_l, overwrite=True)\nwith open("${this.tmp_path}", 'r') as f:\n optimized_code = f.read()\n`; - let read_expr = { boost: "boost", best_feature: "best_feature", best_name: "best_name", optimizeCode: "optimized_code", feature_l: "fps_list", maxi: "maxi", index: "index" }; - let read_result = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false); - read_result.then(value => { - var _a, _b, _c, _d; - console.log("resres", value); - let boost = Object.values(value.boost.data)[0]; - let best_name = Object.values(value.best_name.data)[0]; - let optimizedTexts = Object.values(value.optimizeCode.data)[0]; - let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\n').slice(1); - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\n`; - } - // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\n` - optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3); - for (let i = 0; i < optimizeCodes.length; ++i) { - const cell = this.cells[i]; - const currentTexts = this.cells.map(cell => cell.model.value.text); - const currentText = currentTexts[i]; - let optimizedtext = optimizeCodes[i]; - optimizedtext = optimizedtext.replace(/\\'\\\\n\\'/g, "^^^"); - optimizedtext = optimizedtext.replace(/\\\\n"/g, "+++"); - optimizedtext = optimizedtext.replace(/\\\\n'/g, "+++"); - optimizedtext = optimizedtext.replace(/"\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/'\\\\n/g, "@@@"); - optimizedtext = optimizedtext.replace(/\\n/g, '\n'); - optimizedtext = optimizedtext.replace(/\\'/g, "'"); - optimizedtext = optimizedtext.replace(/\^\^\^/g, "'\\n'"); - optimizedtext = optimizedtext.replace(/\+\+\+/g, "\\n\""); - optimizedtext = optimizedtext.replace(/\@\@\@/g, "\"\\n"); - if (cell.model.value.text === currentText) { - cell.model.value.text = optimizedtext; - } - } - // if(this.markdown){ - // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \n` - // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n` - // } - let command = "lscpu | grep 'Model name'"; - let get_hardware = `import subprocess\nsubp = subprocess.Popen("${command}",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")\nsubp.wait(2)\nhardware = subp.communicate()[0].replace("Model name:","").strip()`; - let expr_hardware = { hardware: "hardware" }; - let hard_res = _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware, false); - hard_res.then(value => { - let hard = Object.values(value.hardware.data)[0]; - if (this.markdown) { - this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log \n`; - } - cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\n`; - }); - cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\NeuralCoder${this.rand}.log\n`; - const run_svg = document.createElement("svg"); - run_svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_2__.Constants.ICON_RUN; - (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg); - }); - } - }); - }); - } - } - } -} -class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer { - constructor(notebookTracker, panel) { - super(panel); - this.notebookTracker = notebookTracker; - this.notebookname = ''; - } - async optimizeAction(config, formatter) { - return this.optimizeCells(true, config, formatter); - } - async optimizeAllCodeCells(config, formatter, notebook, run) { - return this.optimizeCells(false, config, formatter, notebook, run); - } - getCodeCells(ifmarkdown = true, notebook) { - if (!this.notebookTracker.currentWidget) { - return []; - } - const codeCells = []; - notebook = notebook || this.notebookTracker.currentWidget.content; - this.notebookname = notebook.title.label; - let count = 0; - notebook.widgets.forEach((cell) => { - if (cell.model.type === 'code') { - count += 1; - codeCells.push(cell); - } - }); - if (ifmarkdown) { - _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.NotebookActions.insertBelow(notebook); - this.notebookTracker.currentWidget.content.activeCellIndex = count + 1; - _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.NotebookActions.changeCellType(notebook, 'markdown'); - const activeCell = notebook.activeCell; - if (activeCell) { - this.markdown = activeCell; - } - } - this.cells = codeCells; - return codeCells; - } - async optimizeCells(selectedOnly, config, formatter, notebook, run) { - if (this.working) { - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - console.log("arrive here 333"); - this.working = true; - const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu'; - if (optimize_type === 'auto-quant') { - selectedOnly = true; - } - else { - selectedOnly = false; - } - const selectedCells = this.getCodeCells(selectedOnly, notebook); - let cell = selectedCells[selectedCells.length - 1]; - if (selectedCells.length === 0) { - this.working = false; - return new Promise((resolve, reject) => { - resolve("false!"); - }); - } - const currentTexts = selectedCells.map(cell => cell.model.value.text); - if (optimize_type === 'auto-quant') { - console.log("arrive here 444-111"); - if (this.markdown) { - this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}" \n`; - this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \n`; - } - // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook "${this.notebookname}"\n` - // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\n` - let runcode = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Auto-Quant Started ......\\n")`; - let expr = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode, expr, false); - let runcode2 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\n")`; - let expr2 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false); - let runcode3 = `with open('${this.log_path}', 'a' ) as f:\n f.write("[NeuralCoder INFO] Benchmark Mode: Throughput\\n")`; - let expr3 = { path: "" }; - _utils__WEBPACK_IMPORTED_MODULE_1__["default"].sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false); - // cell.outputArea.node.setAttribute("class","pad") - await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run); - await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run); - } - else { - console.log("arrive here 444-222"); - await this.optimizeCode(currentTexts, optimize_type, "", "", "normal", true, this.panel, cell, run); - } - this.working = false; - console.log("arrive here 555"); - return new Promise((resolve, reject) => { - resolve("success!"); - }); - } - applicable(formatter, currentWidget) { - const currentNotebookWidget = this.notebookTracker.currentWidget; - return currentNotebookWidget && currentWidget === currentNotebookWidget; - } -} - - -/***/ }), - -/***/ "./lib/index.js": -/*!**********************!*\ - !*** ./lib/index.js ***! - \**********************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/notebook */ "webpack/sharing/consume/default/@jupyterlab/notebook"); -/* harmony import */ var _jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! 
@jupyterlab/apputils */ "webpack/sharing/consume/default/@jupyterlab/apputils"); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var _jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! @jupyterlab/settingregistry */ "webpack/sharing/consume/default/@jupyterlab/settingregistry"); -/* harmony import */ var _jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__); -/* harmony import */ var _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! @jupyterlab/mainmenu */ "webpack/sharing/consume/default/@jupyterlab/mainmenu"); -/* harmony import */ var _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__); -/* harmony import */ var _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! @jupyterlab/ui-components */ "webpack/sharing/consume/default/@jupyterlab/ui-components"); -/* harmony import */ var _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__); -/* harmony import */ var _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! @lumino/widgets */ "webpack/sharing/consume/default/@lumino/widgets"); -/* harmony import */ var _lumino_widgets__WEBPACK_IMPORTED_MODULE_5___default = /*#__PURE__*/__webpack_require__.n(_lumino_widgets__WEBPACK_IMPORTED_MODULE_5__); -/* harmony import */ var _deepcoder__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./deepcoder */ "./lib/deepcoder.js"); -/* harmony import */ var _constants__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! 
./constants */ "./lib/constants.js"); - - - - - - - - -class neural_compressor_ext_lab { - constructor(app, tracker, notebookpanel) { - this.app = app; - this.tracker = tracker; - this.notebookpanel = notebookpanel; - this.setupWidgetExtension(); - this.config = ''; - } - createNew(nb) { - this.notebookpanel = nb; - this.notebookCodeOptimizer = new _deepcoder__WEBPACK_IMPORTED_MODULE_6__.JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel); - const svg = document.createElement("svg"); - svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_FORMAT_ALL_SVG; - const run_svg = document.createElement("svg"); - run_svg.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_RUN; - const div = document.createElement("div"); - div.setAttribute("class", "wrapper"); - const span = document.createElement("span"); - span.setAttribute("class", "f1ozlkqi"); - span.innerHTML = _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.SVG; - const selector = document.createElement("select"); - selector.setAttribute("class", "aselector"); - selector.id = "NeuralCoder"; - const option1 = document.createElement("option"); - option1.value = "pytorch_inc_static_quant_fx"; - option1.innerText = "INC Enable INT8 (Static)"; - option1.selected = true; - const option2 = document.createElement("option"); - option2.value = "pytorch_inc_dynamic_quant"; - option2.innerText = "INC Enable INT8 (Dynamic)"; - const option3 = document.createElement("option"); - option3.value = "pytorch_inc_bf16"; - option3.innerText = "INC Enable BF16"; - const option4 = document.createElement("option"); - option4.value = "auto-quant"; - option4.innerText = "INC Auto Enable & Benchmark"; - selector.options.add(option1); - selector.options.add(option2); - selector.options.add(option3); - selector.options.add(option4); - div.appendChild(selector); - div.appendChild(span); - const selector_widget = new _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__.Widget(); - selector_widget.node.appendChild(div); - selector_widget.addClass("aselector"); - let notebookCodeOptimizer = this.notebookCodeOptimizer; - let config = this.config; - const dia_input = document.createElement("input"); - const dia_widget = new _lumino_widgets__WEBPACK_IMPORTED_MODULE_5__.Widget(); - dia_widget.node.appendChild(dia_input); - dia_widget.addClass("dialog"); - const run_button = new _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.ToolbarButton({ - tooltip: 'NeuralCoder', - icon: new _jupyterlab_ui_components__WEBPACK_IMPORTED_MODULE_4__.LabIcon({ - name: "run", - svgstr: _constants__WEBPACK_IMPORTED_MODULE_7__.Constants.ICON_RUN - }), - onClick: async function () { - var _a, _b, _c, _d; - console.log("arrive here 111"); - (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg); - if (selector.options[selector.selectedIndex].value === 'auto-quant') { - await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.showDialog)({ - title: 'Please input execute parameters:', - body: dia_widget, - buttons: [_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_1__.Dialog.okButton({ label: 'Confirm' })] - }).then(result => { - if (result.button.accept) { - config = dia_input.value; - } - }); - } - console.log("arrive here 222"); - await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button); - } - }); - nb.toolbar.insertItem(11, "nc", run_button); - nb.toolbar.insertItem(12, "selector", selector_widget); - } - setupWidgetExtension() { - this.app.docRegistry.addWidgetExtension('Notebook', this); - } -} -/** - * Initialization data for the neural_compressor_ext_lab extension. - */ -const plugin = { - id: 'neural_compressor_ext_lab:plugin', - autoStart: true, - requires: [_jupyterlab_notebook__WEBPACK_IMPORTED_MODULE_0__.INotebookTracker, _jupyterlab_mainmenu__WEBPACK_IMPORTED_MODULE_3__.IMainMenu], - optional: [_jupyterlab_settingregistry__WEBPACK_IMPORTED_MODULE_2__.ISettingRegistry], - activate: (app, tracker, notebookpanel) => { - new neural_compressor_ext_lab(app, tracker, notebookpanel); - console.log('JupyterLab extension neural_compressor_ext_lab is activated!'); - } -}; -/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (plugin); - - -/***/ }), - -/***/ "./lib/utils.js": -/*!**********************!*\ - !*** ./lib/utils.js ***! - \**********************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (/* binding */ NotebookUtilities) -/* harmony export */ }); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @jupyterlab/apputils */ "webpack/sharing/consume/default/@jupyterlab/apputils"); -/* harmony import */ var _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var react_sanitized_html__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! react-sanitized-html */ "webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html"); -/* harmony import */ var react_sanitized_html__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(react_sanitized_html__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react"); -/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_2__); -/* - * Copyright 2019-2020 The Kale Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -// @ts-ignore - - -class NotebookUtilities { - /** - * generate random number - * @Min - * @Max - */ - static GetRandomNum(Min, Max) { - let Range; - Range = Max - Min; - var Rand = Math.random(); - return (Min + Math.round(Rand * Range)); - } - /** - * Builds an HTML container by sanitizing a list of strings and converting - * them in valid HTML - * @param msg A list of string with HTML formatting - * @returns a HTMLDivElement composed of a list of spans with formatted text - */ - static buildDialogBody(msg) { - return (react__WEBPACK_IMPORTED_MODULE_2__.createElement("div", null, msg.map((s, i) => { - return (react__WEBPACK_IMPORTED_MODULE_2__.createElement(react__WEBPACK_IMPORTED_MODULE_2__.Fragment, { key: `msg-${i}` }, - react__WEBPACK_IMPORTED_MODULE_2__.createElement((react_sanitized_html__WEBPACK_IMPORTED_MODULE_1___default()), { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }), - react__WEBPACK_IMPORTED_MODULE_2__.createElement("br", null))); - }))); - } - /** - * Opens a pop-up dialog in JupyterLab to display a simple message. - * @param title The title for the message popup - * @param msg The message as an array of strings - * @param buttonLabel The label to use for the button. Default is 'OK' - * @param buttonClassName The classname to give to the 'ok' button - * @returns Promise - A promise once the message is closed. - */ - static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') { - const buttons = [ - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.okButton({ label: buttonLabel, className: buttonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.showDialog)({ title, buttons, body: messageBody }); - } - /** - * Opens a pop-up dialog in JupyterLab to display a yes/no dialog. - * @param title The title for the message popup - * @param msg The message - * @param acceptLabel The label to use for the accept button. Default is 'YES' - * @param rejectLabel The label to use for the reject button. Default is 'NO' - * @param yesButtonClassName The classname to give to the accept button. - * @param noButtonClassName The classname to give to the cancel button. - * @returns Promise - A promise once the message is closed. - */ - static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') { - const buttons = [ - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }), - _jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }), - ]; - const messageBody = this.buildDialogBody(msg); - const result = await (0,_jupyterlab_apputils__WEBPACK_IMPORTED_MODULE_0__.showDialog)({ title, buttons, body: messageBody }); - return result.button.label === acceptLabel; - } - /** - * Opens a pop-up dialog in JupyterLab with various information and button - * triggering reloading the page. - * @param title The title for the message popup - * @param msg The message - * @param buttonLabel The label to use for the button. Default is 'Refresh' - * @param buttonClassName The classname to give to the 'refresh' button. - * @returns Promise - A promise once the message is closed. 
- */ - static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') { - await this.showMessage(title, msg, buttonLabel, buttonClassName); - location.reload(); - } - /** - * @description Creates a new JupyterLab notebook for use by the application - * @param command The command registry - * @returns Promise - A promise containing the notebook panel object that was created (if successful). - */ - static async createNewNotebook(command) { - const notebook = await command.execute('notebook:create-new', { - activate: true, - path: '', - preferredLanguage: '', - }); - await notebook.session.ready; - return notebook; - } - /** - * Safely saves the Jupyter notebook document contents to disk - * @param notebookPanel The notebook panel containing the notebook to save - */ - static async saveNotebook(notebookPanel) { - if (notebookPanel) { - await notebookPanel.context.ready; - notebookPanel.context.save(); - return true; - } - return false; - } - /** - * Convert the notebook contents to JSON - * @param notebookPanel The notebook panel containing the notebook to serialize - */ - static notebookToJSON(notebookPanel) { - if (notebookPanel.content.model) { - return notebookPanel.content.model.toJSON(); - } - return null; - } - /** - * @description Gets the value of a key from specified notebook's metadata. - * @param notebookPanel The notebook to get meta data from. - * @param key The key of the value. - * @returns any -The value of the metadata. Returns null if the key doesn't exist. - */ - static getMetaData(notebookPanel, key) { - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - if (notebookPanel.model && notebookPanel.model.metadata.has(key)) { - return notebookPanel.model.metadata.get(key); - } - return null; - } - /** - * @description Sets the key value pair in the notebook's metadata. - * If the key doesn't exists it will add one. - * @param notebookPanel The notebook to set meta data in. - * @param key The key of the value to create. - * @param value The value to set. - * @param save Default is false. Whether the notebook should be saved after the meta data is set. - * Note: This function will not wait for the save to complete, it only sends a save request. - * @returns The old value for the key, or undefined if it did not exist. - */ - static setMetaData(notebookPanel, key, value, save = false) { - var _a; - if (!notebookPanel) { - throw new Error('The notebook is null or undefined. No meta data available.'); - } - const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value); - if (save) { - this.saveNotebook(notebookPanel); - } - return oldVal; - } - // /** - // * Get a new Kernel, not tied to a Notebook - // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services - // */ - // public static async createNewKernel() { - // // Get info about the available kernels and start a new one. 
- // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => { - // // console.log('Default spec:', kernelSpecs.default); - // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs)); - // // use the default name - // return { name: kernelSpecs.default }; - // }); - // return await Kernel.startNew(options).then(_kernel => { - // return _kernel; - // }); - // } - // // TODO: We can use this context manager to execute commands inside a new kernel - // // and be sure that it will be disposed of at the end. - // // Another approach could be to create a kale_rpc Kernel, as a singleton, - // // created at startup. The only (possible) drawback is that we can not name - // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would - // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls. - // public static async executeWithNewKernel(action: Function, args: any[] = []) { - // // create brand new kernel - // const _k = await this.createNewKernel(); - // // execute action inside kernel - // const res = await action(_k, ...args); - // // close kernel - // _k.shutdown(); - // // return result - // return res; - // } - /** - * @description This function runs code directly in the notebook's kernel and then evaluates the - * result and returns it as a promise. - * @param kernel The kernel to run the code in. - * @param runCode The code to run in the kernel. - * @param userExpressions The expressions used to capture the desired info from the executed code. - * @param runSilent Default is false. If true, kernel will execute as quietly as possible. - * store_history will be set to false, and no broadcast on IOPUB channel will be made. - * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history - * and the counter which is shown in the cells will be incremented to reflect code was run. - * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using - * an input_request message. - * @param stopOnError Default is false. If True, does not abort the execution queue, if an exception is encountered. - * This allows the queued execution of multiple execute_requests, even if they generate exceptions. - * @returns Promise - A promise containing the execution results of the code as an object with - * keys based on the user_expressions. 
- * @example - * //The code - * const code = "a=123\nb=456\nsum=a+b"; - * //The user expressions - * const expr = {sum: "sum",prod: "a*b",args:"[a,b,sum]"}; - * //Async function call (returns a promise) - * sendKernelRequest(notebookPanel, code, expr,false); - * //Result when promise resolves: - * { - * sum:{status:"ok",data:{"text/plain":"579"},metadata:{}}, - * prod:{status:"ok",data:{"text/plain":"56088"},metadata:{}}, - * args:{status:"ok",data:{"text/plain":"[123, 456, 579]"}} - * } - * @see For more information on JupyterLab messages: - * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results - */ - static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - if (!kernel) { - throw new Error('Kernel is null or undefined.'); - } - // Wait for kernel to be ready before sending request - // await kernel.status; - const message = await kernel.requestExecute({ - allow_stdin: allowStdIn, - code: runCode, - silent: runSilent, - stop_on_error: stopOnError, - store_history: storeHistory, - user_expressions: userExpressions, - }).done; - const content = message.content; - if (content.status !== 'ok') { - // If response is not 'ok', throw contents as error, log code - const msg = `Code caused an error:\n${runCode}`; - console.error(msg); - if (content.traceback) { - content.traceback.forEach((line) => console.log(line.replace(/[\u001b\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, ''))); - } - throw content; - } - // Return user_expressions of the content - return content.user_expressions; - } - /** - * Same as method sendKernelRequest but passing - * a NotebookPanel instead of a Kernel - */ - static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) { - var _a, _b, _c, _d; - if (!notebookPanel) { - throw new Error('Notebook is null or undefined.'); - } - // Wait for notebook panel to be ready - await notebookPanel.activate; - await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready); - console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel); - return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? 
void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError); - } -} - - -/***/ }) - -}]); -//# sourceMappingURL=lib_index_js.2c52105b83d9cf4290a9.js.map \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c52105b83d9cf4290a9.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c52105b83d9cf4290a9.js.map deleted file mode 100644 index 3515ea5e505..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/lib_index_js.2c52105b83d9cf4290a9.js.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"file":"lib_index_js.2c52105b83d9cf4290a9.js","mappings":";;;;;;;;;;;;;AAAO;AACP;AACA;AACA;AACA,6IAA6I,gCAAgC,gBAAgB,sBAAsB,qVAAqV,mBAAmB,gVAAgV,mBAAmB;AAC95B;AACA;AACA,yCAAyC,4BAA4B;AACrE,oCAAoC,2BAA2B;AAC/D;AACA;AACA,CAAC,8BAA8B;;;;;;;;;;;;;;;;;;;ACXwB;AACf;AACA;AACxC;AACA;AACA;AACA;AACA;AACA,oBAAoB,2DAA8B;AAClD,wBAAwB,2DAAmB;AAC3C,4BAA4B,2DAAmB;AAC/C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT,kCAAkC,MAAM,0CAA0C,cAAc;AAChG,uBAAuB;AACvB,QAAQ,4EAA+C;AACvD;AACA,2EAA2E,cAAc,eAAe,UAAU;AAClH,yBAAyB;AACzB,YAAY,4EAA+C;AAC3D,0CAA0C,cAAc;AACxD,0BAA0B;AAC1B,0BAA0B,4EAA+C;AACzE;AACA;AACA;AACA;AACA;AACA,gCAAgC,0BAA0B;AAC1D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wCAAwC,0DAAkB;AAC1D;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA;AACA,6CAA6C,cAAc;AAC3D,8BAA8B;AAC9B,gBAAgB,4EAA+C;AAC/D,wGAAwG,cAAc,uCAAuC,QAAQ;AACrK,6BAA6B;AAC7B,6BAA6B,4EAA+C;AAC5E;AACA;AACA;AACA;AACA,wIAAwI,KAAK;AAC7I;AACA,uIAAuI,KAAK;AAC5I,6GAA6G,KAAK;AAClH,gDAAgD,cAAc,8BAA8B,KAAK;AACjG,iCAAiC;AACjC,oBAAoB,4EAA+C;AACnE;AACA,8GAA8G,MAAM;AACpH;AACA,6GAA6G,MAAM;AACnH,iDAAiD,cAAc,mFAAmF,MAAM;AACxJ,kCAAkC;AAClC,oBAAoB,4EAA+C;AACnE,iDAAiD,kBAAkB,kCAAkC,KAAK;AAC1G,kCAAkC;AAClC,oBAAoB,4EAA+C;AACnE,iBAAiB;AACjB;AACA;AACA,wGAAwG,cAAc,gBAAgB,UAAU,4BAA4B,QAAQ;AACpL,6BAA6B;AAC7B,6BAA6B,4EAA+C;AAC5E;AACA;AACA;AACA;AACA,kHAAkH,MAAM,KAAK,KAAK;AAClI;AACA,iHAAiH,MAAM,KAAK,KAAK;AACjI,uFAAuF,MAAM,KAAK,KAAK;AACvG,gDAAgD,cAAc,kCAAkC,KAAK;AACrG,iCAAiC;AACjC,oBAAoB,4EAA+C;AACnE;AACA;AACA,kHAAkH,MAAM;AACxH;AACA,iHAAiH,MAAM;AACvH,qDAAqD,cAAc,mFAAmF,MAAM;AAC5J,sCAAsC;AACtC,wBAAwB,4EAA+C;AACvE;AACA,iDAAiD,kBAAkB,kCAAkC,KAAK;AAC1G,kCAAkC;AAClC,kCAAkC,4EAA+C;AACjF;AACA;AACA,oEAAoE,kBAAkB,qpBAAqpB,cAAc,oDAAoD,cAAc;AAC3zB,8CAA8C;AAC9C,8CAA8C,4EAA+C;AAC7F;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yHAAyH,YAAY;AACrI,8GAA8G,MAAM;AACpH;AACA,uHAAuH,UAAU;AACjI,6GAA6G,MAAM;AACnH;AACA,gDAAgD,0BAA0B;AAC1E;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yJAAyJ,UAAU;AACnK;AACA;AACA,kGAAkG,QAAQ;AAC1G,sDAAsD;AACtD,+CAA+C,4EAA+C;AAC9F;AACA;AACA;AACA,0GAA0G,OAAO;AACjH,wJAAwJ,UAAU;AAClK;AACA,sGAAsG,KAAK;AAC3G,iCAAiC;AACjC,gJAAgJ,UAAU;AAC1J;AACA,oDAAoD,0DAAkB;AACtE;AACA,6BAA6B;AAC7B;AACA,qBAAqB;AACrB,iBAAiB;AACjB;AACA;AACA;AACA;AACO;AACP;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA,YAAY,6EAA2B;AACvC;AACA,YAAY,gFAA8B;AAC1C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,aAAa;AACb;AACA;AACA;AACA;AACA;AACA;AACA,mHAAmH,kBAAkB;AACrI;AACA;AACA;AACA,kHAAkH,kBAAkB;AACpI;AACA,wCAAwC,cAAc;AACtD,yBAAyB;AACzB,YAAY,4EAA+C;AAC3D,yCAAyC,cAAc,
gGAAgG,kBAAkB;AACzK,0BAA0B;AAC1B,YAAY,4EAA+C;AAC3D,yCAAyC,cAAc;AACvD,0BAA0B;AAC1B,YAAY,4EAA+C;AAC3D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACtSwD;AACiB;AACV;AACd;AACG;AACX;AACqB;AACtB;AACxC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yCAAyC,uEAA+B;AACxE;AACA,wBAAwB,qEAA6B;AACrD;AACA,4BAA4B,0DAAkB;AAC9C;AACA;AACA;AACA;AACA,yBAAyB,qDAAa;AACtC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oCAAoC,mDAAM;AAC1C;AACA;AACA;AACA;AACA;AACA,+BAA+B,mDAAM;AACrC;AACA;AACA,+BAA+B,+DAAa;AAC5C;AACA,sBAAsB,8DAAO;AAC7B;AACA,wBAAwB,0DAAkB;AAC1C,aAAa;AACb;AACA;AACA;AACA;AACA;AACA,0BAA0B,gEAAU;AACpC;AACA;AACA,kCAAkC,iEAAe,GAAG,kBAAkB;AACtE,qBAAqB;AACrB;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,eAAe,kEAAgB,EAAE,2DAAS;AAC1C,eAAe,yEAAgB;AAC/B;AACA;AACA;AACA;AACA;AACA,iEAAe,MAAM,EAAC;;;;;;;;;;;;;;;;;;;;;ACxGtB;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAC0D;AAC1D;AACiD;AAClB;AAChB;AACf;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,gBAAgB,gDAAmB;AACnC,oBAAoB,gDAAmB,CAAC,2CAAc,IAAI,YAAY,EAAE,GAAG;AAC3E,gBAAgB,gDAAmB,CAAC,6DAAa,IAAI,qBAAqB,aAAa,gEAAgE;AACvJ,gBAAgB,gDAAmB;AACnC,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,YAAY,iEAAe,GAAG,gDAAgD;AAC9E;AACA;AACA,cAAc,gEAAU,GAAG,mCAAmC;AAC9D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,YAAY,iEAAe,GAAG,mDAAmD;AACjF,YAAY,qEAAmB,GAAG,kDAAkD;AACpF;AACA;AACA,6BAA6B,gEAAU,GAAG,mCAAmC;AAC7E;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oBAAoB;AACpB,UAAU;AACV;AACA;AACA,UAAU;AACV;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,aAAa,kBAAkB,mBAAmB,aAAa;AAC/D,cAAc,kBAAkB,qBAAqB,aAAa;AAClE,cAAc,kBAAkB;AAChC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAAS;AACT;AACA;AACA;AACA,kDAAkD,QAAQ;AAC1D;AACA;AACA,kGAAkG,YAAY,IAAI,IAAI,MAAM,IAAI;AAChI;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA","sources":["webpack://neural_compressor_ext_lab/./lib/constants.js","webpack://neural_compressor_ext_lab/./lib/deepcoder.js","webpack://neural_compressor_ext_lab/./lib/index.js","webpack://neural_compressor_ext_lab/./lib/utils.js"],"sourcesContent":["export var Constants;\n(function (Constants) {\n Constants.SHORT_PLUGIN_NAME = 'neural_compressor_ext_lab';\n Constants.WORK_PATH = \"neural_coder_workspace/\";\n Constants.ICON_FORMAT_ALL_SVG = '';\n Constants.ICON_RUN = '';\n Constants.SVG = '';\n Constants.LONG_PLUGIN_NAME = `@rya/${Constants.SHORT_PLUGIN_NAME}`;\n Constants.SETTINGS_SECTION = `${Constants.LONG_PLUGIN_NAME}:settings`;\n Constants.COMMAND_SECTION_NAME = 'Jupyterlab Code Optimizer';\n Constants.PLUGIN_VERSION = '0.1.0';\n})(Constants || 
(Constants = {}));\n","import { NotebookActions } from '@jupyterlab/notebook';\nimport NotebookUtilities from \"./utils\";\nimport { Constants } from './constants';\nclass JupyterlabCodeOptimizer {\n constructor(panel) {\n this.working = false;\n this.panel = panel;\n this.tmp_path = \"tmp.py\";\n this.rand = NotebookUtilities.GetRandomNum(0, 200);\n this.log_path = Constants.WORK_PATH + \"NeuralCoder\" + this.rand + \".log\";\n this.tmp_log_path = Constants.WORK_PATH + \"NeuralCoder_tmp\" + \".log\";\n this.cells = [];\n }\n async optimizeCode(code, formatter, name, next, options, notebook, panel, cell, run) {\n let codes = [];\n code.forEach(function (value) {\n value = value.replace(/('\\\\n')/g, '^^^');\n value = value.replace(/\\\\n\"/g, '###');\n value = value.replace(/\\\\n'/g, '###');\n value = value.replace(/\"\\\\n/g, '@@');\n value = value.replace(/'\\\\n/g, '@@');\n value = value.replace(/\\n/g, '\\\\n');\n value = value.replace(/\"/g, '+++');\n value = value.replace(/,/g, '$');\n codes.push(value);\n });\n let gen_code = `code = \"${codes}\"\\ncodes = code.split(',')\\nwith open( '${this.tmp_path}', 'w+' ) as f:\\n for i in range(0,len(codes)):\\n f.write('# this is the beginning of a single code snippet\\\\n')\\n code_list = codes[i].replace('$',',').replace('+++','\\\"').split('\\\\n')\\n for line in code_list:\\n if('split(^^^)' in line):\\n line=line.replace('split(^^^)', 'split(\\\\'\\\\\\\\n\\\\')')\\n if('###' in line):\\n line=line.replace('###', '\\\\\\\\n\\\"')\\n if('@@' in line):\\n line=line.replace('@@', '\\\"\\\\\\\\n')\\n f.write(line+'\\\\n')`;\n const expr = { code_list: `code_list` };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, gen_code, expr, false);\n if (options === 'normal') {\n let runcode = `from neural_coder import enable\\nenable(code=\"${this.tmp_path}\",features=[\"${formatter}\"], overwrite=True)`;\n let expr = { sum: ` ` };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let run_code1 = `with open(\"${this.tmp_path}\", 'r') as f:\\n optimized_code = f.read()\\n`;\n let expr1 = { optimizedCode: \"optimized_code\" };\n let result2 = NotebookUtilities.sendKernelRequestFromNotebook(panel, run_code1, expr1, false);\n result2.then(value => {\n var _a, _b, _c, _d;\n let optimizedTexts = Object.values(value.optimizedCode.data)[0];\n let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\\\n').slice(1);\n optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3);\n for (let i = 0; i < optimizeCodes.length; ++i) {\n const cell = this.cells[i];\n const currentTexts = this.cells.map(cell => cell.model.value.text);\n const currentText = currentTexts[i];\n let optimizedtext = optimizeCodes[i];\n optimizedtext = optimizedtext.replace(/\\\\'\\\\\\\\n\\\\'/g, \"^^^\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n\"/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n'/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\"\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/'\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/\\\\n/g, '\\n');\n optimizedtext = optimizedtext.replace(/\\\\'/g, \"'\");\n optimizedtext = optimizedtext.replace(/\\^\\^\\^/g, \"'\\\\n'\");\n optimizedtext = optimizedtext.replace(/\\+\\+\\+/g, \"\\\\n\\\"\");\n optimizedtext = optimizedtext.replace(/\\@\\@\\@/g, \"\\\"\\\\n\");\n if (cell.model.value.text === currentText) {\n cell.model.value.text = optimizedtext;\n }\n const 
run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? void 0 : _d.replaceWith(run_svg);\n }\n });\n }\n else {\n if (formatter === '') {\n if (this.markdown) {\n this.markdown.model.value.text += \"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ...... \\n\";\n }\n // cell.outputArea.node.innerText += \"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\n\"\n let runcode1 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for The Original Model ......\\\\n\")`;\n let expr1 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false);\n let runcode = `from neural_coder import enable\\nperfomance, mode, path = enable(code=\"${this.tmp_path}\",features=[], run_bench=True, args=\"${options}\")\\nwith open(path + '/bench.log', 'r') as f:\\n logs = f.readlines()\\nlog_line = logs[4]\\nlog = log_line.split(\"[\")[1].split(\"]\")[0]`;\n let expr = { path: \"path\", log: \"log\" };\n let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let fps;\n result.then(value => {\n fps = Object.values(value.log.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second) \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\n`\n let text = `[NeuralCoder INFO] Benchmark Result (Performance) of The Original Model is ${fps} (samples/second)\\\\n`;\n let runcode = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n`\n let runcode1 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\\\n\")`;\n let expr1 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode1, expr1, false);\n let runcode2 = `with open(\"${this.tmp_log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n });\n }\n else {\n let runcode = `from neural_coder import enable\\nperfomance, mode, path = enable(code=\"${this.tmp_path}\", features=[\"${formatter}\"], run_bench=True, args=\"${options}\")\\nwith open(path + '/bench.log', 'r') as f:\\n logs = f.readlines()\\nlog_line = logs[4]\\nlog = log_line.split(\"[\")[1].split(\"]\")[0]`;\n let expr = { path: \"path\", log: \"log\" };\n let result = NotebookUtilities.sendKernelRequestFromNotebook(panel, runcode, expr, false);\n let fps;\n result.then(value => {\n fps = Object.values(value.log.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second) \\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (FPS)\\n`\n let text = `[NeuralCoder INFO] Benchmark Result (Performance) of ${name} is ${fps} (samples/second)\\\\n`;\n let runcode = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n if (next !== '') {\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ...... 
\\n`;\n }\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\n`\n let runcode2 = `with open(\"${this.log_path}\", 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Enabling and Benchmarking for ${next} ......\\\\n\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n }\n let runcode3 = `with open(\"${this.tmp_log_path}\", 'a' ) as f:\\n f.write(\"${text}\")`;\n let expr3 = { path: \"\" };\n let res_tmp = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false);\n res_tmp.then(value => {\n if (formatter === 'pytorch_inc_bf16') {\n let read_log = `import re\\nwith open(\"${this.tmp_log_path}\", 'r') as f:\\n logs = f.readlines()\\n fps_list=[]\\n for log_line in logs[-4:]:\\n pat = re.compile(r\\'\\\\d+\\\\.?\\\\d+')\\n fps = re.findall(pat,log_line)[-1]\\n fps_list.append(float(fps))\\nmaxi = max(fps_list)\\nindex = fps_list.index(maxi)\\nboost = round(maxi/fps_list[0],1)\\nfeatures=['','pytorch_inc_static_quant_fx','pytorch_inc_dynamic_quant','pytorch_inc_bf16']\\nfeature_name=['Original Model','INC Enable INT8 (Static)','INC Enable INT8 (Dynamic)','INC Enable BF16']\\nbest_feature = features[index]\\nbest_name = feature_name[index]\\nfeature_l = []\\nfeature_l.append(best_feature)\\nfrom neural_coder import enable\\nenable(code=\"${this.tmp_path}\",features=feature_l, overwrite=True)\\nwith open(\"${this.tmp_path}\", 'r') as f:\\n optimized_code = f.read()\\n`;\n let read_expr = { boost: \"boost\", best_feature: \"best_feature\", best_name: \"best_name\", optimizeCode: \"optimized_code\", feature_l: \"fps_list\", maxi: \"maxi\", index: \"index\" };\n let read_result = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, read_log, read_expr, false);\n read_result.then(value => {\n var _a, _b, _c, _d;\n console.log(\"resres\", value);\n let boost = Object.values(value.boost.data)[0];\n let best_name = Object.values(value.best_name.data)[0];\n let optimizedTexts = Object.values(value.optimizeCode.data)[0];\n let optimizeCodes = optimizedTexts.split('# this is the beginning of a single code snippet\\\\n').slice(1);\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] The Best Intel Optimization: ${best_name} \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] You can get up to ${boost}X performance boost. 
\\n`;\n }\n // cell.outputArea.node.innerText +=`[NeuralCoder INFO] The Best Intel Optimization: ${best_name}\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] You can get up to ${boost}X performance boost.\\n`\n optimizeCodes[optimizeCodes.length - 1] = optimizeCodes[optimizeCodes.length - 1].slice(0, -3);\n for (let i = 0; i < optimizeCodes.length; ++i) {\n const cell = this.cells[i];\n const currentTexts = this.cells.map(cell => cell.model.value.text);\n const currentText = currentTexts[i];\n let optimizedtext = optimizeCodes[i];\n optimizedtext = optimizedtext.replace(/\\\\'\\\\\\\\n\\\\'/g, \"^^^\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n\"/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\\\\\\\\n'/g, \"+++\");\n optimizedtext = optimizedtext.replace(/\"\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/'\\\\\\\\n/g, \"@@@\");\n optimizedtext = optimizedtext.replace(/\\\\n/g, '\\n');\n optimizedtext = optimizedtext.replace(/\\\\'/g, \"'\");\n optimizedtext = optimizedtext.replace(/\\^\\^\\^/g, \"'\\\\n'\");\n optimizedtext = optimizedtext.replace(/\\+\\+\\+/g, \"\\\\n\\\"\");\n optimizedtext = optimizedtext.replace(/\\@\\@\\@/g, \"\\\"\\\\n\");\n if (cell.model.value.text === currentText) {\n cell.model.value.text = optimizedtext;\n }\n }\n // if(this.markdown){\n // this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: 4th Gen Intel Xeon Scalable processor with AMX \\n`\n // this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log \\n`\n // }\n let command = \"lscpu | grep 'Model name'\";\n let get_hardware = `import subprocess\\nsubp = subprocess.Popen(\"${command}\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding=\"utf-8\")\\nsubp.wait(2)\\nhardware = subp.communicate()[0].replace(\"Model name:\",\"\").strip()`;\n let expr_hardware = { hardware: \"hardware\" };\n let hard_res = NotebookUtilities.sendKernelRequestFromNotebook(this.panel, get_hardware, expr_hardware, false);\n hard_res.then(value => {\n let hard = Object.values(value.hardware.data)[0];\n if (this.markdown) {\n this.markdown.model.value.text += `[NeuralCoder INFO] HardWare: ${hard} \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log \\n`;\n }\n cell.outputArea.node.innerText += `[NeuralCoder INFO] HardWare: ${hard}\\n`;\n });\n cell.outputArea.node.innerText += `[NeuralCoder INFO] The log was saved to neural_coder_workspace\\\\NeuralCoder${this.rand}.log\\n`;\n const run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n (_d = (_c = (_b = (_a = run === null || run === void 0 ? void 0 : run.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(run_svg);\n });\n }\n });\n });\n }\n }\n }\n}\nexport class JupyterlabNotebookCodeOptimizer extends JupyterlabCodeOptimizer {\n constructor(notebookTracker, panel) {\n super(panel);\n this.notebookTracker = notebookTracker;\n this.notebookname = '';\n }\n async optimizeAction(config, formatter) {\n return this.optimizeCells(true, config, formatter);\n }\n async optimizeAllCodeCells(config, formatter, notebook, run) {\n return this.optimizeCells(false, config, formatter, notebook, run);\n }\n getCodeCells(ifmarkdown = true, notebook) {\n if (!this.notebookTracker.currentWidget) {\n return [];\n }\n const codeCells = [];\n notebook = notebook || this.notebookTracker.currentWidget.content;\n this.notebookname = notebook.title.label;\n let count = 0;\n notebook.widgets.forEach((cell) => {\n if (cell.model.type === 'code') {\n count += 1;\n codeCells.push(cell);\n }\n });\n if (ifmarkdown) {\n NotebookActions.insertBelow(notebook);\n this.notebookTracker.currentWidget.content.activeCellIndex = count + 1;\n NotebookActions.changeCellType(notebook, 'markdown');\n const activeCell = notebook.activeCell;\n if (activeCell) {\n this.markdown = activeCell;\n }\n }\n this.cells = codeCells;\n return codeCells;\n }\n async optimizeCells(selectedOnly, config, formatter, notebook, run) {\n if (this.working) {\n return new Promise((resolve, reject) => {\n resolve(\"false!\");\n });\n }\n console.log(\"arrive here 333\");\n this.working = true;\n const optimize_type = formatter !== undefined ? formatter : 'pytorch_mixed_precision_cpu';\n if (optimize_type === 'auto-quant') {\n selectedOnly = true;\n }\n else {\n selectedOnly = false;\n }\n const selectedCells = this.getCodeCells(selectedOnly, notebook);\n let cell = selectedCells[selectedCells.length - 1];\n if (selectedCells.length === 0) {\n this.working = false;\n return new Promise((resolve, reject) => {\n resolve(\"false!\");\n });\n }\n const currentTexts = selectedCells.map(cell => cell.model.value.text);\n if (optimize_type === 'auto-quant') {\n console.log(\"arrive here 444-111\");\n if (this.markdown) {\n this.markdown.model.value.text = `[NeuralCoder INFO] Auto-Quant Started ...... 
\\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook \"${this.notebookname}\" \\n`;\n this.markdown.model.value.text += `[NeuralCoder INFO] Benchmark Mode: Throughput \\n`;\n }\n // cell.outputArea.node.innerText = `[NeuralCoder INFO] Auto-Quant Started ......\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Code: User code from Jupyter Lab notebook \"${this.notebookname}\"\\n`\n // cell.outputArea.node.innerText += `[NeuralCoder INFO] Benchmark Mode: Throughput\\n`\n let runcode = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Auto-Quant Started ......\\\\n\")`;\n let expr = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode, expr, false);\n let runcode2 = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Code: User code from Jupyter Lab notebook '${this.notebookname}'\\\\n\")`;\n let expr2 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode2, expr2, false);\n let runcode3 = `with open('${this.log_path}', 'a' ) as f:\\n f.write(\"[NeuralCoder INFO] Benchmark Mode: Throughput\\\\n\")`;\n let expr3 = { path: \"\" };\n NotebookUtilities.sendKernelRequestFromNotebook(this.panel, runcode3, expr3, false);\n // cell.outputArea.node.setAttribute(\"class\",\"pad\")\n await this.optimizeCode(currentTexts, '', 'The Original Model', 'INC Enable INT8 (Static)', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_static_quant_fx', 'INC Enable INT8 (Static)', 'INC Enable INT8 (Dynamic)', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_dynamic_quant', 'INC Enable INT8 (Dynamic)', 'INC Enable BF16', config, true, this.panel, cell, run);\n await this.optimizeCode(currentTexts, 'pytorch_inc_bf16', 'INC Enable BF16', '', config, true, this.panel, cell, run);\n }\n else {\n console.log(\"arrive here 444-222\");\n await this.optimizeCode(currentTexts, optimize_type, \"\", \"\", \"normal\", true, this.panel, cell, run);\n }\n this.working = false;\n console.log(\"arrive here 555\");\n return new Promise((resolve, reject) => {\n resolve(\"success!\");\n });\n }\n applicable(formatter, currentWidget) {\n const currentNotebookWidget = this.notebookTracker.currentWidget;\n return currentNotebookWidget && currentWidget === currentNotebookWidget;\n }\n}\n","import { INotebookTracker } from '@jupyterlab/notebook';\nimport { ToolbarButton, showDialog, Dialog } from '@jupyterlab/apputils';\nimport { ISettingRegistry } from '@jupyterlab/settingregistry';\nimport { IMainMenu } from '@jupyterlab/mainmenu';\nimport { LabIcon } from '@jupyterlab/ui-components';\nimport { Widget } from '@lumino/widgets';\nimport { JupyterlabNotebookCodeOptimizer } from './deepcoder';\nimport { Constants } from './constants';\nclass neural_compressor_ext_lab {\n constructor(app, tracker, notebookpanel) {\n this.app = app;\n this.tracker = tracker;\n this.notebookpanel = notebookpanel;\n this.setupWidgetExtension();\n this.config = '';\n }\n createNew(nb) {\n this.notebookpanel = nb;\n this.notebookCodeOptimizer = new JupyterlabNotebookCodeOptimizer(this.tracker, this.notebookpanel);\n const svg = document.createElement(\"svg\");\n svg.innerHTML = Constants.ICON_FORMAT_ALL_SVG;\n const run_svg = document.createElement(\"svg\");\n run_svg.innerHTML = Constants.ICON_RUN;\n const div = document.createElement(\"div\");\n div.setAttribute(\"class\", \"wrapper\");\n const span = 
document.createElement(\"span\");\n span.setAttribute(\"class\", \"f1ozlkqi\");\n span.innerHTML = Constants.SVG;\n const selector = document.createElement(\"select\");\n selector.setAttribute(\"class\", \"aselector\");\n selector.id = \"NeuralCoder\";\n const option1 = document.createElement(\"option\");\n option1.value = \"pytorch_inc_static_quant_fx\";\n option1.innerText = \"INC Enable INT8 (Static)\";\n option1.selected = true;\n const option2 = document.createElement(\"option\");\n option2.value = \"pytorch_inc_dynamic_quant\";\n option2.innerText = \"INC Enable INT8 (Dynamic)\";\n const option3 = document.createElement(\"option\");\n option3.value = \"pytorch_inc_bf16\";\n option3.innerText = \"INC Enable BF16\";\n const option4 = document.createElement(\"option\");\n option4.value = \"auto-quant\";\n option4.innerText = \"INC Auto Enable & Benchmark\";\n selector.options.add(option1);\n selector.options.add(option2);\n selector.options.add(option3);\n selector.options.add(option4);\n div.appendChild(selector);\n div.appendChild(span);\n const selector_widget = new Widget();\n selector_widget.node.appendChild(div);\n selector_widget.addClass(\"aselector\");\n let notebookCodeOptimizer = this.notebookCodeOptimizer;\n let config = this.config;\n const dia_input = document.createElement(\"input\");\n const dia_widget = new Widget();\n dia_widget.node.appendChild(dia_input);\n dia_widget.addClass(\"dialog\");\n const run_button = new ToolbarButton({\n tooltip: 'NeuralCoder',\n icon: new LabIcon({\n name: \"run\",\n svgstr: Constants.ICON_RUN\n }),\n onClick: async function () {\n var _a, _b, _c, _d;\n console.log(\"arrive here 111\");\n (_d = (_c = (_b = (_a = run_button.node.firstChild) === null || _a === void 0 ? void 0 : _a.firstChild) === null || _b === void 0 ? void 0 : _b.firstChild) === null || _c === void 0 ? void 0 : _c.firstChild) === null || _d === void 0 ? 
void 0 : _d.replaceWith(svg);\n if (selector.options[selector.selectedIndex].value === 'auto-quant') {\n await showDialog({\n title: 'Please input execute parameters:',\n body: dia_widget,\n buttons: [Dialog.okButton({ label: 'Confirm' })]\n }).then(result => {\n if (result.button.accept) {\n config = dia_input.value;\n }\n });\n }\n console.log(\"arrive here 222\");\n await notebookCodeOptimizer.optimizeAllCodeCells(config, selector.options[selector.selectedIndex].value, undefined, run_button);\n }\n });\n nb.toolbar.insertItem(11, \"nc\", run_button);\n nb.toolbar.insertItem(12, \"selector\", selector_widget);\n }\n setupWidgetExtension() {\n this.app.docRegistry.addWidgetExtension('Notebook', this);\n }\n}\n/**\n * Initialization data for the neural_compressor_ext_lab extension.\n */\nconst plugin = {\n id: 'neural_compressor_ext_lab:plugin',\n autoStart: true,\n requires: [INotebookTracker, IMainMenu],\n optional: [ISettingRegistry],\n activate: (app, tracker, notebookpanel) => {\n new neural_compressor_ext_lab(app, tracker, notebookpanel);\n console.log('JupyterLab extension neural_compressor_ext_lab is activated!');\n }\n};\nexport default plugin;\n","/*\n * Copyright 2019-2020 The Kale Authors\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nimport { Dialog, showDialog } from '@jupyterlab/apputils';\n// @ts-ignore\nimport SanitizedHTML from 'react-sanitized-html';\nimport * as React from 'react';\nexport default class NotebookUtilities {\n /**\n * generate random number\n * @Min\n * @Max\n */\n static GetRandomNum(Min, Max) {\n let Range;\n Range = Max - Min;\n var Rand = Math.random();\n return (Min + Math.round(Rand * Range));\n }\n /**\n * Builds an HTML container by sanitizing a list of strings and converting\n * them in valid HTML\n * @param msg A list of string with HTML formatting\n * @returns a HTMLDivElement composed of a list of spans with formatted text\n */\n static buildDialogBody(msg) {\n return (React.createElement(\"div\", null, msg.map((s, i) => {\n return (React.createElement(React.Fragment, { key: `msg-${i}` },\n React.createElement(SanitizedHTML, { allowedAttributes: { a: ['href'] }, allowedTags: ['b', 'i', 'em', 'strong', 'a', 'pre'], html: s }),\n React.createElement(\"br\", null)));\n })));\n }\n /**\n * Opens a pop-up dialog in JupyterLab to display a simple message.\n * @param title The title for the message popup\n * @param msg The message as an array of strings\n * @param buttonLabel The label to use for the button. 
Default is 'OK'\n * @param buttonClassName The classname to give to the 'ok' button\n * @returns Promise - A promise once the message is closed.\n */\n static async showMessage(title, msg, buttonLabel = 'Dismiss', buttonClassName = '') {\n const buttons = [\n Dialog.okButton({ label: buttonLabel, className: buttonClassName }),\n ];\n const messageBody = this.buildDialogBody(msg);\n await showDialog({ title, buttons, body: messageBody });\n }\n /**\n * Opens a pop-up dialog in JupyterLab to display a yes/no dialog.\n * @param title The title for the message popup\n * @param msg The message\n * @param acceptLabel The label to use for the accept button. Default is 'YES'\n * @param rejectLabel The label to use for the reject button. Default is 'NO'\n * @param yesButtonClassName The classname to give to the accept button.\n * @param noButtonClassName The classname to give to the cancel button.\n * @returns Promise - A promise once the message is closed.\n */\n static async showYesNoDialog(title, msg, acceptLabel = 'YES', rejectLabel = 'NO', yesButtonClassName = '', noButtonClassName = '') {\n const buttons = [\n Dialog.okButton({ label: acceptLabel, className: yesButtonClassName }),\n Dialog.cancelButton({ label: rejectLabel, className: noButtonClassName }),\n ];\n const messageBody = this.buildDialogBody(msg);\n const result = await showDialog({ title, buttons, body: messageBody });\n return result.button.label === acceptLabel;\n }\n /**\n * Opens a pop-up dialog in JupyterLab with various information and button\n * triggering reloading the page.\n * @param title The title for the message popup\n * @param msg The message\n * @param buttonLabel The label to use for the button. Default is 'Refresh'\n * @param buttonClassName The classname to give to the 'refresh' button.\n * @returns Promise - A promise once the message is closed.\n */\n static async showRefreshDialog(title, msg, buttonLabel = 'Refresh', buttonClassName = '') {\n await this.showMessage(title, msg, buttonLabel, buttonClassName);\n location.reload();\n }\n /**\n * @description Creates a new JupyterLab notebook for use by the application\n * @param command The command registry\n * @returns Promise - A promise containing the notebook panel object that was created (if successful).\n */\n static async createNewNotebook(command) {\n const notebook = await command.execute('notebook:create-new', {\n activate: true,\n path: '',\n preferredLanguage: '',\n });\n await notebook.session.ready;\n return notebook;\n }\n /**\n * Safely saves the Jupyter notebook document contents to disk\n * @param notebookPanel The notebook panel containing the notebook to save\n */\n static async saveNotebook(notebookPanel) {\n if (notebookPanel) {\n await notebookPanel.context.ready;\n notebookPanel.context.save();\n return true;\n }\n return false;\n }\n /**\n * Convert the notebook contents to JSON\n * @param notebookPanel The notebook panel containing the notebook to serialize\n */\n static notebookToJSON(notebookPanel) {\n if (notebookPanel.content.model) {\n return notebookPanel.content.model.toJSON();\n }\n return null;\n }\n /**\n * @description Gets the value of a key from specified notebook's metadata.\n * @param notebookPanel The notebook to get meta data from.\n * @param key The key of the value.\n * @returns any -The value of the metadata. Returns null if the key doesn't exist.\n */\n static getMetaData(notebookPanel, key) {\n if (!notebookPanel) {\n throw new Error('The notebook is null or undefined. 
No meta data available.');\n }\n if (notebookPanel.model && notebookPanel.model.metadata.has(key)) {\n return notebookPanel.model.metadata.get(key);\n }\n return null;\n }\n /**\n * @description Sets the key value pair in the notebook's metadata.\n * If the key doesn't exists it will add one.\n * @param notebookPanel The notebook to set meta data in.\n * @param key The key of the value to create.\n * @param value The value to set.\n * @param save Default is false. Whether the notebook should be saved after the meta data is set.\n * Note: This function will not wait for the save to complete, it only sends a save request.\n * @returns The old value for the key, or undefined if it did not exist.\n */\n static setMetaData(notebookPanel, key, value, save = false) {\n var _a;\n if (!notebookPanel) {\n throw new Error('The notebook is null or undefined. No meta data available.');\n }\n const oldVal = (_a = notebookPanel.model) === null || _a === void 0 ? void 0 : _a.metadata.set(key, value);\n if (save) {\n this.saveNotebook(notebookPanel);\n }\n return oldVal;\n }\n // /**\n // * Get a new Kernel, not tied to a Notebook\n // * Source code here: https://github.com/jupyterlab/jupyterlab/tree/473348d25bcb258ca2f0c127dd8fb5b193217135/packages/services\n // */\n // public static async createNewKernel() {\n // // Get info about the available kernels and start a new one.\n // let options: Kernel.IOptions = await Kernel.getSpecs().then(kernelSpecs => {\n // // console.log('Default spec:', kernelSpecs.default);\n // // console.log('Available specs', Object.keys(kernelSpecs.kernelspecs));\n // // use the default name\n // return { name: kernelSpecs.default };\n // });\n // return await Kernel.startNew(options).then(_kernel => {\n // return _kernel;\n // });\n // }\n // // TODO: We can use this context manager to execute commands inside a new kernel\n // // and be sure that it will be disposed of at the end.\n // // Another approach could be to create a kale_rpc Kernel, as a singleton,\n // // created at startup. The only (possible) drawback is that we can not name\n // // a kernel instance with a custom id/name, so when refreshing JupyterLab we would\n // // not recognize the kernel. A solution could be to have a kernel spec dedicated to kale rpc calls.\n // public static async executeWithNewKernel(action: Function, args: any[] = []) {\n // // create brand new kernel\n // const _k = await this.createNewKernel();\n // // execute action inside kernel\n // const res = await action(_k, ...args);\n // // close kernel\n // _k.shutdown();\n // // return result\n // return res;\n // }\n /**\n * @description This function runs code directly in the notebook's kernel and then evaluates the\n * result and returns it as a promise.\n * @param kernel The kernel to run the code in.\n * @param runCode The code to run in the kernel.\n * @param userExpressions The expressions used to capture the desired info from the executed code.\n * @param runSilent Default is false. If true, kernel will execute as quietly as possible.\n * store_history will be set to false, and no broadcast on IOPUB channel will be made.\n * @param storeHistory Default is false. If true, the code executed will be stored in the kernel's history\n * and the counter which is shown in the cells will be incremented to reflect code was run.\n * @param allowStdIn Default is false. If true, code running in kernel can prompt user for input using\n * an input_request message.\n * @param stopOnError Default is false. 
If True, does not abort the execution queue, if an exception is encountered.\n * This allows the queued execution of multiple execute_requests, even if they generate exceptions.\n * @returns Promise - A promise containing the execution results of the code as an object with\n * keys based on the user_expressions.\n * @example\n * //The code\n * const code = \"a=123\\nb=456\\nsum=a+b\";\n * //The user expressions\n * const expr = {sum: \"sum\",prod: \"a*b\",args:\"[a,b,sum]\"};\n * //Async function call (returns a promise)\n * sendKernelRequest(notebookPanel, code, expr,false);\n * //Result when promise resolves:\n * {\n * sum:{status:\"ok\",data:{\"text/plain\":\"579\"},metadata:{}},\n * prod:{status:\"ok\",data:{\"text/plain\":\"56088\"},metadata:{}},\n * args:{status:\"ok\",data:{\"text/plain\":\"[123, 456, 579]\"}}\n * }\n * @see For more information on JupyterLab messages:\n * https://jupyter-client.readthedocs.io/en/latest/messaging.html#execution-results\n */\n static async sendKernelRequest(kernel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) {\n if (!kernel) {\n throw new Error('Kernel is null or undefined.');\n }\n // Wait for kernel to be ready before sending request\n // await kernel.status;\n const message = await kernel.requestExecute({\n allow_stdin: allowStdIn,\n code: runCode,\n silent: runSilent,\n stop_on_error: stopOnError,\n store_history: storeHistory,\n user_expressions: userExpressions,\n }).done;\n const content = message.content;\n if (content.status !== 'ok') {\n // If response is not 'ok', throw contents as error, log code\n const msg = `Code caused an error:\\n${runCode}`;\n console.error(msg);\n if (content.traceback) {\n content.traceback.forEach((line) => console.log(line.replace(/[\\u001b\\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]/g, '')));\n }\n throw content;\n }\n // Return user_expressions of the content\n return content.user_expressions;\n }\n /**\n * Same as method sendKernelRequest but passing\n * a NotebookPanel instead of a Kernel\n */\n static async sendKernelRequestFromNotebook(notebookPanel, runCode, userExpressions, runSilent = false, storeHistory = false, allowStdIn = false, stopOnError = false) {\n var _a, _b, _c, _d;\n if (!notebookPanel) {\n throw new Error('Notebook is null or undefined.');\n }\n // Wait for notebook panel to be ready\n await notebookPanel.activate;\n await ((_a = notebookPanel.sessionContext) === null || _a === void 0 ? void 0 : _a.ready);\n console.log('get kernel', (_b = notebookPanel.sessionContext.session) === null || _b === void 0 ? void 0 : _b.kernel);\n return this.sendKernelRequest((_d = (_c = notebookPanel.sessionContext) === null || _c === void 0 ? void 0 : _c.session) === null || _d === void 0 ? 
void 0 : _d.kernel, runCode, userExpressions, runSilent, storeHistory, allowStdIn, stopOnError);\n }\n}\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.27309f1e43e64d128990.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.27309f1e43e64d128990.js.map deleted file mode 100644 index 7bd49677533..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.27309f1e43e64d128990.js.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"file":"remoteEntry.27309f1e43e64d128990.js","mappings":";;;;;;;;;;;AAAA;AACA;AACA;AACA,EAAE;AACF;AACA;AACA,EAAE;AACF;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,IAAI;AACJ;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AAEA;AACA;AACA;AACA;AACA,CAAC;;;;;;UCpCD;UACA;;UAEA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;;UAEA;UACA;;UAEA;UACA;UACA;;UAEA;UACA;;UAEA;UACA;;;;;WC5BA;WACA;WACA;WACA;WACA;WACA,iCAAiC,WAAW;WAC5C;WACA;;;;;WCPA;WACA;WACA;WACA;WACA,yCAAyC,wCAAwC;WACjF;WACA;WACA;;;;;WCPA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;;;;;WCRA;WACA;WACA;WACA,8BAA8B,kgBAAkgB;WAChiB;;;;;WCJA;WACA;WACA;WACA;WACA,GAAG;WACH;WACA;WACA,CAAC;;;;;WCPD;;;;;WCAA;WACA;WACA;WACA;WACA,uBAAuB,4BAA4B;WACnD;WACA;WACA;WACA,iBAAiB,oBAAoB;WACrC;WACA,mGAAmG,YAAY;WAC/G;WACA;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,mEAAmE,iCAAiC;WACpG;WACA;WACA;WACA;;;;;WCzCA;WACA;WACA;WACA,uDAAuD,iBAAiB;WACxE;WACA,gDAAgD,aAAa;WAC7D;;;;;WCNA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,oJAAoJ;WACpJ;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,IAAI,aAAa;WACjB;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;;;;;WC7CA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;;;;;WCfA;WACA;WACA,WAAW,6BAA6B,iBAAiB,GAAG,qEAAqE;WACjI;WACA;WACA;WACA,qCAAqC,aAAa,EAAE,wDAAwD,2BAA2B,4BAA4B,2BAA2B,+CAA+C,mCAAmC;WAChR;WACA;WACA;WACA,qBAAqB,8BAA8B,SAAS,sDAAsD,gBAAgB,eAAe,KAAK,6DAA6D,SAAS,SAAS,QAAQ,eAAe,KAAK,eAAe,qGAAqG,WAAW,aAAa;WAC7Y;WACA;WACA;WACA,gBAAgB,8BAA8B,qBAAqB,YAAY,sBAAsB,SAAS,iDAAiD,6FAA6F,WAAW,uBAAuB,2BAA2B,wBAAwB,KAAK,oCAAoC,oBAAoB,wBAAwB,oBAAoB,SAAS,KAAK,yBAAyB,KAAK,gCAAgC,yBAAyB,QAAQ,eAAe,KAAK,eAAe,4DAA4D;WACtoB;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,CAAC;;WAED;WACA;WACA;WACA,CAAC;WACD;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM;WACN,KAAK,WAAW;WAChB,GAAG;WACH;WACA;;;;;WC3LA;;WAEA;WACA;WACA;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA,iCAAiC;;WAEjC;WACA;WACA;WACA,KAAK;W
ACL;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM;WACN;WACA;WACA;;WAEA;;WAEA;;WAEA;;WAEA;;WAEA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM,qBAAqB;WAC3B;WACA;WACA;WACA;WACA;WACA;;WAEA;;WAEA;WACA;WACA;;;;;WCrFA;;;;;UEAA;UACA;UACA;UACA","sources":["webpack://neural_compressor_ext_lab/webpack/container-entry","webpack://neural_compressor_ext_lab/webpack/bootstrap","webpack://neural_compressor_ext_lab/webpack/runtime/compat get default export","webpack://neural_compressor_ext_lab/webpack/runtime/define property getters","webpack://neural_compressor_ext_lab/webpack/runtime/ensure chunk","webpack://neural_compressor_ext_lab/webpack/runtime/get javascript chunk filename","webpack://neural_compressor_ext_lab/webpack/runtime/global","webpack://neural_compressor_ext_lab/webpack/runtime/hasOwnProperty shorthand","webpack://neural_compressor_ext_lab/webpack/runtime/load script","webpack://neural_compressor_ext_lab/webpack/runtime/make namespace object","webpack://neural_compressor_ext_lab/webpack/runtime/sharing","webpack://neural_compressor_ext_lab/webpack/runtime/publicPath","webpack://neural_compressor_ext_lab/webpack/runtime/consumes","webpack://neural_compressor_ext_lab/webpack/runtime/jsonp chunk loading","webpack://neural_compressor_ext_lab/webpack/runtime/nonce","webpack://neural_compressor_ext_lab/webpack/before-startup","webpack://neural_compressor_ext_lab/webpack/startup","webpack://neural_compressor_ext_lab/webpack/after-startup"],"sourcesContent":["var moduleMap = {\n\t\"./index\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\")))));\n\t},\n\t\"./extension\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\")))));\n\t},\n\t\"./style\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854\"), __webpack_require__.e(\"style_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./style/index.js */ \"./style/index.js\")))));\n\t}\n};\nvar get = (module, getScope) => {\n\t__webpack_require__.R = getScope;\n\tgetScope = (\n\t\t__webpack_require__.o(moduleMap, module)\n\t\t\t? 
moduleMap[module]()\n\t\t\t: Promise.resolve().then(() => {\n\t\t\t\tthrow new Error('Module \"' + module + '\" does not exist in container.');\n\t\t\t})\n\t);\n\t__webpack_require__.R = undefined;\n\treturn getScope;\n};\nvar init = (shareScope, initScope) => {\n\tif (!__webpack_require__.S) return;\n\tvar name = \"default\"\n\tvar oldScope = __webpack_require__.S[name];\n\tif(oldScope && oldScope !== shareScope) throw new Error(\"Container initialization failed as it has already been initialized with a different share scope\");\n\t__webpack_require__.S[name] = shareScope;\n\treturn __webpack_require__.I(name, initScope);\n};\n\n// This exports getters to disallow modifications\n__webpack_require__.d(exports, {\n\tget: () => (get),\n\tinit: () => (init)\n});","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\tid: moduleId,\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n// expose the modules object (__webpack_modules__)\n__webpack_require__.m = __webpack_modules__;\n\n// expose the module cache\n__webpack_require__.c = __webpack_module_cache__;\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.f = {};\n// This file contains only the entry chunk.\n// The chunk loading function for additional chunks\n__webpack_require__.e = (chunkId) => {\n\treturn Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => {\n\t\t__webpack_require__.f[key](chunkId, promises);\n\t\treturn promises;\n\t}, []));\n};","// This function allow to reference async chunks\n__webpack_require__.u = (chunkId) => {\n\t// return url for filenames based on template\n\treturn \"\" + chunkId + \".\" + {\"webpack_sharing_consume_default_react\":\"19c51f7b56cfd16da3f9\",\"lib_index_js\":\"2c52105b83d9cf4290a9\",\"vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854\":\"e09faf9ec3a764e40dc7\",\"style_index_js\":\"8d733cc8b74fabbd10b8\",\"vendors-node_modules_react-sanitized-html_lib_index_js\":\"500104f7c13c01fe1646\",\"webpack_sharing_consume_default_sanitize-html_sanitize-html\":\"635249bb6dc3884c24a3\",\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\":\"825dbf94ec7371e0b28d\"}[chunkId] + \".js\";\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return 
window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","var inProgress = {};\nvar dataWebpackPrefix = \"neural_compressor_ext_lab:\";\n// loadScript function to load a script via script tag\n__webpack_require__.l = (url, done, key, chunkId) => {\n\tif(inProgress[url]) { inProgress[url].push(done); return; }\n\tvar script, needAttach;\n\tif(key !== undefined) {\n\t\tvar scripts = document.getElementsByTagName(\"script\");\n\t\tfor(var i = 0; i < scripts.length; i++) {\n\t\t\tvar s = scripts[i];\n\t\t\tif(s.getAttribute(\"src\") == url || s.getAttribute(\"data-webpack\") == dataWebpackPrefix + key) { script = s; break; }\n\t\t}\n\t}\n\tif(!script) {\n\t\tneedAttach = true;\n\t\tscript = document.createElement('script');\n\n\t\tscript.charset = 'utf-8';\n\t\tscript.timeout = 120;\n\t\tif (__webpack_require__.nc) {\n\t\t\tscript.setAttribute(\"nonce\", __webpack_require__.nc);\n\t\t}\n\t\tscript.setAttribute(\"data-webpack\", dataWebpackPrefix + key);\n\t\tscript.src = url;\n\t}\n\tinProgress[url] = [done];\n\tvar onScriptComplete = (prev, event) => {\n\t\t// avoid mem leaks in IE.\n\t\tscript.onerror = script.onload = null;\n\t\tclearTimeout(timeout);\n\t\tvar doneFns = inProgress[url];\n\t\tdelete inProgress[url];\n\t\tscript.parentNode && script.parentNode.removeChild(script);\n\t\tdoneFns && doneFns.forEach((fn) => (fn(event)));\n\t\tif(prev) return prev(event);\n\t}\n\t;\n\tvar timeout = setTimeout(onScriptComplete.bind(null, undefined, { type: 'timeout', target: script }), 120000);\n\tscript.onerror = onScriptComplete.bind(null, script.onerror);\n\tscript.onload = onScriptComplete.bind(null, script.onload);\n\tneedAttach && document.head.appendChild(script);\n};","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","__webpack_require__.S = {};\nvar initPromises = {};\nvar initTokens = {};\n__webpack_require__.I = (name, initScope) => {\n\tif(!initScope) initScope = [];\n\t// handling circular init calls\n\tvar initToken = initTokens[name];\n\tif(!initToken) initToken = initTokens[name] = {};\n\tif(initScope.indexOf(initToken) >= 0) return;\n\tinitScope.push(initToken);\n\t// only runs once\n\tif(initPromises[name]) return initPromises[name];\n\t// creates a new share scope if needed\n\tif(!__webpack_require__.o(__webpack_require__.S, name)) __webpack_require__.S[name] = {};\n\t// runs all init snippets from all modules reachable\n\tvar scope = __webpack_require__.S[name];\n\tvar warn = (msg) => (typeof console !== \"undefined\" && console.warn && console.warn(msg));\n\tvar uniqueName = \"neural_compressor_ext_lab\";\n\tvar register = (name, version, factory, eager) => {\n\t\tvar versions = scope[name] = scope[name] || {};\n\t\tvar activeVersion = versions[version];\n\t\tif(!activeVersion || (!activeVersion.loaded && (!eager != !activeVersion.eager ? 
eager : uniqueName > activeVersion.from))) versions[version] = { get: factory, from: uniqueName, eager: !!eager };\n\t};\n\tvar initExternal = (id) => {\n\t\tvar handleError = (err) => (warn(\"Initialization of sharing external failed: \" + err));\n\t\ttry {\n\t\t\tvar module = __webpack_require__(id);\n\t\t\tif(!module) return;\n\t\t\tvar initFn = (module) => (module && module.init && module.init(__webpack_require__.S[name], initScope))\n\t\t\tif(module.then) return promises.push(module.then(initFn, handleError));\n\t\t\tvar initResult = initFn(module);\n\t\t\tif(initResult && initResult.then) return promises.push(initResult['catch'](handleError));\n\t\t} catch(err) { handleError(err); }\n\t}\n\tvar promises = [];\n\tswitch(name) {\n\t\tcase \"default\": {\n\t\t\tregister(\"neural_compressor_ext_lab\", \"0.1.0\", () => (Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => (__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\"))))));\n\t\t\tregister(\"react-sanitized-html\", \"2.0.0\", () => (Promise.all([__webpack_require__.e(\"vendors-node_modules_react-sanitized-html_lib_index_js\"), __webpack_require__.e(\"webpack_sharing_consume_default_sanitize-html_sanitize-html\"), __webpack_require__.e(\"webpack_sharing_consume_default_react\")]).then(() => (() => (__webpack_require__(/*! ./node_modules/react-sanitized-html/lib/index.js */ \"./node_modules/react-sanitized-html/lib/index.js\"))))));\n\t\t\tregister(\"sanitize-html\", \"1.27.5\", () => (__webpack_require__.e(\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\").then(() => (() => (__webpack_require__(/*! ./node_modules/sanitize-html/dist/sanitize-html.js */ \"./node_modules/sanitize-html/dist/sanitize-html.js\"))))));\n\t\t}\n\t\tbreak;\n\t}\n\tif(!promises.length) return initPromises[name] = 1;\n\treturn initPromises[name] = Promise.all(promises).then(() => (initPromises[name] = 1));\n};","var scriptUrl;\nif (__webpack_require__.g.importScripts) scriptUrl = __webpack_require__.g.location + \"\";\nvar document = __webpack_require__.g.document;\nif (!scriptUrl && document) {\n\tif (document.currentScript)\n\t\tscriptUrl = document.currentScript.src\n\tif (!scriptUrl) {\n\t\tvar scripts = document.getElementsByTagName(\"script\");\n\t\tif(scripts.length) scriptUrl = scripts[scripts.length - 1].src\n\t}\n}\n// When supporting browsers where an automatic publicPath is not supported you must specify an output.publicPath manually via configuration\n// or pass an empty string (\"\") and set the __webpack_public_path__ variable from your code to use your own logic.\nif (!scriptUrl) throw new Error(\"Automatic publicPath is not supported in this browser\");\nscriptUrl = scriptUrl.replace(/#.*$/, \"\").replace(/\\?.*$/, \"\").replace(/\\/[^\\/]+$/, \"/\");\n__webpack_require__.p = scriptUrl;","var parseVersion = (str) => {\n\t// see webpack/lib/util/semver.js for original code\n\tvar p=p=>{return p.split(\".\").map((p=>{return+p==p?+p:p}))},n=/^([^-+]+)?(?:-([^+]+))?(?:\\+(.+))?$/.exec(str),r=n[1]?p(n[1]):[];return n[2]&&(r.length++,r.push.apply(r,p(n[2]))),n[3]&&(r.push([]),r.push.apply(r,p(n[3]))),r;\n}\nvar versionLt = (a, b) => {\n\t// see webpack/lib/util/semver.js for original code\n\ta=parseVersion(a),b=parseVersion(b);for(var r=0;;){if(r>=a.length)return r=b.length)return\"u\"==n;var t=b[r],f=(typeof t)[0];if(n!=f)return\"o\"==n&&\"n\"==f||(\"s\"==f||\"u\"==n);if(\"o\"!=n&&\"u\"!=n&&e!=t)return e {\n\t// see webpack/lib/util/semver.js 
for original code\n\tvar r=range[0],n=\"\";if(1===range.length)return\"*\";if(r+.5){n+=0==r?\">=\":-1==r?\"<\":1==r?\"^\":2==r?\"~\":r>0?\"=\":\"!=\";for(var e=1,a=1;a0?\".\":\"\")+(e=2,t)}return n}var g=[];for(a=1;a {\n\t// see webpack/lib/util/semver.js for original code\n\tif(0 in range){version=parseVersion(version);var e=range[0],r=e<0;r&&(e=-e-1);for(var n=0,i=1,a=!0;;i++,n++){var f,s,g=i=version.length||\"o\"==(s=(typeof(f=version[n]))[0]))return!a||(\"u\"==g?i>e&&!r:\"\"==g!=r);if(\"u\"==s){if(!a||\"u\"!=g)return!1}else if(a)if(g==s)if(i<=e){if(f!=range[i])return!1}else{if(r?f>range[i]:f {\n\tvar scope = __webpack_require__.S[scopeName];\n\tif(!scope || !__webpack_require__.o(scope, key)) throw new Error(\"Shared module \" + key + \" doesn't exist in shared scope \" + scopeName);\n\treturn scope;\n};\nvar findVersion = (scope, key) => {\n\tvar versions = scope[key];\n\tvar key = Object.keys(versions).reduce((a, b) => {\n\t\treturn !a || versionLt(a, b) ? b : a;\n\t}, 0);\n\treturn key && versions[key]\n};\nvar findSingletonVersionKey = (scope, key) => {\n\tvar versions = scope[key];\n\treturn Object.keys(versions).reduce((a, b) => {\n\t\treturn !a || (!versions[a].loaded && versionLt(a, b)) ? b : a;\n\t}, 0);\n};\nvar getInvalidSingletonVersionMessage = (scope, key, version, requiredVersion) => {\n\treturn \"Unsatisfied version \" + version + \" from \" + (version && scope[key][version].from) + \" of shared singleton module \" + key + \" (required \" + rangeToString(requiredVersion) + \")\"\n};\nvar getSingleton = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\treturn get(scope[key][version]);\n};\nvar getSingletonVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\tif (!satisfy(requiredVersion, version)) typeof console !== \"undefined\" && console.warn && console.warn(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));\n\treturn get(scope[key][version]);\n};\nvar getStrictSingletonVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\tif (!satisfy(requiredVersion, version)) throw new Error(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));\n\treturn get(scope[key][version]);\n};\nvar findValidVersion = (scope, key, requiredVersion) => {\n\tvar versions = scope[key];\n\tvar key = Object.keys(versions).reduce((a, b) => {\n\t\tif (!satisfy(requiredVersion, b)) return a;\n\t\treturn !a || versionLt(a, b) ? 
b : a;\n\t}, 0);\n\treturn key && versions[key]\n};\nvar getInvalidVersionMessage = (scope, scopeName, key, requiredVersion) => {\n\tvar versions = scope[key];\n\treturn \"No satisfying version (\" + rangeToString(requiredVersion) + \") of shared module \" + key + \" found in shared scope \" + scopeName + \".\\n\" +\n\t\t\"Available versions: \" + Object.keys(versions).map((key) => {\n\t\treturn key + \" from \" + versions[key].from;\n\t}).join(\", \");\n};\nvar getValidVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar entry = findValidVersion(scope, key, requiredVersion);\n\tif(entry) return get(entry);\n\tthrow new Error(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));\n};\nvar warnInvalidVersion = (scope, scopeName, key, requiredVersion) => {\n\ttypeof console !== \"undefined\" && console.warn && console.warn(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));\n};\nvar get = (entry) => {\n\tentry.loaded = 1;\n\treturn entry.get()\n};\nvar init = (fn) => (function(scopeName, a, b, c) {\n\tvar promise = __webpack_require__.I(scopeName);\n\tif (promise && promise.then) return promise.then(fn.bind(fn, scopeName, __webpack_require__.S[scopeName], a, b, c));\n\treturn fn(scopeName, __webpack_require__.S[scopeName], a, b, c);\n});\n\nvar load = /*#__PURE__*/ init((scopeName, scope, key) => {\n\tensureExistence(scopeName, key);\n\treturn get(findVersion(scope, key));\n});\nvar loadFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {\n\treturn scope && __webpack_require__.o(scope, key) ? get(findVersion(scope, key)) : fallback();\n});\nvar loadVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));\n});\nvar loadSingleton = /*#__PURE__*/ init((scopeName, scope, key) => {\n\tensureExistence(scopeName, key);\n\treturn getSingleton(scope, scopeName, key);\n});\nvar loadSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getSingletonVersion(scope, scopeName, key, version);\n});\nvar loadStrictVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getValidVersion(scope, scopeName, key, version);\n});\nvar loadStrictSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getStrictSingletonVersion(scope, scopeName, key, version);\n});\nvar loadVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));\n});\nvar loadSingletonFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getSingleton(scope, scopeName, key);\n});\nvar loadSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getSingletonVersion(scope, scopeName, key, version);\n});\nvar loadStrictVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tvar entry = scope && __webpack_require__.o(scope, key) && 
findValidVersion(scope, key, version);\n\treturn entry ? get(entry) : fallback();\n});\nvar loadStrictSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getStrictSingletonVersion(scope, scopeName, key, version);\n});\nvar installedModules = {};\nvar moduleToHandlerMapping = {\n\t\"webpack/sharing/consume/default/react\": () => (loadSingletonVersionCheck(\"default\", \"react\", [1,17,0,1])),\n\t\"webpack/sharing/consume/default/@jupyterlab/notebook\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/notebook\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/apputils\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/apputils\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/settingregistry\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/settingregistry\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/mainmenu\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/mainmenu\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/ui-components\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/ui-components\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@lumino/widgets\": () => (loadSingletonVersionCheck(\"default\", \"@lumino/widgets\", [1,1,33,0])),\n\t\"webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html\": () => (loadStrictVersionCheckFallback(\"default\", \"react-sanitized-html\", [1,2,0,0], () => (Promise.all([__webpack_require__.e(\"vendors-node_modules_react-sanitized-html_lib_index_js\"), __webpack_require__.e(\"webpack_sharing_consume_default_sanitize-html_sanitize-html\")]).then(() => (() => (__webpack_require__(/*! react-sanitized-html */ \"./node_modules/react-sanitized-html/lib/index.js\"))))))),\n\t\"webpack/sharing/consume/default/sanitize-html/sanitize-html\": () => (loadStrictVersionCheckFallback(\"default\", \"sanitize-html\", [1,1,16,1], () => (__webpack_require__.e(\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\").then(() => (() => (__webpack_require__(/*! 
sanitize-html */ \"./node_modules/sanitize-html/dist/sanitize-html.js\")))))))\n};\n// no consumes in initial chunks\nvar chunkMapping = {\n\t\"webpack_sharing_consume_default_react\": [\n\t\t\"webpack/sharing/consume/default/react\"\n\t],\n\t\"lib_index_js\": [\n\t\t\"webpack/sharing/consume/default/@jupyterlab/notebook\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/apputils\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/settingregistry\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/mainmenu\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/ui-components\",\n\t\t\"webpack/sharing/consume/default/@lumino/widgets\",\n\t\t\"webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html\"\n\t],\n\t\"webpack_sharing_consume_default_sanitize-html_sanitize-html\": [\n\t\t\"webpack/sharing/consume/default/sanitize-html/sanitize-html\"\n\t]\n};\n__webpack_require__.f.consumes = (chunkId, promises) => {\n\tif(__webpack_require__.o(chunkMapping, chunkId)) {\n\t\tchunkMapping[chunkId].forEach((id) => {\n\t\t\tif(__webpack_require__.o(installedModules, id)) return promises.push(installedModules[id]);\n\t\t\tvar onFactory = (factory) => {\n\t\t\t\tinstalledModules[id] = 0;\n\t\t\t\t__webpack_require__.m[id] = (module) => {\n\t\t\t\t\tdelete __webpack_require__.c[id];\n\t\t\t\t\tmodule.exports = factory();\n\t\t\t\t}\n\t\t\t};\n\t\t\tvar onError = (error) => {\n\t\t\t\tdelete installedModules[id];\n\t\t\t\t__webpack_require__.m[id] = (module) => {\n\t\t\t\t\tdelete __webpack_require__.c[id];\n\t\t\t\t\tthrow error;\n\t\t\t\t}\n\t\t\t};\n\t\t\ttry {\n\t\t\t\tvar promise = moduleToHandlerMapping[id]();\n\t\t\t\tif(promise.then) {\n\t\t\t\t\tpromises.push(installedModules[id] = promise.then(onFactory)['catch'](onError));\n\t\t\t\t} else onFactory(promise);\n\t\t\t} catch(e) { onError(e); }\n\t\t});\n\t}\n}","__webpack_require__.b = document.baseURI || self.location.href;\n\n// object to store loaded and loading chunks\n// undefined = chunk not loaded, null = chunk preloaded/prefetched\n// [resolve, reject, Promise] = chunk loading, 0 = chunk loaded\nvar installedChunks = {\n\t\"neural_compressor_ext_lab\": 0\n};\n\n__webpack_require__.f.j = (chunkId, promises) => {\n\t\t// JSONP chunk loading for javascript\n\t\tvar installedChunkData = __webpack_require__.o(installedChunks, chunkId) ? installedChunks[chunkId] : undefined;\n\t\tif(installedChunkData !== 0) { // 0 means \"already installed\".\n\n\t\t\t// a Promise means \"currently loading\".\n\t\t\tif(installedChunkData) {\n\t\t\t\tpromises.push(installedChunkData[2]);\n\t\t\t} else {\n\t\t\t\tif(!/^webpack_sharing_consume_default_(react|sanitize\\-html_sanitize\\-html)$/.test(chunkId)) {\n\t\t\t\t\t// setup Promise in chunk cache\n\t\t\t\t\tvar promise = new Promise((resolve, reject) => (installedChunkData = installedChunks[chunkId] = [resolve, reject]));\n\t\t\t\t\tpromises.push(installedChunkData[2] = promise);\n\n\t\t\t\t\t// start chunk loading\n\t\t\t\t\tvar url = __webpack_require__.p + __webpack_require__.u(chunkId);\n\t\t\t\t\t// create error before stack unwound to get useful stacktrace later\n\t\t\t\t\tvar error = new Error();\n\t\t\t\t\tvar loadingEnded = (event) => {\n\t\t\t\t\t\tif(__webpack_require__.o(installedChunks, chunkId)) {\n\t\t\t\t\t\t\tinstalledChunkData = installedChunks[chunkId];\n\t\t\t\t\t\t\tif(installedChunkData !== 0) installedChunks[chunkId] = undefined;\n\t\t\t\t\t\t\tif(installedChunkData) {\n\t\t\t\t\t\t\t\tvar errorType = event && (event.type === 'load' ? 
'missing' : event.type);\n\t\t\t\t\t\t\t\tvar realSrc = event && event.target && event.target.src;\n\t\t\t\t\t\t\t\terror.message = 'Loading chunk ' + chunkId + ' failed.\\n(' + errorType + ': ' + realSrc + ')';\n\t\t\t\t\t\t\t\terror.name = 'ChunkLoadError';\n\t\t\t\t\t\t\t\terror.type = errorType;\n\t\t\t\t\t\t\t\terror.request = realSrc;\n\t\t\t\t\t\t\t\tinstalledChunkData[1](error);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\t\t__webpack_require__.l(url, loadingEnded, \"chunk-\" + chunkId, chunkId);\n\t\t\t\t} else installedChunks[chunkId] = 0;\n\t\t\t}\n\t\t}\n};\n\n// no prefetching\n\n// no preloaded\n\n// no HMR\n\n// no HMR manifest\n\n// no on chunks loaded\n\n// install a JSONP callback for chunk loading\nvar webpackJsonpCallback = (parentChunkLoadingFunction, data) => {\n\tvar [chunkIds, moreModules, runtime] = data;\n\t// add \"moreModules\" to the modules object,\n\t// then flag all \"chunkIds\" as loaded and fire callback\n\tvar moduleId, chunkId, i = 0;\n\tif(chunkIds.some((id) => (installedChunks[id] !== 0))) {\n\t\tfor(moduleId in moreModules) {\n\t\t\tif(__webpack_require__.o(moreModules, moduleId)) {\n\t\t\t\t__webpack_require__.m[moduleId] = moreModules[moduleId];\n\t\t\t}\n\t\t}\n\t\tif(runtime) var result = runtime(__webpack_require__);\n\t}\n\tif(parentChunkLoadingFunction) parentChunkLoadingFunction(data);\n\tfor(;i < chunkIds.length; i++) {\n\t\tchunkId = chunkIds[i];\n\t\tif(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) {\n\t\t\tinstalledChunks[chunkId][0]();\n\t\t}\n\t\tinstalledChunks[chunkId] = 0;\n\t}\n\n}\n\nvar chunkLoadingGlobal = self[\"webpackChunkneural_compressor_ext_lab\"] = self[\"webpackChunkneural_compressor_ext_lab\"] || [];\nchunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0));\nchunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal));","__webpack_require__.nc = undefined;","","// module cache are used so entry inlining is disabled\n// startup\n// Load entry module and return exports\nvar __webpack_exports__ = __webpack_require__(\"webpack/container/entry/neural_compressor_ext_lab\");\n",""],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.34f9ad20791fd484f052.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.34f9ad20791fd484f052.js deleted file mode 100644 index a06a23ed0e0..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.34f9ad20791fd484f052.js +++ /dev/null @@ -1,572 +0,0 @@ -var _JUPYTERLAB; -/******/ (() => { // webpackBootstrap -/******/ "use strict"; -/******/ var __webpack_modules__ = ({ - -/***/ "webpack/container/entry/neural_compressor_ext_lab": -/*!***********************!*\ - !*** container entry ***! - \***********************/ -/***/ ((__unused_webpack_module, exports, __webpack_require__) => { - -var moduleMap = { - "./index": () => { - return Promise.all([__webpack_require__.e("webpack_sharing_consume_default_react"), __webpack_require__.e("lib_index_js")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ "./lib/index.js"))))); - }, - "./extension": () => { - return Promise.all([__webpack_require__.e("webpack_sharing_consume_default_react"), __webpack_require__.e("lib_index_js")]).then(() => (() => ((__webpack_require__(/*! 
./lib/index.js */ "./lib/index.js"))))); - }, - "./style": () => { - return Promise.all([__webpack_require__.e("vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854"), __webpack_require__.e("style_index_js")]).then(() => (() => ((__webpack_require__(/*! ./style/index.js */ "./style/index.js"))))); - } -}; -var get = (module, getScope) => { - __webpack_require__.R = getScope; - getScope = ( - __webpack_require__.o(moduleMap, module) - ? moduleMap[module]() - : Promise.resolve().then(() => { - throw new Error('Module "' + module + '" does not exist in container.'); - }) - ); - __webpack_require__.R = undefined; - return getScope; -}; -var init = (shareScope, initScope) => { - if (!__webpack_require__.S) return; - var name = "default" - var oldScope = __webpack_require__.S[name]; - if(oldScope && oldScope !== shareScope) throw new Error("Container initialization failed as it has already been initialized with a different share scope"); - __webpack_require__.S[name] = shareScope; - return __webpack_require__.I(name, initScope); -}; - -// This exports getters to disallow modifications -__webpack_require__.d(exports, { - get: () => (get), - init: () => (init) -}); - -/***/ }) - -/******/ }); -/************************************************************************/ -/******/ // The module cache -/******/ var __webpack_module_cache__ = {}; -/******/ -/******/ // The require function -/******/ function __webpack_require__(moduleId) { -/******/ // Check if module is in cache -/******/ var cachedModule = __webpack_module_cache__[moduleId]; -/******/ if (cachedModule !== undefined) { -/******/ return cachedModule.exports; -/******/ } -/******/ // Create a new module (and put it into the cache) -/******/ var module = __webpack_module_cache__[moduleId] = { -/******/ id: moduleId, -/******/ // no module.loaded needed -/******/ exports: {} -/******/ }; -/******/ -/******/ // Execute the module function -/******/ __webpack_modules__[moduleId](module, module.exports, __webpack_require__); -/******/ -/******/ // Return the exports of the module -/******/ return module.exports; -/******/ } -/******/ -/******/ // expose the modules object (__webpack_modules__) -/******/ __webpack_require__.m = __webpack_modules__; -/******/ -/******/ // expose the module cache -/******/ __webpack_require__.c = __webpack_module_cache__; -/******/ -/************************************************************************/ -/******/ /* webpack/runtime/compat get default export */ -/******/ (() => { -/******/ // getDefaultExport function for compatibility with non-harmony modules -/******/ __webpack_require__.n = (module) => { -/******/ var getter = module && module.__esModule ? 
-/******/ () => (module['default']) : -/******/ () => (module); -/******/ __webpack_require__.d(getter, { a: getter }); -/******/ return getter; -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/define property getters */ -/******/ (() => { -/******/ // define getter functions for harmony exports -/******/ __webpack_require__.d = (exports, definition) => { -/******/ for(var key in definition) { -/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) { -/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] }); -/******/ } -/******/ } -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/ensure chunk */ -/******/ (() => { -/******/ __webpack_require__.f = {}; -/******/ // This file contains only the entry chunk. -/******/ // The chunk loading function for additional chunks -/******/ __webpack_require__.e = (chunkId) => { -/******/ return Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => { -/******/ __webpack_require__.f[key](chunkId, promises); -/******/ return promises; -/******/ }, [])); -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/get javascript chunk filename */ -/******/ (() => { -/******/ // This function allow to reference async chunks -/******/ __webpack_require__.u = (chunkId) => { -/******/ // return url for filenames based on template -/******/ return "" + chunkId + "." + {"webpack_sharing_consume_default_react":"19c51f7b56cfd16da3f9","lib_index_js":"0c0187df9df8bc38b9c5","vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854":"e09faf9ec3a764e40dc7","style_index_js":"8d733cc8b74fabbd10b8","vendors-node_modules_react-sanitized-html_lib_index_js":"500104f7c13c01fe1646","webpack_sharing_consume_default_sanitize-html_sanitize-html":"635249bb6dc3884c24a3","vendors-node_modules_sanitize-html_dist_sanitize-html_js":"825dbf94ec7371e0b28d"}[chunkId] + ".js"; -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/global */ -/******/ (() => { -/******/ __webpack_require__.g = (function() { -/******/ if (typeof globalThis === 'object') return globalThis; -/******/ try { -/******/ return this || new Function('return this')(); -/******/ } catch (e) { -/******/ if (typeof window === 'object') return window; -/******/ } -/******/ })(); -/******/ })(); -/******/ -/******/ /* webpack/runtime/hasOwnProperty shorthand */ -/******/ (() => { -/******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop)) -/******/ })(); -/******/ -/******/ /* webpack/runtime/load script */ -/******/ (() => { -/******/ var inProgress = {}; -/******/ var dataWebpackPrefix = "neural_compressor_ext_lab:"; -/******/ // loadScript function to load a script via script tag -/******/ __webpack_require__.l = (url, done, key, chunkId) => { -/******/ if(inProgress[url]) { inProgress[url].push(done); return; } -/******/ var script, needAttach; -/******/ if(key !== undefined) { -/******/ var scripts = document.getElementsByTagName("script"); -/******/ for(var i = 0; i < scripts.length; i++) { -/******/ var s = scripts[i]; -/******/ if(s.getAttribute("src") == url || s.getAttribute("data-webpack") == dataWebpackPrefix + key) { script = s; break; } -/******/ } -/******/ } -/******/ if(!script) { -/******/ needAttach = true; -/******/ script = document.createElement('script'); -/******/ -/******/ script.charset = 'utf-8'; -/******/ script.timeout = 120; -/******/ if (__webpack_require__.nc) { 
-/******/ script.setAttribute("nonce", __webpack_require__.nc); -/******/ } -/******/ script.setAttribute("data-webpack", dataWebpackPrefix + key); -/******/ script.src = url; -/******/ } -/******/ inProgress[url] = [done]; -/******/ var onScriptComplete = (prev, event) => { -/******/ // avoid mem leaks in IE. -/******/ script.onerror = script.onload = null; -/******/ clearTimeout(timeout); -/******/ var doneFns = inProgress[url]; -/******/ delete inProgress[url]; -/******/ script.parentNode && script.parentNode.removeChild(script); -/******/ doneFns && doneFns.forEach((fn) => (fn(event))); -/******/ if(prev) return prev(event); -/******/ } -/******/ ; -/******/ var timeout = setTimeout(onScriptComplete.bind(null, undefined, { type: 'timeout', target: script }), 120000); -/******/ script.onerror = onScriptComplete.bind(null, script.onerror); -/******/ script.onload = onScriptComplete.bind(null, script.onload); -/******/ needAttach && document.head.appendChild(script); -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/make namespace object */ -/******/ (() => { -/******/ // define __esModule on exports -/******/ __webpack_require__.r = (exports) => { -/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { -/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); -/******/ } -/******/ Object.defineProperty(exports, '__esModule', { value: true }); -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/sharing */ -/******/ (() => { -/******/ __webpack_require__.S = {}; -/******/ var initPromises = {}; -/******/ var initTokens = {}; -/******/ __webpack_require__.I = (name, initScope) => { -/******/ if(!initScope) initScope = []; -/******/ // handling circular init calls -/******/ var initToken = initTokens[name]; -/******/ if(!initToken) initToken = initTokens[name] = {}; -/******/ if(initScope.indexOf(initToken) >= 0) return; -/******/ initScope.push(initToken); -/******/ // only runs once -/******/ if(initPromises[name]) return initPromises[name]; -/******/ // creates a new share scope if needed -/******/ if(!__webpack_require__.o(__webpack_require__.S, name)) __webpack_require__.S[name] = {}; -/******/ // runs all init snippets from all modules reachable -/******/ var scope = __webpack_require__.S[name]; -/******/ var warn = (msg) => (typeof console !== "undefined" && console.warn && console.warn(msg)); -/******/ var uniqueName = "neural_compressor_ext_lab"; -/******/ var register = (name, version, factory, eager) => { -/******/ var versions = scope[name] = scope[name] || {}; -/******/ var activeVersion = versions[version]; -/******/ if(!activeVersion || (!activeVersion.loaded && (!eager != !activeVersion.eager ? 
eager : uniqueName > activeVersion.from))) versions[version] = { get: factory, from: uniqueName, eager: !!eager }; -/******/ }; -/******/ var initExternal = (id) => { -/******/ var handleError = (err) => (warn("Initialization of sharing external failed: " + err)); -/******/ try { -/******/ var module = __webpack_require__(id); -/******/ if(!module) return; -/******/ var initFn = (module) => (module && module.init && module.init(__webpack_require__.S[name], initScope)) -/******/ if(module.then) return promises.push(module.then(initFn, handleError)); -/******/ var initResult = initFn(module); -/******/ if(initResult && initResult.then) return promises.push(initResult['catch'](handleError)); -/******/ } catch(err) { handleError(err); } -/******/ } -/******/ var promises = []; -/******/ switch(name) { -/******/ case "default": { -/******/ register("neural_compressor_ext_lab", "0.1.0", () => (Promise.all([__webpack_require__.e("webpack_sharing_consume_default_react"), __webpack_require__.e("lib_index_js")]).then(() => (() => (__webpack_require__(/*! ./lib/index.js */ "./lib/index.js")))))); -/******/ register("react-sanitized-html", "2.0.0", () => (Promise.all([__webpack_require__.e("vendors-node_modules_react-sanitized-html_lib_index_js"), __webpack_require__.e("webpack_sharing_consume_default_sanitize-html_sanitize-html"), __webpack_require__.e("webpack_sharing_consume_default_react")]).then(() => (() => (__webpack_require__(/*! ./node_modules/react-sanitized-html/lib/index.js */ "./node_modules/react-sanitized-html/lib/index.js")))))); -/******/ register("sanitize-html", "1.27.5", () => (__webpack_require__.e("vendors-node_modules_sanitize-html_dist_sanitize-html_js").then(() => (() => (__webpack_require__(/*! ./node_modules/sanitize-html/dist/sanitize-html.js */ "./node_modules/sanitize-html/dist/sanitize-html.js")))))); -/******/ } -/******/ break; -/******/ } -/******/ if(!promises.length) return initPromises[name] = 1; -/******/ return initPromises[name] = Promise.all(promises).then(() => (initPromises[name] = 1)); -/******/ }; -/******/ })(); -/******/ -/******/ /* webpack/runtime/publicPath */ -/******/ (() => { -/******/ var scriptUrl; -/******/ if (__webpack_require__.g.importScripts) scriptUrl = __webpack_require__.g.location + ""; -/******/ var document = __webpack_require__.g.document; -/******/ if (!scriptUrl && document) { -/******/ if (document.currentScript) -/******/ scriptUrl = document.currentScript.src -/******/ if (!scriptUrl) { -/******/ var scripts = document.getElementsByTagName("script"); -/******/ if(scripts.length) scriptUrl = scripts[scripts.length - 1].src -/******/ } -/******/ } -/******/ // When supporting browsers where an automatic publicPath is not supported you must specify an output.publicPath manually via configuration -/******/ // or pass an empty string ("") and set the __webpack_public_path__ variable from your code to use your own logic. 
-/******/ if (!scriptUrl) throw new Error("Automatic publicPath is not supported in this browser"); -/******/ scriptUrl = scriptUrl.replace(/#.*$/, "").replace(/\?.*$/, "").replace(/\/[^\/]+$/, "/"); -/******/ __webpack_require__.p = scriptUrl; -/******/ })(); -/******/ -/******/ /* webpack/runtime/consumes */ -/******/ (() => { -/******/ var parseVersion = (str) => { -/******/ // see webpack/lib/util/semver.js for original code -/******/ var p=p=>{return p.split(".").map((p=>{return+p==p?+p:p}))},n=/^([^-+]+)?(?:-([^+]+))?(?:\+(.+))?$/.exec(str),r=n[1]?p(n[1]):[];return n[2]&&(r.length++,r.push.apply(r,p(n[2]))),n[3]&&(r.push([]),r.push.apply(r,p(n[3]))),r; -/******/ } -/******/ var versionLt = (a, b) => { -/******/ // see webpack/lib/util/semver.js for original code -/******/ a=parseVersion(a),b=parseVersion(b);for(var r=0;;){if(r>=a.length)return r=b.length)return"u"==n;var t=b[r],f=(typeof t)[0];if(n!=f)return"o"==n&&"n"==f||("s"==f||"u"==n);if("o"!=n&&"u"!=n&&e!=t)return e { -/******/ // see webpack/lib/util/semver.js for original code -/******/ var r=range[0],n="";if(1===range.length)return"*";if(r+.5){n+=0==r?">=":-1==r?"<":1==r?"^":2==r?"~":r>0?"=":"!=";for(var e=1,a=1;a0?".":"")+(e=2,t)}return n}var g=[];for(a=1;a { -/******/ // see webpack/lib/util/semver.js for original code -/******/ if(0 in range){version=parseVersion(version);var e=range[0],r=e<0;r&&(e=-e-1);for(var n=0,i=1,a=!0;;i++,n++){var f,s,g=i=version.length||"o"==(s=(typeof(f=version[n]))[0]))return!a||("u"==g?i>e&&!r:""==g!=r);if("u"==s){if(!a||"u"!=g)return!1}else if(a)if(g==s)if(i<=e){if(f!=range[i])return!1}else{if(r?f>range[i]:f { -/******/ var scope = __webpack_require__.S[scopeName]; -/******/ if(!scope || !__webpack_require__.o(scope, key)) throw new Error("Shared module " + key + " doesn't exist in shared scope " + scopeName); -/******/ return scope; -/******/ }; -/******/ var findVersion = (scope, key) => { -/******/ var versions = scope[key]; -/******/ var key = Object.keys(versions).reduce((a, b) => { -/******/ return !a || versionLt(a, b) ? b : a; -/******/ }, 0); -/******/ return key && versions[key] -/******/ }; -/******/ var findSingletonVersionKey = (scope, key) => { -/******/ var versions = scope[key]; -/******/ return Object.keys(versions).reduce((a, b) => { -/******/ return !a || (!versions[a].loaded && versionLt(a, b)) ? 
b : a; -/******/ }, 0); -/******/ }; -/******/ var getInvalidSingletonVersionMessage = (scope, key, version, requiredVersion) => { -/******/ return "Unsatisfied version " + version + " from " + (version && scope[key][version].from) + " of shared singleton module " + key + " (required " + rangeToString(requiredVersion) + ")" -/******/ }; -/******/ var getSingleton = (scope, scopeName, key, requiredVersion) => { -/******/ var version = findSingletonVersionKey(scope, key); -/******/ return get(scope[key][version]); -/******/ }; -/******/ var getSingletonVersion = (scope, scopeName, key, requiredVersion) => { -/******/ var version = findSingletonVersionKey(scope, key); -/******/ if (!satisfy(requiredVersion, version)) typeof console !== "undefined" && console.warn && console.warn(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion)); -/******/ return get(scope[key][version]); -/******/ }; -/******/ var getStrictSingletonVersion = (scope, scopeName, key, requiredVersion) => { -/******/ var version = findSingletonVersionKey(scope, key); -/******/ if (!satisfy(requiredVersion, version)) throw new Error(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion)); -/******/ return get(scope[key][version]); -/******/ }; -/******/ var findValidVersion = (scope, key, requiredVersion) => { -/******/ var versions = scope[key]; -/******/ var key = Object.keys(versions).reduce((a, b) => { -/******/ if (!satisfy(requiredVersion, b)) return a; -/******/ return !a || versionLt(a, b) ? b : a; -/******/ }, 0); -/******/ return key && versions[key] -/******/ }; -/******/ var getInvalidVersionMessage = (scope, scopeName, key, requiredVersion) => { -/******/ var versions = scope[key]; -/******/ return "No satisfying version (" + rangeToString(requiredVersion) + ") of shared module " + key + " found in shared scope " + scopeName + ".\n" + -/******/ "Available versions: " + Object.keys(versions).map((key) => { -/******/ return key + " from " + versions[key].from; -/******/ }).join(", "); -/******/ }; -/******/ var getValidVersion = (scope, scopeName, key, requiredVersion) => { -/******/ var entry = findValidVersion(scope, key, requiredVersion); -/******/ if(entry) return get(entry); -/******/ throw new Error(getInvalidVersionMessage(scope, scopeName, key, requiredVersion)); -/******/ }; -/******/ var warnInvalidVersion = (scope, scopeName, key, requiredVersion) => { -/******/ typeof console !== "undefined" && console.warn && console.warn(getInvalidVersionMessage(scope, scopeName, key, requiredVersion)); -/******/ }; -/******/ var get = (entry) => { -/******/ entry.loaded = 1; -/******/ return entry.get() -/******/ }; -/******/ var init = (fn) => (function(scopeName, a, b, c) { -/******/ var promise = __webpack_require__.I(scopeName); -/******/ if (promise && promise.then) return promise.then(fn.bind(fn, scopeName, __webpack_require__.S[scopeName], a, b, c)); -/******/ return fn(scopeName, __webpack_require__.S[scopeName], a, b, c); -/******/ }); -/******/ -/******/ var load = /*#__PURE__*/ init((scopeName, scope, key) => { -/******/ ensureExistence(scopeName, key); -/******/ return get(findVersion(scope, key)); -/******/ }); -/******/ var loadFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => { -/******/ return scope && __webpack_require__.o(scope, key) ? 
get(findVersion(scope, key)) : fallback(); -/******/ }); -/******/ var loadVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => { -/******/ ensureExistence(scopeName, key); -/******/ return get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key)); -/******/ }); -/******/ var loadSingleton = /*#__PURE__*/ init((scopeName, scope, key) => { -/******/ ensureExistence(scopeName, key); -/******/ return getSingleton(scope, scopeName, key); -/******/ }); -/******/ var loadSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => { -/******/ ensureExistence(scopeName, key); -/******/ return getSingletonVersion(scope, scopeName, key, version); -/******/ }); -/******/ var loadStrictVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => { -/******/ ensureExistence(scopeName, key); -/******/ return getValidVersion(scope, scopeName, key, version); -/******/ }); -/******/ var loadStrictSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => { -/******/ ensureExistence(scopeName, key); -/******/ return getStrictSingletonVersion(scope, scopeName, key, version); -/******/ }); -/******/ var loadVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => { -/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback(); -/******/ return get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key)); -/******/ }); -/******/ var loadSingletonFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => { -/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback(); -/******/ return getSingleton(scope, scopeName, key); -/******/ }); -/******/ var loadSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => { -/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback(); -/******/ return getSingletonVersion(scope, scopeName, key, version); -/******/ }); -/******/ var loadStrictVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => { -/******/ var entry = scope && __webpack_require__.o(scope, key) && findValidVersion(scope, key, version); -/******/ return entry ? 
get(entry) : fallback(); -/******/ }); -/******/ var loadStrictSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => { -/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback(); -/******/ return getStrictSingletonVersion(scope, scopeName, key, version); -/******/ }); -/******/ var installedModules = {}; -/******/ var moduleToHandlerMapping = { -/******/ "webpack/sharing/consume/default/react": () => (loadSingletonVersionCheck("default", "react", [1,17,0,1])), -/******/ "webpack/sharing/consume/default/@jupyterlab/notebook": () => (loadSingletonVersionCheck("default", "@jupyterlab/notebook", [1,3,4,7])), -/******/ "webpack/sharing/consume/default/@jupyterlab/apputils": () => (loadSingletonVersionCheck("default", "@jupyterlab/apputils", [1,3,4,7])), -/******/ "webpack/sharing/consume/default/@jupyterlab/settingregistry": () => (loadSingletonVersionCheck("default", "@jupyterlab/settingregistry", [1,3,4,7])), -/******/ "webpack/sharing/consume/default/@jupyterlab/mainmenu": () => (loadSingletonVersionCheck("default", "@jupyterlab/mainmenu", [1,3,4,7])), -/******/ "webpack/sharing/consume/default/@jupyterlab/ui-components": () => (loadSingletonVersionCheck("default", "@jupyterlab/ui-components", [1,3,4,7])), -/******/ "webpack/sharing/consume/default/@lumino/widgets": () => (loadSingletonVersionCheck("default", "@lumino/widgets", [1,1,33,0])), -/******/ "webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html": () => (loadStrictVersionCheckFallback("default", "react-sanitized-html", [1,2,0,0], () => (Promise.all([__webpack_require__.e("vendors-node_modules_react-sanitized-html_lib_index_js"), __webpack_require__.e("webpack_sharing_consume_default_sanitize-html_sanitize-html")]).then(() => (() => (__webpack_require__(/*! react-sanitized-html */ "./node_modules/react-sanitized-html/lib/index.js"))))))), -/******/ "webpack/sharing/consume/default/sanitize-html/sanitize-html": () => (loadStrictVersionCheckFallback("default", "sanitize-html", [1,1,16,1], () => (__webpack_require__.e("vendors-node_modules_sanitize-html_dist_sanitize-html_js").then(() => (() => (__webpack_require__(/*! 
sanitize-html */ "./node_modules/sanitize-html/dist/sanitize-html.js"))))))) -/******/ }; -/******/ // no consumes in initial chunks -/******/ var chunkMapping = { -/******/ "webpack_sharing_consume_default_react": [ -/******/ "webpack/sharing/consume/default/react" -/******/ ], -/******/ "lib_index_js": [ -/******/ "webpack/sharing/consume/default/@jupyterlab/notebook", -/******/ "webpack/sharing/consume/default/@jupyterlab/apputils", -/******/ "webpack/sharing/consume/default/@jupyterlab/settingregistry", -/******/ "webpack/sharing/consume/default/@jupyterlab/mainmenu", -/******/ "webpack/sharing/consume/default/@jupyterlab/ui-components", -/******/ "webpack/sharing/consume/default/@lumino/widgets", -/******/ "webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html" -/******/ ], -/******/ "webpack_sharing_consume_default_sanitize-html_sanitize-html": [ -/******/ "webpack/sharing/consume/default/sanitize-html/sanitize-html" -/******/ ] -/******/ }; -/******/ __webpack_require__.f.consumes = (chunkId, promises) => { -/******/ if(__webpack_require__.o(chunkMapping, chunkId)) { -/******/ chunkMapping[chunkId].forEach((id) => { -/******/ if(__webpack_require__.o(installedModules, id)) return promises.push(installedModules[id]); -/******/ var onFactory = (factory) => { -/******/ installedModules[id] = 0; -/******/ __webpack_require__.m[id] = (module) => { -/******/ delete __webpack_require__.c[id]; -/******/ module.exports = factory(); -/******/ } -/******/ }; -/******/ var onError = (error) => { -/******/ delete installedModules[id]; -/******/ __webpack_require__.m[id] = (module) => { -/******/ delete __webpack_require__.c[id]; -/******/ throw error; -/******/ } -/******/ }; -/******/ try { -/******/ var promise = moduleToHandlerMapping[id](); -/******/ if(promise.then) { -/******/ promises.push(installedModules[id] = promise.then(onFactory)['catch'](onError)); -/******/ } else onFactory(promise); -/******/ } catch(e) { onError(e); } -/******/ }); -/******/ } -/******/ } -/******/ })(); -/******/ -/******/ /* webpack/runtime/jsonp chunk loading */ -/******/ (() => { -/******/ __webpack_require__.b = document.baseURI || self.location.href; -/******/ -/******/ // object to store loaded and loading chunks -/******/ // undefined = chunk not loaded, null = chunk preloaded/prefetched -/******/ // [resolve, reject, Promise] = chunk loading, 0 = chunk loaded -/******/ var installedChunks = { -/******/ "neural_compressor_ext_lab": 0 -/******/ }; -/******/ -/******/ __webpack_require__.f.j = (chunkId, promises) => { -/******/ // JSONP chunk loading for javascript -/******/ var installedChunkData = __webpack_require__.o(installedChunks, chunkId) ? installedChunks[chunkId] : undefined; -/******/ if(installedChunkData !== 0) { // 0 means "already installed". -/******/ -/******/ // a Promise means "currently loading". 
-/******/ if(installedChunkData) { -/******/ promises.push(installedChunkData[2]); -/******/ } else { -/******/ if(!/^webpack_sharing_consume_default_(react|sanitize\-html_sanitize\-html)$/.test(chunkId)) { -/******/ // setup Promise in chunk cache -/******/ var promise = new Promise((resolve, reject) => (installedChunkData = installedChunks[chunkId] = [resolve, reject])); -/******/ promises.push(installedChunkData[2] = promise); -/******/ -/******/ // start chunk loading -/******/ var url = __webpack_require__.p + __webpack_require__.u(chunkId); -/******/ // create error before stack unwound to get useful stacktrace later -/******/ var error = new Error(); -/******/ var loadingEnded = (event) => { -/******/ if(__webpack_require__.o(installedChunks, chunkId)) { -/******/ installedChunkData = installedChunks[chunkId]; -/******/ if(installedChunkData !== 0) installedChunks[chunkId] = undefined; -/******/ if(installedChunkData) { -/******/ var errorType = event && (event.type === 'load' ? 'missing' : event.type); -/******/ var realSrc = event && event.target && event.target.src; -/******/ error.message = 'Loading chunk ' + chunkId + ' failed.\n(' + errorType + ': ' + realSrc + ')'; -/******/ error.name = 'ChunkLoadError'; -/******/ error.type = errorType; -/******/ error.request = realSrc; -/******/ installedChunkData[1](error); -/******/ } -/******/ } -/******/ }; -/******/ __webpack_require__.l(url, loadingEnded, "chunk-" + chunkId, chunkId); -/******/ } else installedChunks[chunkId] = 0; -/******/ } -/******/ } -/******/ }; -/******/ -/******/ // no prefetching -/******/ -/******/ // no preloaded -/******/ -/******/ // no HMR -/******/ -/******/ // no HMR manifest -/******/ -/******/ // no on chunks loaded -/******/ -/******/ // install a JSONP callback for chunk loading -/******/ var webpackJsonpCallback = (parentChunkLoadingFunction, data) => { -/******/ var [chunkIds, moreModules, runtime] = data; -/******/ // add "moreModules" to the modules object, -/******/ // then flag all "chunkIds" as loaded and fire callback -/******/ var moduleId, chunkId, i = 0; -/******/ if(chunkIds.some((id) => (installedChunks[id] !== 0))) { -/******/ for(moduleId in moreModules) { -/******/ if(__webpack_require__.o(moreModules, moduleId)) { -/******/ __webpack_require__.m[moduleId] = moreModules[moduleId]; -/******/ } -/******/ } -/******/ if(runtime) var result = runtime(__webpack_require__); -/******/ } -/******/ if(parentChunkLoadingFunction) parentChunkLoadingFunction(data); -/******/ for(;i < chunkIds.length; i++) { -/******/ chunkId = chunkIds[i]; -/******/ if(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) { -/******/ installedChunks[chunkId][0](); -/******/ } -/******/ installedChunks[chunkId] = 0; -/******/ } -/******/ -/******/ } -/******/ -/******/ var chunkLoadingGlobal = self["webpackChunkneural_compressor_ext_lab"] = self["webpackChunkneural_compressor_ext_lab"] || []; -/******/ chunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0)); -/******/ chunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal)); -/******/ })(); -/******/ -/******/ /* webpack/runtime/nonce */ -/******/ (() => { -/******/ __webpack_require__.nc = undefined; -/******/ })(); -/******/ -/************************************************************************/ -/******/ -/******/ // module cache are used so entry inlining is disabled -/******/ // startup -/******/ // Load entry module and return exports -/******/ var __webpack_exports__ = 
__webpack_require__("webpack/container/entry/neural_compressor_ext_lab"); -/******/ (_JUPYTERLAB = typeof _JUPYTERLAB === "undefined" ? {} : _JUPYTERLAB).neural_compressor_ext_lab = __webpack_exports__; -/******/ -/******/ })() -; -//# sourceMappingURL=remoteEntry.34f9ad20791fd484f052.js.map \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.34f9ad20791fd484f052.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.34f9ad20791fd484f052.js.map deleted file mode 100644 index f8175790c7a..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.34f9ad20791fd484f052.js.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"file":"remoteEntry.34f9ad20791fd484f052.js","mappings":";;;;;;;;;;;AAAA;AACA;AACA;AACA,EAAE;AACF;AACA;AACA,EAAE;AACF;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,IAAI;AACJ;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AAEA;AACA;AACA;AACA;AACA,CAAC;;;;;;UCpCD;UACA;;UAEA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;;UAEA;UACA;;UAEA;UACA;UACA;;UAEA;UACA;;UAEA;UACA;;;;;WC5BA;WACA;WACA;WACA;WACA;WACA,iCAAiC,WAAW;WAC5C;WACA;;;;;WCPA;WACA;WACA;WACA;WACA,yCAAyC,wCAAwC;WACjF;WACA;WACA;;;;;WCPA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;;;;;WCRA;WACA;WACA;WACA,8BAA8B,kgBAAkgB;WAChiB;;;;;WCJA;WACA;WACA;WACA;WACA,GAAG;WACH;WACA;WACA,CAAC;;;;;WCPD;;;;;WCAA;WACA;WACA;WACA;WACA,uBAAuB,4BAA4B;WACnD;WACA;WACA;WACA,iBAAiB,oBAAoB;WACrC;WACA,mGAAmG,YAAY;WAC/G;WACA;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,mEAAmE,iCAAiC;WACpG;WACA;WACA;WACA;;;;;WCzCA;WACA;WACA;WACA,uDAAuD,iBAAiB;WACxE;WACA,gDAAgD,aAAa;WAC7D;;;;;WCNA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,oJAAoJ;WACpJ;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,IAAI,aAAa;WACjB;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;;;;;WC7CA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;;;;;WCfA;WACA;WACA,WAAW,6BAA6B,iBAAiB,GAAG,qEAAqE;WACjI;WACA;WACA;WACA,qCAAqC,aAAa,EAAE,wDAAwD,2BAA2B,4BAA4B,2BAA2B,+CAA+C,mCAAmC;WAChR;WACA;WACA;WACA,qBAAqB,8BAA8B,SAAS,sDAAsD,gBAAgB,eAAe,KAAK,6DAA6D,SAAS,SAAS,QAAQ,eAAe,KAAK,eAAe,qGAAqG,WAAW,aAAa;WAC7Y;WACA;WACA;WACA,gBAAgB,8BAA8B,qBAAqB,YAAY,sBAAsB,SAAS,iDAAiD,6FAA6F,WAAW,uBAAuB,2BAA2B,wBAAwB,KAAK,oCAAoC,oBAAoB,wBAAwB,oBAAoB,SAAS,KAAK,yBAAyB,KAAK,gCAAgC,yBAAyB,QAAQ,eAAe,KAAK,eAAe,4DAA4D;WACtoB;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,CAAC;;WAED;WACA;WACA;WACA,CAAC;WACD;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;W
ACA;WACA;WACA;WACA,MAAM;WACN,KAAK,WAAW;WAChB,GAAG;WACH;WACA;;;;;WC3LA;;WAEA;WACA;WACA;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA,iCAAiC;;WAEjC;WACA;WACA;WACA,KAAK;WACL;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM;WACN;WACA;WACA;;WAEA;;WAEA;;WAEA;;WAEA;;WAEA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM,qBAAqB;WAC3B;WACA;WACA;WACA;WACA;WACA;;WAEA;;WAEA;WACA;WACA;;;;;WCrFA;;;;;UEAA;UACA;UACA;UACA","sources":["webpack://neural_compressor_ext_lab/webpack/container-entry","webpack://neural_compressor_ext_lab/webpack/bootstrap","webpack://neural_compressor_ext_lab/webpack/runtime/compat get default export","webpack://neural_compressor_ext_lab/webpack/runtime/define property getters","webpack://neural_compressor_ext_lab/webpack/runtime/ensure chunk","webpack://neural_compressor_ext_lab/webpack/runtime/get javascript chunk filename","webpack://neural_compressor_ext_lab/webpack/runtime/global","webpack://neural_compressor_ext_lab/webpack/runtime/hasOwnProperty shorthand","webpack://neural_compressor_ext_lab/webpack/runtime/load script","webpack://neural_compressor_ext_lab/webpack/runtime/make namespace object","webpack://neural_compressor_ext_lab/webpack/runtime/sharing","webpack://neural_compressor_ext_lab/webpack/runtime/publicPath","webpack://neural_compressor_ext_lab/webpack/runtime/consumes","webpack://neural_compressor_ext_lab/webpack/runtime/jsonp chunk loading","webpack://neural_compressor_ext_lab/webpack/runtime/nonce","webpack://neural_compressor_ext_lab/webpack/before-startup","webpack://neural_compressor_ext_lab/webpack/startup","webpack://neural_compressor_ext_lab/webpack/after-startup"],"sourcesContent":["var moduleMap = {\n\t\"./index\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\")))));\n\t},\n\t\"./extension\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\")))));\n\t},\n\t\"./style\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854\"), __webpack_require__.e(\"style_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./style/index.js */ \"./style/index.js\")))));\n\t}\n};\nvar get = (module, getScope) => {\n\t__webpack_require__.R = getScope;\n\tgetScope = (\n\t\t__webpack_require__.o(moduleMap, module)\n\t\t\t? 
moduleMap[module]()\n\t\t\t: Promise.resolve().then(() => {\n\t\t\t\tthrow new Error('Module \"' + module + '\" does not exist in container.');\n\t\t\t})\n\t);\n\t__webpack_require__.R = undefined;\n\treturn getScope;\n};\nvar init = (shareScope, initScope) => {\n\tif (!__webpack_require__.S) return;\n\tvar name = \"default\"\n\tvar oldScope = __webpack_require__.S[name];\n\tif(oldScope && oldScope !== shareScope) throw new Error(\"Container initialization failed as it has already been initialized with a different share scope\");\n\t__webpack_require__.S[name] = shareScope;\n\treturn __webpack_require__.I(name, initScope);\n};\n\n// This exports getters to disallow modifications\n__webpack_require__.d(exports, {\n\tget: () => (get),\n\tinit: () => (init)\n});","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\tid: moduleId,\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n// expose the modules object (__webpack_modules__)\n__webpack_require__.m = __webpack_modules__;\n\n// expose the module cache\n__webpack_require__.c = __webpack_module_cache__;\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.f = {};\n// This file contains only the entry chunk.\n// The chunk loading function for additional chunks\n__webpack_require__.e = (chunkId) => {\n\treturn Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => {\n\t\t__webpack_require__.f[key](chunkId, promises);\n\t\treturn promises;\n\t}, []));\n};","// This function allow to reference async chunks\n__webpack_require__.u = (chunkId) => {\n\t// return url for filenames based on template\n\treturn \"\" + chunkId + \".\" + {\"webpack_sharing_consume_default_react\":\"19c51f7b56cfd16da3f9\",\"lib_index_js\":\"0c0187df9df8bc38b9c5\",\"vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854\":\"e09faf9ec3a764e40dc7\",\"style_index_js\":\"8d733cc8b74fabbd10b8\",\"vendors-node_modules_react-sanitized-html_lib_index_js\":\"500104f7c13c01fe1646\",\"webpack_sharing_consume_default_sanitize-html_sanitize-html\":\"635249bb6dc3884c24a3\",\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\":\"825dbf94ec7371e0b28d\"}[chunkId] + \".js\";\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return 
window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","var inProgress = {};\nvar dataWebpackPrefix = \"neural_compressor_ext_lab:\";\n// loadScript function to load a script via script tag\n__webpack_require__.l = (url, done, key, chunkId) => {\n\tif(inProgress[url]) { inProgress[url].push(done); return; }\n\tvar script, needAttach;\n\tif(key !== undefined) {\n\t\tvar scripts = document.getElementsByTagName(\"script\");\n\t\tfor(var i = 0; i < scripts.length; i++) {\n\t\t\tvar s = scripts[i];\n\t\t\tif(s.getAttribute(\"src\") == url || s.getAttribute(\"data-webpack\") == dataWebpackPrefix + key) { script = s; break; }\n\t\t}\n\t}\n\tif(!script) {\n\t\tneedAttach = true;\n\t\tscript = document.createElement('script');\n\n\t\tscript.charset = 'utf-8';\n\t\tscript.timeout = 120;\n\t\tif (__webpack_require__.nc) {\n\t\t\tscript.setAttribute(\"nonce\", __webpack_require__.nc);\n\t\t}\n\t\tscript.setAttribute(\"data-webpack\", dataWebpackPrefix + key);\n\t\tscript.src = url;\n\t}\n\tinProgress[url] = [done];\n\tvar onScriptComplete = (prev, event) => {\n\t\t// avoid mem leaks in IE.\n\t\tscript.onerror = script.onload = null;\n\t\tclearTimeout(timeout);\n\t\tvar doneFns = inProgress[url];\n\t\tdelete inProgress[url];\n\t\tscript.parentNode && script.parentNode.removeChild(script);\n\t\tdoneFns && doneFns.forEach((fn) => (fn(event)));\n\t\tif(prev) return prev(event);\n\t}\n\t;\n\tvar timeout = setTimeout(onScriptComplete.bind(null, undefined, { type: 'timeout', target: script }), 120000);\n\tscript.onerror = onScriptComplete.bind(null, script.onerror);\n\tscript.onload = onScriptComplete.bind(null, script.onload);\n\tneedAttach && document.head.appendChild(script);\n};","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","__webpack_require__.S = {};\nvar initPromises = {};\nvar initTokens = {};\n__webpack_require__.I = (name, initScope) => {\n\tif(!initScope) initScope = [];\n\t// handling circular init calls\n\tvar initToken = initTokens[name];\n\tif(!initToken) initToken = initTokens[name] = {};\n\tif(initScope.indexOf(initToken) >= 0) return;\n\tinitScope.push(initToken);\n\t// only runs once\n\tif(initPromises[name]) return initPromises[name];\n\t// creates a new share scope if needed\n\tif(!__webpack_require__.o(__webpack_require__.S, name)) __webpack_require__.S[name] = {};\n\t// runs all init snippets from all modules reachable\n\tvar scope = __webpack_require__.S[name];\n\tvar warn = (msg) => (typeof console !== \"undefined\" && console.warn && console.warn(msg));\n\tvar uniqueName = \"neural_compressor_ext_lab\";\n\tvar register = (name, version, factory, eager) => {\n\t\tvar versions = scope[name] = scope[name] || {};\n\t\tvar activeVersion = versions[version];\n\t\tif(!activeVersion || (!activeVersion.loaded && (!eager != !activeVersion.eager ? 
eager : uniqueName > activeVersion.from))) versions[version] = { get: factory, from: uniqueName, eager: !!eager };\n\t};\n\tvar initExternal = (id) => {\n\t\tvar handleError = (err) => (warn(\"Initialization of sharing external failed: \" + err));\n\t\ttry {\n\t\t\tvar module = __webpack_require__(id);\n\t\t\tif(!module) return;\n\t\t\tvar initFn = (module) => (module && module.init && module.init(__webpack_require__.S[name], initScope))\n\t\t\tif(module.then) return promises.push(module.then(initFn, handleError));\n\t\t\tvar initResult = initFn(module);\n\t\t\tif(initResult && initResult.then) return promises.push(initResult['catch'](handleError));\n\t\t} catch(err) { handleError(err); }\n\t}\n\tvar promises = [];\n\tswitch(name) {\n\t\tcase \"default\": {\n\t\t\tregister(\"neural_compressor_ext_lab\", \"0.1.0\", () => (Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => (__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\"))))));\n\t\t\tregister(\"react-sanitized-html\", \"2.0.0\", () => (Promise.all([__webpack_require__.e(\"vendors-node_modules_react-sanitized-html_lib_index_js\"), __webpack_require__.e(\"webpack_sharing_consume_default_sanitize-html_sanitize-html\"), __webpack_require__.e(\"webpack_sharing_consume_default_react\")]).then(() => (() => (__webpack_require__(/*! ./node_modules/react-sanitized-html/lib/index.js */ \"./node_modules/react-sanitized-html/lib/index.js\"))))));\n\t\t\tregister(\"sanitize-html\", \"1.27.5\", () => (__webpack_require__.e(\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\").then(() => (() => (__webpack_require__(/*! ./node_modules/sanitize-html/dist/sanitize-html.js */ \"./node_modules/sanitize-html/dist/sanitize-html.js\"))))));\n\t\t}\n\t\tbreak;\n\t}\n\tif(!promises.length) return initPromises[name] = 1;\n\treturn initPromises[name] = Promise.all(promises).then(() => (initPromises[name] = 1));\n};","var scriptUrl;\nif (__webpack_require__.g.importScripts) scriptUrl = __webpack_require__.g.location + \"\";\nvar document = __webpack_require__.g.document;\nif (!scriptUrl && document) {\n\tif (document.currentScript)\n\t\tscriptUrl = document.currentScript.src\n\tif (!scriptUrl) {\n\t\tvar scripts = document.getElementsByTagName(\"script\");\n\t\tif(scripts.length) scriptUrl = scripts[scripts.length - 1].src\n\t}\n}\n// When supporting browsers where an automatic publicPath is not supported you must specify an output.publicPath manually via configuration\n// or pass an empty string (\"\") and set the __webpack_public_path__ variable from your code to use your own logic.\nif (!scriptUrl) throw new Error(\"Automatic publicPath is not supported in this browser\");\nscriptUrl = scriptUrl.replace(/#.*$/, \"\").replace(/\\?.*$/, \"\").replace(/\\/[^\\/]+$/, \"/\");\n__webpack_require__.p = scriptUrl;","var parseVersion = (str) => {\n\t// see webpack/lib/util/semver.js for original code\n\tvar p=p=>{return p.split(\".\").map((p=>{return+p==p?+p:p}))},n=/^([^-+]+)?(?:-([^+]+))?(?:\\+(.+))?$/.exec(str),r=n[1]?p(n[1]):[];return n[2]&&(r.length++,r.push.apply(r,p(n[2]))),n[3]&&(r.push([]),r.push.apply(r,p(n[3]))),r;\n}\nvar versionLt = (a, b) => {\n\t// see webpack/lib/util/semver.js for original code\n\ta=parseVersion(a),b=parseVersion(b);for(var r=0;;){if(r>=a.length)return r=b.length)return\"u\"==n;var t=b[r],f=(typeof t)[0];if(n!=f)return\"o\"==n&&\"n\"==f||(\"s\"==f||\"u\"==n);if(\"o\"!=n&&\"u\"!=n&&e!=t)return e {\n\t// see webpack/lib/util/semver.js 
for original code\n\tvar r=range[0],n=\"\";if(1===range.length)return\"*\";if(r+.5){n+=0==r?\">=\":-1==r?\"<\":1==r?\"^\":2==r?\"~\":r>0?\"=\":\"!=\";for(var e=1,a=1;a0?\".\":\"\")+(e=2,t)}return n}var g=[];for(a=1;a {\n\t// see webpack/lib/util/semver.js for original code\n\tif(0 in range){version=parseVersion(version);var e=range[0],r=e<0;r&&(e=-e-1);for(var n=0,i=1,a=!0;;i++,n++){var f,s,g=i=version.length||\"o\"==(s=(typeof(f=version[n]))[0]))return!a||(\"u\"==g?i>e&&!r:\"\"==g!=r);if(\"u\"==s){if(!a||\"u\"!=g)return!1}else if(a)if(g==s)if(i<=e){if(f!=range[i])return!1}else{if(r?f>range[i]:f {\n\tvar scope = __webpack_require__.S[scopeName];\n\tif(!scope || !__webpack_require__.o(scope, key)) throw new Error(\"Shared module \" + key + \" doesn't exist in shared scope \" + scopeName);\n\treturn scope;\n};\nvar findVersion = (scope, key) => {\n\tvar versions = scope[key];\n\tvar key = Object.keys(versions).reduce((a, b) => {\n\t\treturn !a || versionLt(a, b) ? b : a;\n\t}, 0);\n\treturn key && versions[key]\n};\nvar findSingletonVersionKey = (scope, key) => {\n\tvar versions = scope[key];\n\treturn Object.keys(versions).reduce((a, b) => {\n\t\treturn !a || (!versions[a].loaded && versionLt(a, b)) ? b : a;\n\t}, 0);\n};\nvar getInvalidSingletonVersionMessage = (scope, key, version, requiredVersion) => {\n\treturn \"Unsatisfied version \" + version + \" from \" + (version && scope[key][version].from) + \" of shared singleton module \" + key + \" (required \" + rangeToString(requiredVersion) + \")\"\n};\nvar getSingleton = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\treturn get(scope[key][version]);\n};\nvar getSingletonVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\tif (!satisfy(requiredVersion, version)) typeof console !== \"undefined\" && console.warn && console.warn(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));\n\treturn get(scope[key][version]);\n};\nvar getStrictSingletonVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\tif (!satisfy(requiredVersion, version)) throw new Error(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));\n\treturn get(scope[key][version]);\n};\nvar findValidVersion = (scope, key, requiredVersion) => {\n\tvar versions = scope[key];\n\tvar key = Object.keys(versions).reduce((a, b) => {\n\t\tif (!satisfy(requiredVersion, b)) return a;\n\t\treturn !a || versionLt(a, b) ? 
b : a;\n\t}, 0);\n\treturn key && versions[key]\n};\nvar getInvalidVersionMessage = (scope, scopeName, key, requiredVersion) => {\n\tvar versions = scope[key];\n\treturn \"No satisfying version (\" + rangeToString(requiredVersion) + \") of shared module \" + key + \" found in shared scope \" + scopeName + \".\\n\" +\n\t\t\"Available versions: \" + Object.keys(versions).map((key) => {\n\t\treturn key + \" from \" + versions[key].from;\n\t}).join(\", \");\n};\nvar getValidVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar entry = findValidVersion(scope, key, requiredVersion);\n\tif(entry) return get(entry);\n\tthrow new Error(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));\n};\nvar warnInvalidVersion = (scope, scopeName, key, requiredVersion) => {\n\ttypeof console !== \"undefined\" && console.warn && console.warn(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));\n};\nvar get = (entry) => {\n\tentry.loaded = 1;\n\treturn entry.get()\n};\nvar init = (fn) => (function(scopeName, a, b, c) {\n\tvar promise = __webpack_require__.I(scopeName);\n\tif (promise && promise.then) return promise.then(fn.bind(fn, scopeName, __webpack_require__.S[scopeName], a, b, c));\n\treturn fn(scopeName, __webpack_require__.S[scopeName], a, b, c);\n});\n\nvar load = /*#__PURE__*/ init((scopeName, scope, key) => {\n\tensureExistence(scopeName, key);\n\treturn get(findVersion(scope, key));\n});\nvar loadFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {\n\treturn scope && __webpack_require__.o(scope, key) ? get(findVersion(scope, key)) : fallback();\n});\nvar loadVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));\n});\nvar loadSingleton = /*#__PURE__*/ init((scopeName, scope, key) => {\n\tensureExistence(scopeName, key);\n\treturn getSingleton(scope, scopeName, key);\n});\nvar loadSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getSingletonVersion(scope, scopeName, key, version);\n});\nvar loadStrictVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getValidVersion(scope, scopeName, key, version);\n});\nvar loadStrictSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getStrictSingletonVersion(scope, scopeName, key, version);\n});\nvar loadVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));\n});\nvar loadSingletonFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getSingleton(scope, scopeName, key);\n});\nvar loadSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getSingletonVersion(scope, scopeName, key, version);\n});\nvar loadStrictVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tvar entry = scope && __webpack_require__.o(scope, key) && 
findValidVersion(scope, key, version);\n\treturn entry ? get(entry) : fallback();\n});\nvar loadStrictSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getStrictSingletonVersion(scope, scopeName, key, version);\n});\nvar installedModules = {};\nvar moduleToHandlerMapping = {\n\t\"webpack/sharing/consume/default/react\": () => (loadSingletonVersionCheck(\"default\", \"react\", [1,17,0,1])),\n\t\"webpack/sharing/consume/default/@jupyterlab/notebook\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/notebook\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/apputils\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/apputils\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/settingregistry\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/settingregistry\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/mainmenu\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/mainmenu\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/ui-components\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/ui-components\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@lumino/widgets\": () => (loadSingletonVersionCheck(\"default\", \"@lumino/widgets\", [1,1,33,0])),\n\t\"webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html\": () => (loadStrictVersionCheckFallback(\"default\", \"react-sanitized-html\", [1,2,0,0], () => (Promise.all([__webpack_require__.e(\"vendors-node_modules_react-sanitized-html_lib_index_js\"), __webpack_require__.e(\"webpack_sharing_consume_default_sanitize-html_sanitize-html\")]).then(() => (() => (__webpack_require__(/*! react-sanitized-html */ \"./node_modules/react-sanitized-html/lib/index.js\"))))))),\n\t\"webpack/sharing/consume/default/sanitize-html/sanitize-html\": () => (loadStrictVersionCheckFallback(\"default\", \"sanitize-html\", [1,1,16,1], () => (__webpack_require__.e(\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\").then(() => (() => (__webpack_require__(/*! 
sanitize-html */ \"./node_modules/sanitize-html/dist/sanitize-html.js\")))))))\n};\n// no consumes in initial chunks\nvar chunkMapping = {\n\t\"webpack_sharing_consume_default_react\": [\n\t\t\"webpack/sharing/consume/default/react\"\n\t],\n\t\"lib_index_js\": [\n\t\t\"webpack/sharing/consume/default/@jupyterlab/notebook\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/apputils\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/settingregistry\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/mainmenu\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/ui-components\",\n\t\t\"webpack/sharing/consume/default/@lumino/widgets\",\n\t\t\"webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html\"\n\t],\n\t\"webpack_sharing_consume_default_sanitize-html_sanitize-html\": [\n\t\t\"webpack/sharing/consume/default/sanitize-html/sanitize-html\"\n\t]\n};\n__webpack_require__.f.consumes = (chunkId, promises) => {\n\tif(__webpack_require__.o(chunkMapping, chunkId)) {\n\t\tchunkMapping[chunkId].forEach((id) => {\n\t\t\tif(__webpack_require__.o(installedModules, id)) return promises.push(installedModules[id]);\n\t\t\tvar onFactory = (factory) => {\n\t\t\t\tinstalledModules[id] = 0;\n\t\t\t\t__webpack_require__.m[id] = (module) => {\n\t\t\t\t\tdelete __webpack_require__.c[id];\n\t\t\t\t\tmodule.exports = factory();\n\t\t\t\t}\n\t\t\t};\n\t\t\tvar onError = (error) => {\n\t\t\t\tdelete installedModules[id];\n\t\t\t\t__webpack_require__.m[id] = (module) => {\n\t\t\t\t\tdelete __webpack_require__.c[id];\n\t\t\t\t\tthrow error;\n\t\t\t\t}\n\t\t\t};\n\t\t\ttry {\n\t\t\t\tvar promise = moduleToHandlerMapping[id]();\n\t\t\t\tif(promise.then) {\n\t\t\t\t\tpromises.push(installedModules[id] = promise.then(onFactory)['catch'](onError));\n\t\t\t\t} else onFactory(promise);\n\t\t\t} catch(e) { onError(e); }\n\t\t});\n\t}\n}","__webpack_require__.b = document.baseURI || self.location.href;\n\n// object to store loaded and loading chunks\n// undefined = chunk not loaded, null = chunk preloaded/prefetched\n// [resolve, reject, Promise] = chunk loading, 0 = chunk loaded\nvar installedChunks = {\n\t\"neural_compressor_ext_lab\": 0\n};\n\n__webpack_require__.f.j = (chunkId, promises) => {\n\t\t// JSONP chunk loading for javascript\n\t\tvar installedChunkData = __webpack_require__.o(installedChunks, chunkId) ? installedChunks[chunkId] : undefined;\n\t\tif(installedChunkData !== 0) { // 0 means \"already installed\".\n\n\t\t\t// a Promise means \"currently loading\".\n\t\t\tif(installedChunkData) {\n\t\t\t\tpromises.push(installedChunkData[2]);\n\t\t\t} else {\n\t\t\t\tif(!/^webpack_sharing_consume_default_(react|sanitize\\-html_sanitize\\-html)$/.test(chunkId)) {\n\t\t\t\t\t// setup Promise in chunk cache\n\t\t\t\t\tvar promise = new Promise((resolve, reject) => (installedChunkData = installedChunks[chunkId] = [resolve, reject]));\n\t\t\t\t\tpromises.push(installedChunkData[2] = promise);\n\n\t\t\t\t\t// start chunk loading\n\t\t\t\t\tvar url = __webpack_require__.p + __webpack_require__.u(chunkId);\n\t\t\t\t\t// create error before stack unwound to get useful stacktrace later\n\t\t\t\t\tvar error = new Error();\n\t\t\t\t\tvar loadingEnded = (event) => {\n\t\t\t\t\t\tif(__webpack_require__.o(installedChunks, chunkId)) {\n\t\t\t\t\t\t\tinstalledChunkData = installedChunks[chunkId];\n\t\t\t\t\t\t\tif(installedChunkData !== 0) installedChunks[chunkId] = undefined;\n\t\t\t\t\t\t\tif(installedChunkData) {\n\t\t\t\t\t\t\t\tvar errorType = event && (event.type === 'load' ? 
'missing' : event.type);\n\t\t\t\t\t\t\t\tvar realSrc = event && event.target && event.target.src;\n\t\t\t\t\t\t\t\terror.message = 'Loading chunk ' + chunkId + ' failed.\\n(' + errorType + ': ' + realSrc + ')';\n\t\t\t\t\t\t\t\terror.name = 'ChunkLoadError';\n\t\t\t\t\t\t\t\terror.type = errorType;\n\t\t\t\t\t\t\t\terror.request = realSrc;\n\t\t\t\t\t\t\t\tinstalledChunkData[1](error);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\t\t__webpack_require__.l(url, loadingEnded, \"chunk-\" + chunkId, chunkId);\n\t\t\t\t} else installedChunks[chunkId] = 0;\n\t\t\t}\n\t\t}\n};\n\n// no prefetching\n\n// no preloaded\n\n// no HMR\n\n// no HMR manifest\n\n// no on chunks loaded\n\n// install a JSONP callback for chunk loading\nvar webpackJsonpCallback = (parentChunkLoadingFunction, data) => {\n\tvar [chunkIds, moreModules, runtime] = data;\n\t// add \"moreModules\" to the modules object,\n\t// then flag all \"chunkIds\" as loaded and fire callback\n\tvar moduleId, chunkId, i = 0;\n\tif(chunkIds.some((id) => (installedChunks[id] !== 0))) {\n\t\tfor(moduleId in moreModules) {\n\t\t\tif(__webpack_require__.o(moreModules, moduleId)) {\n\t\t\t\t__webpack_require__.m[moduleId] = moreModules[moduleId];\n\t\t\t}\n\t\t}\n\t\tif(runtime) var result = runtime(__webpack_require__);\n\t}\n\tif(parentChunkLoadingFunction) parentChunkLoadingFunction(data);\n\tfor(;i < chunkIds.length; i++) {\n\t\tchunkId = chunkIds[i];\n\t\tif(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) {\n\t\t\tinstalledChunks[chunkId][0]();\n\t\t}\n\t\tinstalledChunks[chunkId] = 0;\n\t}\n\n}\n\nvar chunkLoadingGlobal = self[\"webpackChunkneural_compressor_ext_lab\"] = self[\"webpackChunkneural_compressor_ext_lab\"] || [];\nchunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0));\nchunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal));","__webpack_require__.nc = undefined;","","// module cache are used so entry inlining is disabled\n// startup\n// Load entry module and return exports\nvar __webpack_exports__ = __webpack_require__(\"webpack/container/entry/neural_compressor_ext_lab\");\n",""],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.e241a62ffe7f3e40b1d0.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.e241a62ffe7f3e40b1d0.js.map deleted file mode 100644 index 7bff25b9d23..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/remoteEntry.e241a62ffe7f3e40b1d0.js.map +++ /dev/null @@ -1 +0,0 @@ 
-{"version":3,"file":"remoteEntry.e241a62ffe7f3e40b1d0.js","mappings":";;;;;;;;;;;AAAA;AACA;AACA;AACA,EAAE;AACF;AACA;AACA,EAAE;AACF;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,IAAI;AACJ;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AAEA;AACA;AACA;AACA;AACA,CAAC;;;;;;UCpCD;UACA;;UAEA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;UACA;;UAEA;UACA;;UAEA;UACA;UACA;;UAEA;UACA;;UAEA;UACA;;;;;WC5BA;WACA;WACA;WACA;WACA;WACA,iCAAiC,WAAW;WAC5C;WACA;;;;;WCPA;WACA;WACA;WACA;WACA,yCAAyC,wCAAwC;WACjF;WACA;WACA;;;;;WCPA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;;;;;WCRA;WACA;WACA;WACA,8BAA8B,kgBAAkgB;WAChiB;;;;;WCJA;WACA;WACA;WACA;WACA,GAAG;WACH;WACA;WACA,CAAC;;;;;WCPD;;;;;WCAA;WACA;WACA;WACA;WACA,uBAAuB,4BAA4B;WACnD;WACA;WACA;WACA,iBAAiB,oBAAoB;WACrC;WACA,mGAAmG,YAAY;WAC/G;WACA;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,mEAAmE,iCAAiC;WACpG;WACA;WACA;WACA;;;;;WCzCA;WACA;WACA;WACA,uDAAuD,iBAAiB;WACxE;WACA,gDAAgD,aAAa;WAC7D;;;;;WCNA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,oJAAoJ;WACpJ;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,IAAI,aAAa;WACjB;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;;;;;WC7CA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;;;;;WCfA;WACA;WACA,WAAW,6BAA6B,iBAAiB,GAAG,qEAAqE;WACjI;WACA;WACA;WACA,qCAAqC,aAAa,EAAE,wDAAwD,2BAA2B,4BAA4B,2BAA2B,+CAA+C,mCAAmC;WAChR;WACA;WACA;WACA,qBAAqB,8BAA8B,SAAS,sDAAsD,gBAAgB,eAAe,KAAK,6DAA6D,SAAS,SAAS,QAAQ,eAAe,KAAK,eAAe,qGAAqG,WAAW,aAAa;WAC7Y;WACA;WACA;WACA,gBAAgB,8BAA8B,qBAAqB,YAAY,sBAAsB,SAAS,iDAAiD,6FAA6F,WAAW,uBAAuB,2BAA2B,wBAAwB,KAAK,oCAAoC,oBAAoB,wBAAwB,oBAAoB,SAAS,KAAK,yBAAyB,KAAK,gCAAgC,yBAAyB,QAAQ,eAAe,KAAK,eAAe,4DAA4D;WACtoB;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA,EAAE;WACF;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,CAAC;;WAED;WACA;WACA;WACA,CAAC;WACD;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA,CAAC;WACD;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM;WACN,KAAK,WAAW;WAChB,GAAG;WACH;WACA;;;;;WC3LA;;WAEA;WACA;WACA;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA,iCAAiC;;WAEjC;WACA;WACA;WACA,KAAK;WACL;WACA;WACA;WACA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM;WACN;WACA;WACA;;WAEA;;WAEA;;WAEA;;WAEA;;WAEA;;WAEA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA;WACA,MAAM,qBAAqB;WAC3B;WACA;WACA;WACA;WACA;WACA;;WAEA;;WAEA;WACA;WACA;;;;;WCrFA;;;;;UEAA;UACA;UACA;UACA","sources":["webpack://neural_compressor_ext_lab/webpack/container-entry","webpack://neural_compressor_ext_lab/webpack/bootstrap","webpack://neural_compressor_ext_lab/webpack/runtime/compat get default export","webpack://neural_compressor_ext_lab/webpack/runtime/define property 
getters","webpack://neural_compressor_ext_lab/webpack/runtime/ensure chunk","webpack://neural_compressor_ext_lab/webpack/runtime/get javascript chunk filename","webpack://neural_compressor_ext_lab/webpack/runtime/global","webpack://neural_compressor_ext_lab/webpack/runtime/hasOwnProperty shorthand","webpack://neural_compressor_ext_lab/webpack/runtime/load script","webpack://neural_compressor_ext_lab/webpack/runtime/make namespace object","webpack://neural_compressor_ext_lab/webpack/runtime/sharing","webpack://neural_compressor_ext_lab/webpack/runtime/publicPath","webpack://neural_compressor_ext_lab/webpack/runtime/consumes","webpack://neural_compressor_ext_lab/webpack/runtime/jsonp chunk loading","webpack://neural_compressor_ext_lab/webpack/runtime/nonce","webpack://neural_compressor_ext_lab/webpack/before-startup","webpack://neural_compressor_ext_lab/webpack/startup","webpack://neural_compressor_ext_lab/webpack/after-startup"],"sourcesContent":["var moduleMap = {\n\t\"./index\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\")))));\n\t},\n\t\"./extension\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\")))));\n\t},\n\t\"./style\": () => {\n\t\treturn Promise.all([__webpack_require__.e(\"vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854\"), __webpack_require__.e(\"style_index_js\")]).then(() => (() => ((__webpack_require__(/*! ./style/index.js */ \"./style/index.js\")))));\n\t}\n};\nvar get = (module, getScope) => {\n\t__webpack_require__.R = getScope;\n\tgetScope = (\n\t\t__webpack_require__.o(moduleMap, module)\n\t\t\t? 
moduleMap[module]()\n\t\t\t: Promise.resolve().then(() => {\n\t\t\t\tthrow new Error('Module \"' + module + '\" does not exist in container.');\n\t\t\t})\n\t);\n\t__webpack_require__.R = undefined;\n\treturn getScope;\n};\nvar init = (shareScope, initScope) => {\n\tif (!__webpack_require__.S) return;\n\tvar name = \"default\"\n\tvar oldScope = __webpack_require__.S[name];\n\tif(oldScope && oldScope !== shareScope) throw new Error(\"Container initialization failed as it has already been initialized with a different share scope\");\n\t__webpack_require__.S[name] = shareScope;\n\treturn __webpack_require__.I(name, initScope);\n};\n\n// This exports getters to disallow modifications\n__webpack_require__.d(exports, {\n\tget: () => (get),\n\tinit: () => (init)\n});","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\tid: moduleId,\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n// expose the modules object (__webpack_modules__)\n__webpack_require__.m = __webpack_modules__;\n\n// expose the module cache\n__webpack_require__.c = __webpack_module_cache__;\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.f = {};\n// This file contains only the entry chunk.\n// The chunk loading function for additional chunks\n__webpack_require__.e = (chunkId) => {\n\treturn Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => {\n\t\t__webpack_require__.f[key](chunkId, promises);\n\t\treturn promises;\n\t}, []));\n};","// This function allow to reference async chunks\n__webpack_require__.u = (chunkId) => {\n\t// return url for filenames based on template\n\treturn \"\" + chunkId + \".\" + {\"webpack_sharing_consume_default_react\":\"19c51f7b56cfd16da3f9\",\"lib_index_js\":\"2c3b18119886a0a82200\",\"vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854\":\"e09faf9ec3a764e40dc7\",\"style_index_js\":\"8d733cc8b74fabbd10b8\",\"vendors-node_modules_react-sanitized-html_lib_index_js\":\"500104f7c13c01fe1646\",\"webpack_sharing_consume_default_sanitize-html_sanitize-html\":\"635249bb6dc3884c24a3\",\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\":\"825dbf94ec7371e0b28d\"}[chunkId] + \".js\";\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return 
window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","var inProgress = {};\nvar dataWebpackPrefix = \"neural_compressor_ext_lab:\";\n// loadScript function to load a script via script tag\n__webpack_require__.l = (url, done, key, chunkId) => {\n\tif(inProgress[url]) { inProgress[url].push(done); return; }\n\tvar script, needAttach;\n\tif(key !== undefined) {\n\t\tvar scripts = document.getElementsByTagName(\"script\");\n\t\tfor(var i = 0; i < scripts.length; i++) {\n\t\t\tvar s = scripts[i];\n\t\t\tif(s.getAttribute(\"src\") == url || s.getAttribute(\"data-webpack\") == dataWebpackPrefix + key) { script = s; break; }\n\t\t}\n\t}\n\tif(!script) {\n\t\tneedAttach = true;\n\t\tscript = document.createElement('script');\n\n\t\tscript.charset = 'utf-8';\n\t\tscript.timeout = 120;\n\t\tif (__webpack_require__.nc) {\n\t\t\tscript.setAttribute(\"nonce\", __webpack_require__.nc);\n\t\t}\n\t\tscript.setAttribute(\"data-webpack\", dataWebpackPrefix + key);\n\t\tscript.src = url;\n\t}\n\tinProgress[url] = [done];\n\tvar onScriptComplete = (prev, event) => {\n\t\t// avoid mem leaks in IE.\n\t\tscript.onerror = script.onload = null;\n\t\tclearTimeout(timeout);\n\t\tvar doneFns = inProgress[url];\n\t\tdelete inProgress[url];\n\t\tscript.parentNode && script.parentNode.removeChild(script);\n\t\tdoneFns && doneFns.forEach((fn) => (fn(event)));\n\t\tif(prev) return prev(event);\n\t}\n\t;\n\tvar timeout = setTimeout(onScriptComplete.bind(null, undefined, { type: 'timeout', target: script }), 120000);\n\tscript.onerror = onScriptComplete.bind(null, script.onerror);\n\tscript.onload = onScriptComplete.bind(null, script.onload);\n\tneedAttach && document.head.appendChild(script);\n};","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","__webpack_require__.S = {};\nvar initPromises = {};\nvar initTokens = {};\n__webpack_require__.I = (name, initScope) => {\n\tif(!initScope) initScope = [];\n\t// handling circular init calls\n\tvar initToken = initTokens[name];\n\tif(!initToken) initToken = initTokens[name] = {};\n\tif(initScope.indexOf(initToken) >= 0) return;\n\tinitScope.push(initToken);\n\t// only runs once\n\tif(initPromises[name]) return initPromises[name];\n\t// creates a new share scope if needed\n\tif(!__webpack_require__.o(__webpack_require__.S, name)) __webpack_require__.S[name] = {};\n\t// runs all init snippets from all modules reachable\n\tvar scope = __webpack_require__.S[name];\n\tvar warn = (msg) => (typeof console !== \"undefined\" && console.warn && console.warn(msg));\n\tvar uniqueName = \"neural_compressor_ext_lab\";\n\tvar register = (name, version, factory, eager) => {\n\t\tvar versions = scope[name] = scope[name] || {};\n\t\tvar activeVersion = versions[version];\n\t\tif(!activeVersion || (!activeVersion.loaded && (!eager != !activeVersion.eager ? 
eager : uniqueName > activeVersion.from))) versions[version] = { get: factory, from: uniqueName, eager: !!eager };\n\t};\n\tvar initExternal = (id) => {\n\t\tvar handleError = (err) => (warn(\"Initialization of sharing external failed: \" + err));\n\t\ttry {\n\t\t\tvar module = __webpack_require__(id);\n\t\t\tif(!module) return;\n\t\t\tvar initFn = (module) => (module && module.init && module.init(__webpack_require__.S[name], initScope))\n\t\t\tif(module.then) return promises.push(module.then(initFn, handleError));\n\t\t\tvar initResult = initFn(module);\n\t\t\tif(initResult && initResult.then) return promises.push(initResult['catch'](handleError));\n\t\t} catch(err) { handleError(err); }\n\t}\n\tvar promises = [];\n\tswitch(name) {\n\t\tcase \"default\": {\n\t\t\tregister(\"neural_compressor_ext_lab\", \"0.1.0\", () => (Promise.all([__webpack_require__.e(\"webpack_sharing_consume_default_react\"), __webpack_require__.e(\"lib_index_js\")]).then(() => (() => (__webpack_require__(/*! ./lib/index.js */ \"./lib/index.js\"))))));\n\t\t\tregister(\"react-sanitized-html\", \"2.0.0\", () => (Promise.all([__webpack_require__.e(\"vendors-node_modules_react-sanitized-html_lib_index_js\"), __webpack_require__.e(\"webpack_sharing_consume_default_sanitize-html_sanitize-html\"), __webpack_require__.e(\"webpack_sharing_consume_default_react\")]).then(() => (() => (__webpack_require__(/*! ./node_modules/react-sanitized-html/lib/index.js */ \"./node_modules/react-sanitized-html/lib/index.js\"))))));\n\t\t\tregister(\"sanitize-html\", \"1.27.5\", () => (__webpack_require__.e(\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\").then(() => (() => (__webpack_require__(/*! ./node_modules/sanitize-html/dist/sanitize-html.js */ \"./node_modules/sanitize-html/dist/sanitize-html.js\"))))));\n\t\t}\n\t\tbreak;\n\t}\n\tif(!promises.length) return initPromises[name] = 1;\n\treturn initPromises[name] = Promise.all(promises).then(() => (initPromises[name] = 1));\n};","var scriptUrl;\nif (__webpack_require__.g.importScripts) scriptUrl = __webpack_require__.g.location + \"\";\nvar document = __webpack_require__.g.document;\nif (!scriptUrl && document) {\n\tif (document.currentScript)\n\t\tscriptUrl = document.currentScript.src\n\tif (!scriptUrl) {\n\t\tvar scripts = document.getElementsByTagName(\"script\");\n\t\tif(scripts.length) scriptUrl = scripts[scripts.length - 1].src\n\t}\n}\n// When supporting browsers where an automatic publicPath is not supported you must specify an output.publicPath manually via configuration\n// or pass an empty string (\"\") and set the __webpack_public_path__ variable from your code to use your own logic.\nif (!scriptUrl) throw new Error(\"Automatic publicPath is not supported in this browser\");\nscriptUrl = scriptUrl.replace(/#.*$/, \"\").replace(/\\?.*$/, \"\").replace(/\\/[^\\/]+$/, \"/\");\n__webpack_require__.p = scriptUrl;","var parseVersion = (str) => {\n\t// see webpack/lib/util/semver.js for original code\n\tvar p=p=>{return p.split(\".\").map((p=>{return+p==p?+p:p}))},n=/^([^-+]+)?(?:-([^+]+))?(?:\\+(.+))?$/.exec(str),r=n[1]?p(n[1]):[];return n[2]&&(r.length++,r.push.apply(r,p(n[2]))),n[3]&&(r.push([]),r.push.apply(r,p(n[3]))),r;\n}\nvar versionLt = (a, b) => {\n\t// see webpack/lib/util/semver.js for original code\n\ta=parseVersion(a),b=parseVersion(b);for(var r=0;;){if(r>=a.length)return r=b.length)return\"u\"==n;var t=b[r],f=(typeof t)[0];if(n!=f)return\"o\"==n&&\"n\"==f||(\"s\"==f||\"u\"==n);if(\"o\"!=n&&\"u\"!=n&&e!=t)return e {\n\t// see webpack/lib/util/semver.js 
for original code\n\tvar r=range[0],n=\"\";if(1===range.length)return\"*\";if(r+.5){n+=0==r?\">=\":-1==r?\"<\":1==r?\"^\":2==r?\"~\":r>0?\"=\":\"!=\";for(var e=1,a=1;a0?\".\":\"\")+(e=2,t)}return n}var g=[];for(a=1;a {\n\t// see webpack/lib/util/semver.js for original code\n\tif(0 in range){version=parseVersion(version);var e=range[0],r=e<0;r&&(e=-e-1);for(var n=0,i=1,a=!0;;i++,n++){var f,s,g=i=version.length||\"o\"==(s=(typeof(f=version[n]))[0]))return!a||(\"u\"==g?i>e&&!r:\"\"==g!=r);if(\"u\"==s){if(!a||\"u\"!=g)return!1}else if(a)if(g==s)if(i<=e){if(f!=range[i])return!1}else{if(r?f>range[i]:f {\n\tvar scope = __webpack_require__.S[scopeName];\n\tif(!scope || !__webpack_require__.o(scope, key)) throw new Error(\"Shared module \" + key + \" doesn't exist in shared scope \" + scopeName);\n\treturn scope;\n};\nvar findVersion = (scope, key) => {\n\tvar versions = scope[key];\n\tvar key = Object.keys(versions).reduce((a, b) => {\n\t\treturn !a || versionLt(a, b) ? b : a;\n\t}, 0);\n\treturn key && versions[key]\n};\nvar findSingletonVersionKey = (scope, key) => {\n\tvar versions = scope[key];\n\treturn Object.keys(versions).reduce((a, b) => {\n\t\treturn !a || (!versions[a].loaded && versionLt(a, b)) ? b : a;\n\t}, 0);\n};\nvar getInvalidSingletonVersionMessage = (scope, key, version, requiredVersion) => {\n\treturn \"Unsatisfied version \" + version + \" from \" + (version && scope[key][version].from) + \" of shared singleton module \" + key + \" (required \" + rangeToString(requiredVersion) + \")\"\n};\nvar getSingleton = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\treturn get(scope[key][version]);\n};\nvar getSingletonVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\tif (!satisfy(requiredVersion, version)) typeof console !== \"undefined\" && console.warn && console.warn(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));\n\treturn get(scope[key][version]);\n};\nvar getStrictSingletonVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar version = findSingletonVersionKey(scope, key);\n\tif (!satisfy(requiredVersion, version)) throw new Error(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));\n\treturn get(scope[key][version]);\n};\nvar findValidVersion = (scope, key, requiredVersion) => {\n\tvar versions = scope[key];\n\tvar key = Object.keys(versions).reduce((a, b) => {\n\t\tif (!satisfy(requiredVersion, b)) return a;\n\t\treturn !a || versionLt(a, b) ? 
b : a;\n\t}, 0);\n\treturn key && versions[key]\n};\nvar getInvalidVersionMessage = (scope, scopeName, key, requiredVersion) => {\n\tvar versions = scope[key];\n\treturn \"No satisfying version (\" + rangeToString(requiredVersion) + \") of shared module \" + key + \" found in shared scope \" + scopeName + \".\\n\" +\n\t\t\"Available versions: \" + Object.keys(versions).map((key) => {\n\t\treturn key + \" from \" + versions[key].from;\n\t}).join(\", \");\n};\nvar getValidVersion = (scope, scopeName, key, requiredVersion) => {\n\tvar entry = findValidVersion(scope, key, requiredVersion);\n\tif(entry) return get(entry);\n\tthrow new Error(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));\n};\nvar warnInvalidVersion = (scope, scopeName, key, requiredVersion) => {\n\ttypeof console !== \"undefined\" && console.warn && console.warn(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));\n};\nvar get = (entry) => {\n\tentry.loaded = 1;\n\treturn entry.get()\n};\nvar init = (fn) => (function(scopeName, a, b, c) {\n\tvar promise = __webpack_require__.I(scopeName);\n\tif (promise && promise.then) return promise.then(fn.bind(fn, scopeName, __webpack_require__.S[scopeName], a, b, c));\n\treturn fn(scopeName, __webpack_require__.S[scopeName], a, b, c);\n});\n\nvar load = /*#__PURE__*/ init((scopeName, scope, key) => {\n\tensureExistence(scopeName, key);\n\treturn get(findVersion(scope, key));\n});\nvar loadFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {\n\treturn scope && __webpack_require__.o(scope, key) ? get(findVersion(scope, key)) : fallback();\n});\nvar loadVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));\n});\nvar loadSingleton = /*#__PURE__*/ init((scopeName, scope, key) => {\n\tensureExistence(scopeName, key);\n\treturn getSingleton(scope, scopeName, key);\n});\nvar loadSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getSingletonVersion(scope, scopeName, key, version);\n});\nvar loadStrictVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getValidVersion(scope, scopeName, key, version);\n});\nvar loadStrictSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {\n\tensureExistence(scopeName, key);\n\treturn getStrictSingletonVersion(scope, scopeName, key, version);\n});\nvar loadVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));\n});\nvar loadSingletonFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getSingleton(scope, scopeName, key);\n});\nvar loadSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getSingletonVersion(scope, scopeName, key, version);\n});\nvar loadStrictVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tvar entry = scope && __webpack_require__.o(scope, key) && 
findValidVersion(scope, key, version);\n\treturn entry ? get(entry) : fallback();\n});\nvar loadStrictSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {\n\tif(!scope || !__webpack_require__.o(scope, key)) return fallback();\n\treturn getStrictSingletonVersion(scope, scopeName, key, version);\n});\nvar installedModules = {};\nvar moduleToHandlerMapping = {\n\t\"webpack/sharing/consume/default/react\": () => (loadSingletonVersionCheck(\"default\", \"react\", [1,17,0,1])),\n\t\"webpack/sharing/consume/default/@jupyterlab/notebook\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/notebook\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/apputils\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/apputils\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/settingregistry\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/settingregistry\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/mainmenu\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/mainmenu\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@jupyterlab/ui-components\": () => (loadSingletonVersionCheck(\"default\", \"@jupyterlab/ui-components\", [1,3,4,7])),\n\t\"webpack/sharing/consume/default/@lumino/widgets\": () => (loadSingletonVersionCheck(\"default\", \"@lumino/widgets\", [1,1,33,0])),\n\t\"webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html\": () => (loadStrictVersionCheckFallback(\"default\", \"react-sanitized-html\", [1,2,0,0], () => (Promise.all([__webpack_require__.e(\"vendors-node_modules_react-sanitized-html_lib_index_js\"), __webpack_require__.e(\"webpack_sharing_consume_default_sanitize-html_sanitize-html\")]).then(() => (() => (__webpack_require__(/*! react-sanitized-html */ \"./node_modules/react-sanitized-html/lib/index.js\"))))))),\n\t\"webpack/sharing/consume/default/sanitize-html/sanitize-html\": () => (loadStrictVersionCheckFallback(\"default\", \"sanitize-html\", [1,1,16,1], () => (__webpack_require__.e(\"vendors-node_modules_sanitize-html_dist_sanitize-html_js\").then(() => (() => (__webpack_require__(/*! 
sanitize-html */ \"./node_modules/sanitize-html/dist/sanitize-html.js\")))))))\n};\n// no consumes in initial chunks\nvar chunkMapping = {\n\t\"webpack_sharing_consume_default_react\": [\n\t\t\"webpack/sharing/consume/default/react\"\n\t],\n\t\"lib_index_js\": [\n\t\t\"webpack/sharing/consume/default/@jupyterlab/notebook\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/apputils\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/settingregistry\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/mainmenu\",\n\t\t\"webpack/sharing/consume/default/@jupyterlab/ui-components\",\n\t\t\"webpack/sharing/consume/default/@lumino/widgets\",\n\t\t\"webpack/sharing/consume/default/react-sanitized-html/react-sanitized-html\"\n\t],\n\t\"webpack_sharing_consume_default_sanitize-html_sanitize-html\": [\n\t\t\"webpack/sharing/consume/default/sanitize-html/sanitize-html\"\n\t]\n};\n__webpack_require__.f.consumes = (chunkId, promises) => {\n\tif(__webpack_require__.o(chunkMapping, chunkId)) {\n\t\tchunkMapping[chunkId].forEach((id) => {\n\t\t\tif(__webpack_require__.o(installedModules, id)) return promises.push(installedModules[id]);\n\t\t\tvar onFactory = (factory) => {\n\t\t\t\tinstalledModules[id] = 0;\n\t\t\t\t__webpack_require__.m[id] = (module) => {\n\t\t\t\t\tdelete __webpack_require__.c[id];\n\t\t\t\t\tmodule.exports = factory();\n\t\t\t\t}\n\t\t\t};\n\t\t\tvar onError = (error) => {\n\t\t\t\tdelete installedModules[id];\n\t\t\t\t__webpack_require__.m[id] = (module) => {\n\t\t\t\t\tdelete __webpack_require__.c[id];\n\t\t\t\t\tthrow error;\n\t\t\t\t}\n\t\t\t};\n\t\t\ttry {\n\t\t\t\tvar promise = moduleToHandlerMapping[id]();\n\t\t\t\tif(promise.then) {\n\t\t\t\t\tpromises.push(installedModules[id] = promise.then(onFactory)['catch'](onError));\n\t\t\t\t} else onFactory(promise);\n\t\t\t} catch(e) { onError(e); }\n\t\t});\n\t}\n}","__webpack_require__.b = document.baseURI || self.location.href;\n\n// object to store loaded and loading chunks\n// undefined = chunk not loaded, null = chunk preloaded/prefetched\n// [resolve, reject, Promise] = chunk loading, 0 = chunk loaded\nvar installedChunks = {\n\t\"neural_compressor_ext_lab\": 0\n};\n\n__webpack_require__.f.j = (chunkId, promises) => {\n\t\t// JSONP chunk loading for javascript\n\t\tvar installedChunkData = __webpack_require__.o(installedChunks, chunkId) ? installedChunks[chunkId] : undefined;\n\t\tif(installedChunkData !== 0) { // 0 means \"already installed\".\n\n\t\t\t// a Promise means \"currently loading\".\n\t\t\tif(installedChunkData) {\n\t\t\t\tpromises.push(installedChunkData[2]);\n\t\t\t} else {\n\t\t\t\tif(!/^webpack_sharing_consume_default_(react|sanitize\\-html_sanitize\\-html)$/.test(chunkId)) {\n\t\t\t\t\t// setup Promise in chunk cache\n\t\t\t\t\tvar promise = new Promise((resolve, reject) => (installedChunkData = installedChunks[chunkId] = [resolve, reject]));\n\t\t\t\t\tpromises.push(installedChunkData[2] = promise);\n\n\t\t\t\t\t// start chunk loading\n\t\t\t\t\tvar url = __webpack_require__.p + __webpack_require__.u(chunkId);\n\t\t\t\t\t// create error before stack unwound to get useful stacktrace later\n\t\t\t\t\tvar error = new Error();\n\t\t\t\t\tvar loadingEnded = (event) => {\n\t\t\t\t\t\tif(__webpack_require__.o(installedChunks, chunkId)) {\n\t\t\t\t\t\t\tinstalledChunkData = installedChunks[chunkId];\n\t\t\t\t\t\t\tif(installedChunkData !== 0) installedChunks[chunkId] = undefined;\n\t\t\t\t\t\t\tif(installedChunkData) {\n\t\t\t\t\t\t\t\tvar errorType = event && (event.type === 'load' ? 
'missing' : event.type);\n\t\t\t\t\t\t\t\tvar realSrc = event && event.target && event.target.src;\n\t\t\t\t\t\t\t\terror.message = 'Loading chunk ' + chunkId + ' failed.\\n(' + errorType + ': ' + realSrc + ')';\n\t\t\t\t\t\t\t\terror.name = 'ChunkLoadError';\n\t\t\t\t\t\t\t\terror.type = errorType;\n\t\t\t\t\t\t\t\terror.request = realSrc;\n\t\t\t\t\t\t\t\tinstalledChunkData[1](error);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\t\t__webpack_require__.l(url, loadingEnded, \"chunk-\" + chunkId, chunkId);\n\t\t\t\t} else installedChunks[chunkId] = 0;\n\t\t\t}\n\t\t}\n};\n\n// no prefetching\n\n// no preloaded\n\n// no HMR\n\n// no HMR manifest\n\n// no on chunks loaded\n\n// install a JSONP callback for chunk loading\nvar webpackJsonpCallback = (parentChunkLoadingFunction, data) => {\n\tvar [chunkIds, moreModules, runtime] = data;\n\t// add \"moreModules\" to the modules object,\n\t// then flag all \"chunkIds\" as loaded and fire callback\n\tvar moduleId, chunkId, i = 0;\n\tif(chunkIds.some((id) => (installedChunks[id] !== 0))) {\n\t\tfor(moduleId in moreModules) {\n\t\t\tif(__webpack_require__.o(moreModules, moduleId)) {\n\t\t\t\t__webpack_require__.m[moduleId] = moreModules[moduleId];\n\t\t\t}\n\t\t}\n\t\tif(runtime) var result = runtime(__webpack_require__);\n\t}\n\tif(parentChunkLoadingFunction) parentChunkLoadingFunction(data);\n\tfor(;i < chunkIds.length; i++) {\n\t\tchunkId = chunkIds[i];\n\t\tif(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) {\n\t\t\tinstalledChunks[chunkId][0]();\n\t\t}\n\t\tinstalledChunks[chunkId] = 0;\n\t}\n\n}\n\nvar chunkLoadingGlobal = self[\"webpackChunkneural_compressor_ext_lab\"] = self[\"webpackChunkneural_compressor_ext_lab\"] || [];\nchunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0));\nchunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal));","__webpack_require__.nc = undefined;","","// module cache are used so entry inlining is disabled\n// startup\n// Load entry module and return exports\nvar __webpack_exports__ = __webpack_require__(\"webpack/container/entry/neural_compressor_ext_lab\");\n",""],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style.js deleted file mode 100644 index 7b10c692587..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style.js +++ /dev/null @@ -1,4 +0,0 @@ -/* This is a generated file of CSS imports */ -/* It was generated by @jupyterlab/builder in Build.ensureAssets() */ - -import 'neural_compressor_ext_lab/style/index.js'; diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style_index_js.8d733cc8b74fabbd10b8.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style_index_js.8d733cc8b74fabbd10b8.js deleted file mode 100644 index e7bdb005a65..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style_index_js.8d733cc8b74fabbd10b8.js +++ /dev/null @@ -1,88 +0,0 @@ -"use strict"; -(self["webpackChunkneural_compressor_ext_lab"] = self["webpackChunkneural_compressor_ext_lab"] || []).push([["style_index_js"],{ - -/***/ "./node_modules/css-loader/dist/cjs.js!./style/base.css": 
-/*!**************************************************************!*\ - !*** ./node_modules/css-loader/dist/cjs.js!./style/base.css ***! - \**************************************************************/ -/***/ ((module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) -/* harmony export */ }); -/* harmony import */ var _node_modules_css_loader_dist_runtime_sourceMaps_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../node_modules/css-loader/dist/runtime/sourceMaps.js */ "./node_modules/css-loader/dist/runtime/sourceMaps.js"); -/* harmony import */ var _node_modules_css_loader_dist_runtime_sourceMaps_js__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_node_modules_css_loader_dist_runtime_sourceMaps_js__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _node_modules_css_loader_dist_runtime_api_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../node_modules/css-loader/dist/runtime/api.js */ "./node_modules/css-loader/dist/runtime/api.js"); -/* harmony import */ var _node_modules_css_loader_dist_runtime_api_js__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(_node_modules_css_loader_dist_runtime_api_js__WEBPACK_IMPORTED_MODULE_1__); -/* harmony import */ var _node_modules_css_loader_dist_runtime_getUrl_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ../node_modules/css-loader/dist/runtime/getUrl.js */ "./node_modules/css-loader/dist/runtime/getUrl.js"); -/* harmony import */ var _node_modules_css_loader_dist_runtime_getUrl_js__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(_node_modules_css_loader_dist_runtime_getUrl_js__WEBPACK_IMPORTED_MODULE_2__); -// Imports - - - -var ___CSS_LOADER_URL_IMPORT_0___ = new URL(/* asset import */ __webpack_require__(/*! 
data:image/svg+xml, */ "data:image/svg+xml,"), __webpack_require__.b); -var ___CSS_LOADER_EXPORT___ = _node_modules_css_loader_dist_runtime_api_js__WEBPACK_IMPORTED_MODULE_1___default()((_node_modules_css_loader_dist_runtime_sourceMaps_js__WEBPACK_IMPORTED_MODULE_0___default())); -var ___CSS_LOADER_URL_REPLACEMENT_0___ = _node_modules_css_loader_dist_runtime_getUrl_js__WEBPACK_IMPORTED_MODULE_2___default()(___CSS_LOADER_URL_IMPORT_0___); -// Module -___CSS_LOADER_EXPORT___.push([module.id, "/*\n See the JupyterLab Developer Guide for useful CSS Patterns:\n\n https://jupyterlab.readthedocs.io/en/stable/developer/css.html\n*/\n.lds-ripple {\n display: flex;\n position: absolute; \n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 80px;\n height: 80px;\n}\n.lds-ripple div {\n position: absolute; \n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n border: 4px solid rgb(245, 131, 55);\n opacity: 1;\n border-radius: 50%;\n animation: lds-ripple 1s cubic-bezier(0, 0.2, 0.8, 1) infinite;\n}\n.lds-ripple div:nth-child(2) {\n animation-delay: -0.5s;\n}\n@keyframes lds-ripple {\n 0% {\n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 0;\n height: 0;\n opacity: 0;\n }\n 4.9% {\n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 0;\n height: 0;\n opacity: 0;\n }\n 5% {\n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 0;\n height: 0;\n opacity: 1;\n }\n 100% {\n top: 0px;\n left: 0px;\n width: 72px;\n height: 72px;\n opacity: 0;\n }\n}\n\n\n/* CSS */\n.button-62 {\n background: linear-gradient(to bottom right, #EF4765, #FF9A5A);\n border: 0;\n border-radius: 12px;\n color: #FFFFFF;\n cursor: pointer;\n display: inline-block;\n font-family: -apple-system,system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif;\n font-size: 16px;\n font-weight: 500;\n line-height: 2.5;\n outline: transparent;\n padding: 0 1rem;\n text-align: center;\n text-decoration: none;\n transition: box-shadow .2s ease-in-out;\n user-select: none;\n -webkit-user-select: none;\n touch-action: manipulation;\n white-space: nowrap;\n}\n\n.button-62:not([disabled]):focus {\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\n}\n\n.button-62:not([disabled]):hover {\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\n}\n\n.aselector select {\n background-color: initial;\n border: none;\n border-radius: 0;\n box-shadow: none;\n color: var(--jp-ui-font-color0);\n display: block;\n font-size: var(--jp-ui-font-size1);\n height: 24px;\n line-height: 14px;\n padding: 0 25px 0 10px;\n text-align: left;\n -moz-appearance: none;\n -webkit-appearance: none;\n}\n\n/* Use our own theme for hover and option styles */\n.aselector select:hover,\n.aselector select > option {\n background-color: var(--jp-layout-color2);\n color: var(--jp-ui-font-color0);\n}\nselect {\n box-sizing: border-box;\n}\n\n.font{\nbackground-color: initial;\nborder: none;\nheight: 21px;\nborder-radius: 0;\nfont-weight:500;\ncolor: var(--jp-ui-font-color0);\ndisplay: block;\nline-height: 22.5px;\npadding: 0 25px 0 10px;\nfont-size: var(--jp-ui-font-size1);\n}\n.wrapper {\n display: flex;\n}\n.f1ozlkqi {\n pointer-events: none;\n}\n\n.palybutton{\n background-image: '/home2/longxin/Neural_Coder_EXT/style/icons8-circled-play.gif';\n}\n.loading{\n \n background-image: url(" + 
___CSS_LOADER_URL_REPLACEMENT_0___ + ");\n background-size: contain; \n}\n\n.dialog{\nbody {\n margin: 0;\n height: 100vh;\n width:600px;\n display: flex;\n align-items: center;\n justify-content: center;\n overflow: hidden;\n font-family: \"Poppins\", sans-serif;\n background: #e3d0b6;\n}\n\n#cookie-policy {\n display: flex;\n flex-direction: column;\n justify-content: center;\n align-items: center;\n width: 460px;\n height: 600px;\n background: #f3efe6;\n border-radius: 12px;\n transform: scale(.8);\n}\n\n#cookie-wrapper {\n height: 240px;\n width: 240px;\n margin: 30px 0;\n position: relative;\n left: -40px;\n}\n\nh1 {\n color: #6c3a1f;\n text-align: center;\n font-size: 36px;\n margin: 0;\n}\n\np {\n color: #a28561;\n font-size: 14px;\n margin-top: 0;\n padding: 0 60px;\n text-align: center;\n}\na {\n margin-top: 18px;\n font-size: 14px;\n color: #a28561;\n text-decoration: none;\n pointer-events: none;\n}\na:hover {\n color: #846b4d;\n}\n\nspan {\n font-family: \"Amatic SC\", cursive;\n font-weight: 400;\n font-size: 20px;\n position: relative;\n top: -18px;\n left: 3px;\n color: #a28561; \n}\n\n#heart-no, #thought-heart-yes, #mouth, #face-no, #thought-1, #thought-2, #thought-heart-na, #q-mark, #eyes, #leg-l, #leg-r {\n opacity: 0;\n}\n}\n.dia_button {\n color: white;\n background: #dd794a;\n margin-top: 12px;\n cursor: pointer;\n font-size: 24px;\n font-family: \"Poppins\", sans-serif;\n border-radius: 9px;\n border: none;\n width: 72%;\n padding: 12px 0;\n transition: 150ms ease-out;\n pointer-events: none;\n}\n\n.dia_button:hover {\n background: #d66029;\n}\n\n.pad{\n padding-left:6%\n}\n\n:root {\n /* Elevation\n *\n * We style box-shadows using Material Design's idea of elevation. These particular numbers are taken from here:\n *\n * https://github.com/material-components/material-components-web\n * https://material-components-web.appspot.com/elevation.html\n */\n\n --jp-shadow-base-lightness: 0;\n --jp-shadow-umbra-color: rgba(\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n 0.2\n );\n --jp-shadow-penumbra-color: rgba(\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n 0.14\n );\n --jp-shadow-ambient-color: rgba(\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n 0.12\n );\n --jp-elevation-z0: none;\n --jp-elevation-z1: 0px 2px 1px -1px var(--jp-shadow-umbra-color),\n 0px 1px 1px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 3px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z2: 0px 3px 1px -2px var(--jp-shadow-umbra-color),\n 0px 2px 2px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 5px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z4: 0px 2px 4px -1px var(--jp-shadow-umbra-color),\n 0px 4px 5px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 10px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z6: 0px 3px 5px -1px var(--jp-shadow-umbra-color),\n 0px 6px 10px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 18px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z8: 0px 5px 5px -3px var(--jp-shadow-umbra-color),\n 0px 8px 10px 1px var(--jp-shadow-penumbra-color),\n 0px 3px 14px 2px var(--jp-shadow-ambient-color);\n --jp-elevation-z12: 0px 7px 8px -4px var(--jp-shadow-umbra-color),\n 0px 12px 17px 2px var(--jp-shadow-penumbra-color),\n 0px 5px 22px 4px var(--jp-shadow-ambient-color);\n --jp-elevation-z16: 0px 8px 10px -5px var(--jp-shadow-umbra-color),\n 0px 16px 24px 2px 
var(--jp-shadow-penumbra-color),\n 0px 6px 30px 5px var(--jp-shadow-ambient-color);\n --jp-elevation-z20: 0px 10px 13px -6px var(--jp-shadow-umbra-color),\n 0px 20px 31px 3px var(--jp-shadow-penumbra-color),\n 0px 8px 38px 7px var(--jp-shadow-ambient-color);\n --jp-elevation-z24: 0px 11px 15px -7px var(--jp-shadow-umbra-color),\n 0px 24px 38px 3px var(--jp-shadow-penumbra-color),\n 0px 9px 46px 8px var(--jp-shadow-ambient-color);\n\n /* Borders\n *\n * The following variables, specify the visual styling of borders in JupyterLab.\n */\n\n --jp-border-width: 1px;\n --jp-border-color0: var(--md-grey-400);\n --jp-border-color1: var(--md-grey-400);\n --jp-border-color2: var(--md-grey-300);\n --jp-border-color3: var(--md-grey-200);\n --jp-inverse-border-color: var(--md-grey-600);\n --jp-border-radius: 2px;\n\n /* UI Fonts\n *\n * The UI font CSS variables are used for the typography all of the JupyterLab\n * user interface elements that are not directly user generated content.\n *\n * The font sizing here is done assuming that the body font size of --jp-ui-font-size1\n * is applied to a parent element. When children elements, such as headings, are sized\n * in em all things will be computed relative to that body size.\n */\n\n --jp-ui-font-scale-factor: 1.2;\n --jp-ui-font-size0: 0.83333em;\n --jp-ui-font-size1: 13px; /* Base font size */\n --jp-ui-font-size2: 1.2em;\n --jp-ui-font-size3: 1.44em;\n\n --jp-ui-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica,\n Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol';\n\n /*\n * Use these font colors against the corresponding main layout colors.\n * In a light theme, these go from dark to light.\n */\n\n /* Defaults use Material Design specification */\n --jp-ui-font-color0: rgba(0, 0, 0, 1);\n --jp-ui-font-color1: rgba(0, 0, 0, 0.87);\n --jp-ui-font-color2: rgba(0, 0, 0, 0.54);\n --jp-ui-font-color3: rgba(0, 0, 0, 0.38);\n\n /*\n * Use these against the brand/accent/warn/error colors.\n * These will typically go from light to darker, in both a dark and light theme.\n */\n\n --jp-ui-inverse-font-color0: rgba(255, 255, 255, 1);\n --jp-ui-inverse-font-color1: rgba(255, 255, 255, 1);\n --jp-ui-inverse-font-color2: rgba(255, 255, 255, 0.7);\n --jp-ui-inverse-font-color3: rgba(255, 255, 255, 0.5);\n\n /* Content Fonts\n *\n * Content font variables are used for typography of user generated content.\n *\n * The font sizing here is done assuming that the body font size of --jp-content-font-size1\n * is applied to a parent element. When children elements, such as headings, are sized\n * in em all things will be computed relative to that body size.\n */\n\n --jp-content-line-height: 1.6;\n --jp-content-font-scale-factor: 1.2;\n --jp-content-font-size0: 0.83333em;\n --jp-content-font-size1: 14px; /* Base font size */\n --jp-content-font-size2: 1.2em;\n --jp-content-font-size3: 1.44em;\n --jp-content-font-size4: 1.728em;\n --jp-content-font-size5: 2.0736em;\n\n /* This gives a magnification of about 125% in presentation mode over normal. 
*/\n --jp-content-presentation-font-size1: 17px;\n\n --jp-content-heading-line-height: 1;\n --jp-content-heading-margin-top: 1.2em;\n --jp-content-heading-margin-bottom: 0.8em;\n --jp-content-heading-font-weight: 500;\n\n /* Defaults use Material Design specification */\n --jp-content-font-color0: rgba(0, 0, 0, 1);\n --jp-content-font-color1: rgba(0, 0, 0, 0.87);\n --jp-content-font-color2: rgba(0, 0, 0, 0.54);\n --jp-content-font-color3: rgba(0, 0, 0, 0.38);\n\n --jp-content-link-color: var(--md-blue-700);\n\n --jp-content-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI',\n Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji',\n 'Segoe UI Symbol';\n\n /*\n * Code Fonts\n *\n * Code font variables are used for typography of code and other monospaces content.\n */\n\n --jp-code-font-size: 13px;\n --jp-code-line-height: 1.3077; /* 17px for 13px base */\n --jp-code-padding: 5px; /* 5px for 13px base, codemirror highlighting needs integer px value */\n --jp-code-font-family-default: Menlo, Consolas, 'DejaVu Sans Mono', monospace;\n --jp-code-font-family: var(--jp-code-font-family-default);\n\n /* This gives a magnification of about 125% in presentation mode over normal. */\n --jp-code-presentation-font-size: 16px;\n\n /* may need to tweak cursor width if you change font size */\n --jp-code-cursor-width0: 1.4px;\n --jp-code-cursor-width1: 2px;\n --jp-code-cursor-width2: 4px;\n\n /* Layout\n *\n * The following are the main layout colors use in JupyterLab. In a light\n * theme these would go from light to dark.\n */\n\n --jp-layout-color0: white;\n --jp-layout-color1: white;\n --jp-layout-color2: var(--md-grey-200);\n --jp-layout-color3: var(--md-grey-400);\n --jp-layout-color4: var(--md-grey-600);\n\n /* Inverse Layout\n *\n * The following are the inverse layout colors use in JupyterLab. 
In a light\n * theme these would go from dark to light.\n */\n\n --jp-inverse-layout-color0: #111111;\n --jp-inverse-layout-color1: var(--md-grey-900);\n --jp-inverse-layout-color2: var(--md-grey-800);\n --jp-inverse-layout-color3: var(--md-grey-700);\n --jp-inverse-layout-color4: var(--md-grey-600);\n\n /* Brand/accent */\n\n --jp-brand-color0: var(--md-blue-900);\n --jp-brand-color1: var(--md-blue-700);\n --jp-brand-color2: var(--md-blue-300);\n --jp-brand-color3: var(--md-blue-100);\n --jp-brand-color4: var(--md-blue-50);\n\n --jp-accent-color0: var(--md-green-900);\n --jp-accent-color1: var(--md-green-700);\n --jp-accent-color2: var(--md-green-300);\n --jp-accent-color3: var(--md-green-100);\n\n /* State colors (warn, error, success, info) */\n\n --jp-warn-color0: var(--md-orange-900);\n --jp-warn-color1: var(--md-orange-700);\n --jp-warn-color2: var(--md-orange-300);\n --jp-warn-color3: var(--md-orange-100);\n\n --jp-error-color0: var(--md-red-900);\n --jp-error-color1: var(--md-red-700);\n --jp-error-color2: var(--md-red-300);\n --jp-error-color3: var(--md-red-100);\n\n --jp-success-color0: var(--md-green-900);\n --jp-success-color1: var(--md-green-700);\n --jp-success-color2: var(--md-green-300);\n --jp-success-color3: var(--md-green-100);\n\n --jp-info-color0: var(--md-cyan-900);\n --jp-info-color1: var(--md-cyan-700);\n --jp-info-color2: var(--md-cyan-300);\n --jp-info-color3: var(--md-cyan-100);\n\n /* Cell specific styles */\n\n --jp-cell-padding: 5px;\n\n --jp-cell-collapser-width: 8px;\n --jp-cell-collapser-min-height: 20px;\n --jp-cell-collapser-not-active-hover-opacity: 0.6;\n\n --jp-cell-editor-background: var(--md-grey-100);\n --jp-cell-editor-border-color: var(--md-grey-300);\n --jp-cell-editor-box-shadow: inset 0 0 2px var(--md-blue-300);\n --jp-cell-editor-active-background: var(--jp-layout-color0);\n --jp-cell-editor-active-border-color: var(--jp-brand-color1);\n\n --jp-cell-prompt-width: 64px;\n --jp-cell-prompt-font-family: var(--jp-code-font-family-default);\n --jp-cell-prompt-letter-spacing: 0px;\n --jp-cell-prompt-opacity: 1;\n --jp-cell-prompt-not-active-opacity: 0.5;\n --jp-cell-prompt-not-active-font-color: var(--md-grey-700);\n /* A custom blend of MD grey and blue 600\n * See https://meyerweb.com/eric/tools/color-blend/#546E7A:1E88E5:5:hex */\n --jp-cell-inprompt-font-color: #307fc1;\n /* A custom blend of MD grey and orange 600\n * https://meyerweb.com/eric/tools/color-blend/#546E7A:F4511E:5:hex */\n --jp-cell-outprompt-font-color: #bf5b3d;\n\n /* Notebook specific styles */\n\n --jp-notebook-padding: 10px;\n --jp-notebook-select-background: var(--jp-layout-color1);\n --jp-notebook-multiselected-color: var(--md-blue-50);\n\n /* The scroll padding is calculated to fill enough space at the bottom of the\n notebook to show one single-line cell (with appropriate padding) at the top\n when the notebook is scrolled all the way to the bottom. We also subtract one\n pixel so that no scrollbar appears if we have just one single-line cell in the\n notebook. 
This padding is to enable a 'scroll past end' feature in a notebook.\n */\n --jp-notebook-scroll-padding: calc(\n 100% - var(--jp-code-font-size) * var(--jp-code-line-height) -\n var(--jp-code-padding) - var(--jp-cell-padding) - 1px\n );\n\n /* Rendermime styles */\n\n --jp-rendermime-error-background: #fdd;\n --jp-rendermime-table-row-background: var(--md-grey-100);\n --jp-rendermime-table-row-hover-background: var(--md-light-blue-50);\n\n /* Dialog specific styles */\n\n --jp-dialog-background: rgba(0, 0, 0, 0.25);\n\n /* Console specific styles */\n\n --jp-console-padding: 10px;\n\n /* Toolbar specific styles */\n\n --jp-toolbar-border-color: var(--jp-border-color1);\n --jp-toolbar-micro-height: 8px;\n --jp-toolbar-background: var(--jp-layout-color1);\n --jp-toolbar-box-shadow: 0px 0px 2px 0px rgba(0, 0, 0, 0.24);\n --jp-toolbar-header-margin: 4px 4px 0px 4px;\n --jp-toolbar-active-background: var(--md-grey-300);\n\n /* Statusbar specific styles */\n\n --jp-statusbar-height: 24px;\n\n /* Input field styles */\n\n --jp-input-box-shadow: inset 0 0 2px var(--md-blue-300);\n --jp-input-active-background: var(--jp-layout-color1);\n --jp-input-hover-background: var(--jp-layout-color1);\n --jp-input-background: var(--md-grey-100);\n --jp-input-border-color: var(--jp-inverse-border-color);\n --jp-input-active-border-color: var(--jp-brand-color1);\n --jp-input-active-box-shadow-color: rgba(19, 124, 189, 0.3);\n\n /* General editor styles */\n\n --jp-editor-selected-background: #d9d9d9;\n --jp-editor-selected-focused-background: #d7d4f0;\n --jp-editor-cursor-color: var(--jp-ui-font-color0);\n\n /* Code mirror specific styles */\n\n --jp-mirror-editor-keyword-color: #008000;\n --jp-mirror-editor-atom-color: #88f;\n --jp-mirror-editor-number-color: #080;\n --jp-mirror-editor-def-color: #00f;\n --jp-mirror-editor-variable-color: var(--md-grey-900);\n --jp-mirror-editor-variable-2-color: #05a;\n --jp-mirror-editor-variable-3-color: #085;\n --jp-mirror-editor-punctuation-color: #05a;\n --jp-mirror-editor-property-color: #05a;\n --jp-mirror-editor-operator-color: #aa22ff;\n --jp-mirror-editor-comment-color: #408080;\n --jp-mirror-editor-string-color: #ba2121;\n --jp-mirror-editor-string-2-color: #708;\n --jp-mirror-editor-meta-color: #aa22ff;\n --jp-mirror-editor-qualifier-color: #555;\n --jp-mirror-editor-builtin-color: #008000;\n --jp-mirror-editor-bracket-color: #997;\n --jp-mirror-editor-tag-color: #170;\n --jp-mirror-editor-attribute-color: #00c;\n --jp-mirror-editor-header-color: blue;\n --jp-mirror-editor-quote-color: #090;\n --jp-mirror-editor-link-color: #00c;\n --jp-mirror-editor-error-color: #f00;\n --jp-mirror-editor-hr-color: #999;\n\n /* Vega extension styles */\n\n --jp-vega-background: white;\n\n /* Sidebar-related styles */\n\n --jp-sidebar-min-width: 250px;\n\n /* Search-related styles */\n\n --jp-search-toggle-off-opacity: 0.5;\n --jp-search-toggle-hover-opacity: 0.8;\n --jp-search-toggle-on-opacity: 1;\n --jp-search-selected-match-background-color: rgb(245, 200, 0);\n --jp-search-selected-match-color: black;\n --jp-search-unselected-match-background-color: var(\n --jp-inverse-layout-color0\n );\n --jp-search-unselected-match-color: var(--jp-ui-inverse-font-color0);\n\n /* Icon colors that work well with light or dark backgrounds */\n --jp-icon-contrast-color0: var(--md-purple-600);\n --jp-icon-contrast-color1: var(--md-green-600);\n --jp-icon-contrast-color2: var(--md-pink-600);\n --jp-icon-contrast-color3: 
var(--md-blue-600);\n}\n\n/*-----------------------------------------------------------------------------\n| Copyright (c) Jupyter Development Team.\n| Distributed under the terms of the Modified BSD License.\n|----------------------------------------------------------------------------*/\n\n/* Set the default typography for monospace elements */\ntt,\ncode,\nkbd,\nsamp,\npre {\n font-family: var(--jp-code-font-family);\n font-size: var(--jp-code-font-size);\n line-height: var(--jp-code-line-height);\n}\n\n", "",{"version":3,"sources":["webpack://./style/base.css"],"names":[],"mappings":"AAAA;;;;CAIC;AACD;EACE,aAAa;EACb,kBAAkB;EAClB,OAAO;EACP,MAAM;EACN,QAAQ;EACR,SAAS;EACT,YAAY;EACZ,WAAW;EACX,YAAY;AACd;AACA;EACE,kBAAkB;EAClB,OAAO;EACP,MAAM;EACN,QAAQ;EACR,SAAS;EACT,YAAY;EACZ,mCAAmC;EACnC,UAAU;EACV,kBAAkB;EAClB,8DAA8D;AAChE;AACA;EACE,sBAAsB;AACxB;AACA;EACE;IACE,OAAO;IACP,MAAM;IACN,QAAQ;IACR,SAAS;IACT,YAAY;IACZ,QAAQ;IACR,SAAS;IACT,UAAU;EACZ;EACA;IACE,OAAO;IACP,MAAM;IACN,QAAQ;IACR,SAAS;IACT,YAAY;IACZ,QAAQ;IACR,SAAS;IACT,UAAU;EACZ;EACA;IACE,OAAO;IACP,MAAM;IACN,QAAQ;IACR,SAAS;IACT,YAAY;IACZ,QAAQ;IACR,SAAS;IACT,UAAU;EACZ;EACA;IACE,QAAQ;IACR,SAAS;IACT,WAAW;IACX,YAAY;IACZ,UAAU;EACZ;AACF;;;AAGA,QAAQ;AACR;EACE,8DAA8D;EAC9D,SAAS;EACT,mBAAmB;EACnB,cAAc;EACd,eAAe;EACf,qBAAqB;EACrB,iFAAiF;EACjF,eAAe;EACf,gBAAgB;EAChB,gBAAgB;EAChB,oBAAoB;EACpB,eAAe;EACf,kBAAkB;EAClB,qBAAqB;EACrB,sCAAsC;EACtC,iBAAiB;EACjB,yBAAyB;EACzB,0BAA0B;EAC1B,mBAAmB;AACrB;;AAEA;EACE,uIAAuI;AACzI;;AAEA;EACE,uIAAuI;AACzI;;AAEA;EACE,yBAAyB;EACzB,YAAY;EACZ,gBAAgB;EAChB,gBAAgB;EAChB,+BAA+B;EAC/B,cAAc;EACd,kCAAkC;EAClC,YAAY;EACZ,iBAAiB;EACjB,sBAAsB;EACtB,gBAAgB;EAChB,qBAAqB;EACrB,wBAAwB;AAC1B;;AAEA,kDAAkD;AAClD;;EAEE,yCAAyC;EACzC,+BAA+B;AACjC;AACA;EACE,sBAAsB;AACxB;;AAEA;AACA,yBAAyB;AACzB,YAAY;AACZ,YAAY;AACZ,gBAAgB;AAChB,eAAe;AACf,+BAA+B;AAC/B,cAAc;AACd,mBAAmB;AACnB,sBAAsB;AACtB,kCAAkC;AAClC;AACA;EACE,aAAa;AACf;AACA;EACE,oBAAoB;AACtB;;AAEA;EACE,iFAAiF;AACnF;AACA;;EAEE,yDAAm9B;EACn9B,wBAAwB;AAC1B;;AAEA;AACA;EACE,SAAS;EACT,aAAa;EACb,WAAW;EACX,aAAa;EACb,mBAAmB;EACnB,uBAAuB;EACvB,gBAAgB;EAChB,kCAAkC;EAClC,mBAAmB;AACrB;;AAEA;EACE,aAAa;EACb,sBAAsB;EACtB,uBAAuB;EACvB,mBAAmB;EACnB,YAAY;EACZ,aAAa;EACb,mBAAmB;EACnB,mBAAmB;EACnB,oBAAoB;AACtB;;AAEA;EACE,aAAa;EACb,YAAY;EACZ,cAAc;EACd,kBAAkB;EAClB,WAAW;AACb;;AAEA;EACE,cAAc;EACd,kBAAkB;EAClB,eAAe;EACf,SAAS;AACX;;AAEA;EACE,cAAc;EACd,eAAe;EACf,aAAa;EACb,eAAe;EACf,kBAAkB;AACpB;AACA;EACE,gBAAgB;EAChB,eAAe;EACf,cAAc;EACd,qBAAqB;EACrB,oBAAoB;AACtB;AACA;EACE,cAAc;AAChB;;AAEA;EACE,iCAAiC;EACjC,gBAAgB;EAChB,eAAe;EACf,kBAAkB;EAClB,UAAU;EACV,SAAS;EACT,cAAc;AAChB;;AAEA;EACE,UAAU;AACZ;AACA;AACA;EACE,YAAY;EACZ,mBAAmB;EACnB,gBAAgB;EAChB,eAAe;EACf,eAAe;EACf,kCAAkC;EAClC,kBAAkB;EAClB,YAAY;EACZ,UAAU;EACV,eAAe;EACf,0BAA0B;EAC1B,oBAAoB;AACtB;;AAEA;EACE,mBAAmB;AACrB;;AAEA;EACE;AACF;;AAEA;EACE;;;;;;IAME;;EAEF,6BAA6B;EAC7B;;;;;GAKC;EACD;;;;;GAKC;EACD;;;;;GAKC;EACD,uBAAuB;EACvB;;kDAEgD;EAChD;;kDAEgD;EAChD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;;EAEjD;;;IAGE;;EAEF,sBAAsB;EACtB,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;EACtC,6CAA6C;EAC7C,uBAAuB;;EAEvB;;;;;;;;IAQE;;EAEF,8BAA8B;EAC9B,6BAA6B;EAC7B,wBAAwB,EAAE,mBAAmB;EAC7C,yBAAyB;EACzB,0BAA0B;;EAE1B;+EAC6E;;EAE7E;;;IAGE;;EAEF,+CAA+C;EAC/C,qCAAqC;EACrC,wCAAwC;EACxC,wCAAwC;EACxC,wCAAwC;;EAExC;;;IAGE;;EAEF,mDAAmD;EACnD,mDAAmD;EACnD,qDAAqD;EACrD,qDAAqD;;EAErD;;;;;;;IAOE;;EAEF,6BAA6B;EAC7B,mCAAmC;EACnC,kCAAkC;EAClC,6BAA6B,EAAE,mBAAmB;EAClD,8BAA8B;EAC9B,+BAA+B;EAC/B,gCAAgC;EAChC,iCAAiC;;EAEjC,+EAA+E;EAC/E,0CAA0C;;EAE1C,mCAAmC;EACnC,sCAA
sC;EACtC,yCAAyC;EACzC,qCAAqC;;EAErC,+CAA+C;EAC/C,0CAA0C;EAC1C,6CAA6C;EAC7C,6CAA6C;EAC7C,6CAA6C;;EAE7C,2CAA2C;;EAE3C;;qBAEmB;;EAEnB;;;;IAIE;;EAEF,yBAAyB;EACzB,6BAA6B,EAAE,uBAAuB;EACtD,sBAAsB,EAAE,sEAAsE;EAC9F,6EAA6E;EAC7E,yDAAyD;;EAEzD,+EAA+E;EAC/E,sCAAsC;;EAEtC,2DAA2D;EAC3D,8BAA8B;EAC9B,4BAA4B;EAC5B,4BAA4B;;EAE5B;;;;IAIE;;EAEF,yBAAyB;EACzB,yBAAyB;EACzB,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;;EAEtC;;;;IAIE;;EAEF,mCAAmC;EACnC,8CAA8C;EAC9C,8CAA8C;EAC9C,8CAA8C;EAC9C,8CAA8C;;EAE9C,iBAAiB;;EAEjB,qCAAqC;EACrC,qCAAqC;EACrC,qCAAqC;EACrC,qCAAqC;EACrC,oCAAoC;;EAEpC,uCAAuC;EACvC,uCAAuC;EACvC,uCAAuC;EACvC,uCAAuC;;EAEvC,8CAA8C;;EAE9C,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;;EAEtC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;;EAEpC,wCAAwC;EACxC,wCAAwC;EACxC,wCAAwC;EACxC,wCAAwC;;EAExC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;;EAEpC,yBAAyB;;EAEzB,sBAAsB;;EAEtB,8BAA8B;EAC9B,oCAAoC;EACpC,iDAAiD;;EAEjD,+CAA+C;EAC/C,iDAAiD;EACjD,6DAA6D;EAC7D,2DAA2D;EAC3D,4DAA4D;;EAE5D,4BAA4B;EAC5B,gEAAgE;EAChE,oCAAoC;EACpC,2BAA2B;EAC3B,wCAAwC;EACxC,0DAA0D;EAC1D;2EACyE;EACzE,sCAAsC;EACtC;uEACqE;EACrE,uCAAuC;;EAEvC,6BAA6B;;EAE7B,2BAA2B;EAC3B,wDAAwD;EACxD,oDAAoD;;EAEpD;;;;;GAKC;EACD;;;GAGC;;EAED,sBAAsB;;EAEtB,sCAAsC;EACtC,wDAAwD;EACxD,mEAAmE;;EAEnE,2BAA2B;;EAE3B,2CAA2C;;EAE3C,4BAA4B;;EAE5B,0BAA0B;;EAE1B,4BAA4B;;EAE5B,kDAAkD;EAClD,8BAA8B;EAC9B,gDAAgD;EAChD,4DAA4D;EAC5D,2CAA2C;EAC3C,kDAAkD;;EAElD,8BAA8B;;EAE9B,2BAA2B;;EAE3B,uBAAuB;;EAEvB,uDAAuD;EACvD,qDAAqD;EACrD,oDAAoD;EACpD,yCAAyC;EACzC,uDAAuD;EACvD,sDAAsD;EACtD,2DAA2D;;EAE3D,0BAA0B;;EAE1B,wCAAwC;EACxC,gDAAgD;EAChD,kDAAkD;;EAElD,gCAAgC;;EAEhC,yCAAyC;EACzC,mCAAmC;EACnC,qCAAqC;EACrC,kCAAkC;EAClC,qDAAqD;EACrD,yCAAyC;EACzC,yCAAyC;EACzC,0CAA0C;EAC1C,uCAAuC;EACvC,0CAA0C;EAC1C,yCAAyC;EACzC,wCAAwC;EACxC,uCAAuC;EACvC,sCAAsC;EACtC,wCAAwC;EACxC,yCAAyC;EACzC,sCAAsC;EACtC,kCAAkC;EAClC,wCAAwC;EACxC,qCAAqC;EACrC,oCAAoC;EACpC,mCAAmC;EACnC,oCAAoC;EACpC,iCAAiC;;EAEjC,0BAA0B;;EAE1B,2BAA2B;;EAE3B,2BAA2B;;EAE3B,6BAA6B;;EAE7B,0BAA0B;;EAE1B,mCAAmC;EACnC,qCAAqC;EACrC,gCAAgC;EAChC,6DAA6D;EAC7D,uCAAuC;EACvC;;GAEC;EACD,oEAAoE;;EAEpE,8DAA8D;EAC9D,+CAA+C;EAC/C,8CAA8C;EAC9C,6CAA6C;EAC7C,6CAA6C;AAC/C;;AAEA;;;8EAG8E;;AAE9E,sDAAsD;AACtD;;;;;EAKE,uCAAuC;EACvC,mCAAmC;EACnC,uCAAuC;AACzC","sourcesContent":["/*\n See the JupyterLab Developer Guide for useful CSS Patterns:\n\n https://jupyterlab.readthedocs.io/en/stable/developer/css.html\n*/\n.lds-ripple {\n display: flex;\n position: absolute; \n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 80px;\n height: 80px;\n}\n.lds-ripple div {\n position: absolute; \n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n border: 4px solid rgb(245, 131, 55);\n opacity: 1;\n border-radius: 50%;\n animation: lds-ripple 1s cubic-bezier(0, 0.2, 0.8, 1) infinite;\n}\n.lds-ripple div:nth-child(2) {\n animation-delay: -0.5s;\n}\n@keyframes lds-ripple {\n 0% {\n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 0;\n height: 0;\n opacity: 0;\n }\n 4.9% {\n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 0;\n height: 0;\n opacity: 0;\n }\n 5% {\n left: 0; \n top: 0; \n right: 0; \n bottom: 0;\n margin: auto; \n width: 0;\n height: 0;\n opacity: 1;\n }\n 100% {\n top: 0px;\n left: 0px;\n width: 72px;\n height: 72px;\n opacity: 0;\n }\n}\n\n\n/* CSS */\n.button-62 {\n background: linear-gradient(to bottom right, #EF4765, #FF9A5A);\n border: 0;\n border-radius: 12px;\n color: #FFFFFF;\n cursor: pointer;\n display: inline-block;\n font-family: -apple-system,system-ui,\"Segoe 
UI\",Roboto,Helvetica,Arial,sans-serif;\n font-size: 16px;\n font-weight: 500;\n line-height: 2.5;\n outline: transparent;\n padding: 0 1rem;\n text-align: center;\n text-decoration: none;\n transition: box-shadow .2s ease-in-out;\n user-select: none;\n -webkit-user-select: none;\n touch-action: manipulation;\n white-space: nowrap;\n}\n\n.button-62:not([disabled]):focus {\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\n}\n\n.button-62:not([disabled]):hover {\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\n}\n\n.aselector select {\n background-color: initial;\n border: none;\n border-radius: 0;\n box-shadow: none;\n color: var(--jp-ui-font-color0);\n display: block;\n font-size: var(--jp-ui-font-size1);\n height: 24px;\n line-height: 14px;\n padding: 0 25px 0 10px;\n text-align: left;\n -moz-appearance: none;\n -webkit-appearance: none;\n}\n\n/* Use our own theme for hover and option styles */\n.aselector select:hover,\n.aselector select > option {\n background-color: var(--jp-layout-color2);\n color: var(--jp-ui-font-color0);\n}\nselect {\n box-sizing: border-box;\n}\n\n.font{\nbackground-color: initial;\nborder: none;\nheight: 21px;\nborder-radius: 0;\nfont-weight:500;\ncolor: var(--jp-ui-font-color0);\ndisplay: block;\nline-height: 22.5px;\npadding: 0 25px 0 10px;\nfont-size: var(--jp-ui-font-size1);\n}\n.wrapper {\n display: flex;\n}\n.f1ozlkqi {\n pointer-events: none;\n}\n\n.palybutton{\n background-image: '/home2/longxin/Neural_Coder_EXT/style/icons8-circled-play.gif';\n}\n.loading{\n \n background-image: url(\"data:image/svg+xml,\");\n background-size: contain; \n}\n\n.dialog{\nbody {\n margin: 0;\n height: 100vh;\n width:600px;\n display: flex;\n align-items: center;\n justify-content: center;\n overflow: hidden;\n font-family: \"Poppins\", sans-serif;\n background: #e3d0b6;\n}\n\n#cookie-policy {\n display: flex;\n flex-direction: column;\n justify-content: center;\n align-items: center;\n width: 460px;\n height: 600px;\n background: #f3efe6;\n border-radius: 12px;\n transform: scale(.8);\n}\n\n#cookie-wrapper {\n height: 240px;\n width: 240px;\n margin: 30px 0;\n position: relative;\n left: -40px;\n}\n\nh1 {\n color: #6c3a1f;\n text-align: center;\n font-size: 36px;\n margin: 0;\n}\n\np {\n color: #a28561;\n font-size: 14px;\n margin-top: 0;\n padding: 0 60px;\n text-align: center;\n}\na {\n margin-top: 18px;\n font-size: 14px;\n color: #a28561;\n text-decoration: none;\n pointer-events: none;\n}\na:hover {\n color: #846b4d;\n}\n\nspan {\n font-family: \"Amatic SC\", cursive;\n font-weight: 400;\n font-size: 20px;\n position: relative;\n top: -18px;\n left: 3px;\n color: #a28561; \n}\n\n#heart-no, #thought-heart-yes, #mouth, #face-no, #thought-1, #thought-2, #thought-heart-na, #q-mark, #eyes, #leg-l, #leg-r {\n opacity: 0;\n}\n}\n.dia_button {\n color: white;\n background: #dd794a;\n margin-top: 12px;\n cursor: pointer;\n font-size: 24px;\n font-family: \"Poppins\", sans-serif;\n border-radius: 9px;\n border: none;\n width: 72%;\n padding: 12px 0;\n transition: 150ms ease-out;\n pointer-events: none;\n}\n\n.dia_button:hover {\n background: #d66029;\n}\n\n.pad{\n padding-left:6%\n}\n\n:root {\n /* Elevation\n *\n * We style box-shadows using Material Design's idea of elevation. 
These particular numbers are taken from here:\n *\n * https://github.com/material-components/material-components-web\n * https://material-components-web.appspot.com/elevation.html\n */\n\n --jp-shadow-base-lightness: 0;\n --jp-shadow-umbra-color: rgba(\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n 0.2\n );\n --jp-shadow-penumbra-color: rgba(\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n 0.14\n );\n --jp-shadow-ambient-color: rgba(\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n var(--jp-shadow-base-lightness),\n 0.12\n );\n --jp-elevation-z0: none;\n --jp-elevation-z1: 0px 2px 1px -1px var(--jp-shadow-umbra-color),\n 0px 1px 1px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 3px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z2: 0px 3px 1px -2px var(--jp-shadow-umbra-color),\n 0px 2px 2px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 5px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z4: 0px 2px 4px -1px var(--jp-shadow-umbra-color),\n 0px 4px 5px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 10px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z6: 0px 3px 5px -1px var(--jp-shadow-umbra-color),\n 0px 6px 10px 0px var(--jp-shadow-penumbra-color),\n 0px 1px 18px 0px var(--jp-shadow-ambient-color);\n --jp-elevation-z8: 0px 5px 5px -3px var(--jp-shadow-umbra-color),\n 0px 8px 10px 1px var(--jp-shadow-penumbra-color),\n 0px 3px 14px 2px var(--jp-shadow-ambient-color);\n --jp-elevation-z12: 0px 7px 8px -4px var(--jp-shadow-umbra-color),\n 0px 12px 17px 2px var(--jp-shadow-penumbra-color),\n 0px 5px 22px 4px var(--jp-shadow-ambient-color);\n --jp-elevation-z16: 0px 8px 10px -5px var(--jp-shadow-umbra-color),\n 0px 16px 24px 2px var(--jp-shadow-penumbra-color),\n 0px 6px 30px 5px var(--jp-shadow-ambient-color);\n --jp-elevation-z20: 0px 10px 13px -6px var(--jp-shadow-umbra-color),\n 0px 20px 31px 3px var(--jp-shadow-penumbra-color),\n 0px 8px 38px 7px var(--jp-shadow-ambient-color);\n --jp-elevation-z24: 0px 11px 15px -7px var(--jp-shadow-umbra-color),\n 0px 24px 38px 3px var(--jp-shadow-penumbra-color),\n 0px 9px 46px 8px var(--jp-shadow-ambient-color);\n\n /* Borders\n *\n * The following variables, specify the visual styling of borders in JupyterLab.\n */\n\n --jp-border-width: 1px;\n --jp-border-color0: var(--md-grey-400);\n --jp-border-color1: var(--md-grey-400);\n --jp-border-color2: var(--md-grey-300);\n --jp-border-color3: var(--md-grey-200);\n --jp-inverse-border-color: var(--md-grey-600);\n --jp-border-radius: 2px;\n\n /* UI Fonts\n *\n * The UI font CSS variables are used for the typography all of the JupyterLab\n * user interface elements that are not directly user generated content.\n *\n * The font sizing here is done assuming that the body font size of --jp-ui-font-size1\n * is applied to a parent element. 
When children elements, such as headings, are sized\n * in em all things will be computed relative to that body size.\n */\n\n --jp-ui-font-scale-factor: 1.2;\n --jp-ui-font-size0: 0.83333em;\n --jp-ui-font-size1: 13px; /* Base font size */\n --jp-ui-font-size2: 1.2em;\n --jp-ui-font-size3: 1.44em;\n\n --jp-ui-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica,\n Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol';\n\n /*\n * Use these font colors against the corresponding main layout colors.\n * In a light theme, these go from dark to light.\n */\n\n /* Defaults use Material Design specification */\n --jp-ui-font-color0: rgba(0, 0, 0, 1);\n --jp-ui-font-color1: rgba(0, 0, 0, 0.87);\n --jp-ui-font-color2: rgba(0, 0, 0, 0.54);\n --jp-ui-font-color3: rgba(0, 0, 0, 0.38);\n\n /*\n * Use these against the brand/accent/warn/error colors.\n * These will typically go from light to darker, in both a dark and light theme.\n */\n\n --jp-ui-inverse-font-color0: rgba(255, 255, 255, 1);\n --jp-ui-inverse-font-color1: rgba(255, 255, 255, 1);\n --jp-ui-inverse-font-color2: rgba(255, 255, 255, 0.7);\n --jp-ui-inverse-font-color3: rgba(255, 255, 255, 0.5);\n\n /* Content Fonts\n *\n * Content font variables are used for typography of user generated content.\n *\n * The font sizing here is done assuming that the body font size of --jp-content-font-size1\n * is applied to a parent element. When children elements, such as headings, are sized\n * in em all things will be computed relative to that body size.\n */\n\n --jp-content-line-height: 1.6;\n --jp-content-font-scale-factor: 1.2;\n --jp-content-font-size0: 0.83333em;\n --jp-content-font-size1: 14px; /* Base font size */\n --jp-content-font-size2: 1.2em;\n --jp-content-font-size3: 1.44em;\n --jp-content-font-size4: 1.728em;\n --jp-content-font-size5: 2.0736em;\n\n /* This gives a magnification of about 125% in presentation mode over normal. */\n --jp-content-presentation-font-size1: 17px;\n\n --jp-content-heading-line-height: 1;\n --jp-content-heading-margin-top: 1.2em;\n --jp-content-heading-margin-bottom: 0.8em;\n --jp-content-heading-font-weight: 500;\n\n /* Defaults use Material Design specification */\n --jp-content-font-color0: rgba(0, 0, 0, 1);\n --jp-content-font-color1: rgba(0, 0, 0, 0.87);\n --jp-content-font-color2: rgba(0, 0, 0, 0.54);\n --jp-content-font-color3: rgba(0, 0, 0, 0.38);\n\n --jp-content-link-color: var(--md-blue-700);\n\n --jp-content-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI',\n Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji',\n 'Segoe UI Symbol';\n\n /*\n * Code Fonts\n *\n * Code font variables are used for typography of code and other monospaces content.\n */\n\n --jp-code-font-size: 13px;\n --jp-code-line-height: 1.3077; /* 17px for 13px base */\n --jp-code-padding: 5px; /* 5px for 13px base, codemirror highlighting needs integer px value */\n --jp-code-font-family-default: Menlo, Consolas, 'DejaVu Sans Mono', monospace;\n --jp-code-font-family: var(--jp-code-font-family-default);\n\n /* This gives a magnification of about 125% in presentation mode over normal. */\n --jp-code-presentation-font-size: 16px;\n\n /* may need to tweak cursor width if you change font size */\n --jp-code-cursor-width0: 1.4px;\n --jp-code-cursor-width1: 2px;\n --jp-code-cursor-width2: 4px;\n\n /* Layout\n *\n * The following are the main layout colors use in JupyterLab. 
In a light\n * theme these would go from light to dark.\n */\n\n --jp-layout-color0: white;\n --jp-layout-color1: white;\n --jp-layout-color2: var(--md-grey-200);\n --jp-layout-color3: var(--md-grey-400);\n --jp-layout-color4: var(--md-grey-600);\n\n /* Inverse Layout\n *\n * The following are the inverse layout colors use in JupyterLab. In a light\n * theme these would go from dark to light.\n */\n\n --jp-inverse-layout-color0: #111111;\n --jp-inverse-layout-color1: var(--md-grey-900);\n --jp-inverse-layout-color2: var(--md-grey-800);\n --jp-inverse-layout-color3: var(--md-grey-700);\n --jp-inverse-layout-color4: var(--md-grey-600);\n\n /* Brand/accent */\n\n --jp-brand-color0: var(--md-blue-900);\n --jp-brand-color1: var(--md-blue-700);\n --jp-brand-color2: var(--md-blue-300);\n --jp-brand-color3: var(--md-blue-100);\n --jp-brand-color4: var(--md-blue-50);\n\n --jp-accent-color0: var(--md-green-900);\n --jp-accent-color1: var(--md-green-700);\n --jp-accent-color2: var(--md-green-300);\n --jp-accent-color3: var(--md-green-100);\n\n /* State colors (warn, error, success, info) */\n\n --jp-warn-color0: var(--md-orange-900);\n --jp-warn-color1: var(--md-orange-700);\n --jp-warn-color2: var(--md-orange-300);\n --jp-warn-color3: var(--md-orange-100);\n\n --jp-error-color0: var(--md-red-900);\n --jp-error-color1: var(--md-red-700);\n --jp-error-color2: var(--md-red-300);\n --jp-error-color3: var(--md-red-100);\n\n --jp-success-color0: var(--md-green-900);\n --jp-success-color1: var(--md-green-700);\n --jp-success-color2: var(--md-green-300);\n --jp-success-color3: var(--md-green-100);\n\n --jp-info-color0: var(--md-cyan-900);\n --jp-info-color1: var(--md-cyan-700);\n --jp-info-color2: var(--md-cyan-300);\n --jp-info-color3: var(--md-cyan-100);\n\n /* Cell specific styles */\n\n --jp-cell-padding: 5px;\n\n --jp-cell-collapser-width: 8px;\n --jp-cell-collapser-min-height: 20px;\n --jp-cell-collapser-not-active-hover-opacity: 0.6;\n\n --jp-cell-editor-background: var(--md-grey-100);\n --jp-cell-editor-border-color: var(--md-grey-300);\n --jp-cell-editor-box-shadow: inset 0 0 2px var(--md-blue-300);\n --jp-cell-editor-active-background: var(--jp-layout-color0);\n --jp-cell-editor-active-border-color: var(--jp-brand-color1);\n\n --jp-cell-prompt-width: 64px;\n --jp-cell-prompt-font-family: var(--jp-code-font-family-default);\n --jp-cell-prompt-letter-spacing: 0px;\n --jp-cell-prompt-opacity: 1;\n --jp-cell-prompt-not-active-opacity: 0.5;\n --jp-cell-prompt-not-active-font-color: var(--md-grey-700);\n /* A custom blend of MD grey and blue 600\n * See https://meyerweb.com/eric/tools/color-blend/#546E7A:1E88E5:5:hex */\n --jp-cell-inprompt-font-color: #307fc1;\n /* A custom blend of MD grey and orange 600\n * https://meyerweb.com/eric/tools/color-blend/#546E7A:F4511E:5:hex */\n --jp-cell-outprompt-font-color: #bf5b3d;\n\n /* Notebook specific styles */\n\n --jp-notebook-padding: 10px;\n --jp-notebook-select-background: var(--jp-layout-color1);\n --jp-notebook-multiselected-color: var(--md-blue-50);\n\n /* The scroll padding is calculated to fill enough space at the bottom of the\n notebook to show one single-line cell (with appropriate padding) at the top\n when the notebook is scrolled all the way to the bottom. We also subtract one\n pixel so that no scrollbar appears if we have just one single-line cell in the\n notebook. 
This padding is to enable a 'scroll past end' feature in a notebook.\n */\n --jp-notebook-scroll-padding: calc(\n 100% - var(--jp-code-font-size) * var(--jp-code-line-height) -\n var(--jp-code-padding) - var(--jp-cell-padding) - 1px\n );\n\n /* Rendermime styles */\n\n --jp-rendermime-error-background: #fdd;\n --jp-rendermime-table-row-background: var(--md-grey-100);\n --jp-rendermime-table-row-hover-background: var(--md-light-blue-50);\n\n /* Dialog specific styles */\n\n --jp-dialog-background: rgba(0, 0, 0, 0.25);\n\n /* Console specific styles */\n\n --jp-console-padding: 10px;\n\n /* Toolbar specific styles */\n\n --jp-toolbar-border-color: var(--jp-border-color1);\n --jp-toolbar-micro-height: 8px;\n --jp-toolbar-background: var(--jp-layout-color1);\n --jp-toolbar-box-shadow: 0px 0px 2px 0px rgba(0, 0, 0, 0.24);\n --jp-toolbar-header-margin: 4px 4px 0px 4px;\n --jp-toolbar-active-background: var(--md-grey-300);\n\n /* Statusbar specific styles */\n\n --jp-statusbar-height: 24px;\n\n /* Input field styles */\n\n --jp-input-box-shadow: inset 0 0 2px var(--md-blue-300);\n --jp-input-active-background: var(--jp-layout-color1);\n --jp-input-hover-background: var(--jp-layout-color1);\n --jp-input-background: var(--md-grey-100);\n --jp-input-border-color: var(--jp-inverse-border-color);\n --jp-input-active-border-color: var(--jp-brand-color1);\n --jp-input-active-box-shadow-color: rgba(19, 124, 189, 0.3);\n\n /* General editor styles */\n\n --jp-editor-selected-background: #d9d9d9;\n --jp-editor-selected-focused-background: #d7d4f0;\n --jp-editor-cursor-color: var(--jp-ui-font-color0);\n\n /* Code mirror specific styles */\n\n --jp-mirror-editor-keyword-color: #008000;\n --jp-mirror-editor-atom-color: #88f;\n --jp-mirror-editor-number-color: #080;\n --jp-mirror-editor-def-color: #00f;\n --jp-mirror-editor-variable-color: var(--md-grey-900);\n --jp-mirror-editor-variable-2-color: #05a;\n --jp-mirror-editor-variable-3-color: #085;\n --jp-mirror-editor-punctuation-color: #05a;\n --jp-mirror-editor-property-color: #05a;\n --jp-mirror-editor-operator-color: #aa22ff;\n --jp-mirror-editor-comment-color: #408080;\n --jp-mirror-editor-string-color: #ba2121;\n --jp-mirror-editor-string-2-color: #708;\n --jp-mirror-editor-meta-color: #aa22ff;\n --jp-mirror-editor-qualifier-color: #555;\n --jp-mirror-editor-builtin-color: #008000;\n --jp-mirror-editor-bracket-color: #997;\n --jp-mirror-editor-tag-color: #170;\n --jp-mirror-editor-attribute-color: #00c;\n --jp-mirror-editor-header-color: blue;\n --jp-mirror-editor-quote-color: #090;\n --jp-mirror-editor-link-color: #00c;\n --jp-mirror-editor-error-color: #f00;\n --jp-mirror-editor-hr-color: #999;\n\n /* Vega extension styles */\n\n --jp-vega-background: white;\n\n /* Sidebar-related styles */\n\n --jp-sidebar-min-width: 250px;\n\n /* Search-related styles */\n\n --jp-search-toggle-off-opacity: 0.5;\n --jp-search-toggle-hover-opacity: 0.8;\n --jp-search-toggle-on-opacity: 1;\n --jp-search-selected-match-background-color: rgb(245, 200, 0);\n --jp-search-selected-match-color: black;\n --jp-search-unselected-match-background-color: var(\n --jp-inverse-layout-color0\n );\n --jp-search-unselected-match-color: var(--jp-ui-inverse-font-color0);\n\n /* Icon colors that work well with light or dark backgrounds */\n --jp-icon-contrast-color0: var(--md-purple-600);\n --jp-icon-contrast-color1: var(--md-green-600);\n --jp-icon-contrast-color2: var(--md-pink-600);\n --jp-icon-contrast-color3: 
var(--md-blue-600);\n}\n\n/*-----------------------------------------------------------------------------\n| Copyright (c) Jupyter Development Team.\n| Distributed under the terms of the Modified BSD License.\n|----------------------------------------------------------------------------*/\n\n/* Set the default typography for monospace elements */\ntt,\ncode,\nkbd,\nsamp,\npre {\n font-family: var(--jp-code-font-family);\n font-size: var(--jp-code-font-size);\n line-height: var(--jp-code-line-height);\n}\n\n"],"sourceRoot":""}]); -// Exports -/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (___CSS_LOADER_EXPORT___); - - -/***/ }), - -/***/ "./style/base.css": -/*!************************!*\ - !*** ./style/base.css ***! - \************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony export */ __webpack_require__.d(__webpack_exports__, { -/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) -/* harmony export */ }); -/* harmony import */ var _node_modules_style_loader_dist_runtime_injectStylesIntoStyleTag_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! !../node_modules/style-loader/dist/runtime/injectStylesIntoStyleTag.js */ "./node_modules/style-loader/dist/runtime/injectStylesIntoStyleTag.js"); -/* harmony import */ var _node_modules_style_loader_dist_runtime_injectStylesIntoStyleTag_js__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_node_modules_style_loader_dist_runtime_injectStylesIntoStyleTag_js__WEBPACK_IMPORTED_MODULE_0__); -/* harmony import */ var _node_modules_css_loader_dist_cjs_js_base_css__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! !!../node_modules/css-loader/dist/cjs.js!./base.css */ "./node_modules/css-loader/dist/cjs.js!./style/base.css"); - - - -var options = {}; - -options.insert = "head"; -options.singleton = false; - -var update = _node_modules_style_loader_dist_runtime_injectStylesIntoStyleTag_js__WEBPACK_IMPORTED_MODULE_0___default()(_node_modules_css_loader_dist_cjs_js_base_css__WEBPACK_IMPORTED_MODULE_1__["default"], options); - - - -/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (_node_modules_css_loader_dist_cjs_js_base_css__WEBPACK_IMPORTED_MODULE_1__["default"].locals || {}); - -/***/ }), - -/***/ "./style/index.js": -/*!************************!*\ - !*** ./style/index.js ***! - \************************/ -/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { - -__webpack_require__.r(__webpack_exports__); -/* harmony import */ var _base_css__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! 
./base.css */ "./style/base.css"); - - - -/***/ }), - -/***/ "data:image/svg+xml,": -/*!********************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************!*\ - !*** data:image/svg+xml, ***! - \********************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************/ -/***/ ((module) => { - -module.exports = "data:image/svg+xml,"; - -/***/ }) - -}]); -//# sourceMappingURL=style_index_js.8d733cc8b74fabbd10b8.js.map \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style_index_js.8d733cc8b74fabbd10b8.js.map b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style_index_js.8d733cc8b74fabbd10b8.js.map deleted file mode 100644 index c39a124ce9d..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/style_index_js.8d733cc8b74fabbd10b8.js.map +++ /dev/null @@ -1 +0,0 @@ 
-{"version":3,"file":"style_index_js.8d733cc8b74fabbd10b8.js","mappings":";;;;;;;;;;;;;;;;;;;AAAA;AAC0G;AACjB;AACO;AAChG,4CAA4C,yuEAAmmC;AAC/oC,8BAA8B,mFAA2B,CAAC,4FAAqC;AAC/F,yCAAyC,sFAA+B;AACxE;AACA,sMAAsM,kBAAkB,wBAAwB,aAAa,YAAY,cAAc,cAAc,kBAAkB,gBAAgB,iBAAiB,GAAG,mBAAmB,wBAAwB,aAAa,YAAY,cAAc,cAAc,kBAAkB,wCAAwC,eAAe,uBAAuB,mEAAmE,GAAG,gCAAgC,2BAA2B,GAAG,yBAAyB,QAAQ,eAAe,cAAc,gBAAgB,gBAAgB,oBAAoB,eAAe,gBAAgB,iBAAiB,KAAK,UAAU,eAAe,cAAc,gBAAgB,gBAAgB,oBAAoB,eAAe,gBAAgB,iBAAiB,KAAK,QAAQ,eAAe,cAAc,gBAAgB,gBAAgB,oBAAoB,eAAe,gBAAgB,iBAAiB,KAAK,UAAU,eAAe,gBAAgB,kBAAkB,mBAAmB,iBAAiB,KAAK,GAAG,6BAA6B,mEAAmE,cAAc,wBAAwB,mBAAmB,oBAAoB,0BAA0B,wFAAwF,oBAAoB,qBAAqB,qBAAqB,yBAAyB,oBAAoB,uBAAuB,0BAA0B,2CAA2C,sBAAsB,8BAA8B,+BAA+B,wBAAwB,GAAG,sCAAsC,4IAA4I,GAAG,sCAAsC,4IAA4I,GAAG,uBAAuB,8BAA8B,iBAAiB,qBAAqB,qBAAqB,oCAAoC,mBAAmB,uCAAuC,iBAAiB,sBAAsB,2BAA2B,qBAAqB,0BAA0B,6BAA6B,GAAG,+GAA+G,8CAA8C,oCAAoC,GAAG,UAAU,2BAA2B,GAAG,UAAU,4BAA4B,eAAe,eAAe,mBAAmB,kBAAkB,kCAAkC,iBAAiB,sBAAsB,yBAAyB,qCAAqC,GAAG,YAAY,kBAAkB,GAAG,aAAa,yBAAyB,GAAG,gBAAgB,sFAAsF,GAAG,WAAW,0EAA0E,+BAA+B,GAAG,YAAY,QAAQ,cAAc,kBAAkB,gBAAgB,kBAAkB,wBAAwB,4BAA4B,qBAAqB,yCAAyC,wBAAwB,GAAG,oBAAoB,kBAAkB,2BAA2B,4BAA4B,wBAAwB,iBAAiB,kBAAkB,wBAAwB,wBAAwB,yBAAyB,GAAG,qBAAqB,kBAAkB,iBAAiB,mBAAmB,uBAAuB,gBAAgB,GAAG,QAAQ,mBAAmB,uBAAuB,oBAAoB,cAAc,GAAG,OAAO,mBAAmB,oBAAoB,kBAAkB,oBAAoB,uBAAuB,GAAG,KAAK,qBAAqB,oBAAoB,mBAAmB,0BAA0B,yBAAyB,GAAG,WAAW,mBAAmB,GAAG,UAAU,wCAAwC,qBAAqB,oBAAoB,uBAAuB,eAAe,cAAc,oBAAoB,GAAG,gIAAgI,eAAe,GAAG,GAAG,eAAe,iBAAiB,wBAAwB,qBAAqB,oBAAoB,oBAAoB,yCAAyC,uBAAuB,iBAAiB,eAAe,oBAAoB,+BAA+B,yBAAyB,GAAG,uBAAuB,wBAAwB,GAAG,SAAS,sBAAsB,WAAW,iUAAiU,mKAAmK,uKAAuK,sKAAsK,4BAA4B,gLAAgL,gLAAgL,iLAAiL,kLAAkL,kLAAkL,oLAAoL,qLAAqL,sLAAsL,sLAAsL,8IAA8I,2CAA2C,2CAA2C,2CAA2C,2CAA2C,kDAAkD,4BAA4B,+dAA+d,kCAAkC,8BAA8B,kDAAkD,+BAA+B,sKAAsK,8OAA8O,6CAA6C,6CAA6C,6CAA6C,yNAAyN,wDAAwD,0DAA0D,0DAA0D,0ZAA0Z,wCAAwC,uCAAuC,mCAAmC,uDAAuD,oCAAoC,qCAAqC,sCAAsC,qIAAqI,0CAA0C,2CAA2C,8CAA8C,0CAA0C,qGAAqG,kDAAkD,kDAAkD,kDAAkD,kDAAkD,gLAAgL,8JAA8J,mCAAmC,oDAAoD,yJAAyJ,8DAA8D,iIAAiI,qGAAqG,iCAAiC,iCAAiC,wLAAwL,8BAA8B,2CAA2C,2CAA2C,2CAA2C,6MAA6M,mDAAmD,mDAAmD,mDAAmD,mDAAmD,oEAAoE,0CAA0C,0CAA0C,0CAA0C,yCAAyC,8CAA8C,4CAA4C,4CAA4C,4CAA4C,kGAAkG,2CAA2C,2CAA2C,2CAA2C,2CAA2C,yCAAyC,yCAAyC,yCAAyC,+CAA+C,6CAA6C,6CAA6C,6CAA6C,2CAA2C,yCAAyC,yCAAyC,yCAAyC,6DAA6D,qCAAqC,yCAAyC,sDAAsD,sDAAsD,sDAAsD,kEAAkE,gEAAgE,iEAAiE,mCAAmC,qEAAqE,yCAAyC,gCAAgC,6CAA6C,+DAA+D,sKAAsK,qKAAqK,sEAAsE,6DAA6D,yDAAyD,2kBAA2kB,0EAA0E,6DAA6D,wEAAwE,oFAAoF,oEAAoE,4FAA4F,mCAAmC,qDAAqD,iEAAiE,gDAAgD,uDAAuD,uEAAuE,4FAA4F,0DAA0D,yDAAyD,8CAA8C,4DAA4D,2DAA2D,gEAAgE,gFAAgF,qDAAqD,uDAAuD,uFAAuF,wCAAwC,0CAA0C,uCAAuC,0DAA0D,8CAA8C,8CAA8C,+CAA+C,4CAA4C,+CAA+C,8CAA8C,6CAA6C,4CAA4C,2CAA2C,6CAA6C,8CAA8C,2CAA2C,uCAAuC,6CAA6C,0CAA0C,yCAAyC,wCAAwC,yCAAyC,sCAAsC,mEAAmE,sEAAsE,2EAA2E,0CAA0C,qCAAqC,kEAAkE,4CAA4C,6FAA6F,yEAAyE,yHAAyH,mDAAmD,kDAAkD,kDAAkD,GAAG,sWAAsW,4CAA4C,wCAAwC,4CAA4C,GAAG,WAAW,oFAAoF,KAAK,KAAK,UAAU,YAAY,WAAW,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,KAAK,KAAK,YAAY,WAAW,UAAU,UAAU,UAAU,UAAU,YAAY,WAAW,YAAY,aAAa,MAAM,KAAK,YAAY,MAAM,KAAK,KAAK,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,KAAK,KAAK,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,KAAK,KAAK,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,UAAU,KAAK,KAAK,UAAU,UAAU,UAAU,UAAU,UAAU,KAAK,OAAO,UAAU,KAAK,YAAY,WAAW,YAAY,WAAW,UAAU,YAAY,aAAa,WAAW,YAAY,aAAa,aAAa,WAAW,YAAY,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,OAAO,KAAK,YAAY,OAAO,KAAK,YAAY,OAAO,KAAK,YAAY,WAAW,YAAY,aAAa,aAAa,WAAW,YAAY,WAAW,YAAY,aAAa,aAAa,aAAa,aAAa,OAAO,YAAY,OAAO,YAAY,aAAa,MAAM,KAAK,YAAY,OAAO,KAAK,YAAY,WAAW,UAAU,YAAY,WAAW,YAAY,WAAW,YAAY,aAAa,aA
Aa,MAAM,KAAK,UAAU,KAAK,KAAK,YAAY,OAAO,KAAK,YAAY,MAAM,MAAM,aAAa,cAAc,OAAO,KAAK,KAAK,UAAU,UAAU,UAAU,UAAU,YAAY,aAAa,aAAa,aAAa,aAAa,OAAO,KAAK,UAAU,YAAY,aAAa,aAAa,WAAW,UAAU,YAAY,aAAa,aAAa,OAAO,KAAK,UAAU,UAAU,UAAU,YAAY,WAAW,MAAM,KAAK,UAAU,YAAY,WAAW,UAAU,MAAM,KAAK,UAAU,UAAU,UAAU,UAAU,YAAY,MAAM,KAAK,YAAY,WAAW,UAAU,YAAY,aAAa,MAAM,KAAK,UAAU,OAAO,KAAK,YAAY,aAAa,WAAW,YAAY,WAAW,UAAU,UAAU,OAAO,KAAK,UAAU,KAAK,KAAK,KAAK,UAAU,YAAY,aAAa,WAAW,UAAU,YAAY,aAAa,WAAW,UAAU,UAAU,YAAY,aAAa,OAAO,KAAK,YAAY,OAAO,KAAK,KAAK,MAAM,KAAK,UAAU,MAAM,YAAY,UAAU,KAAK,SAAS,KAAK,SAAS,KAAK,YAAY,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,OAAO,QAAQ,QAAQ,MAAM,YAAY,aAAa,aAAa,aAAa,aAAa,aAAa,cAAc,aAAa,MAAM,YAAY,aAAa,yBAAyB,aAAa,cAAc,MAAM,QAAQ,QAAQ,MAAM,YAAY,aAAa,aAAa,aAAa,cAAc,QAAQ,MAAM,YAAY,aAAa,aAAa,cAAc,YAAY,MAAM,YAAY,aAAa,aAAa,yBAAyB,aAAa,aAAa,aAAa,cAAc,aAAa,cAAc,aAAa,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,aAAa,cAAc,cAAc,OAAO,QAAQ,SAAS,MAAM,YAAY,yBAAyB,yBAAyB,aAAa,cAAc,aAAa,cAAc,aAAa,aAAa,aAAa,cAAc,SAAS,MAAM,YAAY,aAAa,aAAa,aAAa,cAAc,SAAS,MAAM,YAAY,aAAa,aAAa,aAAa,cAAc,cAAc,aAAa,aAAa,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,cAAc,cAAc,aAAa,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,cAAc,cAAc,cAAc,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,aAAa,cAAc,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,MAAM,OAAO,aAAa,MAAM,OAAO,cAAc,cAAc,aAAa,aAAa,cAAc,UAAU,KAAK,OAAO,MAAM,aAAa,aAAa,aAAa,cAAc,cAAc,cAAc,cAAc,cAAc,cAAc,aAAa,aAAa,aAAa,aAAa,aAAa,cAAc,cAAc,cAAc,cAAc,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,cAAc,cAAc,aAAa,aAAa,cAAc,cAAc,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,cAAc,cAAc,cAAc,cAAc,cAAc,cAAc,aAAa,aAAa,aAAa,aAAa,aAAa,OAAO,KAAK,aAAa,aAAa,aAAa,aAAa,aAAa,aAAa,OAAO,OAAO,QAAQ,aAAa,UAAU,YAAY,aAAa,aAAa,sLAAsL,kBAAkB,wBAAwB,aAAa,YAAY,cAAc,cAAc,kBAAkB,gBAAgB,iBAAiB,GAAG,mBAAmB,wBAAwB,aAAa,YAAY,cAAc,cAAc,kBAAkB,wCAAwC,eAAe,uBAAuB,mEAAmE,GAAG,gCAAgC,2BAA2B,GAAG,yBAAyB,QAAQ,eAAe,cAAc,gBAAgB,gBAAgB,oBAAoB,eAAe,gBAAgB,iBAAiB,KAAK,UAAU,eAAe,cAAc,gBAAgB,gBAAgB,oBAAoB,eAAe,gBAAgB,iBAAiB,KAAK,QAAQ,eAAe,cAAc,gBAAgB,gBAAgB,oBAAoB,eAAe,gBAAgB,iBAAiB,KAAK,UAAU,eAAe,gBAAgB,kBAAkB,mBAAmB,iBAAiB,KAAK,GAAG,6BAA6B,mEAAmE,cAAc,wBAAwB,mBAAmB,oBAAoB,0BAA0B,wFAAwF,oBAAoB,qBAAqB,qBAAqB,yBAAyB,oBAAoB,uBAAuB,0BAA0B,2CAA2C,sBAAsB,8BAA8B,+BAA+B,wBAAwB,GAAG,sCAAsC,4IAA4I,GAAG,sCAAsC,4IAA4I,GAAG,uBAAuB,8BAA8B,iBAAiB,qBAAqB,qBAAqB,oCAAoC,mBAAmB,uCAAuC,iBAAiB,sBAAsB,2BAA2B,qBAAqB,0BAA0B,6BAA6B,GAAG,+GAA+G,8CAA8C,oCAAoC,GAAG,UAAU,2BAA2B,GAAG,UAAU,4BAA4B,eAAe,eAAe,mBAAmB,kBAAkB,kCAAkC,iBAAiB,sBAAsB,yBAAyB,qCAAqC,GAAG,YAAY,kBAAkB,GAAG,aAAa,yBAAyB,GAAG,gBAAgB,sFAAsF,GAAG,WAAW,2JAA2J,gCAAgC,gBAAgB,sBAAsB,qVAAqV,mBAAmB,gVAAgV,mBAAmB,kDAAkD,+BAA+B,GAAG,YAAY,QAAQ,cAAc,kBAAkB,gBAAgB,kBAAkB,wBAAwB,4BAA4B,qBAAqB,yCAAyC,wBAAwB,GAAG,oBAAoB,kBAAkB,2BAA2B,4BAA4B,wBAAwB,iBAAiB,kBAAkB,wBAAwB,wBAAwB,yBAAyB,GAAG,qBAAqB,kBAAkB,iBAAiB,mBAAmB,uBAAuB,gBAAgB,GAAG,QAAQ,mBAAmB,uBAAuB,oBAAoB,cAAc,GAAG,OAAO,mBAAmB,oBAAoB,kBAAkB,oBAAoB,uBAAuB,GAAG,KAAK,qBAAqB,oBAAoB,mBAAmB,0BAA0B,yBAAyB,GAAG,WAAW,mBAAmB,GAAG,UAAU,wCAAwC,qBAAqB,oBAAoB,uBAAuB,eAAe,cAAc,oBAAoB,GAAG,gIAAgI,eAAe,GAAG,GAAG,eAAe,iBAAiB,wBAAwB,qBAAqB,oBAAoB,oBAAoB,yCAAyC,uBAAuB,iBAAiB,eAAe,oBAAoB,+BAA+B,yBAAyB,GAAG,uBAAuB,wBAAwB,GAAG,SAAS,sBAAsB,WAAW,iUAAiU,mKAAmK,uKAAuK,sKAAsK,4BAA4B,gLAAgL,gLAAgL,iLAAiL,kLAAkL,kLAAkL,oLAAoL,qLAAqL,sLAAsL,sLAAsL,8IAA8I,2CAA2C,2CAA2C,2CAA2C,2CAA2C,kDAAkD,4BAA4B,+dAA+d,kCAAkC,8BAA8B,kDAAkD,+BAA+B,sKAAsK,8OAA8O,6CAA6C,6CAA6C,6CAA6C,yNAAyN,wDAAwD,0DAA0D,0DAA0D,0ZAA0Z,wCAAwC,uCAAuC,mCAAmC,uDAAuD,oCAAoC,qCAAqC,sCAAsC,qIAAqI,0CAA0C,2CAA2C,8CAA8C,
0CAA0C,qGAAqG,kDAAkD,kDAAkD,kDAAkD,kDAAkD,gLAAgL,8JAA8J,mCAAmC,oDAAoD,yJAAyJ,8DAA8D,iIAAiI,qGAAqG,iCAAiC,iCAAiC,wLAAwL,8BAA8B,2CAA2C,2CAA2C,2CAA2C,6MAA6M,mDAAmD,mDAAmD,mDAAmD,mDAAmD,oEAAoE,0CAA0C,0CAA0C,0CAA0C,yCAAyC,8CAA8C,4CAA4C,4CAA4C,4CAA4C,kGAAkG,2CAA2C,2CAA2C,2CAA2C,2CAA2C,yCAAyC,yCAAyC,yCAAyC,+CAA+C,6CAA6C,6CAA6C,6CAA6C,2CAA2C,yCAAyC,yCAAyC,yCAAyC,6DAA6D,qCAAqC,yCAAyC,sDAAsD,sDAAsD,sDAAsD,kEAAkE,gEAAgE,iEAAiE,mCAAmC,qEAAqE,yCAAyC,gCAAgC,6CAA6C,+DAA+D,sKAAsK,qKAAqK,sEAAsE,6DAA6D,yDAAyD,2kBAA2kB,0EAA0E,6DAA6D,wEAAwE,oFAAoF,oEAAoE,4FAA4F,mCAAmC,qDAAqD,iEAAiE,gDAAgD,uDAAuD,uEAAuE,4FAA4F,0DAA0D,yDAAyD,8CAA8C,4DAA4D,2DAA2D,gEAAgE,gFAAgF,qDAAqD,uDAAuD,uFAAuF,wCAAwC,0CAA0C,uCAAuC,0DAA0D,8CAA8C,8CAA8C,+CAA+C,4CAA4C,+CAA+C,8CAA8C,6CAA6C,4CAA4C,2CAA2C,6CAA6C,8CAA8C,2CAA2C,uCAAuC,6CAA6C,0CAA0C,yCAAyC,wCAAwC,yCAAyC,sCAAsC,mEAAmE,sEAAsE,2EAA2E,0CAA0C,qCAAqC,kEAAkE,4CAA4C,6FAA6F,yEAAyE,yHAAyH,mDAAmD,kDAAkD,kDAAkD,GAAG,sWAAsW,4CAA4C,wCAAwC,4CAA4C,GAAG,uBAAuB;AACp/2C;AACA,iEAAe,uBAAuB,EAAC;;;;;;;;;;;;;;;;;;ACVkD;AACzF,YAAsF;;AAEtF;;AAEA;AACA;;AAEA,aAAa,0GAAG,CAAC,qFAAO;;;;AAIxB,iEAAe,4FAAc,MAAM;;;;;;;;;;;;ACZf","sources":["webpack://neural_compressor_ext_lab/./style/base.css","webpack://neural_compressor_ext_lab/./style/base.css?1944","webpack://neural_compressor_ext_lab/./style/index.js"],"sourcesContent":["// Imports\nimport ___CSS_LOADER_API_SOURCEMAP_IMPORT___ from \"../node_modules/css-loader/dist/runtime/sourceMaps.js\";\nimport ___CSS_LOADER_API_IMPORT___ from \"../node_modules/css-loader/dist/runtime/api.js\";\nimport ___CSS_LOADER_GET_URL_IMPORT___ from \"../node_modules/css-loader/dist/runtime/getUrl.js\";\nvar ___CSS_LOADER_URL_IMPORT_0___ = new URL(\"data:image/svg+xml,\", import.meta.url);\nvar ___CSS_LOADER_EXPORT___ = ___CSS_LOADER_API_IMPORT___(___CSS_LOADER_API_SOURCEMAP_IMPORT___);\nvar ___CSS_LOADER_URL_REPLACEMENT_0___ = ___CSS_LOADER_GET_URL_IMPORT___(___CSS_LOADER_URL_IMPORT_0___);\n// Module\n___CSS_LOADER_EXPORT___.push([module.id, \"/*\\n See the JupyterLab Developer Guide for useful CSS Patterns:\\n\\n https://jupyterlab.readthedocs.io/en/stable/developer/css.html\\n*/\\n.lds-ripple {\\n display: flex;\\n position: absolute; \\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 80px;\\n height: 80px;\\n}\\n.lds-ripple div {\\n position: absolute; \\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n border: 4px solid rgb(245, 131, 55);\\n opacity: 1;\\n border-radius: 50%;\\n animation: lds-ripple 1s cubic-bezier(0, 0.2, 0.8, 1) infinite;\\n}\\n.lds-ripple div:nth-child(2) {\\n animation-delay: -0.5s;\\n}\\n@keyframes lds-ripple {\\n 0% {\\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 0;\\n height: 0;\\n opacity: 0;\\n }\\n 4.9% {\\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 0;\\n height: 0;\\n opacity: 0;\\n }\\n 5% {\\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 0;\\n height: 0;\\n opacity: 1;\\n }\\n 100% {\\n top: 0px;\\n left: 0px;\\n width: 72px;\\n height: 72px;\\n opacity: 0;\\n }\\n}\\n\\n\\n/* CSS */\\n.button-62 {\\n background: linear-gradient(to bottom right, #EF4765, #FF9A5A);\\n border: 0;\\n border-radius: 12px;\\n color: #FFFFFF;\\n cursor: pointer;\\n display: inline-block;\\n font-family: -apple-system,system-ui,\\\"Segoe UI\\\",Roboto,Helvetica,Arial,sans-serif;\\n font-size: 16px;\\n font-weight: 500;\\n line-height: 2.5;\\n outline: transparent;\\n padding: 0 1rem;\\n text-align: center;\\n 
text-decoration: none;\\n transition: box-shadow .2s ease-in-out;\\n user-select: none;\\n -webkit-user-select: none;\\n touch-action: manipulation;\\n white-space: nowrap;\\n}\\n\\n.button-62:not([disabled]):focus {\\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\\n}\\n\\n.button-62:not([disabled]):hover {\\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\\n}\\n\\n.aselector select {\\n background-color: initial;\\n border: none;\\n border-radius: 0;\\n box-shadow: none;\\n color: var(--jp-ui-font-color0);\\n display: block;\\n font-size: var(--jp-ui-font-size1);\\n height: 24px;\\n line-height: 14px;\\n padding: 0 25px 0 10px;\\n text-align: left;\\n -moz-appearance: none;\\n -webkit-appearance: none;\\n}\\n\\n/* Use our own theme for hover and option styles */\\n.aselector select:hover,\\n.aselector select > option {\\n background-color: var(--jp-layout-color2);\\n color: var(--jp-ui-font-color0);\\n}\\nselect {\\n box-sizing: border-box;\\n}\\n\\n.font{\\nbackground-color: initial;\\nborder: none;\\nheight: 21px;\\nborder-radius: 0;\\nfont-weight:500;\\ncolor: var(--jp-ui-font-color0);\\ndisplay: block;\\nline-height: 22.5px;\\npadding: 0 25px 0 10px;\\nfont-size: var(--jp-ui-font-size1);\\n}\\n.wrapper {\\n display: flex;\\n}\\n.f1ozlkqi {\\n pointer-events: none;\\n}\\n\\n.palybutton{\\n background-image: '/home2/longxin/Neural_Coder_EXT/style/icons8-circled-play.gif';\\n}\\n.loading{\\n \\n background-image: url(\" + ___CSS_LOADER_URL_REPLACEMENT_0___ + \");\\n background-size: contain; \\n}\\n\\n.dialog{\\nbody {\\n margin: 0;\\n height: 100vh;\\n width:600px;\\n display: flex;\\n align-items: center;\\n justify-content: center;\\n overflow: hidden;\\n font-family: \\\"Poppins\\\", sans-serif;\\n background: #e3d0b6;\\n}\\n\\n#cookie-policy {\\n display: flex;\\n flex-direction: column;\\n justify-content: center;\\n align-items: center;\\n width: 460px;\\n height: 600px;\\n background: #f3efe6;\\n border-radius: 12px;\\n transform: scale(.8);\\n}\\n\\n#cookie-wrapper {\\n height: 240px;\\n width: 240px;\\n margin: 30px 0;\\n position: relative;\\n left: -40px;\\n}\\n\\nh1 {\\n color: #6c3a1f;\\n text-align: center;\\n font-size: 36px;\\n margin: 0;\\n}\\n\\np {\\n color: #a28561;\\n font-size: 14px;\\n margin-top: 0;\\n padding: 0 60px;\\n text-align: center;\\n}\\na {\\n margin-top: 18px;\\n font-size: 14px;\\n color: #a28561;\\n text-decoration: none;\\n pointer-events: none;\\n}\\na:hover {\\n color: #846b4d;\\n}\\n\\nspan {\\n font-family: \\\"Amatic SC\\\", cursive;\\n font-weight: 400;\\n font-size: 20px;\\n position: relative;\\n top: -18px;\\n left: 3px;\\n color: #a28561; \\n}\\n\\n#heart-no, #thought-heart-yes, #mouth, #face-no, #thought-1, #thought-2, #thought-heart-na, #q-mark, #eyes, #leg-l, #leg-r {\\n opacity: 0;\\n}\\n}\\n.dia_button {\\n color: white;\\n background: #dd794a;\\n margin-top: 12px;\\n cursor: pointer;\\n font-size: 24px;\\n font-family: \\\"Poppins\\\", sans-serif;\\n border-radius: 9px;\\n border: none;\\n width: 72%;\\n padding: 12px 0;\\n transition: 150ms ease-out;\\n pointer-events: none;\\n}\\n\\n.dia_button:hover {\\n background: #d66029;\\n}\\n\\n.pad{\\n padding-left:6%\\n}\\n\\n:root {\\n /* Elevation\\n *\\n * We style box-shadows using Material Design's idea of elevation. 
These particular numbers are taken from here:\\n *\\n * https://github.com/material-components/material-components-web\\n * https://material-components-web.appspot.com/elevation.html\\n */\\n\\n --jp-shadow-base-lightness: 0;\\n --jp-shadow-umbra-color: rgba(\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n 0.2\\n );\\n --jp-shadow-penumbra-color: rgba(\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n 0.14\\n );\\n --jp-shadow-ambient-color: rgba(\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n 0.12\\n );\\n --jp-elevation-z0: none;\\n --jp-elevation-z1: 0px 2px 1px -1px var(--jp-shadow-umbra-color),\\n 0px 1px 1px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 3px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z2: 0px 3px 1px -2px var(--jp-shadow-umbra-color),\\n 0px 2px 2px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 5px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z4: 0px 2px 4px -1px var(--jp-shadow-umbra-color),\\n 0px 4px 5px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 10px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z6: 0px 3px 5px -1px var(--jp-shadow-umbra-color),\\n 0px 6px 10px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 18px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z8: 0px 5px 5px -3px var(--jp-shadow-umbra-color),\\n 0px 8px 10px 1px var(--jp-shadow-penumbra-color),\\n 0px 3px 14px 2px var(--jp-shadow-ambient-color);\\n --jp-elevation-z12: 0px 7px 8px -4px var(--jp-shadow-umbra-color),\\n 0px 12px 17px 2px var(--jp-shadow-penumbra-color),\\n 0px 5px 22px 4px var(--jp-shadow-ambient-color);\\n --jp-elevation-z16: 0px 8px 10px -5px var(--jp-shadow-umbra-color),\\n 0px 16px 24px 2px var(--jp-shadow-penumbra-color),\\n 0px 6px 30px 5px var(--jp-shadow-ambient-color);\\n --jp-elevation-z20: 0px 10px 13px -6px var(--jp-shadow-umbra-color),\\n 0px 20px 31px 3px var(--jp-shadow-penumbra-color),\\n 0px 8px 38px 7px var(--jp-shadow-ambient-color);\\n --jp-elevation-z24: 0px 11px 15px -7px var(--jp-shadow-umbra-color),\\n 0px 24px 38px 3px var(--jp-shadow-penumbra-color),\\n 0px 9px 46px 8px var(--jp-shadow-ambient-color);\\n\\n /* Borders\\n *\\n * The following variables, specify the visual styling of borders in JupyterLab.\\n */\\n\\n --jp-border-width: 1px;\\n --jp-border-color0: var(--md-grey-400);\\n --jp-border-color1: var(--md-grey-400);\\n --jp-border-color2: var(--md-grey-300);\\n --jp-border-color3: var(--md-grey-200);\\n --jp-inverse-border-color: var(--md-grey-600);\\n --jp-border-radius: 2px;\\n\\n /* UI Fonts\\n *\\n * The UI font CSS variables are used for the typography all of the JupyterLab\\n * user interface elements that are not directly user generated content.\\n *\\n * The font sizing here is done assuming that the body font size of --jp-ui-font-size1\\n * is applied to a parent element. 
When children elements, such as headings, are sized\\n * in em all things will be computed relative to that body size.\\n */\\n\\n --jp-ui-font-scale-factor: 1.2;\\n --jp-ui-font-size0: 0.83333em;\\n --jp-ui-font-size1: 13px; /* Base font size */\\n --jp-ui-font-size2: 1.2em;\\n --jp-ui-font-size3: 1.44em;\\n\\n --jp-ui-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica,\\n Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol';\\n\\n /*\\n * Use these font colors against the corresponding main layout colors.\\n * In a light theme, these go from dark to light.\\n */\\n\\n /* Defaults use Material Design specification */\\n --jp-ui-font-color0: rgba(0, 0, 0, 1);\\n --jp-ui-font-color1: rgba(0, 0, 0, 0.87);\\n --jp-ui-font-color2: rgba(0, 0, 0, 0.54);\\n --jp-ui-font-color3: rgba(0, 0, 0, 0.38);\\n\\n /*\\n * Use these against the brand/accent/warn/error colors.\\n * These will typically go from light to darker, in both a dark and light theme.\\n */\\n\\n --jp-ui-inverse-font-color0: rgba(255, 255, 255, 1);\\n --jp-ui-inverse-font-color1: rgba(255, 255, 255, 1);\\n --jp-ui-inverse-font-color2: rgba(255, 255, 255, 0.7);\\n --jp-ui-inverse-font-color3: rgba(255, 255, 255, 0.5);\\n\\n /* Content Fonts\\n *\\n * Content font variables are used for typography of user generated content.\\n *\\n * The font sizing here is done assuming that the body font size of --jp-content-font-size1\\n * is applied to a parent element. When children elements, such as headings, are sized\\n * in em all things will be computed relative to that body size.\\n */\\n\\n --jp-content-line-height: 1.6;\\n --jp-content-font-scale-factor: 1.2;\\n --jp-content-font-size0: 0.83333em;\\n --jp-content-font-size1: 14px; /* Base font size */\\n --jp-content-font-size2: 1.2em;\\n --jp-content-font-size3: 1.44em;\\n --jp-content-font-size4: 1.728em;\\n --jp-content-font-size5: 2.0736em;\\n\\n /* This gives a magnification of about 125% in presentation mode over normal. */\\n --jp-content-presentation-font-size1: 17px;\\n\\n --jp-content-heading-line-height: 1;\\n --jp-content-heading-margin-top: 1.2em;\\n --jp-content-heading-margin-bottom: 0.8em;\\n --jp-content-heading-font-weight: 500;\\n\\n /* Defaults use Material Design specification */\\n --jp-content-font-color0: rgba(0, 0, 0, 1);\\n --jp-content-font-color1: rgba(0, 0, 0, 0.87);\\n --jp-content-font-color2: rgba(0, 0, 0, 0.54);\\n --jp-content-font-color3: rgba(0, 0, 0, 0.38);\\n\\n --jp-content-link-color: var(--md-blue-700);\\n\\n --jp-content-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI',\\n Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji',\\n 'Segoe UI Symbol';\\n\\n /*\\n * Code Fonts\\n *\\n * Code font variables are used for typography of code and other monospaces content.\\n */\\n\\n --jp-code-font-size: 13px;\\n --jp-code-line-height: 1.3077; /* 17px for 13px base */\\n --jp-code-padding: 5px; /* 5px for 13px base, codemirror highlighting needs integer px value */\\n --jp-code-font-family-default: Menlo, Consolas, 'DejaVu Sans Mono', monospace;\\n --jp-code-font-family: var(--jp-code-font-family-default);\\n\\n /* This gives a magnification of about 125% in presentation mode over normal. */\\n --jp-code-presentation-font-size: 16px;\\n\\n /* may need to tweak cursor width if you change font size */\\n --jp-code-cursor-width0: 1.4px;\\n --jp-code-cursor-width1: 2px;\\n --jp-code-cursor-width2: 4px;\\n\\n /* Layout\\n *\\n * The following are the main layout colors use in JupyterLab. 
In a light\\n * theme these would go from light to dark.\\n */\\n\\n --jp-layout-color0: white;\\n --jp-layout-color1: white;\\n --jp-layout-color2: var(--md-grey-200);\\n --jp-layout-color3: var(--md-grey-400);\\n --jp-layout-color4: var(--md-grey-600);\\n\\n /* Inverse Layout\\n *\\n * The following are the inverse layout colors use in JupyterLab. In a light\\n * theme these would go from dark to light.\\n */\\n\\n --jp-inverse-layout-color0: #111111;\\n --jp-inverse-layout-color1: var(--md-grey-900);\\n --jp-inverse-layout-color2: var(--md-grey-800);\\n --jp-inverse-layout-color3: var(--md-grey-700);\\n --jp-inverse-layout-color4: var(--md-grey-600);\\n\\n /* Brand/accent */\\n\\n --jp-brand-color0: var(--md-blue-900);\\n --jp-brand-color1: var(--md-blue-700);\\n --jp-brand-color2: var(--md-blue-300);\\n --jp-brand-color3: var(--md-blue-100);\\n --jp-brand-color4: var(--md-blue-50);\\n\\n --jp-accent-color0: var(--md-green-900);\\n --jp-accent-color1: var(--md-green-700);\\n --jp-accent-color2: var(--md-green-300);\\n --jp-accent-color3: var(--md-green-100);\\n\\n /* State colors (warn, error, success, info) */\\n\\n --jp-warn-color0: var(--md-orange-900);\\n --jp-warn-color1: var(--md-orange-700);\\n --jp-warn-color2: var(--md-orange-300);\\n --jp-warn-color3: var(--md-orange-100);\\n\\n --jp-error-color0: var(--md-red-900);\\n --jp-error-color1: var(--md-red-700);\\n --jp-error-color2: var(--md-red-300);\\n --jp-error-color3: var(--md-red-100);\\n\\n --jp-success-color0: var(--md-green-900);\\n --jp-success-color1: var(--md-green-700);\\n --jp-success-color2: var(--md-green-300);\\n --jp-success-color3: var(--md-green-100);\\n\\n --jp-info-color0: var(--md-cyan-900);\\n --jp-info-color1: var(--md-cyan-700);\\n --jp-info-color2: var(--md-cyan-300);\\n --jp-info-color3: var(--md-cyan-100);\\n\\n /* Cell specific styles */\\n\\n --jp-cell-padding: 5px;\\n\\n --jp-cell-collapser-width: 8px;\\n --jp-cell-collapser-min-height: 20px;\\n --jp-cell-collapser-not-active-hover-opacity: 0.6;\\n\\n --jp-cell-editor-background: var(--md-grey-100);\\n --jp-cell-editor-border-color: var(--md-grey-300);\\n --jp-cell-editor-box-shadow: inset 0 0 2px var(--md-blue-300);\\n --jp-cell-editor-active-background: var(--jp-layout-color0);\\n --jp-cell-editor-active-border-color: var(--jp-brand-color1);\\n\\n --jp-cell-prompt-width: 64px;\\n --jp-cell-prompt-font-family: var(--jp-code-font-family-default);\\n --jp-cell-prompt-letter-spacing: 0px;\\n --jp-cell-prompt-opacity: 1;\\n --jp-cell-prompt-not-active-opacity: 0.5;\\n --jp-cell-prompt-not-active-font-color: var(--md-grey-700);\\n /* A custom blend of MD grey and blue 600\\n * See https://meyerweb.com/eric/tools/color-blend/#546E7A:1E88E5:5:hex */\\n --jp-cell-inprompt-font-color: #307fc1;\\n /* A custom blend of MD grey and orange 600\\n * https://meyerweb.com/eric/tools/color-blend/#546E7A:F4511E:5:hex */\\n --jp-cell-outprompt-font-color: #bf5b3d;\\n\\n /* Notebook specific styles */\\n\\n --jp-notebook-padding: 10px;\\n --jp-notebook-select-background: var(--jp-layout-color1);\\n --jp-notebook-multiselected-color: var(--md-blue-50);\\n\\n /* The scroll padding is calculated to fill enough space at the bottom of the\\n notebook to show one single-line cell (with appropriate padding) at the top\\n when the notebook is scrolled all the way to the bottom. We also subtract one\\n pixel so that no scrollbar appears if we have just one single-line cell in the\\n notebook. 
This padding is to enable a 'scroll past end' feature in a notebook.\\n */\\n --jp-notebook-scroll-padding: calc(\\n 100% - var(--jp-code-font-size) * var(--jp-code-line-height) -\\n var(--jp-code-padding) - var(--jp-cell-padding) - 1px\\n );\\n\\n /* Rendermime styles */\\n\\n --jp-rendermime-error-background: #fdd;\\n --jp-rendermime-table-row-background: var(--md-grey-100);\\n --jp-rendermime-table-row-hover-background: var(--md-light-blue-50);\\n\\n /* Dialog specific styles */\\n\\n --jp-dialog-background: rgba(0, 0, 0, 0.25);\\n\\n /* Console specific styles */\\n\\n --jp-console-padding: 10px;\\n\\n /* Toolbar specific styles */\\n\\n --jp-toolbar-border-color: var(--jp-border-color1);\\n --jp-toolbar-micro-height: 8px;\\n --jp-toolbar-background: var(--jp-layout-color1);\\n --jp-toolbar-box-shadow: 0px 0px 2px 0px rgba(0, 0, 0, 0.24);\\n --jp-toolbar-header-margin: 4px 4px 0px 4px;\\n --jp-toolbar-active-background: var(--md-grey-300);\\n\\n /* Statusbar specific styles */\\n\\n --jp-statusbar-height: 24px;\\n\\n /* Input field styles */\\n\\n --jp-input-box-shadow: inset 0 0 2px var(--md-blue-300);\\n --jp-input-active-background: var(--jp-layout-color1);\\n --jp-input-hover-background: var(--jp-layout-color1);\\n --jp-input-background: var(--md-grey-100);\\n --jp-input-border-color: var(--jp-inverse-border-color);\\n --jp-input-active-border-color: var(--jp-brand-color1);\\n --jp-input-active-box-shadow-color: rgba(19, 124, 189, 0.3);\\n\\n /* General editor styles */\\n\\n --jp-editor-selected-background: #d9d9d9;\\n --jp-editor-selected-focused-background: #d7d4f0;\\n --jp-editor-cursor-color: var(--jp-ui-font-color0);\\n\\n /* Code mirror specific styles */\\n\\n --jp-mirror-editor-keyword-color: #008000;\\n --jp-mirror-editor-atom-color: #88f;\\n --jp-mirror-editor-number-color: #080;\\n --jp-mirror-editor-def-color: #00f;\\n --jp-mirror-editor-variable-color: var(--md-grey-900);\\n --jp-mirror-editor-variable-2-color: #05a;\\n --jp-mirror-editor-variable-3-color: #085;\\n --jp-mirror-editor-punctuation-color: #05a;\\n --jp-mirror-editor-property-color: #05a;\\n --jp-mirror-editor-operator-color: #aa22ff;\\n --jp-mirror-editor-comment-color: #408080;\\n --jp-mirror-editor-string-color: #ba2121;\\n --jp-mirror-editor-string-2-color: #708;\\n --jp-mirror-editor-meta-color: #aa22ff;\\n --jp-mirror-editor-qualifier-color: #555;\\n --jp-mirror-editor-builtin-color: #008000;\\n --jp-mirror-editor-bracket-color: #997;\\n --jp-mirror-editor-tag-color: #170;\\n --jp-mirror-editor-attribute-color: #00c;\\n --jp-mirror-editor-header-color: blue;\\n --jp-mirror-editor-quote-color: #090;\\n --jp-mirror-editor-link-color: #00c;\\n --jp-mirror-editor-error-color: #f00;\\n --jp-mirror-editor-hr-color: #999;\\n\\n /* Vega extension styles */\\n\\n --jp-vega-background: white;\\n\\n /* Sidebar-related styles */\\n\\n --jp-sidebar-min-width: 250px;\\n\\n /* Search-related styles */\\n\\n --jp-search-toggle-off-opacity: 0.5;\\n --jp-search-toggle-hover-opacity: 0.8;\\n --jp-search-toggle-on-opacity: 1;\\n --jp-search-selected-match-background-color: rgb(245, 200, 0);\\n --jp-search-selected-match-color: black;\\n --jp-search-unselected-match-background-color: var(\\n --jp-inverse-layout-color0\\n );\\n --jp-search-unselected-match-color: var(--jp-ui-inverse-font-color0);\\n\\n /* Icon colors that work well with light or dark backgrounds */\\n --jp-icon-contrast-color0: var(--md-purple-600);\\n --jp-icon-contrast-color1: var(--md-green-600);\\n --jp-icon-contrast-color2: var(--md-pink-600);\\n 
--jp-icon-contrast-color3: var(--md-blue-600);\\n}\\n\\n/*-----------------------------------------------------------------------------\\n| Copyright (c) Jupyter Development Team.\\n| Distributed under the terms of the Modified BSD License.\\n|----------------------------------------------------------------------------*/\\n\\n/* Set the default typography for monospace elements */\\ntt,\\ncode,\\nkbd,\\nsamp,\\npre {\\n font-family: var(--jp-code-font-family);\\n font-size: var(--jp-code-font-size);\\n line-height: var(--jp-code-line-height);\\n}\\n\\n\", \"\",{\"version\":3,\"sources\":[\"webpack://./style/base.css\"],\"names\":[],\"mappings\":\"AAAA;;;;CAIC;AACD;EACE,aAAa;EACb,kBAAkB;EAClB,OAAO;EACP,MAAM;EACN,QAAQ;EACR,SAAS;EACT,YAAY;EACZ,WAAW;EACX,YAAY;AACd;AACA;EACE,kBAAkB;EAClB,OAAO;EACP,MAAM;EACN,QAAQ;EACR,SAAS;EACT,YAAY;EACZ,mCAAmC;EACnC,UAAU;EACV,kBAAkB;EAClB,8DAA8D;AAChE;AACA;EACE,sBAAsB;AACxB;AACA;EACE;IACE,OAAO;IACP,MAAM;IACN,QAAQ;IACR,SAAS;IACT,YAAY;IACZ,QAAQ;IACR,SAAS;IACT,UAAU;EACZ;EACA;IACE,OAAO;IACP,MAAM;IACN,QAAQ;IACR,SAAS;IACT,YAAY;IACZ,QAAQ;IACR,SAAS;IACT,UAAU;EACZ;EACA;IACE,OAAO;IACP,MAAM;IACN,QAAQ;IACR,SAAS;IACT,YAAY;IACZ,QAAQ;IACR,SAAS;IACT,UAAU;EACZ;EACA;IACE,QAAQ;IACR,SAAS;IACT,WAAW;IACX,YAAY;IACZ,UAAU;EACZ;AACF;;;AAGA,QAAQ;AACR;EACE,8DAA8D;EAC9D,SAAS;EACT,mBAAmB;EACnB,cAAc;EACd,eAAe;EACf,qBAAqB;EACrB,iFAAiF;EACjF,eAAe;EACf,gBAAgB;EAChB,gBAAgB;EAChB,oBAAoB;EACpB,eAAe;EACf,kBAAkB;EAClB,qBAAqB;EACrB,sCAAsC;EACtC,iBAAiB;EACjB,yBAAyB;EACzB,0BAA0B;EAC1B,mBAAmB;AACrB;;AAEA;EACE,uIAAuI;AACzI;;AAEA;EACE,uIAAuI;AACzI;;AAEA;EACE,yBAAyB;EACzB,YAAY;EACZ,gBAAgB;EAChB,gBAAgB;EAChB,+BAA+B;EAC/B,cAAc;EACd,kCAAkC;EAClC,YAAY;EACZ,iBAAiB;EACjB,sBAAsB;EACtB,gBAAgB;EAChB,qBAAqB;EACrB,wBAAwB;AAC1B;;AAEA,kDAAkD;AAClD;;EAEE,yCAAyC;EACzC,+BAA+B;AACjC;AACA;EACE,sBAAsB;AACxB;;AAEA;AACA,yBAAyB;AACzB,YAAY;AACZ,YAAY;AACZ,gBAAgB;AAChB,eAAe;AACf,+BAA+B;AAC/B,cAAc;AACd,mBAAmB;AACnB,sBAAsB;AACtB,kCAAkC;AAClC;AACA;EACE,aAAa;AACf;AACA;EACE,oBAAoB;AACtB;;AAEA;EACE,iFAAiF;AACnF;AACA;;EAEE,yDAAm9B;EACn9B,wBAAwB;AAC1B;;AAEA;AACA;EACE,SAAS;EACT,aAAa;EACb,WAAW;EACX,aAAa;EACb,mBAAmB;EACnB,uBAAuB;EACvB,gBAAgB;EAChB,kCAAkC;EAClC,mBAAmB;AACrB;;AAEA;EACE,aAAa;EACb,sBAAsB;EACtB,uBAAuB;EACvB,mBAAmB;EACnB,YAAY;EACZ,aAAa;EACb,mBAAmB;EACnB,mBAAmB;EACnB,oBAAoB;AACtB;;AAEA;EACE,aAAa;EACb,YAAY;EACZ,cAAc;EACd,kBAAkB;EAClB,WAAW;AACb;;AAEA;EACE,cAAc;EACd,kBAAkB;EAClB,eAAe;EACf,SAAS;AACX;;AAEA;EACE,cAAc;EACd,eAAe;EACf,aAAa;EACb,eAAe;EACf,kBAAkB;AACpB;AACA;EACE,gBAAgB;EAChB,eAAe;EACf,cAAc;EACd,qBAAqB;EACrB,oBAAoB;AACtB;AACA;EACE,cAAc;AAChB;;AAEA;EACE,iCAAiC;EACjC,gBAAgB;EAChB,eAAe;EACf,kBAAkB;EAClB,UAAU;EACV,SAAS;EACT,cAAc;AAChB;;AAEA;EACE,UAAU;AACZ;AACA;AACA;EACE,YAAY;EACZ,mBAAmB;EACnB,gBAAgB;EAChB,eAAe;EACf,eAAe;EACf,kCAAkC;EAClC,kBAAkB;EAClB,YAAY;EACZ,UAAU;EACV,eAAe;EACf,0BAA0B;EAC1B,oBAAoB;AACtB;;AAEA;EACE,mBAAmB;AACrB;;AAEA;EACE;AACF;;AAEA;EACE;;;;;;IAME;;EAEF,6BAA6B;EAC7B;;;;;GAKC;EACD;;;;;GAKC;EACD;;;;;GAKC;EACD,uBAAuB;EACvB;;kDAEgD;EAChD;;kDAEgD;EAChD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;EACjD;;mDAEiD;;EAEjD;;;IAGE;;EAEF,sBAAsB;EACtB,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;EACtC,6CAA6C;EAC7C,uBAAuB;;EAEvB;;;;;;;;IAQE;;EAEF,8BAA8B;EAC9B,6BAA6B;EAC7B,wBAAwB,EAAE,mBAAmB;EAC7C,yBAAyB;EACzB,0BAA0B;;EAE1B;+EAC6E;;EAE7E;;;IAGE;;EAEF,+CAA+C;EAC/C,qCAAqC;EACrC,wCAAwC;EACxC,wCAAwC;EACxC,wCAAwC;;EAExC;;;IAGE;;EAEF,mDAAmD;EACnD,mDAAmD;EACnD,qDAAqD;EACrD,qDAAqD;;EAErD;;;;;;;IAOE;;EAEF,6BAA6B;EAC7B,mCAAmC;EACnC,kCAAkC;EAClC,6BAA6B,EAAE,mBAAmB;EAClD,8BAA8B;EAC9B,+BAA+B;EAC/B,gCAAgC;EACh
C,iCAAiC;;EAEjC,+EAA+E;EAC/E,0CAA0C;;EAE1C,mCAAmC;EACnC,sCAAsC;EACtC,yCAAyC;EACzC,qCAAqC;;EAErC,+CAA+C;EAC/C,0CAA0C;EAC1C,6CAA6C;EAC7C,6CAA6C;EAC7C,6CAA6C;;EAE7C,2CAA2C;;EAE3C;;qBAEmB;;EAEnB;;;;IAIE;;EAEF,yBAAyB;EACzB,6BAA6B,EAAE,uBAAuB;EACtD,sBAAsB,EAAE,sEAAsE;EAC9F,6EAA6E;EAC7E,yDAAyD;;EAEzD,+EAA+E;EAC/E,sCAAsC;;EAEtC,2DAA2D;EAC3D,8BAA8B;EAC9B,4BAA4B;EAC5B,4BAA4B;;EAE5B;;;;IAIE;;EAEF,yBAAyB;EACzB,yBAAyB;EACzB,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;;EAEtC;;;;IAIE;;EAEF,mCAAmC;EACnC,8CAA8C;EAC9C,8CAA8C;EAC9C,8CAA8C;EAC9C,8CAA8C;;EAE9C,iBAAiB;;EAEjB,qCAAqC;EACrC,qCAAqC;EACrC,qCAAqC;EACrC,qCAAqC;EACrC,oCAAoC;;EAEpC,uCAAuC;EACvC,uCAAuC;EACvC,uCAAuC;EACvC,uCAAuC;;EAEvC,8CAA8C;;EAE9C,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;EACtC,sCAAsC;;EAEtC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;;EAEpC,wCAAwC;EACxC,wCAAwC;EACxC,wCAAwC;EACxC,wCAAwC;;EAExC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;EACpC,oCAAoC;;EAEpC,yBAAyB;;EAEzB,sBAAsB;;EAEtB,8BAA8B;EAC9B,oCAAoC;EACpC,iDAAiD;;EAEjD,+CAA+C;EAC/C,iDAAiD;EACjD,6DAA6D;EAC7D,2DAA2D;EAC3D,4DAA4D;;EAE5D,4BAA4B;EAC5B,gEAAgE;EAChE,oCAAoC;EACpC,2BAA2B;EAC3B,wCAAwC;EACxC,0DAA0D;EAC1D;2EACyE;EACzE,sCAAsC;EACtC;uEACqE;EACrE,uCAAuC;;EAEvC,6BAA6B;;EAE7B,2BAA2B;EAC3B,wDAAwD;EACxD,oDAAoD;;EAEpD;;;;;GAKC;EACD;;;GAGC;;EAED,sBAAsB;;EAEtB,sCAAsC;EACtC,wDAAwD;EACxD,mEAAmE;;EAEnE,2BAA2B;;EAE3B,2CAA2C;;EAE3C,4BAA4B;;EAE5B,0BAA0B;;EAE1B,4BAA4B;;EAE5B,kDAAkD;EAClD,8BAA8B;EAC9B,gDAAgD;EAChD,4DAA4D;EAC5D,2CAA2C;EAC3C,kDAAkD;;EAElD,8BAA8B;;EAE9B,2BAA2B;;EAE3B,uBAAuB;;EAEvB,uDAAuD;EACvD,qDAAqD;EACrD,oDAAoD;EACpD,yCAAyC;EACzC,uDAAuD;EACvD,sDAAsD;EACtD,2DAA2D;;EAE3D,0BAA0B;;EAE1B,wCAAwC;EACxC,gDAAgD;EAChD,kDAAkD;;EAElD,gCAAgC;;EAEhC,yCAAyC;EACzC,mCAAmC;EACnC,qCAAqC;EACrC,kCAAkC;EAClC,qDAAqD;EACrD,yCAAyC;EACzC,yCAAyC;EACzC,0CAA0C;EAC1C,uCAAuC;EACvC,0CAA0C;EAC1C,yCAAyC;EACzC,wCAAwC;EACxC,uCAAuC;EACvC,sCAAsC;EACtC,wCAAwC;EACxC,yCAAyC;EACzC,sCAAsC;EACtC,kCAAkC;EAClC,wCAAwC;EACxC,qCAAqC;EACrC,oCAAoC;EACpC,mCAAmC;EACnC,oCAAoC;EACpC,iCAAiC;;EAEjC,0BAA0B;;EAE1B,2BAA2B;;EAE3B,2BAA2B;;EAE3B,6BAA6B;;EAE7B,0BAA0B;;EAE1B,mCAAmC;EACnC,qCAAqC;EACrC,gCAAgC;EAChC,6DAA6D;EAC7D,uCAAuC;EACvC;;GAEC;EACD,oEAAoE;;EAEpE,8DAA8D;EAC9D,+CAA+C;EAC/C,8CAA8C;EAC9C,6CAA6C;EAC7C,6CAA6C;AAC/C;;AAEA;;;8EAG8E;;AAE9E,sDAAsD;AACtD;;;;;EAKE,uCAAuC;EACvC,mCAAmC;EACnC,uCAAuC;AACzC\",\"sourcesContent\":[\"/*\\n See the JupyterLab Developer Guide for useful CSS Patterns:\\n\\n https://jupyterlab.readthedocs.io/en/stable/developer/css.html\\n*/\\n.lds-ripple {\\n display: flex;\\n position: absolute; \\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 80px;\\n height: 80px;\\n}\\n.lds-ripple div {\\n position: absolute; \\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n border: 4px solid rgb(245, 131, 55);\\n opacity: 1;\\n border-radius: 50%;\\n animation: lds-ripple 1s cubic-bezier(0, 0.2, 0.8, 1) infinite;\\n}\\n.lds-ripple div:nth-child(2) {\\n animation-delay: -0.5s;\\n}\\n@keyframes lds-ripple {\\n 0% {\\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 0;\\n height: 0;\\n opacity: 0;\\n }\\n 4.9% {\\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 0;\\n height: 0;\\n opacity: 0;\\n }\\n 5% {\\n left: 0; \\n top: 0; \\n right: 0; \\n bottom: 0;\\n margin: auto; \\n width: 0;\\n height: 0;\\n opacity: 1;\\n }\\n 100% {\\n top: 0px;\\n left: 0px;\\n width: 72px;\\n height: 72px;\\n opacity: 0;\\n }\\n}\\n\\n\\n/* CSS */\\n.button-62 {\\n background: linear-gradient(to bottom right, #EF4765, #FF9A5A);\\n border: 0;\\n 
border-radius: 12px;\\n color: #FFFFFF;\\n cursor: pointer;\\n display: inline-block;\\n font-family: -apple-system,system-ui,\\\"Segoe UI\\\",Roboto,Helvetica,Arial,sans-serif;\\n font-size: 16px;\\n font-weight: 500;\\n line-height: 2.5;\\n outline: transparent;\\n padding: 0 1rem;\\n text-align: center;\\n text-decoration: none;\\n transition: box-shadow .2s ease-in-out;\\n user-select: none;\\n -webkit-user-select: none;\\n touch-action: manipulation;\\n white-space: nowrap;\\n}\\n\\n.button-62:not([disabled]):focus {\\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\\n}\\n\\n.button-62:not([disabled]):hover {\\n box-shadow: 0 0 .25rem rgba(0, 0, 0, 0.5), -.125rem -.125rem 1rem rgba(239, 71, 101, 0.5), .125rem .125rem 1rem rgba(255, 154, 90, 0.5);\\n}\\n\\n.aselector select {\\n background-color: initial;\\n border: none;\\n border-radius: 0;\\n box-shadow: none;\\n color: var(--jp-ui-font-color0);\\n display: block;\\n font-size: var(--jp-ui-font-size1);\\n height: 24px;\\n line-height: 14px;\\n padding: 0 25px 0 10px;\\n text-align: left;\\n -moz-appearance: none;\\n -webkit-appearance: none;\\n}\\n\\n/* Use our own theme for hover and option styles */\\n.aselector select:hover,\\n.aselector select > option {\\n background-color: var(--jp-layout-color2);\\n color: var(--jp-ui-font-color0);\\n}\\nselect {\\n box-sizing: border-box;\\n}\\n\\n.font{\\nbackground-color: initial;\\nborder: none;\\nheight: 21px;\\nborder-radius: 0;\\nfont-weight:500;\\ncolor: var(--jp-ui-font-color0);\\ndisplay: block;\\nline-height: 22.5px;\\npadding: 0 25px 0 10px;\\nfont-size: var(--jp-ui-font-size1);\\n}\\n.wrapper {\\n display: flex;\\n}\\n.f1ozlkqi {\\n pointer-events: none;\\n}\\n\\n.palybutton{\\n background-image: '/home2/longxin/Neural_Coder_EXT/style/icons8-circled-play.gif';\\n}\\n.loading{\\n \\n background-image: url(\\\"data:image/svg+xml,\\\");\\n background-size: contain; \\n}\\n\\n.dialog{\\nbody {\\n margin: 0;\\n height: 100vh;\\n width:600px;\\n display: flex;\\n align-items: center;\\n justify-content: center;\\n overflow: hidden;\\n font-family: \\\"Poppins\\\", sans-serif;\\n background: #e3d0b6;\\n}\\n\\n#cookie-policy {\\n display: flex;\\n flex-direction: column;\\n justify-content: center;\\n align-items: center;\\n width: 460px;\\n height: 600px;\\n background: #f3efe6;\\n border-radius: 12px;\\n transform: scale(.8);\\n}\\n\\n#cookie-wrapper {\\n height: 240px;\\n width: 240px;\\n margin: 30px 0;\\n position: relative;\\n left: -40px;\\n}\\n\\nh1 {\\n color: #6c3a1f;\\n text-align: center;\\n font-size: 36px;\\n margin: 0;\\n}\\n\\np {\\n color: #a28561;\\n font-size: 14px;\\n margin-top: 0;\\n padding: 0 60px;\\n text-align: center;\\n}\\na {\\n margin-top: 18px;\\n font-size: 14px;\\n color: #a28561;\\n text-decoration: none;\\n pointer-events: none;\\n}\\na:hover {\\n color: #846b4d;\\n}\\n\\nspan {\\n font-family: \\\"Amatic SC\\\", cursive;\\n font-weight: 400;\\n font-size: 20px;\\n position: relative;\\n top: -18px;\\n left: 3px;\\n color: #a28561; \\n}\\n\\n#heart-no, #thought-heart-yes, #mouth, #face-no, #thought-1, #thought-2, #thought-heart-na, #q-mark, #eyes, #leg-l, #leg-r {\\n opacity: 0;\\n}\\n}\\n.dia_button {\\n color: white;\\n background: #dd794a;\\n margin-top: 12px;\\n cursor: pointer;\\n font-size: 24px;\\n font-family: \\\"Poppins\\\", sans-serif;\\n border-radius: 9px;\\n border: none;\\n width: 72%;\\n padding: 12px 0;\\n transition: 150ms ease-out;\\n 
pointer-events: none;\\n}\\n\\n.dia_button:hover {\\n background: #d66029;\\n}\\n\\n.pad{\\n padding-left:6%\\n}\\n\\n:root {\\n /* Elevation\\n *\\n * We style box-shadows using Material Design's idea of elevation. These particular numbers are taken from here:\\n *\\n * https://github.com/material-components/material-components-web\\n * https://material-components-web.appspot.com/elevation.html\\n */\\n\\n --jp-shadow-base-lightness: 0;\\n --jp-shadow-umbra-color: rgba(\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n 0.2\\n );\\n --jp-shadow-penumbra-color: rgba(\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n 0.14\\n );\\n --jp-shadow-ambient-color: rgba(\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n var(--jp-shadow-base-lightness),\\n 0.12\\n );\\n --jp-elevation-z0: none;\\n --jp-elevation-z1: 0px 2px 1px -1px var(--jp-shadow-umbra-color),\\n 0px 1px 1px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 3px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z2: 0px 3px 1px -2px var(--jp-shadow-umbra-color),\\n 0px 2px 2px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 5px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z4: 0px 2px 4px -1px var(--jp-shadow-umbra-color),\\n 0px 4px 5px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 10px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z6: 0px 3px 5px -1px var(--jp-shadow-umbra-color),\\n 0px 6px 10px 0px var(--jp-shadow-penumbra-color),\\n 0px 1px 18px 0px var(--jp-shadow-ambient-color);\\n --jp-elevation-z8: 0px 5px 5px -3px var(--jp-shadow-umbra-color),\\n 0px 8px 10px 1px var(--jp-shadow-penumbra-color),\\n 0px 3px 14px 2px var(--jp-shadow-ambient-color);\\n --jp-elevation-z12: 0px 7px 8px -4px var(--jp-shadow-umbra-color),\\n 0px 12px 17px 2px var(--jp-shadow-penumbra-color),\\n 0px 5px 22px 4px var(--jp-shadow-ambient-color);\\n --jp-elevation-z16: 0px 8px 10px -5px var(--jp-shadow-umbra-color),\\n 0px 16px 24px 2px var(--jp-shadow-penumbra-color),\\n 0px 6px 30px 5px var(--jp-shadow-ambient-color);\\n --jp-elevation-z20: 0px 10px 13px -6px var(--jp-shadow-umbra-color),\\n 0px 20px 31px 3px var(--jp-shadow-penumbra-color),\\n 0px 8px 38px 7px var(--jp-shadow-ambient-color);\\n --jp-elevation-z24: 0px 11px 15px -7px var(--jp-shadow-umbra-color),\\n 0px 24px 38px 3px var(--jp-shadow-penumbra-color),\\n 0px 9px 46px 8px var(--jp-shadow-ambient-color);\\n\\n /* Borders\\n *\\n * The following variables, specify the visual styling of borders in JupyterLab.\\n */\\n\\n --jp-border-width: 1px;\\n --jp-border-color0: var(--md-grey-400);\\n --jp-border-color1: var(--md-grey-400);\\n --jp-border-color2: var(--md-grey-300);\\n --jp-border-color3: var(--md-grey-200);\\n --jp-inverse-border-color: var(--md-grey-600);\\n --jp-border-radius: 2px;\\n\\n /* UI Fonts\\n *\\n * The UI font CSS variables are used for the typography all of the JupyterLab\\n * user interface elements that are not directly user generated content.\\n *\\n * The font sizing here is done assuming that the body font size of --jp-ui-font-size1\\n * is applied to a parent element. 
When children elements, such as headings, are sized\\n * in em all things will be computed relative to that body size.\\n */\\n\\n --jp-ui-font-scale-factor: 1.2;\\n --jp-ui-font-size0: 0.83333em;\\n --jp-ui-font-size1: 13px; /* Base font size */\\n --jp-ui-font-size2: 1.2em;\\n --jp-ui-font-size3: 1.44em;\\n\\n --jp-ui-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica,\\n Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol';\\n\\n /*\\n * Use these font colors against the corresponding main layout colors.\\n * In a light theme, these go from dark to light.\\n */\\n\\n /* Defaults use Material Design specification */\\n --jp-ui-font-color0: rgba(0, 0, 0, 1);\\n --jp-ui-font-color1: rgba(0, 0, 0, 0.87);\\n --jp-ui-font-color2: rgba(0, 0, 0, 0.54);\\n --jp-ui-font-color3: rgba(0, 0, 0, 0.38);\\n\\n /*\\n * Use these against the brand/accent/warn/error colors.\\n * These will typically go from light to darker, in both a dark and light theme.\\n */\\n\\n --jp-ui-inverse-font-color0: rgba(255, 255, 255, 1);\\n --jp-ui-inverse-font-color1: rgba(255, 255, 255, 1);\\n --jp-ui-inverse-font-color2: rgba(255, 255, 255, 0.7);\\n --jp-ui-inverse-font-color3: rgba(255, 255, 255, 0.5);\\n\\n /* Content Fonts\\n *\\n * Content font variables are used for typography of user generated content.\\n *\\n * The font sizing here is done assuming that the body font size of --jp-content-font-size1\\n * is applied to a parent element. When children elements, such as headings, are sized\\n * in em all things will be computed relative to that body size.\\n */\\n\\n --jp-content-line-height: 1.6;\\n --jp-content-font-scale-factor: 1.2;\\n --jp-content-font-size0: 0.83333em;\\n --jp-content-font-size1: 14px; /* Base font size */\\n --jp-content-font-size2: 1.2em;\\n --jp-content-font-size3: 1.44em;\\n --jp-content-font-size4: 1.728em;\\n --jp-content-font-size5: 2.0736em;\\n\\n /* This gives a magnification of about 125% in presentation mode over normal. */\\n --jp-content-presentation-font-size1: 17px;\\n\\n --jp-content-heading-line-height: 1;\\n --jp-content-heading-margin-top: 1.2em;\\n --jp-content-heading-margin-bottom: 0.8em;\\n --jp-content-heading-font-weight: 500;\\n\\n /* Defaults use Material Design specification */\\n --jp-content-font-color0: rgba(0, 0, 0, 1);\\n --jp-content-font-color1: rgba(0, 0, 0, 0.87);\\n --jp-content-font-color2: rgba(0, 0, 0, 0.54);\\n --jp-content-font-color3: rgba(0, 0, 0, 0.38);\\n\\n --jp-content-link-color: var(--md-blue-700);\\n\\n --jp-content-font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI',\\n Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji',\\n 'Segoe UI Symbol';\\n\\n /*\\n * Code Fonts\\n *\\n * Code font variables are used for typography of code and other monospaces content.\\n */\\n\\n --jp-code-font-size: 13px;\\n --jp-code-line-height: 1.3077; /* 17px for 13px base */\\n --jp-code-padding: 5px; /* 5px for 13px base, codemirror highlighting needs integer px value */\\n --jp-code-font-family-default: Menlo, Consolas, 'DejaVu Sans Mono', monospace;\\n --jp-code-font-family: var(--jp-code-font-family-default);\\n\\n /* This gives a magnification of about 125% in presentation mode over normal. */\\n --jp-code-presentation-font-size: 16px;\\n\\n /* may need to tweak cursor width if you change font size */\\n --jp-code-cursor-width0: 1.4px;\\n --jp-code-cursor-width1: 2px;\\n --jp-code-cursor-width2: 4px;\\n\\n /* Layout\\n *\\n * The following are the main layout colors use in JupyterLab. 
In a light\\n * theme these would go from light to dark.\\n */\\n\\n --jp-layout-color0: white;\\n --jp-layout-color1: white;\\n --jp-layout-color2: var(--md-grey-200);\\n --jp-layout-color3: var(--md-grey-400);\\n --jp-layout-color4: var(--md-grey-600);\\n\\n /* Inverse Layout\\n *\\n * The following are the inverse layout colors use in JupyterLab. In a light\\n * theme these would go from dark to light.\\n */\\n\\n --jp-inverse-layout-color0: #111111;\\n --jp-inverse-layout-color1: var(--md-grey-900);\\n --jp-inverse-layout-color2: var(--md-grey-800);\\n --jp-inverse-layout-color3: var(--md-grey-700);\\n --jp-inverse-layout-color4: var(--md-grey-600);\\n\\n /* Brand/accent */\\n\\n --jp-brand-color0: var(--md-blue-900);\\n --jp-brand-color1: var(--md-blue-700);\\n --jp-brand-color2: var(--md-blue-300);\\n --jp-brand-color3: var(--md-blue-100);\\n --jp-brand-color4: var(--md-blue-50);\\n\\n --jp-accent-color0: var(--md-green-900);\\n --jp-accent-color1: var(--md-green-700);\\n --jp-accent-color2: var(--md-green-300);\\n --jp-accent-color3: var(--md-green-100);\\n\\n /* State colors (warn, error, success, info) */\\n\\n --jp-warn-color0: var(--md-orange-900);\\n --jp-warn-color1: var(--md-orange-700);\\n --jp-warn-color2: var(--md-orange-300);\\n --jp-warn-color3: var(--md-orange-100);\\n\\n --jp-error-color0: var(--md-red-900);\\n --jp-error-color1: var(--md-red-700);\\n --jp-error-color2: var(--md-red-300);\\n --jp-error-color3: var(--md-red-100);\\n\\n --jp-success-color0: var(--md-green-900);\\n --jp-success-color1: var(--md-green-700);\\n --jp-success-color2: var(--md-green-300);\\n --jp-success-color3: var(--md-green-100);\\n\\n --jp-info-color0: var(--md-cyan-900);\\n --jp-info-color1: var(--md-cyan-700);\\n --jp-info-color2: var(--md-cyan-300);\\n --jp-info-color3: var(--md-cyan-100);\\n\\n /* Cell specific styles */\\n\\n --jp-cell-padding: 5px;\\n\\n --jp-cell-collapser-width: 8px;\\n --jp-cell-collapser-min-height: 20px;\\n --jp-cell-collapser-not-active-hover-opacity: 0.6;\\n\\n --jp-cell-editor-background: var(--md-grey-100);\\n --jp-cell-editor-border-color: var(--md-grey-300);\\n --jp-cell-editor-box-shadow: inset 0 0 2px var(--md-blue-300);\\n --jp-cell-editor-active-background: var(--jp-layout-color0);\\n --jp-cell-editor-active-border-color: var(--jp-brand-color1);\\n\\n --jp-cell-prompt-width: 64px;\\n --jp-cell-prompt-font-family: var(--jp-code-font-family-default);\\n --jp-cell-prompt-letter-spacing: 0px;\\n --jp-cell-prompt-opacity: 1;\\n --jp-cell-prompt-not-active-opacity: 0.5;\\n --jp-cell-prompt-not-active-font-color: var(--md-grey-700);\\n /* A custom blend of MD grey and blue 600\\n * See https://meyerweb.com/eric/tools/color-blend/#546E7A:1E88E5:5:hex */\\n --jp-cell-inprompt-font-color: #307fc1;\\n /* A custom blend of MD grey and orange 600\\n * https://meyerweb.com/eric/tools/color-blend/#546E7A:F4511E:5:hex */\\n --jp-cell-outprompt-font-color: #bf5b3d;\\n\\n /* Notebook specific styles */\\n\\n --jp-notebook-padding: 10px;\\n --jp-notebook-select-background: var(--jp-layout-color1);\\n --jp-notebook-multiselected-color: var(--md-blue-50);\\n\\n /* The scroll padding is calculated to fill enough space at the bottom of the\\n notebook to show one single-line cell (with appropriate padding) at the top\\n when the notebook is scrolled all the way to the bottom. We also subtract one\\n pixel so that no scrollbar appears if we have just one single-line cell in the\\n notebook. 
This padding is to enable a 'scroll past end' feature in a notebook.\\n */\\n --jp-notebook-scroll-padding: calc(\\n 100% - var(--jp-code-font-size) * var(--jp-code-line-height) -\\n var(--jp-code-padding) - var(--jp-cell-padding) - 1px\\n );\\n\\n /* Rendermime styles */\\n\\n --jp-rendermime-error-background: #fdd;\\n --jp-rendermime-table-row-background: var(--md-grey-100);\\n --jp-rendermime-table-row-hover-background: var(--md-light-blue-50);\\n\\n /* Dialog specific styles */\\n\\n --jp-dialog-background: rgba(0, 0, 0, 0.25);\\n\\n /* Console specific styles */\\n\\n --jp-console-padding: 10px;\\n\\n /* Toolbar specific styles */\\n\\n --jp-toolbar-border-color: var(--jp-border-color1);\\n --jp-toolbar-micro-height: 8px;\\n --jp-toolbar-background: var(--jp-layout-color1);\\n --jp-toolbar-box-shadow: 0px 0px 2px 0px rgba(0, 0, 0, 0.24);\\n --jp-toolbar-header-margin: 4px 4px 0px 4px;\\n --jp-toolbar-active-background: var(--md-grey-300);\\n\\n /* Statusbar specific styles */\\n\\n --jp-statusbar-height: 24px;\\n\\n /* Input field styles */\\n\\n --jp-input-box-shadow: inset 0 0 2px var(--md-blue-300);\\n --jp-input-active-background: var(--jp-layout-color1);\\n --jp-input-hover-background: var(--jp-layout-color1);\\n --jp-input-background: var(--md-grey-100);\\n --jp-input-border-color: var(--jp-inverse-border-color);\\n --jp-input-active-border-color: var(--jp-brand-color1);\\n --jp-input-active-box-shadow-color: rgba(19, 124, 189, 0.3);\\n\\n /* General editor styles */\\n\\n --jp-editor-selected-background: #d9d9d9;\\n --jp-editor-selected-focused-background: #d7d4f0;\\n --jp-editor-cursor-color: var(--jp-ui-font-color0);\\n\\n /* Code mirror specific styles */\\n\\n --jp-mirror-editor-keyword-color: #008000;\\n --jp-mirror-editor-atom-color: #88f;\\n --jp-mirror-editor-number-color: #080;\\n --jp-mirror-editor-def-color: #00f;\\n --jp-mirror-editor-variable-color: var(--md-grey-900);\\n --jp-mirror-editor-variable-2-color: #05a;\\n --jp-mirror-editor-variable-3-color: #085;\\n --jp-mirror-editor-punctuation-color: #05a;\\n --jp-mirror-editor-property-color: #05a;\\n --jp-mirror-editor-operator-color: #aa22ff;\\n --jp-mirror-editor-comment-color: #408080;\\n --jp-mirror-editor-string-color: #ba2121;\\n --jp-mirror-editor-string-2-color: #708;\\n --jp-mirror-editor-meta-color: #aa22ff;\\n --jp-mirror-editor-qualifier-color: #555;\\n --jp-mirror-editor-builtin-color: #008000;\\n --jp-mirror-editor-bracket-color: #997;\\n --jp-mirror-editor-tag-color: #170;\\n --jp-mirror-editor-attribute-color: #00c;\\n --jp-mirror-editor-header-color: blue;\\n --jp-mirror-editor-quote-color: #090;\\n --jp-mirror-editor-link-color: #00c;\\n --jp-mirror-editor-error-color: #f00;\\n --jp-mirror-editor-hr-color: #999;\\n\\n /* Vega extension styles */\\n\\n --jp-vega-background: white;\\n\\n /* Sidebar-related styles */\\n\\n --jp-sidebar-min-width: 250px;\\n\\n /* Search-related styles */\\n\\n --jp-search-toggle-off-opacity: 0.5;\\n --jp-search-toggle-hover-opacity: 0.8;\\n --jp-search-toggle-on-opacity: 1;\\n --jp-search-selected-match-background-color: rgb(245, 200, 0);\\n --jp-search-selected-match-color: black;\\n --jp-search-unselected-match-background-color: var(\\n --jp-inverse-layout-color0\\n );\\n --jp-search-unselected-match-color: var(--jp-ui-inverse-font-color0);\\n\\n /* Icon colors that work well with light or dark backgrounds */\\n --jp-icon-contrast-color0: var(--md-purple-600);\\n --jp-icon-contrast-color1: var(--md-green-600);\\n --jp-icon-contrast-color2: var(--md-pink-600);\\n 
--jp-icon-contrast-color3: var(--md-blue-600);\\n}\\n\\n/*-----------------------------------------------------------------------------\\n| Copyright (c) Jupyter Development Team.\\n| Distributed under the terms of the Modified BSD License.\\n|----------------------------------------------------------------------------*/\\n\\n/* Set the default typography for monospace elements */\\ntt,\\ncode,\\nkbd,\\nsamp,\\npre {\\n font-family: var(--jp-code-font-family);\\n font-size: var(--jp-code-font-size);\\n line-height: var(--jp-code-line-height);\\n}\\n\\n\"],\"sourceRoot\":\"\"}]);\n// Exports\nexport default ___CSS_LOADER_EXPORT___;\n","import api from \"!../node_modules/style-loader/dist/runtime/injectStylesIntoStyleTag.js\";\n import content from \"!!../node_modules/css-loader/dist/cjs.js!./base.css\";\n\nvar options = {};\n\noptions.insert = \"head\";\noptions.singleton = false;\n\nvar update = api(content, options);\n\n\n\nexport default content.locals || {};","import './base.css';\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854.e09faf9ec3a764e40dc7.js b/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854.e09faf9ec3a764e40dc7.js deleted file mode 100644 index 34c1fdb7608..00000000000 --- a/neural_coder/extensions/neural_compressor_ext_lab/neural_compressor_ext_lab/labextension/static/vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854.e09faf9ec3a764e40dc7.js +++ /dev/null @@ -1,463 +0,0 @@ -"use strict"; -(self["webpackChunkneural_compressor_ext_lab"] = self["webpackChunkneural_compressor_ext_lab"] || []).push([["vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_getU-849854"],{ - -/***/ "./node_modules/css-loader/dist/runtime/api.js": -/*!*****************************************************!*\ - !*** ./node_modules/css-loader/dist/runtime/api.js ***! - \*****************************************************/ -/***/ ((module) => { - - - -/* - MIT License http://www.opensource.org/licenses/mit-license.php - Author Tobias Koppers @sokra -*/ -module.exports = function (cssWithMappingToString) { - var list = []; // return the list of modules as css string - - list.toString = function toString() { - return this.map(function (item) { - var content = ""; - var needLayer = typeof item[5] !== "undefined"; - - if (item[4]) { - content += "@supports (".concat(item[4], ") {"); - } - - if (item[2]) { - content += "@media ".concat(item[2], " {"); - } - - if (needLayer) { - content += "@layer".concat(item[5].length > 0 ? 
" ".concat(item[5]) : "", " {"); - } - - content += cssWithMappingToString(item); - - if (needLayer) { - content += "}"; - } - - if (item[2]) { - content += "}"; - } - - if (item[4]) { - content += "}"; - } - - return content; - }).join(""); - }; // import a list of modules into the list - - - list.i = function i(modules, media, dedupe, supports, layer) { - if (typeof modules === "string") { - modules = [[null, modules, undefined]]; - } - - var alreadyImportedModules = {}; - - if (dedupe) { - for (var k = 0; k < this.length; k++) { - var id = this[k][0]; - - if (id != null) { - alreadyImportedModules[id] = true; - } - } - } - - for (var _k = 0; _k < modules.length; _k++) { - var item = [].concat(modules[_k]); - - if (dedupe && alreadyImportedModules[item[0]]) { - continue; - } - - if (typeof layer !== "undefined") { - if (typeof item[5] === "undefined") { - item[5] = layer; - } else { - item[1] = "@layer".concat(item[5].length > 0 ? " ".concat(item[5]) : "", " {").concat(item[1], "}"); - item[5] = layer; - } - } - - if (media) { - if (!item[2]) { - item[2] = media; - } else { - item[1] = "@media ".concat(item[2], " {").concat(item[1], "}"); - item[2] = media; - } - } - - if (supports) { - if (!item[4]) { - item[4] = "".concat(supports); - } else { - item[1] = "@supports (".concat(item[4], ") {").concat(item[1], "}"); - item[4] = supports; - } - } - - list.push(item); - } - }; - - return list; -}; - -/***/ }), - -/***/ "./node_modules/css-loader/dist/runtime/getUrl.js": -/*!********************************************************!*\ - !*** ./node_modules/css-loader/dist/runtime/getUrl.js ***! - \********************************************************/ -/***/ ((module) => { - - - -module.exports = function (url, options) { - if (!options) { - options = {}; - } - - if (!url) { - return url; - } - - url = String(url.__esModule ? url.default : url); // If url is already wrapped in quotes, remove them - - if (/^['"].*['"]$/.test(url)) { - url = url.slice(1, -1); - } - - if (options.hash) { - url += options.hash; - } // Should url be wrapped? - // See https://drafts.csswg.org/css-values-3/#urls - - - if (/["'() \t\n]|(%20)/.test(url) || options.needQuotes) { - return "\"".concat(url.replace(/"/g, '\\"').replace(/\n/g, "\\n"), "\""); - } - - return url; -}; - -/***/ }), - -/***/ "./node_modules/css-loader/dist/runtime/sourceMaps.js": -/*!************************************************************!*\ - !*** ./node_modules/css-loader/dist/runtime/sourceMaps.js ***! - \************************************************************/ -/***/ ((module) => { - - - -module.exports = function (item) { - var content = item[1]; - var cssMapping = item[3]; - - if (!cssMapping) { - return content; - } - - if (typeof btoa === "function") { - var base64 = btoa(unescape(encodeURIComponent(JSON.stringify(cssMapping)))); - var data = "sourceMappingURL=data:application/json;charset=utf-8;base64,".concat(base64); - var sourceMapping = "/*# ".concat(data, " */"); - var sourceURLs = cssMapping.sources.map(function (source) { - return "/*# sourceURL=".concat(cssMapping.sourceRoot || "").concat(source, " */"); - }); - return [content].concat(sourceURLs).concat([sourceMapping]).join("\n"); - } - - return [content].join("\n"); -}; - -/***/ }), - -/***/ "./node_modules/style-loader/dist/runtime/injectStylesIntoStyleTag.js": -/*!****************************************************************************!*\ - !*** ./node_modules/style-loader/dist/runtime/injectStylesIntoStyleTag.js ***! 
- \****************************************************************************/ -/***/ ((module, __unused_webpack_exports, __webpack_require__) => { - - - -var isOldIE = function isOldIE() { - var memo; - return function memorize() { - if (typeof memo === 'undefined') { - // Test for IE <= 9 as proposed by Browserhacks - // @see http://browserhacks.com/#hack-e71d8692f65334173fee715c222cb805 - // Tests for existence of standard globals is to allow style-loader - // to operate correctly into non-standard environments - // @see https://github.com/webpack-contrib/style-loader/issues/177 - memo = Boolean(window && document && document.all && !window.atob); - } - - return memo; - }; -}(); - -var getTarget = function getTarget() { - var memo = {}; - return function memorize(target) { - if (typeof memo[target] === 'undefined') { - var styleTarget = document.querySelector(target); // Special case to return head of iframe instead of iframe itself - - if (window.HTMLIFrameElement && styleTarget instanceof window.HTMLIFrameElement) { - try { - // This will throw an exception if access to iframe is blocked - // due to cross-origin restrictions - styleTarget = styleTarget.contentDocument.head; - } catch (e) { - // istanbul ignore next - styleTarget = null; - } - } - - memo[target] = styleTarget; - } - - return memo[target]; - }; -}(); - -var stylesInDom = []; - -function getIndexByIdentifier(identifier) { - var result = -1; - - for (var i = 0; i < stylesInDom.length; i++) { - if (stylesInDom[i].identifier === identifier) { - result = i; - break; - } - } - - return result; -} - -function modulesToDom(list, options) { - var idCountMap = {}; - var identifiers = []; - - for (var i = 0; i < list.length; i++) { - var item = list[i]; - var id = options.base ? item[0] + options.base : item[0]; - var count = idCountMap[id] || 0; - var identifier = "".concat(id, " ").concat(count); - idCountMap[id] = count + 1; - var index = getIndexByIdentifier(identifier); - var obj = { - css: item[1], - media: item[2], - sourceMap: item[3] - }; - - if (index !== -1) { - stylesInDom[index].references++; - stylesInDom[index].updater(obj); - } else { - stylesInDom.push({ - identifier: identifier, - updater: addStyle(obj, options), - references: 1 - }); - } - - identifiers.push(identifier); - } - - return identifiers; -} - -function insertStyleElement(options) { - var style = document.createElement('style'); - var attributes = options.attributes || {}; - - if (typeof attributes.nonce === 'undefined') { - var nonce = true ? __webpack_require__.nc : 0; - - if (nonce) { - attributes.nonce = nonce; - } - } - - Object.keys(attributes).forEach(function (key) { - style.setAttribute(key, attributes[key]); - }); - - if (typeof options.insert === 'function') { - options.insert(style); - } else { - var target = getTarget(options.insert || 'head'); - - if (!target) { - throw new Error("Couldn't find a style target. This probably means that the value for the 'insert' parameter is invalid."); - } - - target.appendChild(style); - } - - return style; -} - -function removeStyleElement(style) { - // istanbul ignore if - if (style.parentNode === null) { - return false; - } - - style.parentNode.removeChild(style); -} -/* istanbul ignore next */ - - -var replaceText = function replaceText() { - var textStore = []; - return function replace(index, replacement) { - textStore[index] = replacement; - return textStore.filter(Boolean).join('\n'); - }; -}(); - -function applyToSingletonTag(style, index, remove, obj) { - var css = remove ? '' : obj.media ? 
"@media ".concat(obj.media, " {").concat(obj.css, "}") : obj.css; // For old IE - - /* istanbul ignore if */ - - if (style.styleSheet) { - style.styleSheet.cssText = replaceText(index, css); - } else { - var cssNode = document.createTextNode(css); - var childNodes = style.childNodes; - - if (childNodes[index]) { - style.removeChild(childNodes[index]); - } - - if (childNodes.length) { - style.insertBefore(cssNode, childNodes[index]); - } else { - style.appendChild(cssNode); - } - } -} - -function applyToTag(style, options, obj) { - var css = obj.css; - var media = obj.media; - var sourceMap = obj.sourceMap; - - if (media) { - style.setAttribute('media', media); - } else { - style.removeAttribute('media'); - } - - if (sourceMap && typeof btoa !== 'undefined') { - css += "\n/*# sourceMappingURL=data:application/json;base64,".concat(btoa(unescape(encodeURIComponent(JSON.stringify(sourceMap)))), " */"); - } // For old IE - - /* istanbul ignore if */ - - - if (style.styleSheet) { - style.styleSheet.cssText = css; - } else { - while (style.firstChild) { - style.removeChild(style.firstChild); - } - - style.appendChild(document.createTextNode(css)); - } -} - -var singleton = null; -var singletonCounter = 0; - -function addStyle(obj, options) { - var style; - var update; - var remove; - - if (options.singleton) { - var styleIndex = singletonCounter++; - style = singleton || (singleton = insertStyleElement(options)); - update = applyToSingletonTag.bind(null, style, styleIndex, false); - remove = applyToSingletonTag.bind(null, style, styleIndex, true); - } else { - style = insertStyleElement(options); - update = applyToTag.bind(null, style, options); - - remove = function remove() { - removeStyleElement(style); - }; - } - - update(obj); - return function updateStyle(newObj) { - if (newObj) { - if (newObj.css === obj.css && newObj.media === obj.media && newObj.sourceMap === obj.sourceMap) { - return; - } - - update(obj = newObj); - } else { - remove(); - } - }; -} - -module.exports = function (list, options) { - options = options || {}; // Force single-tag solution on IE6-9, which has a hard limit on the # of ",""," plotly-logomark"," "," "," "," "," "," "," "," "," "," "," "," "," ",""].join("")}}},99863:function(t,e){"use strict";e.isLeftAnchor=function(t){return"left"===t.xanchor||"auto"===t.xanchor&&t.x<=1/3},e.isCenterAnchor=function(t){return"center"===t.xanchor||"auto"===t.xanchor&&t.x>1/3&&t.x<2/3},e.isRightAnchor=function(t){return"right"===t.xanchor||"auto"===t.xanchor&&t.x>=2/3},e.isTopAnchor=function(t){return"top"===t.yanchor||"auto"===t.yanchor&&t.y>=2/3},e.isMiddleAnchor=function(t){return"middle"===t.yanchor||"auto"===t.yanchor&&t.y>1/3&&t.y<2/3},e.isBottomAnchor=function(t){return"bottom"===t.yanchor||"auto"===t.yanchor&&t.y<=1/3}},26348:function(t,e,n){"use strict";var r=n(64872),i=r.mod,a=r.modHalf,o=Math.PI,s=2*o;function l(t){return Math.abs(t[1]-t[0])>s-1e-14}function c(t,e){return a(e-t,s)}function u(t,e){if(l(e))return!0;var n,r;e[0](r=i(r,s))&&(r+=s);var a=i(t,s),o=a+s;return a>=n&&a<=r||o>=n&&o<=r}function f(t,e,n,r,i,a,c){i=i||0,a=a||0;var u,f,h,d,p,g=l([n,r]);function v(t,e){return[t*Math.cos(e)+i,a-t*Math.sin(e)]}g?(u=0,f=o,h=s):n=i&&t<=a);var i,a},pathArc:function(t,e,n,r,i){return f(null,t,e,n,r,i,0)},pathSector:function(t,e,n,r,i){return f(null,t,e,n,r,i,1)},pathAnnulus:function(t,e,n,r,i,a){return f(t,e,n,r,i,a,1)}}},73627:function(t,e){"use strict";var n=Array.isArray,r=ArrayBuffer,i=DataView;function a(t){return r.isView(t)&&!(t instanceof i)}function o(t){return 
n(t)||a(t)}function s(t,e,n){if(o(t)){if(o(t[0])){for(var r=n,i=0;ii.max?e.set(n):e.set(+t)}},integer:{coerceFunction:function(t,e,n,i){t%1||!r(t)||void 0!==i.min&&ti.max?e.set(n):e.set(+t)}},string:{coerceFunction:function(t,e,n,r){if("string"!==typeof t){var i="number"===typeof t;!0!==r.strict&&i?e.set(String(t)):e.set(n)}else r.noBlank&&!t?e.set(n):e.set(t)}},color:{coerceFunction:function(t,e,n){i(t).isValid()?e.set(t):e.set(n)}},colorlist:{coerceFunction:function(t,e,n){Array.isArray(t)&&t.length&&t.every((function(t){return i(t).isValid()}))?e.set(t):e.set(n)}},colorscale:{coerceFunction:function(t,e,n){e.set(o.get(t,n))}},angle:{coerceFunction:function(t,e,n){"auto"===t?e.set("auto"):r(t)?e.set(f(+t,360)):e.set(n)}},subplotid:{coerceFunction:function(t,e,n,r){var i=r.regex||u(n);"string"===typeof t&&i.test(t)?e.set(t):e.set(n)},validateFunction:function(t,e){var n=e.dflt;return t===n||"string"===typeof t&&!!u(n).test(t)}},flaglist:{coerceFunction:function(t,e,n,r){if(-1===(r.extras||[]).indexOf(t))if("string"===typeof t){for(var i=t.split("+"),a=0;a=r&&t<=i?t:u}if("string"!==typeof t&&"number"!==typeof t)return u;t=String(t);var c=w(n),m=t.charAt(0);!c||"G"!==m&&"g"!==m||(t=t.substr(1),n="");var _=c&&"chinese"===n.substr(0,7),k=t.match(_?b:y);if(!k)return u;var T=k[1],M=k[3]||"1",A=Number(k[5]||1),S=Number(k[7]||0),E=Number(k[9]||0),C=Number(k[11]||0);if(c){if(2===T.length)return u;var P;T=Number(T);try{var O=v.getComponentMethod("calendars","getCal")(n);if(_){var L="i"===M.charAt(M.length-1);M=parseInt(M,10),P=O.newDate(T,O.toMonthIndex(T,M,L),A)}else P=O.newDate(T,Number(M),A)}catch(D){return u}return P?(P.toJD()-g)*f+S*h+E*d+C*p:u}T=2===T.length?(Number(T)+2e3-x)%100+x:Number(T),M-=1;var I=new Date(Date.UTC(2e3,M,A,S,E));return I.setUTCFullYear(T),I.getUTCMonth()!==M||I.getUTCDate()!==A?u:I.getTime()+C*p},r=e.MIN_MS=e.dateTime2ms("-9999"),i=e.MAX_MS=e.dateTime2ms("9999-12-31 23:59:59.9999"),e.isDateTime=function(t,n){return e.dateTime2ms(t,n)!==u};var k=90*f,T=3*h,M=5*d;function A(t,e,n,r,i){if((e||n||r||i)&&(t+=" "+_(e,2)+":"+_(n,2),(r||i)&&(t+=":"+_(r,2),i))){for(var a=4;i%10===0;)a-=1,i/=10;t+="."+_(i,a)}return t}e.ms2DateTime=function(t,e,n){if("number"!==typeof t||!(t>=r&&t<=i))return u;e||(e=0);var a,o,s,c,y,b,x=Math.floor(10*l(t+.05,1)),_=Math.round(t-x/10);if(w(n)){var S=Math.floor(_/f)+g,E=Math.floor(l(t,f));try{a=v.getComponentMethod("calendars","getCal")(n).fromJD(S).formatDate("yyyy-mm-dd")}catch(C){a=m("G%Y-%m-%d")(new Date(_))}if("-"===a.charAt(0))for(;a.length<11;)a="-0"+a.substr(1);else for(;a.length<10;)a="0"+a;o=e=r+f&&t<=i-f))return u;var e=Math.floor(10*l(t+.05,1)),n=new Date(Math.round(t-e/10));return A(a("%Y-%m-%d")(n),n.getHours(),n.getMinutes(),n.getSeconds(),10*n.getUTCMilliseconds()+e)},e.cleanDate=function(t,n,r){if(t===u)return n;if(e.isJSDate(t)||"number"===typeof t&&isFinite(t)){if(w(r))return s.error("JS Dates and milliseconds are incompatible with world calendars",t),n;if(!(t=e.ms2DateTimeLocal(+t))&&void 0!==n)return n}else if(!e.isDateTime(t,r))return s.error("unrecognized date",t),n;return t};var S=/%\d?f/g,E=/%h/g,C={1:"1",2:"1",3:"2",4:"2"};function P(t,e,n,r){t=t.replace(S,(function(t){var n=Math.min(+t.charAt(1)||6,6);return(e/1e3%1+2).toFixed(n).substr(2).replace(/0+$/,"")||"0"}));var i=new Date(Math.floor(e+.05));if(t=t.replace(E,(function(){return C[n("%q")(i)]})),w(r))try{t=v.getComponentMethod("calendars","worldCalFmt")(t,e,r)}catch(a){return"Invalid"}return n(t)(i)}var 
O=[59,59.9,59.99,59.999,59.9999];e.formatDate=function(t,e,n,r,i,a){if(i=w(i)&&i,!e)if("y"===n)e=a.year;else if("m"===n)e=a.month;else{if("d"!==n)return function(t,e){var n=l(t+.05,f),r=_(Math.floor(n/h),2)+":"+_(l(Math.floor(n/d),60),2);if("M"!==e){o(e)||(e=0);var i=(100+Math.min(l(t/p,60),O[e])).toFixed(e).substr(1);e>0&&(i=i.replace(/0+$/,"").replace(/[\.]$/,"")),r+=":"+i}return r}(t,n)+"\n"+P(a.dayMonthYear,t,r,i);e=a.dayMonth+"\n"+a.year}return P(e,t,r,i)};var L=3*f;e.incrementMonth=function(t,e,n){n=w(n)&&n;var r=l(t,f);if(t=Math.round(t-r),n)try{var i=Math.round(t/f)+g,a=v.getComponentMethod("calendars","getCal")(n),o=a.fromJD(i);return e%12?a.add(o,e,"m"):a.add(o,e/12,"y"),(o.toJD()-g)*f+r}catch(u){s.error("invalid ms "+t+" in calendar "+n)}var c=new Date(t+L);return c.setUTCMonth(c.getUTCMonth()+e)+r-L},e.findExactDates=function(t,e){for(var n,r,i=0,a=0,s=0,l=0,c=w(e)&&v.getComponentMethod("calendars","getCal")(e),u=0;u0&&t[e+1][0]<0)return e;return null}switch(e="RUS"===s||"FJI"===s?function(t){var e;if(null===c(t))e=t;else for(e=new Array(t.length),i=0;ie?n[r++]=[t[i][0]+360,t[i][1]]:i===e?(n[r++]=t[i],n[r++]=[t[i][0],-90]):n[r++]=t[i];var a=h.tester(n);a.pts.pop(),l.push(a)}:function(t){l.push(h.tester(t))},a.type){case"MultiPolygon":for(n=0;ni&&(i=c,e=l)}else e=n;return o.default(e).geometry.coordinates}(u),r.fIn=t,r.fOut=u,s.push(u)}else c.log(["Location",r.loc,"does not have a valid GeoJSON geometry.","Traces with locationmode *geojson-id* only support","*Polygon* and *MultiPolygon* geometries."].join(" "))}delete i[n]}switch(n.type){case"FeatureCollection":var h=n.features;for(r=0;r100?(clearInterval(a),r("Unexpected error while fetching from "+t)):void i++}),50)}))}for(var o=0;o0&&(n.push(i),i=[])}return i.length>0&&n.push(i),n},e.makeLine=function(t){return 1===t.length?{type:"LineString",coordinates:t[0]}:{type:"MultiLineString",coordinates:t}},e.makePolygon=function(t){if(1===t.length)return{type:"Polygon",coordinates:t};for(var e=new Array(t.length),n=0;n1||g<0||g>1?null:{x:t+l*g,y:e+f*g}}function l(t,e,n,r,i){var a=r*t+i*e;if(a<0)return r*r+i*i;if(a>n){var o=r-t,s=i-e;return o*o+s*s}var l=r*e-i*t;return l*l/n}e.segmentsIntersect=s,e.segmentDistance=function(t,e,n,r,i,a,o,c){if(s(t,e,n,r,i,a,o,c))return 0;var u=n-t,f=r-e,h=o-i,d=c-a,p=u*u+f*f,g=h*h+d*d,v=Math.min(l(u,f,p,i-t,a-e),l(u,f,p,o-t,c-e),l(h,d,g,t-i,e-a),l(h,d,g,n-i,r-a));return Math.sqrt(v)},e.getTextLocation=function(t,e,n,s){if(t===i&&s===a||(r={},i=t,a=s),r[n])return r[n];var l=t.getPointAtLength(o(n-s/2,e)),c=t.getPointAtLength(o(n+s/2,e)),u=Math.atan((c.y-l.y)/(c.x-l.x)),f=t.getPointAtLength(o(n,e)),h={x:(4*f.x+l.x+c.x)/6,y:(4*f.y+l.y+c.y)/6,theta:u};return r[n]=h,h},e.clearLocationCache=function(){i=null},e.getVisibleSegment=function(t,e,n){var r,i,a=e.left,o=e.right,s=e.top,l=e.bottom,c=0,u=t.getTotalLength(),f=u;function h(e){var n=t.getPointAtLength(e);0===e?r=n:e===u&&(i=n);var c=n.xo?n.x-o:0,f=n.yl?n.y-l:0;return Math.sqrt(c*c+f*f)}for(var d=h(c);d;){if((c+=d+n)>f)return;d=h(c)}for(d=h(f);d;){if(c>(f-=d+n))return;d=h(f)}return{min:c,max:f,len:f-c,total:u,isClosed:0===c&&f===u&&Math.abs(r.x-i.x)<.1&&Math.abs(r.y-i.y)<.1}},e.findPointOnPath=function(t,e,n,r){for(var i,a,o,s=(r=r||{}).pathLength||t.getTotalLength(),l=r.tolerance||.001,c=r.iterationLimit||30,u=t.getPointAtLength(0)[n]>t.getPointAtLength(s)[n]?-1:1,f=0,h=0,d=s;f0?d=i:h=i,f++}return a}},81697:function(t,e,n){"use strict";var 
r=n(92770),i=n(84267),a=n(25075),o=n(21081),s=n(22399).defaultLine,l=n(73627).isArrayOrTypedArray,c=a(s),u=1;function f(t,e){var n=t;return n[3]*=e,n}function h(t){if(r(t))return c;var e=a(t);return e.length?e:c}function d(t){return r(t)?t:u}t.exports={formatColor:function(t,e,n){var r,i,s,p,g,v=t.color,m=l(v),y=l(e),b=o.extractOpts(t),x=[];if(r=void 0!==b.colorscale?o.makeColorScaleFuncFromTrace(t):h,i=m?function(t,e){return void 0===t[e]?c:a(r(t[e]))}:h,s=y?function(t,e){return void 0===t[e]?u:d(t[e])}:d,m||y)for(var w=0;w1?(n*t+n*e)/n:t+e,i=String(r).length;if(i>16){var a=String(e).length;if(i>=String(t).length+a){var o=parseFloat(r).toPrecision(12);-1===o.indexOf("e+")&&(r=+o)}}return r}},71828:function(t,e,n){"use strict";var r=n(39898),i=n(84096).g0,a=n(60721).WU,o=n(92770),s=n(50606),l=s.FP_SAFE,c=-l,u=s.BADNUM,f=t.exports={};f.adjustFormat=function(t){return!t||/^\d[.]\df/.test(t)||/[.]\d%/.test(t)?t:"0.f"===t?"~f":/^\d%/.test(t)?"~%":/^\ds/.test(t)?"~s":!/^[~,.0$]/.test(t)&&/[&fps]/.test(t)?"~"+t:t};var h={};f.warnBadFormat=function(t){var e=String(t);h[e]||(h[e]=1,f.warn('encountered bad format: "'+e+'"'))},f.noFormat=function(t){return String(t)},f.numberFormat=function(t){var e;try{e=a(f.adjustFormat(t))}catch(n){return f.warnBadFormat(t),f.noFormat}return e},f.nestedProperty=n(65487),f.keyedContainer=n(66636),f.relativeAttr=n(6962),f.isPlainObject=n(41965),f.toLogRange=n(58163),f.relinkPrivateKeys=n(51332);var d=n(73627);f.isTypedArray=d.isTypedArray,f.isArrayOrTypedArray=d.isArrayOrTypedArray,f.isArray1D=d.isArray1D,f.ensureArray=d.ensureArray,f.concat=d.concat,f.maxRowLength=d.maxRowLength,f.minRowLength=d.minRowLength;var p=n(64872);f.mod=p.mod,f.modHalf=p.modHalf;var g=n(96554);f.valObjectMeta=g.valObjectMeta,f.coerce=g.coerce,f.coerce2=g.coerce2,f.coerceFont=g.coerceFont,f.coercePattern=g.coercePattern,f.coerceHoverinfo=g.coerceHoverinfo,f.coerceSelectionMarkerOpacity=g.coerceSelectionMarkerOpacity,f.validate=g.validate;var v=n(41631);f.dateTime2ms=v.dateTime2ms,f.isDateTime=v.isDateTime,f.ms2DateTime=v.ms2DateTime,f.ms2DateTimeLocal=v.ms2DateTimeLocal,f.cleanDate=v.cleanDate,f.isJSDate=v.isJSDate,f.formatDate=v.formatDate,f.incrementMonth=v.incrementMonth,f.dateTick0=v.dateTick0,f.dfltRange=v.dfltRange,f.findExactDates=v.findExactDates,f.MIN_MS=v.MIN_MS,f.MAX_MS=v.MAX_MS;var m=n(65888);f.findBin=m.findBin,f.sorterAsc=m.sorterAsc,f.sorterDes=m.sorterDes,f.distinctVals=m.distinctVals,f.roundUp=m.roundUp,f.sort=m.sort,f.findIndexOfMin=m.findIndexOfMin,f.sortObjectKeys=n(78607);var y=n(80038);f.aggNums=y.aggNums,f.len=y.len,f.mean=y.mean,f.median=y.median,f.midRange=y.midRange,f.variance=y.variance,f.stdev=y.stdev,f.interp=y.interp;var b=n(35657);f.init2dArray=b.init2dArray,f.transposeRagged=b.transposeRagged,f.dot=b.dot,f.translationMatrix=b.translationMatrix,f.rotationMatrix=b.rotationMatrix,f.rotationXYMatrix=b.rotationXYMatrix,f.apply3DTransform=b.apply3DTransform,f.apply2DTransform=b.apply2DTransform,f.apply2DTransform2=b.apply2DTransform2,f.convertCssMatrix=b.convertCssMatrix,f.inverseTransformMatrix=b.inverseTransformMatrix;var x=n(26348);f.deg2rad=x.deg2rad,f.rad2deg=x.rad2deg,f.angleDelta=x.angleDelta,f.angleDist=x.angleDist,f.isFullCircle=x.isFullCircle,f.isAngleInsideSector=x.isAngleInsideSector,f.isPtInsideSector=x.isPtInsideSector,f.pathArc=x.pathArc,f.pathSector=x.pathSector,f.pathAnnulus=x.pathAnnulus;var 
w=n(99863);f.isLeftAnchor=w.isLeftAnchor,f.isCenterAnchor=w.isCenterAnchor,f.isRightAnchor=w.isRightAnchor,f.isTopAnchor=w.isTopAnchor,f.isMiddleAnchor=w.isMiddleAnchor,f.isBottomAnchor=w.isBottomAnchor;var _=n(87642);f.segmentsIntersect=_.segmentsIntersect,f.segmentDistance=_.segmentDistance,f.getTextLocation=_.getTextLocation,f.clearLocationCache=_.clearLocationCache,f.getVisibleSegment=_.getVisibleSegment,f.findPointOnPath=_.findPointOnPath;var k=n(1426);f.extendFlat=k.extendFlat,f.extendDeep=k.extendDeep,f.extendDeepAll=k.extendDeepAll,f.extendDeepNoArrays=k.extendDeepNoArrays;var T=n(47769);f.log=T.log,f.warn=T.warn,f.error=T.error;var M=n(30587);f.counterRegex=M.counter;var A=n(79990);f.throttle=A.throttle,f.throttleDone=A.done,f.clearThrottle=A.clear;var S=n(24401);function E(t){var e={};for(var n in t)for(var r=t[n],i=0;il||t=e)&&o(t)&&t>=0&&t%1===0},f.noop=n(64213),f.identity=n(23389),f.repeat=function(t,e){for(var n=new Array(e),r=0;rn?Math.max(n,Math.min(e,t)):Math.max(e,Math.min(n,t))},f.bBoxIntersect=function(t,e,n){return n=n||0,t.left<=e.right+n&&e.left<=t.right+n&&t.top<=e.bottom+n&&e.top<=t.bottom+n},f.simpleMap=function(t,e,n,r,i){for(var a=t.length,o=new Array(a),s=0;s=Math.pow(2,n)?i>10?(f.warn("randstr failed uniqueness"),l):t(e,n,r,(i||0)+1):l},f.OptionControl=function(t,e){t||(t={}),e||(e="opt");var n={optionList:[],_newoption:function(r){r[e]=t,n[r.name]=r,n.optionList.push(r)}};return n["_"+e]=t,n},f.smooth=function(t,e){if((e=Math.round(e)||0)<2)return t;var n,r,i,a,o=t.length,s=2*o,l=2*e-1,c=new Array(l),u=new Array(o);for(n=0;n=s&&(i-=s*Math.floor(i/s)),i<0?i=-1-i:i>=o&&(i=s-1-i),a+=t[i]*c[r];u[n]=a}return u},f.syncOrAsync=function(t,e,n){var r;function i(){return f.syncOrAsync(t,e,n)}for(;t.length;)if((r=(0,t.splice(0,1)[0])(e))&&r.then)return r.then(i);return n&&n(e)},f.stripTrailingSlash=function(t){return"/"===t.substr(-1)?t.substr(0,t.length-1):t},f.noneOrAll=function(t,e,n){if(t){var r,i,a=!1,o=!0;for(r=0;r0?e:0}))},f.fillArray=function(t,e,n,r){if(r=r||f.identity,f.isArrayOrTypedArray(t))for(var i=0;i1?i+o[1]:"";if(a&&(o.length>1||s.length>4||n))for(;r.test(s);)s=s.replace(r,"$1"+a+"$2");return s+l},f.TEMPLATE_STRING_REGEX=/%{([^\s%{}:]*)([:|\|][^}]*)?}/g;var D=/^\w*$/;f.templateString=function(t,e){var n={};return t.replace(f.TEMPLATE_STRING_REGEX,(function(t,r){var i;return D.test(r)?i=e[r]:(n[r]=n[r]||f.nestedProperty(e,r).get,i=n[r]()),f.isValidTextValue(i)?i:""}))};var R={max:10,count:0,name:"hovertemplate"};f.hovertemplateString=function(){return B.apply(R,arguments)};var z={max:10,count:0,name:"texttemplate"};f.texttemplateString=function(){return B.apply(z,arguments)};var N=/^(\S+)([\*\/])(-?\d+(\.\d+)?)$/,j={max:10,count:0,name:"texttemplate",parseMultDiv:!0};f.texttemplateStringForShapes=function(){return B.apply(j,arguments)};var F=/^[:|\|]/;function B(t,e,n){var r=this,a=arguments;e||(e={});var o={};return t.replace(f.TEMPLATE_STRING_REGEX,(function(t,s,l){var c="_xother"===s||"_yother"===s,u="_xother_"===s||"_yother_"===s,h="xother_"===s||"yother_"===s,d="xother"===s||"yother"===s||c||h||u,p=s;(c||u)&&(p=p.substring(1)),(h||u)&&(p=p.substring(0,p.length-1));var g,v,m,y=null,b=null;if(r.parseMultDiv){var x=function(t){var e=t.match(N);return e?{key:e[1],op:e[2],number:Number(e[3])}:{key:t,op:null,number:null}}(p);p=x.key,y=x.op,b=x.number}if(d){if(void 0===(g=e[p]))return""}else for(m=3;m=48&&o<=57,c=s>=48&&s<=57;if(l&&(r=10*r+o-48),c&&(i=10*i+s-48),!l||!c){if(r!==i)return r-i;if(o!==s)return o-s}}return i-r};var 
U=2e9;f.seedPseudoRandom=function(){U=2e9},f.pseudoRandom=function(){var t=U;return U=(69069*U+1)%4294967296,Math.abs(U-t)<429496729?f.pseudoRandom():U/4294967296},f.fillText=function(t,e,n){var r=Array.isArray(n)?function(t){n.push(t)}:function(t){n.text=t},i=f.extractOption(t,e,"htx","hovertext");if(f.isValidTextValue(i))return r(i);var a=f.extractOption(t,e,"tx","text");return f.isValidTextValue(a)?r(a):void 0},f.isValidTextValue=function(t){return t||0===t},f.formatPercent=function(t,e){e=e||0;for(var n=(Math.round(100*t*Math.pow(10,e))*Math.pow(.1,e)).toFixed(e)+"%",r=0;r1&&(c=1):c=0,f.strTranslate(i-c*(n+o),a-c*(r+s))+f.strScale(c)+(l?"rotate("+l+(e?"":" "+n+" "+r)+")":"")},f.setTransormAndDisplay=function(t,e){t.attr("transform",f.getTextTransform(e)),t.style("display",e.scale?null:"none")},f.ensureUniformFontSize=function(t,e){var n=f.extendFlat({},e);return n.size=Math.max(e.size,t._fullLayout.uniformtext.minsize||0),n},f.join2=function(t,e,n){var r=t.length;return r>1?t.slice(0,-1).join(e)+n+t[r-1]:t.join(e)},f.bigFont=function(t){return Math.round(1.2*t)};var H=f.getFirefoxVersion(),V=null!==H&&H<86;f.getPositionFromD3Event=function(){return V?[r.event.layerX,r.event.layerY]:[r.event.offsetX,r.event.offsetY]}},41965:function(t){"use strict";t.exports=function(t){return window&&window.process&&window.process.versions?"[object Object]"===Object.prototype.toString.call(t):"[object Object]"===Object.prototype.toString.call(t)&&Object.getPrototypeOf(t).hasOwnProperty("hasOwnProperty")}},66636:function(t,e,n){"use strict";var r=n(65487),i=/^\w*$/;t.exports=function(t,e,n,a){var o,s,l;n=n||"name",a=a||"value";var c={};e&&e.length?(l=r(t,e),s=l.get()):s=t,e=e||"";var u={};if(s)for(o=0;o2)return c[e]=2|c[e],h.set(t,null);if(f){for(o=e;o1){var e=["LOG:"];for(t=0;t1){var n=[];for(t=0;t"),"long")}},a.warn=function(){var t;if(r.logging>0){var e=["WARN:"];for(t=0;t0){var n=[];for(t=0;t"),"stick")}},a.error=function(){var t;if(r.logging>0){var e=["ERROR:"];for(t=0;t0){var n=[];for(t=0;t"),"stick")}}},77310:function(t,e,n){"use strict";var r=n(39898);t.exports=function(t,e,n){var i=t.selectAll("g."+n.replace(/\s/g,".")).data(e,(function(t){return t[0].trace.uid}));i.exit().remove(),i.enter().append("g").attr("class",n),i.order();var a=t.classed("rangeplot")?"nodeRangePlot3":"node3";return i.each((function(t){t[0][a]=r.select(this)})),i}},35657:function(t,e,n){"use strict";var r=n(79576);e.init2dArray=function(t,e){for(var n=new Array(t),r=0;re/2?t-Math.round(t/e)*e:t}}},65487:function(t,e,n){"use strict";var r=n(92770),i=n(73627).isArrayOrTypedArray;function a(t,e){return function(){var n,r,o,s,l,c=t;for(s=0;s/g),l=0;la||c===i||cs)&&(!e||!l(t))}:function(t,e){var l=t[0],c=t[1];if(l===i||la||c===i||cs)return!1;var u,f,h,d,p,g=n.length,v=n[0][0],m=n[0][1],y=0;for(u=1;uMath.max(f,v)||c>Math.max(h,m)))if(cu||Math.abs(r(o,h))>i)return!0;return!1},a.filter=function(t,e){var n=[t[0]],r=0,i=0;function o(o){t.push(o);var s=n.length,l=r;n.splice(i+1);for(var c=l+1;c1&&o(t.pop()),{addPt:o,raw:t,filtered:n}}},79749:function(t,e,n){"use strict";var r=n(58617),i=n(98580);t.exports=function(t,e,a){var o=t._fullLayout,s=!0;return o._glcanvas.each((function(r){if(r.regl)r.regl.preloadCachedCode(a);else 
if(!r.pick||o._has("parcoords")){try{r.regl=i({canvas:this,attributes:{antialias:!r.pick,preserveDrawingBuffer:!0},pixelRatio:t._context.plotGlPixelRatio||n.g.devicePixelRatio,extensions:e||[],cachedCode:a||{}})}catch(l){s=!1}r.regl||(s=!1),s&&this.addEventListener("webglcontextlost",(function(e){t&&t.emit&&t.emit("plotly_webglcontextlost",{event:e,layer:r.key})}),!1)}})),s||r({container:o._glcontainer.node()}),s}},45142:function(t,e,n){"use strict";var r=n(92770),i=n(35791);t.exports=function(t){var e;if("string"!==typeof(e=t&&t.hasOwnProperty("userAgent")?t.userAgent:function(){var t;return"undefined"!==typeof navigator&&(t=navigator.userAgent),t&&t.headers&&"string"===typeof t.headers["user-agent"]&&(t=t.headers["user-agent"]),t}()))return!0;var n=i({ua:{headers:{"user-agent":e}},tablet:!0,featureDetect:!1});if(!n)for(var a=e.split(" "),o=1;o-1;s--){var l=a[s];if("Version/"===l.substr(0,8)){var c=l.substr(8).split(".")[0];if(r(c)&&(c=+c),c>=13)return!0}}return n}},75138:function(t){"use strict";t.exports=function(t,e){if(e instanceof RegExp){for(var n=e.toString(),r=0;ri.queueLength&&(t.undoQueue.queue.shift(),t.undoQueue.index--))},startSequence:function(t){t.undoQueue=t.undoQueue||{index:0,queue:[],sequence:!1},t.undoQueue.sequence=!0,t.undoQueue.beginSequence=!0},stopSequence:function(t){t.undoQueue=t.undoQueue||{index:0,queue:[],sequence:!1},t.undoQueue.sequence=!1,t.undoQueue.beginSequence=!1},undo:function(t){var e,n;if(!(void 0===t.undoQueue||isNaN(t.undoQueue.index)||t.undoQueue.index<=0)){for(t.undoQueue.index--,e=t.undoQueue.queue[t.undoQueue.index],t.undoQueue.inSequence=!0,n=0;n=t.undoQueue.queue.length)){for(e=t.undoQueue.queue[t.undoQueue.index],t.undoQueue.inSequence=!0,n=0;ne}function f(t,e){return t>=e}e.findBin=function(t,e,n){if(r(e.start))return n?Math.ceil((t-e.start)/e.size-s)-1:Math.floor((t-e.start)/e.size+s);var a,o,h=0,d=e.length,p=0,g=d>1?(e[d-1]-e[0])/(d-1):1;for(o=g>=0?n?l:c:n?f:u,t+=g*s*(n?-1:1)*(g>=0?1:-1);h90&&i.log("Long binary search..."),h-1},e.sorterAsc=function(t,e){return t-e},e.sorterDes=function(t,e){return e-t},e.distinctVals=function(t){var n,r=t.slice();for(r.sort(e.sorterAsc),n=r.length-1;n>-1&&r[n]===o;n--);for(var i,a=r[n]-r[0]||1,s=a/(n||1)/1e4,l=[],c=0;c<=n;c++){var u=r[c],f=u-i;void 0===i?(l.push(u),i=u):f>s&&(a=Math.min(a,f),l.push(u),i=u)}return{vals:l,minDiff:a}},e.roundUp=function(t,e,n){for(var r,i=0,a=e.length-1,o=0,s=n?0:1,l=n?1:0,c=n?Math.ceil:Math.floor;i0&&(r=1),n&&r)return t.sort(e)}return r?t:t.reverse()},e.findIndexOfMin=function(t,e){e=e||a;for(var n,r=1/0,i=0;ia.length)&&(o=a.length),r(n)||(n=!1),i(a[0])){for(l=new Array(o),s=0;st.length-1)return t[t.length-1];var n=e%1;return n*t[Math.ceil(e)]+(1-n)*t[Math.floor(e)]}},78614:function(t,e,n){"use strict";var r=n(25075);t.exports=function(t){return t?r(t):[0,0,0,1]}},3883:function(t,e,n){"use strict";var r=n(32396),i=n(91424),a=n(71828),o=null;t.exports=function(){if(null!==o)return o;o=!1;var t=a.isIE()||a.isSafari()||a.isIOS();if(window.navigator.userAgent&&!t){var e=Array.from(r.CSS_DECLARATIONS).reverse(),n=window.CSS&&window.CSS.supports||window.supportsCSS;if("function"===typeof n)o=e.some((function(t){return n.apply(null,t)}));else{var s=i.tester.append("image").attr("style",r.STYLE),l=window.getComputedStyle(s.node()).imageRendering;o=e.some((function(t){var e=t[1];return l===e||l===e.toLowerCase()})),s.remove()}}return o}},63893:function(t,e,n){"use strict";var 
r=n(39898),i=n(71828),a=i.strTranslate,o=n(77922),s=n(18783).LINE_SPACING,l=/([^$]*)([$]+[^$]*[$]+)([^$]*)/;e.convertToTspans=function(t,n,v){var S=t.text(),E=!t.attr("data-notex")&&n&&n._context.typesetMath&&"undefined"!==typeof MathJax&&S.match(l),O=r.select(t.node().parentNode);if(!O.empty()){var L=t.attr("class")?t.attr("class").split(" ")[0]:"text";return L+="-math",O.selectAll("svg."+L).remove(),O.selectAll("g."+L+"-group").remove(),t.style("display",null).attr({"data-unformatted":S,"data-math":"N"}),E?(n&&n._promises||[]).push(new Promise((function(e){t.style("display","none");var n=parseInt(t.node().style.fontSize,10),o={fontSize:n};!function(t,e,n){var a,o,s,l,h=parseInt((MathJax.version||"").split(".")[0]);if(2===h||3===h){var d=function(){return o=i.extendDeepAll({},MathJax.Hub.config),s=MathJax.Hub.processSectionDelay,void 0!==MathJax.Hub.processSectionDelay&&(MathJax.Hub.processSectionDelay=0),MathJax.Hub.Config({messageStyle:"none",tex2jax:{inlineMath:f},displayAlign:"left"})},p=function(){o=i.extendDeepAll({},MathJax.config),MathJax.config.tex||(MathJax.config.tex={}),MathJax.config.tex.inlineMath=f},g=function(){if("SVG"!==(a=MathJax.Hub.config.menuSettings.renderer))return MathJax.Hub.setRenderer("SVG")},v=function(){"svg"!==(a=MathJax.config.startup.output)&&(MathJax.config.startup.output="svg")},m=function(){var n="math-output-"+i.randstr({},64),a=(l=r.select("body").append("div").attr({id:n}).style({visibility:"hidden",position:"absolute","font-size":e.fontSize+"px"}).text(t.replace(c,"\\lt ").replace(u,"\\gt "))).node();return 2===h?MathJax.Hub.Typeset(a):MathJax.typeset([a])},y=function(){var e=l.select(2===h?".MathJax_SVG":".MathJax"),a=!e.empty()&&l.select("svg").node();if(a){var o,s=a.getBoundingClientRect();o=2===h?r.select("body").select("#MathJax_SVG_glyphs"):e.select("defs"),n(e,o,s)}else i.log("There was an error in the tex syntax.",t),n();l.remove()},b=function(){if("SVG"!==a)return MathJax.Hub.setRenderer(a)},x=function(){"svg"!==a&&(MathJax.config.startup.output=a)},w=function(){return void 0!==s&&(MathJax.Hub.processSectionDelay=s),MathJax.Hub.Config(o)},_=function(){MathJax.config=o};2===h?MathJax.Hub.Queue(d,g,m,y,b,w):3===h&&(p(),v(),MathJax.startup.defaultReady(),MathJax.startup.promise.then((function(){m(),y(),x(),_()})))}else i.warn("No MathJax version:",MathJax.version)}(E[2],o,(function(r,i,o){O.selectAll("svg."+L).remove(),O.selectAll("g."+L+"-group").remove();var s=r&&r.select("svg");if(!s||!s.node())return I(),void e();var l=O.append("g").classed(L+"-group",!0).attr({"pointer-events":"none","data-unformatted":S,"data-math":"Y"});l.node().appendChild(s.node()),i&&i.node()&&s.node().insertBefore(i.node().cloneNode(!0),s.node().firstChild);var c=o.width,u=o.height;s.attr({class:L,height:u,preserveAspectRatio:"xMinYMin meet"}).style({overflow:"visible","pointer-events":"none"});var f=t.node().style.fill||"black",h=s.select("g");h.attr({fill:f,stroke:f});var d=h.node().getBoundingClientRect(),p=d.width,g=d.height;(p>c||g>u)&&(s.style("overflow","hidden"),p=(d=s.node().getBoundingClientRect()).width,g=d.height);var m=+t.attr("x"),y=+t.attr("y"),b=-(n||t.node().getBoundingClientRect().height)/4;if("y"===L[0])l.attr({transform:"rotate("+[-90,m,y]+")"+a(-p/2,b-g/2)});else if("l"===L[0])y=b-g/2;else if("a"===L[0]&&0!==L.indexOf("atitle"))m=0,y=b;else{var x=t.attr("text-anchor");m-=p*("middle"===x?.5:"end"===x?1:0),y=y+b-g/2}s.attr({x:m,y:y}),v&&v.call(t,l),e(l)}))}))):I(),t}function 
I(){O.empty()||(L=t.attr("class")+"-math",O.select("svg."+L).remove()),t.text("").style("white-space","pre");var n=function(t,e){e=e.replace(m," ");var n,a=!1,l=[],c=-1;function u(){c++;var e=document.createElementNS(o.svg,"tspan");r.select(e).attr({class:"line",dy:c*s+"em"}),t.appendChild(e),n=e;var i=l;if(l=[{node:e}],i.length>1)for(var a=1;a doesnt match end tag <"+t+">. Pretending it did match.",e),n=l[l.length-1].node}else i.log("Ignoring unexpected end tag .",e)}var E=x.test(e);E?u():(n=t,l=[{node:t}]);for(var O=e.split(y),L=0;L|>|>)/g,f=[["$","$"],["\\(","\\)"]],h={sup:"font-size:70%",sub:"font-size:70%",b:"font-weight:bold",i:"font-style:italic",a:"cursor:pointer",span:"",em:"font-style:italic;font-weight:bold"},d={sub:"0.3em",sup:"-0.6em"},p={sub:"-0.21em",sup:"0.42em"},g="\u200b",v=["http:","https:","mailto:","",void 0,":"],m=e.NEWLINES=/(\r\n?|\n)/g,y=/(<[^<>]*>)/,b=/<(\/?)([^ >]*)(\s+(.*))?>/i,x=//i;e.BR_TAG_ALL=//gi;var w=/(^|[\s"'])style\s*=\s*("([^"]*);?"|'([^']*);?')/i,_=/(^|[\s"'])href\s*=\s*("([^"]*)"|'([^']*)')/i,k=/(^|[\s"'])target\s*=\s*("([^"\s]*)"|'([^'\s]*)')/i,T=/(^|[\s"'])popup\s*=\s*("([\w=,]*)"|'([\w=,]*)')/i;function M(t,e){if(!t)return null;var n=t.match(e),r=n&&(n[3]||n[4]);return r&&C(r)}var A=/(^|;)\s*color:/;e.plainText=function(t,e){for(var n=void 0!==(e=e||{}).len&&-1!==e.len?e.len:1/0,r=void 0!==e.allowedTags?e.allowedTags:["br"],i=t.split(y),a=[],o="",s=0,l=0;l3?a.push(c.substr(0,d-3)+"..."):a.push(c.substr(0,d));break}o=""}}return a.join("")};var S={mu:"\u03bc",amp:"&",lt:"<",gt:">",nbsp:"\xa0",times:"\xd7",plusmn:"\xb1",deg:"\xb0"},E=/&(#\d+|#x[\da-fA-F]+|[a-z]+);/g;function C(t){return t.replace(E,(function(t,e){return("#"===e.charAt(0)?function(t){if(!(t>1114111)){var e=String.fromCodePoint;if(e)return e(t);var n=String.fromCharCode;return t<=65535?n(t):n(55232+(t>>10),t%1024+56320)}}("x"===e.charAt(1)?parseInt(e.substr(2),16):parseInt(e.substr(1),10)):S[e])||t}))}function P(t){var e=encodeURI(decodeURI(t)),n=document.createElement("a"),r=document.createElement("a");n.href=t,r.href=e;var i=n.protocol,a=r.protocol;return-1!==v.indexOf(i)&&-1!==v.indexOf(a)?e:""}function O(t,e,n){var r,a,o,s=n.horizontalAlign,l=n.verticalAlign||"top",c=t.node().getBoundingClientRect(),u=e.node().getBoundingClientRect();return a="bottom"===l?function(){return c.bottom-r.height}:"middle"===l?function(){return c.top+(c.height-r.height)/2}:function(){return c.top},o="right"===s?function(){return c.right-r.width}:"center"===s?function(){return c.left+(c.width-r.width)/2}:function(){return c.left},function(){r=this.node().getBoundingClientRect();var t=o()-u.left,e=a()-u.top,s=n.gd||{};if(n.gd){s._fullLayout._calcInverseTransform(s);var l=i.apply3DTransform(s._fullLayout._invTransform)(t,e);t=l[0],e=l[1]}return this.style({top:e+"px",left:t+"px","z-index":1e3}),this}}e.convertEntities=C,e.sanitizeHTML=function(t){t=t.replace(m," ");for(var e=document.createElement("p"),n=e,i=[],a=t.split(y),o=0;oa.ts+e?l():a.timer=setTimeout((function(){l(),a.timer=null}),e)},e.done=function(t){var e=n[t];return e&&e.timer?new Promise((function(t){var n=e.onDone;e.onDone=function(){n&&n(),t(),e.onDone=null}})):Promise.resolve()},e.clear=function(t){if(t)r(n[t]),delete n[t];else for(var i in n)e.clear(i)}},58163:function(t,e,n){"use strict";var r=n(92770);t.exports=function(t,e){if(t>0)return Math.log(t)/Math.LN10;var n=Math.log(Math.min(e[0],e[1]))/Math.LN10;return r(n)||(n=Math.log(Math.max(e[0],e[1]))/Math.LN10-6),n}},90973:function(t,e,n){"use strict";var 
r=t.exports={},i=n(78776).locationmodeToLayer,a=n(96892).zL;r.getTopojsonName=function(t){return[t.scope.replace(/ /g,"-"),"_",t.resolution.toString(),"m"].join("")},r.getTopojsonPath=function(t,e){return t+e+".json"},r.getTopojsonFeatures=function(t,e){var n=i[t.locationmode],r=e.objects[n];return a(e,r).features}},37815:function(t){"use strict";t.exports={moduleType:"locale",name:"en-US",dictionary:{"Click to enter Colorscale title":"Click to enter Colorscale title"},format:{date:"%m/%d/%Y"}}},92177:function(t){"use strict";t.exports={moduleType:"locale",name:"en",dictionary:{"Click to enter Colorscale title":"Click to enter Colourscale title"},format:{days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],periods:["AM","PM"],dateTime:"%a %b %e %X %Y",date:"%d/%m/%Y",time:"%H:%M:%S",decimal:".",thousands:",",grouping:[3],currency:["$",""],year:"%Y",month:"%b %Y",dayMonth:"%b %-d",dayMonthYear:"%b %-d, %Y"}}},14458:function(t,e,n){"use strict";var r=n(73972);t.exports=function(t){for(var e,n,i=r.layoutArrayContainers,a=r.layoutArrayRegexes,o=t.split("[")[0],s=0;s0&&o.log("Clearing previous rejected promises from queue."),t._promises=[]},e.cleanLayout=function(t){var n,r;t||(t={}),t.xaxis1&&(t.xaxis||(t.xaxis=t.xaxis1),delete t.xaxis1),t.yaxis1&&(t.yaxis||(t.yaxis=t.yaxis1),delete t.yaxis1),t.scene1&&(t.scene||(t.scene=t.scene1),delete t.scene1);var a=(s.subplotsRegistry.cartesian||{}).attrRegex,l=(s.subplotsRegistry.polar||{}).attrRegex,f=(s.subplotsRegistry.ternary||{}).attrRegex,h=(s.subplotsRegistry.gl3d||{}).attrRegex,g=Object.keys(t);for(n=0;n3?(I.x=1.02,I.xanchor="left"):I.x<-2&&(I.x=-.02,I.xanchor="right"),I.y>3?(I.y=1.02,I.yanchor="bottom"):I.y<-2&&(I.y=-.02,I.yanchor="top")),p(t),"rotate"===t.dragmode&&(t.dragmode="orbit"),c.clean(t),t.template&&t.template.layout&&e.cleanLayout(t.template.layout),t},e.cleanData=function(t){for(var n=0;n0)return t.substr(0,e)}e.hasParent=function(t,e){for(var n=x(e);n;){if(n in t)return!0;n=x(n)}return!1};var w=["x","y","z"];e.clearAxisTypes=function(t,e,n){for(var r=0;r1&&a.warn("Full array edits are incompatible with other edits",f);var y=n[""][""];if(c(y))e.set(null);else{if(!Array.isArray(y))return a.warn("Unrecognized full array edit value",f,y),!0;e.set(y)}return!g&&(h(v,m),d(t),!0)}var b,x,w,_,k,T,M,A,S=Object.keys(n).map(Number).sort(o),E=e.get(),C=E||[],P=u(m,f).get(),O=[],L=-1,I=C.length;for(b=0;bC.length-(M?0:1))a.warn("index out of range",f,w);else if(void 0!==T)k.length>1&&a.warn("Insertion & removal are incompatible with edits to the same index.",f,w),c(T)?O.push(w):M?("add"===T&&(T={}),C.splice(w,0,T),P&&P.splice(w,0,{})):a.warn("Unrecognized full object edit value",f,w,T),-1===L&&(L=w);else for(x=0;x=0;b--)C.splice(O[b],1),P&&P.splice(O[b],1);if(C.length?E||e.set(C):e.set(null),g)return!1;if(h(v,m),p!==i){var D;if(-1===L)D=S;else{for(I=Math.max(C.length,I),D=[],b=0;b=L);b++)D.push(w);for(b=L;b=t.data.length||i<-t.data.length)throw new Error(n+" must be valid indices for gd.data.");if(e.indexOf(i,r+1)>-1||i>=0&&e.indexOf(-t.data.length+i)>-1||i<0&&e.indexOf(t.data.length+i)>-1)throw new Error("each index in "+n+" must be unique.")}}function L(t,e,n){if(!Array.isArray(t.data))throw new Error("gd.data must be an array.");if("undefined"===typeof e)throw new 
Error("currentIndices is a required argument.");if(Array.isArray(e)||(e=[e]),O(t,e,"currentIndices"),"undefined"===typeof n||Array.isArray(n)||(n=[n]),"undefined"!==typeof n&&O(t,n,"newIndices"),"undefined"!==typeof n&&e.length!==n.length)throw new Error("current and new indices must be of equal length.")}function I(t,e,n,r,a){!function(t,e,n,r){var i=o.isPlainObject(r);if(!Array.isArray(t.data))throw new Error("gd.data must be an array");if(!o.isPlainObject(e))throw new Error("update must be a key:value object");if("undefined"===typeof n)throw new Error("indices must be an integer or array of integers");for(var a in O(t,n,"indices"),e){if(!Array.isArray(e[a])||e[a].length!==n.length)throw new Error("attribute "+a+" must be an array of length equal to indices array length");if(i&&(!(a in r)||!Array.isArray(r[a])||r[a].length!==e[a].length))throw new Error("when maxPoints is set as a key:value object it must contain a 1:1 corrispondence with the keys and number of traces in the update object")}}(t,e,n,r);for(var l=function(t,e,n,r){var a,l,c,u,f,h=o.isPlainObject(r),d=[];for(var p in Array.isArray(n)||(n=[n]),n=P(n,t.data.length-1),e)for(var g=0;g-1&&-1===n.indexOf("grouptitlefont")?l(n,n.replace("titlefont","title.font")):n.indexOf("titleposition")>-1?l(n,n.replace("titleposition","title.position")):n.indexOf("titleside")>-1?l(n,n.replace("titleside","title.side")):n.indexOf("titleoffset")>-1&&l(n,n.replace("titleoffset","title.offset")):l(n,n.replace("title","title.text"));function l(e,n){t[n]=t[e],delete t[e]}}function U(t,e,n){t=o.getGraphDiv(t),w.clearPromiseQueue(t);var r={};if("string"===typeof e)r[e]=n;else{if(!o.isPlainObject(e))return o.warn("Relayout fail.",e,n),Promise.reject();r=o.extendFlat({},e)}Object.keys(r).length&&(t.changed=!0);var i=Y(t,r),a=i.flags;a.calc&&(t.calcdata=void 0);var s=[h.previousPromises];a.layoutReplot?s.push(_.layoutReplot):Object.keys(r).length&&(H(t,a,i)||h.supplyDefaults(t),a.legend&&s.push(_.doLegend),a.layoutstyle&&s.push(_.layoutStyles),a.axrange&&V(s,i.rangesAltered),a.ticks&&s.push(_.doTicksRelayout),a.modebar&&s.push(_.doModeBar),a.camera&&s.push(_.doCamera),a.colorbars&&s.push(_.doColorBars),s.push(A)),s.push(h.rehover,h.redrag,h.reselect),c.add(t,U,[t,i.undoit],U,[t,i.redoit]);var l=o.syncOrAsync(s,t);return l&&l.then||(l=Promise.resolve(t)),l.then((function(){return t.emit("plotly_relayout",i.eventData),t}))}function H(t,e,n){var r=t._fullLayout;if(!e.axrange)return!1;for(var i in e)if("axrange"!==i&&e[i])return!1;for(var a in n.rangesAltered){var o=d.id2name(a),s=t.layout[o],l=r[o];if(l.autorange=s.autorange,s.range&&(l.range=s.range.slice()),l.cleanRange(),l._matchGroup)for(var c in l._matchGroup)if(c!==a){var u=r[d.id2name(c)];u.autorange=l.autorange,u.range=l.range.slice(),u._input.range=l.range.slice()}}return!0}function V(t,e){var n=e?function(t){var n=[];for(var r in e){var i=d.getFromId(t,r);if(n.push(r),-1!==(i.ticklabelposition||"").indexOf("inside")&&i._anchorAxis&&n.push(i._anchorAxis._id),i._matchGroup)for(var a in i._matchGroup)e[a]||n.push(a)}return d.draw(t,n,{skipTitle:!0})}:function(t){return d.draw(t,"redraw")};t.push(y,_.doAutoRangeAndConstraints,n,_.drawData,_.finalDraw)}var q=/^[xyz]axis[0-9]*\.range(\[[0|1]\])?$/,G=/^[xyz]axis[0-9]*\.autorange$/,W=/^[xyz]axis[0-9]*\.domain(\[[0|1]\])?$/;function Y(t,e){var n,r,i,a=t.layout,l=t._fullLayout,c=l._guiEditing,h=N(l._preGUI,c),p=Object.keys(e),g=d.list(t),v=o.extendDeepAll({},e),m={};for(B(e),p=Object.keys(e),r=0;r0&&"string"!==typeof I.parts[R];)R--;var 
j=I.parts[R],F=I.parts[R-1]+"."+j,U=I.parts.slice(0,R).join("."),H=s(t.layout,U).get(),V=s(l,U).get(),Y=I.get();if(void 0!==D){A[L]=D,S[L]="reverse"===j?D:z(Y);var X=f.getLayoutValObject(l,I.parts);if(X&&X.impliedEdits&&null!==D)for(var K in X.impliedEdits)E(o.relativeAttr(L,K),X.impliedEdits[K]);if(-1!==["width","height"].indexOf(L))if(D){E("autosize",null);var Z="height"===L?"width":"height";E(Z,l[Z])}else l[L]=t._initialAutoSize[L];else if("autosize"===L)E("width",D?null:l.width),E("height",D?null:l.height);else if(F.match(q))O(F),s(l,U+"._inputRange").set(null);else if(F.match(G)){O(F),s(l,U+"._inputRange").set(null);var J=s(l,U).get();J._inputDomain&&(J._input.domain=J._inputDomain.slice())}else F.match(W)&&s(l,U+"._inputDomain").set(null);if("type"===j){C=H;var Q="linear"===V.type&&"log"===D,tt="log"===V.type&&"linear"===D;if(Q||tt){if(C&&C.range)if(V.autorange)Q&&(C.range=C.range[1]>C.range[0]?[1,2]:[2,1]);else{var et=C.range[0],nt=C.range[1];Q?(et<=0&&nt<=0&&E(U+".autorange",!0),et<=0?et=nt/1e6:nt<=0&&(nt=et/1e6),E(U+".range[0]",Math.log(et)/Math.LN10),E(U+".range[1]",Math.log(nt)/Math.LN10)):(E(U+".range[0]",Math.pow(10,et)),E(U+".range[1]",Math.pow(10,nt)))}else E(U+".autorange",!0);Array.isArray(l._subplots.polar)&&l._subplots.polar.length&&l[I.parts[0]]&&"radialaxis"===I.parts[1]&&delete l[I.parts[0]]._subplot.viewInitial["radialaxis.range"],u.getComponentMethod("annotations","convertCoords")(t,V,D,E),u.getComponentMethod("images","convertCoords")(t,V,D,E)}else E(U+".autorange",!0),E(U+".range",null);s(l,U+"._inputRange").set(null)}else if(j.match(T)){var rt=s(l,L).get(),it=(D||{}).type;it&&"-"!==it||(it="linear"),u.getComponentMethod("annotations","convertCoords")(t,rt,it,E),u.getComponentMethod("images","convertCoords")(t,rt,it,E)}var at=x.containerArrayMatch(L);if(at){n=at.array,r=at.index;var ot=at.property,st=X||{editType:"calc"};""!==r&&""===ot&&(x.isAddVal(D)?S[L]=null:x.isRemoveVal(D)?S[L]=(s(a,n).get()||[])[r]:o.warn("unrecognized full object value",e)),k.update(M,st),m[n]||(m[n]={});var lt=m[n][r];lt||(lt=m[n][r]={}),lt[ot]=D,delete e[L]}else"reverse"===j?(H.range?H.range.reverse():(E(U+".autorange",!0),H.range=[1,0]),V.autorange?M.calc=!0:M.plot=!0):("dragmode"===L&&(!1===D&&!1!==Y||!1!==D&&!1===Y)||l._has("scatter-like")&&l._has("regl")&&"dragmode"===L&&("lasso"===D||"select"===D)&&"lasso"!==Y&&"select"!==Y||l._has("gl2d")?M.plot=!0:X?k.update(M,X):M.calc=!0,I.set(D))}}for(n in m)x.applyContainerArrayChanges(t,h(a,n),m[n],M,h)||(M.plot=!0);for(var ct in P){var ut=(C=d.getFromId(t,ct))&&C._constraintGroup;if(ut)for(var ft in M.calc=!0,ut)P[ft]||(d.getFromId(t,ft)._constraintShrinkable=!0)}return($(t)||e.height||e.width)&&(M.plot=!0),(M.plot||M.calc)&&(M.layoutReplot=!0),{flags:M,rangesAltered:P,undoit:S,redoit:A,eventData:v}}function $(t){var e=t._fullLayout,n=e.width,r=e.height;return t.layout.autosize&&h.plotAutoSize(t,t.layout,e),e.width!==n||e.height!==r}function X(t,n,r,i){t=o.getGraphDiv(t),w.clearPromiseQueue(t),o.isPlainObject(n)||(n={}),o.isPlainObject(r)||(r={}),Object.keys(n).length&&(t.changed=!0),Object.keys(r).length&&(t.changed=!0);var a=w.coerceTraceIndices(t,i),s=F(t,o.extendFlat({},n),a),l=s.flags,u=Y(t,o.extendFlat({},r)),f=u.flags;(l.calc||f.calc)&&(t.calcdata=void 0),l.clearAxisTypes&&w.clearAxisTypes(t,a,r);var 
d=[];f.layoutReplot?d.push(_.layoutReplot):l.fullReplot?d.push(e._doPlot):(d.push(h.previousPromises),H(t,f,u)||h.supplyDefaults(t),l.style&&d.push(_.doTraceStyle),(l.colorbars||f.colorbars)&&d.push(_.doColorBars),f.legend&&d.push(_.doLegend),f.layoutstyle&&d.push(_.layoutStyles),f.axrange&&V(d,u.rangesAltered),f.ticks&&d.push(_.doTicksRelayout),f.modebar&&d.push(_.doModeBar),f.camera&&d.push(_.doCamera),d.push(A)),d.push(h.rehover,h.redrag,h.reselect),c.add(t,X,[t,s.undoit,u.undoit,s.traces],X,[t,s.redoit,u.redoit,s.traces]);var p=o.syncOrAsync(d,t);return p&&p.then||(p=Promise.resolve(t)),p.then((function(){return t.emit("plotly_update",{data:s.eventData,layout:u.eventData}),t}))}function K(t){return function(e){e._fullLayout._guiEditing=!0;var n=t.apply(null,arguments);return e._fullLayout._guiEditing=!1,n}}var Z=[{pattern:/^hiddenlabels/,attr:"legend.uirevision"},{pattern:/^((x|y)axis\d*)\.((auto)?range|title\.text)/},{pattern:/axis\d*\.showspikes$/,attr:"modebar.uirevision"},{pattern:/(hover|drag)mode$/,attr:"modebar.uirevision"},{pattern:/^(scene\d*)\.camera/},{pattern:/^(geo\d*)\.(projection|center|fitbounds)/},{pattern:/^(ternary\d*\.[abc]axis)\.(min|title\.text)$/},{pattern:/^(polar\d*\.radialaxis)\.((auto)?range|angle|title\.text)/},{pattern:/^(polar\d*\.angularaxis)\.rotation/},{pattern:/^(mapbox\d*)\.(center|zoom|bearing|pitch)/},{pattern:/^legend\.(x|y)$/,attr:"editrevision"},{pattern:/^(shapes|annotations)/,attr:"editrevision"},{pattern:/^title\.text$/,attr:"editrevision"}],J=[{pattern:/^selectedpoints$/,attr:"selectionrevision"},{pattern:/(^|value\.)visible$/,attr:"legend.uirevision"},{pattern:/^dimensions\[\d+\]\.constraintrange/},{pattern:/^node\.(x|y|groups)/},{pattern:/^level$/},{pattern:/(^|value\.)name$/},{pattern:/colorbar\.title\.text$/},{pattern:/colorbar\.(x|y)$/,attr:"editrevision"}];function Q(t,e){for(var n=0;n1;)if(r.pop(),void 0!==(n=s(e,r.join(".")+".uirevision").get()))return n;return e.uirevision}function et(t,e){for(var n=0;n=i.length?i[0]:i[t]:i}function l(t){return Array.isArray(a)?t>=a.length?a[0]:a[t]:a}function c(t,e){var n=0;return function(){if(t&&++n===e)return t()}}return void 0===r._frameWaitingCnt&&(r._frameWaitingCnt=0),new Promise((function(a,u){function f(){r._currentFrame&&r._currentFrame.onComplete&&r._currentFrame.onComplete();var e=r._currentFrame=r._frameQueue.shift();if(e){var n=e.name?e.name.toString():null;t._fullLayout._currentFrame=n,r._lastFrameAt=Date.now(),r._timeToNext=e.frameOpts.duration,h.transition(t,e.frame.data,e.frame.layout,w.coerceTraceIndices(t,e.frame.traces),e.frameOpts,e.transitionOpts).then((function(){e.onComplete&&e.onComplete()})),t.emit("plotly_animatingframe",{name:n,frame:e.frame,animation:{frame:e.frameOpts,transition:e.transitionOpts}})}else t.emit("plotly_animated"),window.cancelAnimationFrame(r._animationRaf),r._animationRaf=null}function d(){t.emit("plotly_animating"),r._lastFrameAt=-1/0,r._timeToNext=0,r._runningTransitions=0,r._currentFrame=null,function t(){r._animationRaf=window.requestAnimationFrame(t),Date.now()-r._lastFrameAt>r._timeToNext&&f()}()}var p,g,v=0;function m(t){return Array.isArray(i)?v>=i.length?t.transitionOpts=i[v]:t.transitionOpts=i[0]:t.transitionOpts=i,v++,t}var y=[],b=void 0===e||null===e,x=Array.isArray(e);if(b||x||!o.isPlainObject(e)){if(b||-1!==["string","number"].indexOf(typeof e))for(p=0;p0&&TT)&&M.push(g);y=M}}y.length>0?function(e){if(0!==e.length){for(var i=0;i=0;r--)if(o.isPlainObject(e[r])){var 
g=e[r].name,v=(u[g]||p[g]||{}).name,m=e[r].name,y=u[v]||p[v];v&&m&&"number"===typeof m&&y&&M<5&&(M++,o.warn('addFrames: overwriting frame "'+(u[v]||p[v]).name+'" with a frame whose name of type "number" also equates to "'+v+'". This is valid but may potentially lead to unexpected behavior since all plotly.js frame names are stored internally as strings.'),5===M&&o.warn("addFrames: This API call has yielded too many of these warnings. For the rest of this call, further warnings about numeric frame names will be suppressed.")),p[g]={name:g},d.push({frame:h.supplyFrameDefaults(e[r]),index:n&&void 0!==n[r]&&null!==n[r]?n[r]:f+r})}d.sort((function(t,e){return t.index>e.index?-1:t.index=0;r--){if("number"===typeof(i=d[r].frame).name&&o.warn("Warning: addFrames accepts frames with numeric names, but the numbers areimplicitly cast to strings"),!i.name)for(;u[i.name="frame "+t._transitionData._counter++];);if(u[i.name]){for(a=0;a=0;n--)r=e[n],a.push({type:"delete",index:r}),s.unshift({type:"insert",index:r,value:i[r]});var l=h.modifyFrames,u=h.modifyFrames,f=[t,s],d=[t,a];return c&&c.add(t,l,f,u,d),h.modifyFrames(t,a)},e.addTraces=function t(n,r,i){n=o.getGraphDiv(n);var a,s,l=[],u=e.deleteTraces,f=t,h=[n,l],d=[n,r];for(function(t,e,n){var r,i;if(!Array.isArray(t.data))throw new Error("gd.data must be an array.");if("undefined"===typeof e)throw new Error("traces must be defined.");for(Array.isArray(e)||(e=[e]),r=0;r=0&&n=0&&n=a.length)return!1;if(2===t.dimensions){if(n++,e.length===n)return t;var o=e[n];if(!w(o))return!1;t=a[i][o]}else t=a[i]}else t=a}}return t}function w(t){return t===Math.round(t)&&t>=0}function _(){var t,e,n={};for(t in f(n,o),r.subplotsRegistry)if((e=r.subplotsRegistry[t]).layoutAttributes)if(Array.isArray(e.attr))for(var i=0;i=l.length)return!1;i=(n=(r.transformsRegistry[l[c].type]||{}).attributes)&&n[e[2]],s=3}else{var u=t._module;if(u||(u=(r.modules[t.type||a.type.dflt]||{})._module),!u)return!1;if(!(i=(n=u.attributes)&&n[o])){var f=u.basePlotModule;f&&f.attributes&&(i=f.attributes[o])}i||(i=a[o])}return x(i,e,s)},e.getLayoutValObject=function(t,e){var n=function(t,e){var n,i,a,s,l=t._basePlotModules;if(l){var c;for(n=0;n=i&&(n._input||{})._templateitemname;s&&(o=i);var l,c=e+"["+o+"]";function u(){l={},s&&(l[c]={},l[c][a]=s)}function f(t,e){s?r.nestedProperty(l[c],t).set(e):l[c+"."+t]=e}function h(){var t=l;return u(),t}return u(),{modifyBase:function(t,e){l[t]=e},modifyItem:f,getUpdateObj:h,applyUpdate:function(e,n){e&&f(e,n);var i=h();for(var a in i)r.nestedProperty(t,a).set(i[a])}}}},61549:function(t,e,n){"use strict";var r=n(39898),i=n(73972),a=n(74875),o=n(71828),s=n(63893),l=n(33306),c=n(7901),u=n(91424),f=n(92998),h=n(64168),d=n(89298),p=n(18783),g=n(99082),v=g.enforce,m=g.clean,y=n(71739).doAutoRange,b="start",x="middle",w="end";function _(t,e,n){for(var r=0;r=t[1]||i[1]<=t[0])&&a[0]e[0])return!0}return!1}function k(t){var n,i,s,l,f,g,v=t._fullLayout,m=v._size,y=m.p,b=d.list(t,"",!0);if(v._paperdiv.style({width:t._context.responsive&&v.autosize&&!t._context._hasZeroWidth&&!t.layout.width?"100%":v.width+"px",height:t._context.responsive&&v.autosize&&!t._context._hasZeroHeight&&!t.layout.height?"100%":v.height+"px"}).selectAll(".main-svg").call(u.setSize,v.width,v.height),t._context.setBackground(t,v.paper_bgcolor),e.drawMainTitle(t),h.manage(t),!v._has("cartesian"))return a.previousPromises(t);function x(t,e,n){var 
r=t._lw/2;return"x"===t._id.charAt(0)?e?"top"===n?e._offset-y-r:e._offset+e._length+y+r:m.t+m.h*(1-(t.position||0))+r%1:e?"right"===n?e._offset+e._length+y+r:e._offset-y-r:m.l+m.w*(t.position||0)+r%1}for(n=0;n.5?"t":"b",o=t._fullLayout.margin[a],s=0;return"paper"===e.yref?s=n+e.pad.t+e.pad.b:"container"===e.yref&&(s=function(t,e,n,r,i){var a=0;return"middle"===n&&(a+=i/2),"t"===t?("top"===n&&(a+=i),a+=r-e*r):("bottom"===n&&(a+=i),a+=e*r),a}(a,r,i,t._fullLayout.height,n)+e.pad.t+e.pad.b),s>o?s:0}(t,n,v);m>0&&(function(t,e,n,r){var i="title.automargin",s=t._fullLayout.title,l=s.y>.5?"t":"b",c={x:s.x,y:s.y,t:0,b:0},u={};"paper"===s.yref&&function(t,e,n,r,i){var a="paper"===e.yref?t._fullLayout._size.h:t._fullLayout.height,s=o.isTopAnchor(e)?r:r-i,l="b"===n?a-s:s;return!(o.isTopAnchor(e)&&"t"===n||o.isBottomAnchor(e)&&"b"===n)&&lk?u.push({code:"unused",traceType:y,templateCount:_,dataCount:k}):k>_&&u.push({code:"reused",traceType:y,templateCount:_,dataCount:k})}}else u.push({code:"data"});if(function t(e,n){for(var r in e)if("_"!==r.charAt(0)){var a=e[r],o=g(e,r,n);i(a)?(Array.isArray(e)&&!1===a._template&&a.templateitemname&&u.push({code:"missing",path:o,templateitemname:a.templateitemname}),t(a,o)):Array.isArray(a)&&v(a)&&t(a,o)}}({data:d,layout:h},""),u.length)return u.map(m)}},403:function(t,e,n){"use strict";var r=n(92770),i=n(72391),a=n(74875),o=n(71828),s=n(25095),l=n(5900),c=n(70942),u=n(11506).version,f={format:{valType:"enumerated",values:["png","jpeg","webp","svg","full-json"],dflt:"png"},width:{valType:"number",min:1},height:{valType:"number",min:1},scale:{valType:"number",min:0,dflt:1},setBackground:{valType:"any",dflt:!1},imageDataOnly:{valType:"boolean",dflt:!1}};t.exports=function(t,e){var n,h,d,p;function g(t){return!(t in e)||o.validate(e[t],f[t])}if(e=e||{},o.isPlainObject(t)?(n=t.data||[],h=t.layout||{},d=t.config||{},p={}):(t=o.getGraphDiv(t),n=o.extendDeep([],t.data),h=o.extendDeep({},t.layout),d=t._context,p=t._fullLayout||{}),!g("width")&&null!==e.width||!g("height")&&null!==e.height)throw new Error("Height and width should be pixel values.");if(!g("format"))throw new Error("Export format is not "+o.join2(f.format.values,", "," or ")+".");var v={};function m(t,n){return o.coerce(e,v,f,t,n)}var y=m("format"),b=m("width"),x=m("height"),w=m("scale"),_=m("setBackground"),k=m("imageDataOnly"),T=document.createElement("div");T.style.position="absolute",T.style.left="-5000px",document.body.appendChild(T);var M=o.extendFlat({},h);b?M.width=b:null===e.width&&r(p.width)&&(M.width=p.width),x?M.height=x:null===e.height&&r(p.height)&&(M.height=p.height);var A=o.extendFlat({},d,{_exportedPlot:!0,staticPlot:!0,setBackground:_}),S=s.getRedrawFunc(T);function E(){return new Promise((function(t){setTimeout(t,s.getDelay(T._fullLayout))}))}function C(){return new Promise((function(t,e){var n=l(T,y,w),r=T._fullLayout.width,f=T._fullLayout.height;function h(){i.purge(T),document.body.removeChild(T)}if("full-json"===y){var d=a.graphJson(T,!1,"keepdata","object",!0,!0);return d.version=u,d=JSON.stringify(d),h(),t(k?d:s.encodeJSON(d))}if(h(),"svg"===y)return t(k?n:s.encodeSVG(n));var p=document.createElement("canvas");p.id=o.randstr(),c({format:y,width:r,height:f,scale:w,canvas:p,svg:n,promise:!0}).then(t).catch(e)}))}return new Promise((function(t,e){i.newPlot(T,n,M,A).then(S).then(E).then(C).then((function(e){t(function(t){return k?t.replace(s.IMAGE_URL_PREFIX,""):t}(e))})).catch((function(t){e(t)}))}))}},84936:function(t,e,n){"use strict";var 
r=n(71828),i=n(74875),a=n(86281),o=n(72075).dfltConfig,s=r.isPlainObject,l=Array.isArray,c=r.isArrayOrTypedArray;function u(t,e,n,i,a,o){o=o||[];for(var f=Object.keys(t),h=0;hb.length&&i.push(d("unused",a,m.concat(b.length)));var M,A,S,E,C,P=b.length,O=Array.isArray(T);if(O&&(P=Math.min(P,T.length)),2===x.dimensions)for(A=0;Ab[A].length&&i.push(d("unused",a,m.concat(A,b[A].length)));var L=b[A].length;for(M=0;M<(O?Math.min(L,T[A].length):L);M++)S=O?T[A][M]:T,E=y[A][M],C=b[A][M],r.validate(E,S)?C!==E&&C!==+E&&i.push(d("dynamic",a,m.concat(A,M),E,C)):i.push(d("value",a,m.concat(A,M),E))}else i.push(d("array",a,m.concat(A),y[A]));else for(A=0;A1&&h.push(d("object","layout"))),i.supplyDefaults(p);for(var g=p._fullData,v=n.length,m=0;m0&&Math.round(f)===f))return{vals:i};c=f}for(var h=e.calendar,d="start"===l,p="end"===l,g=t[n+"period0"],v=a(g,h)||0,m=[],y=[],b=[],x=i.length,w=0;wM;)T=o(T,-c,h);for(;T<=M;)T=o(T,c,h);k=o(T,-c,h)}else{for(T=v+(_=Math.round((M-v)/u))*u;T>M;)T-=u;for(;T<=M;)T+=u;k=T-u}m[w]=d?k:p?T:(k+T)/2,y[w]=k,b[w]=T}return{vals:m,starts:y,ends:b}}},89502:function(t){"use strict";t.exports={xaxis:{valType:"subplotid",dflt:"x",editType:"calc+clearAxisTypes"},yaxis:{valType:"subplotid",dflt:"y",editType:"calc+clearAxisTypes"}}},71739:function(t,e,n){"use strict";var r=n(39898),i=n(92770),a=n(71828),o=n(50606).FP_SAFE,s=n(73972),l=n(91424),c=n(41675),u=c.getFromId,f=c.isLinked;function h(t,e){var n,r,i=[],o=t._fullLayout,s=p(o,e,0),l=p(o,e,1),c=v(t,e),u=c.min,f=c.max;if(0===u.length||0===f.length)return a.simpleMap(e.range,e.r2l);var h=u[0].val,g=f[0].val;for(n=1;n0&&((k=E-s(b)-l(x))>C?T/k>P&&(w=b,_=x,P=T/k):T/E>P&&(w={val:b.val,nopad:1},_={val:x.val,nopad:1},P=T/E));if(h===g){var O=h-1,L=h+1;if(A)if(0===h)i=[0,1];else{var I=(h>0?f:u).reduce((function(t,e){return Math.max(t,l(e))}),0),D=h/(1-Math.min(.5,I/E));i=h>0?[0,D]:[D,0]}else i=S?[Math.max(0,O),Math.max(1,L)]:[O,L]}else A?(w.val>=0&&(w={val:0,nopad:1}),_.val<=0&&(_={val:0,nopad:1})):S&&(w.val-P*s(w)<0&&(w={val:0,nopad:1}),_.val<=0&&(_={val:1,nopad:1})),P=(_.val-w.val-d(e,b.val,x.val))/(E-s(w)-l(_)),i=[w.val-P*s(w),_.val+P*l(_)];return m&&i.reverse(),a.simpleMap(i,e.l2r||Number)}function d(t,e,n){var r=0;if(t.rangebreaks)for(var i=t.locateBreaks(e,n),a=0;a0?n.ppadplus:n.ppadminus)||n.ppad||0),S=M((t._m>0?n.ppadminus:n.ppadplus)||n.ppad||0),E=M(n.vpadplus||n.vpad),C=M(n.vpadminus||n.vpad);if(!k){if(h=1/0,d=-1/0,_)for(r=0;r0&&(h=a),a>d&&a-o&&(h=a),a>d&&a=L;r--)O(r);return{min:p,max:g,opts:n}},concatExtremes:v};var g=3;function v(t,e,n){var r,i,a,o=e._id,s=t._fullData,l=t._fullLayout,c=[],f=[];function h(t,e){for(r=0;r=n&&(c.extrapad||!o)){s=!1;break}i(e,c.val)&&c.pad<=n&&(o||!c.extrapad)&&(t.splice(l,1),l--)}if(s){var u=a&&0===e;t.push({val:e,pad:u?0:n,extrapad:!u&&o})}}function x(t){return i(t)&&Math.abs(t)=e}},89298:function(t,e,n){"use strict";var 
[minified vendor bundle omitted: plotly.js webpack modules covering cartesian axes and tick generation, geo projections, the WebGL 3D scene, and Mapbox layers/constants; the minified, machine-generated source is truncated here and is not reviewable line by line]
s=e.dragOptions;e.dragOptions=i.extendDeep(s||{},{dragmode:t.dragmode,element:e.div,gd:r,plotinfo:{id:e.id,domain:t[e.id].domain,xaxis:e.xaxis,yaxis:e.yaxis,fillRangeItems:a},xaxes:[e.xaxis],yaxes:[e.yaxis],subplot:e.id}),n.off("click",e.onClickInPanHandler),h(o)||f(o)?(n.dragPan.disable(),n.on("zoomstart",e.clearOutline),e.dragOptions.prepFn=function(t,n,r){d(t,n,r,e.dragOptions,o)},l.init(e.dragOptions)):(n.dragPan.enable(),n.off("zoomstart",e.clearOutline),e.div.onmousedown=null,e.div.ontouchstart=null,e.div.removeEventListener("touchstart",e.div._ontouchstart),e.onClickInPanHandler=e.onClickInPanFn(e.dragOptions),n.on("click",e.onClickInPanHandler))}function c(t){var n=e.map.unproject(t);return[n.lng,n.lat]}},x.updateFramework=function(t){var e=t[this.id].domain,n=t._size,r=this.div.style;r.width=n.w*(e.x[1]-e.x[0])+"px",r.height=n.h*(e.y[1]-e.y[0])+"px",r.left=n.l+e.x[0]*n.w+"px",r.top=n.t+(1-e.y[1])*n.h+"px",this.xaxis._offset=n.l+e.x[0]*n.w,this.xaxis._length=n.w*(e.x[1]-e.x[0]),this.yaxis._offset=n.t+(1-e.y[1])*n.h,this.yaxis._length=n.h*(e.y[1]-e.y[0])},x.updateLayers=function(t){var e,n=t[this.id].layers,r=this.layerList;if(n.length!==r.length){for(e=0;e=e.width-20?(a["text-anchor"]="start",a.x=5):(a["text-anchor"]="end",a.x=e._paper.attr("width")-7),n.attr(a);var o=n.select(".js-link-to-tool"),s=n.select(".js-link-spacer"),l=n.select(".js-sourcelinks");t._context.showSources&&t._context.showSources(t),t._context.showLink&&function(t,e){e.text("");var n=e.append("a").attr({"xlink:xlink:href":"#",class:"link--impt link--embedview","font-weight":"bold"}).text(t._context.linkText+" "+String.fromCharCode(187));if(t._context.sendData)n.on("click",(function(){w.sendDataToCloud(t)}));else{var r=window.location.pathname.split("/"),i=window.location.search;n.attr({"xlink:xlink:show":"new","xlink:xlink:href":"/"+r[2].split(".")[0]+"/"+r[1]+i})}}(t,o),s.text(o.text()&&l.text()?" 
- ":"")}},w.sendDataToCloud=function(t){var e=(window.PLOTLYENV||{}).BASE_URL||t._context.plotlyServerURL;if(e){t.emit("plotly_beforeexport");var n=r.select(t).append("div").attr("id","hiddenform").style("display","none"),i=n.append("form").attr({action:e+"/external",method:"post",target:"_blank"});return i.append("input").attr({type:"text",name:"data"}).node().value=w.graphJson(t,!1,"keepdata"),i.node().submit(),n.remove(),t.emit("plotly_afterexport"),!1}};var T=["days","shortDays","months","shortMonths","periods","dateTime","date","time","decimal","thousands","grouping","currency"],M=["year","month","dayMonth","dayMonthYear"];function A(t,e){var n=t._context.locale;n||(n="en-US");var r=!1,i={};function a(t){for(var n=!0,a=0;a1&&D.length>1){for(s.getComponentMethod("grid","sizeDefaults")(c,l),o=0;o15&&D.length>15&&0===l.shapes.length&&0===l.images.length,w.linkSubplots(h,l,f,r),w.cleanPlot(h,l,f,r);var F=!(!r._has||!r._has("gl2d")),B=!(!l._has||!l._has("gl2d")),U=!(!r._has||!r._has("cartesian"))||F,H=!(!l._has||!l._has("cartesian"))||B;U&&!H?r._bgLayer.remove():H&&!U&&(l._shouldCreateBgLayer=!0),r._zoomlayer&&!t._dragging&&p({_fullLayout:r}),function(t,e){var n,r=[];e.meta&&(n=e._meta={meta:e.meta,layout:{meta:e.meta}});for(var i=0;i0){var f=1-2*s;r=Math.round(f*r),i=Math.round(f*i)}}var h=w.layoutAttributes.width.min,d=w.layoutAttributes.height.min;r1,g=!e.height&&Math.abs(n.height-i)>1;(g||p)&&(p&&(n.width=r),g&&(n.height=i)),t._initialAutoSize||(t._initialAutoSize={width:r,height:i}),w.sanitizeMargins(n)},w.supplyLayoutModuleDefaults=function(t,e,n,r){var i,a,o,l=s.componentsRegistry,c=e._basePlotModules,f=s.subplotsRegistry.cartesian;for(i in l)(o=l[i]).includeBasePlot&&o.includeBasePlot(t,e);for(var h in c.length||c.push(f),e._has("cartesian")&&(s.getComponentMethod("grid","contentDefaults")(t,e),f.finalizeSubplots(t,e)),e._subplots)e._subplots[h].sort(u.subplotSort);for(a=0;a1&&(n.l/=m,n.r/=m)}if(d){var y=(n.t+n.b)/d;y>1&&(n.t/=y,n.b/=y)}var b=void 0!==n.xl?n.xl:n.x,x=void 0!==n.xr?n.xr:n.x,_=void 0!==n.yt?n.yt:n.y,k=void 0!==n.yb?n.yb:n.y;p[e]={l:{val:b,size:n.l+v},r:{val:x,size:n.r+v},b:{val:k,size:n.b+v},t:{val:_,size:n.t+v}},g[e]=1}else delete p[e],delete g[e];if(!r._replotting)return w.doAutoMargin(t)}},w.doAutoMargin=function(t){var e=t._fullLayout,n=e.width,r=e.height;e._size||(e._size={}),O(e);var i=e._size,a=e.margin,l={t:0,b:0,l:0,r:0},c=u.extendFlat({},i),f=a.l,h=a.r,p=a.t,g=a.b,v=e._pushmargin,m=e._pushmarginIds,y=e.minreducedwidth,b=e.minreducedheight;if(!1!==a.autoexpand){for(var x in v)m[x]||delete v[x];var _=t._fullLayout._reservedMargin;for(var k in _)for(var T in _[k]){var M=_[k][T];l[T]=Math.max(l[T],M)}for(var A in v.base={l:{val:0,size:f},r:{val:1,size:h},t:{val:1,size:p},b:{val:0,size:g}},l){var S=0;for(var E in v)"base"!==E&&o(v[E][A].size)&&(S=v[E][A].size>S?v[E][A].size:S);var C=Math.max(0,a[A]-S);l[A]=Math.max(0,l[A]-C)}for(var P in v){var L=v[P].l||{},I=v[P].b||{},D=L.val,R=L.size,z=I.val,N=I.size,j=n-l.r-l.l,F=r-l.t-l.b;for(var B in v){if(o(R)&&v[B].r){var U=v[B].r.val,H=v[B].r.size;if(U>D){var V=(R*U+(H-j)*D)/(U-D),q=(H*(1-D)+(R-j)*(1-U))/(U-D);V+q>f+h&&(f=V,h=q)}}if(o(N)&&v[B].t){var G=v[B].t.val,W=v[B].t.size;if(G>z){var Y=(N*G+(W-F)*z)/(G-z),$=(W*(1-z)+(N-F)*(1-G))/(G-z);Y+$>g+p&&(g=Y,p=$)}}}}}var X=u.constrain(n-a.l-a.r,2,y),K=u.constrain(r-a.t-a.b,2,b),Z=Math.max(0,n-X),J=Math.max(0,r-K);if(Z){var Q=(f+h)/Z;Q>1&&(f/=Q,h/=Q)}if(J){var 
tt=(g+p)/J;tt>1&&(g/=tt,p/=tt)}if(i.l=Math.round(f)+l.l,i.r=Math.round(h)+l.r,i.t=Math.round(p)+l.t,i.b=Math.round(g)+l.b,i.p=Math.round(a.pad),i.w=Math.round(n)-i.l-i.r,i.h=Math.round(r)-i.t-i.b,!e._replotting&&(w.didMarginChange(c,i)||function(t){if("_redrawFromAutoMarginCount"in t._fullLayout)return!1;var e=d.list(t,"",!0);for(var n in e)if(e[n].autoshift||e[n].shift)return!0;return!1}(t))){"_redrawFromAutoMarginCount"in e?e._redrawFromAutoMarginCount++:e._redrawFromAutoMarginCount=1;var et=3*(1+Object.keys(m).length);if(e._redrawFromAutoMarginCount0&&(t._transitioningWithDuration=!0),t._transitionData._interruptCallbacks.push((function(){r=!0})),n.redraw&&t._transitionData._interruptCallbacks.push((function(){return s.call("redraw",t)})),t._transitionData._interruptCallbacks.push((function(){t.emit("plotly_transitioninterrupted",[])}));var a=0,o=0;function l(){return a++,function(){var e;o++,r||o!==a||(e=i,t._transitionData&&(function(t){if(t)for(;t.length;)t.shift()}(t._transitionData._interruptCallbacks),Promise.resolve().then((function(){if(n.redraw)return s.call("redraw",t)})).then((function(){t._transitioning=!1,t._transitioningWithDuration=!1,t.emit("plotly_transitioned",[])})).then(e)))}}n.runFn(l),setTimeout(l())}))}],a=u.syncOrAsync(i,t);return a&&a.then||(a=Promise.resolve()),a.then((function(){return t}))}w.didMarginChange=function(t,e){for(var n=0;n1)return!0}return!1},w.graphJson=function(t,e,n,r,i,a){(i&&e&&!t._fullData||i&&!e&&!t._fullLayout)&&w.supplyDefaults(t);var o=i?t._fullData:t.data,s=i?t._fullLayout:t.layout,l=(t._transitionData||{})._frames;function c(t,e){if("function"===typeof t)return e?"_function_":null;if(u.isPlainObject(t)){var r,i={};return Object.keys(t).sort().forEach((function(a){if(-1===["_","["].indexOf(a.charAt(0)))if("function"!==typeof t[a]){if("keepdata"===n){if("src"===a.substr(a.length-3))return}else if("keepstream"===n){if("string"===typeof(r=t[a+"src"])&&r.indexOf(":")>0&&!u.isPlainObject(t.stream))return}else if("keepall"!==n&&"string"===typeof(r=t[a+"src"])&&r.indexOf(":")>0)return;i[a]=c(t[a],e)}else e&&(i[a]="_function")})),i}return Array.isArray(t)?t.map((function(t){return c(t,e)})):u.isTypedArray(t)?u.simpleMap(t,u.identity):u.isJSDate(t)?u.ms2DateTimeLocal(+t):t}var f={data:(o||[]).map((function(t){var n=c(t);return e&&delete n.fit,n}))};if(!e&&(f.layout=c(s),i)){var h=s._size;f.layout.computed={margin:{b:h.b,l:h.l,r:h.r,t:h.t}}}return l&&(f.frames=c(l)),a&&(f.config=c(t._context,!0)),"object"===r?f:JSON.stringify(f)},w.modifyFrames=function(t,e){var n,r,i,a=t._transitionData._frames,o=t._transitionData._frameHash;for(n=0;n=0;a--)if(s[a].enabled){n._indexToPoints=s[a]._indexToPoints;break}r&&r.calc&&(o=r.calc(t,n))}Array.isArray(o)&&o[0]||(o=[{x:h,y:h}]),o[0].t||(o[0].t={}),o[0].trace=n,p[e]=o}}for(R(o,c,f),i=0;i1e-10?t:0}function h(t,e,n){e=e||0,n=n||0;for(var r=t.length,i=new Array(r),a=0;a0?n:1/0})),i=r.mod(n+1,e.length);return[e[n],e[i]]},findIntersectionXY:c,findXYatLength:function(t,e,n,r){var i=-e*n,a=e*e+1,o=2*(e*i-n),s=i*i+n*n-t*t,l=Math.sqrt(o*o-4*a*s),c=(-o+l)/(2*a),u=(-o-l)/(2*a);return[[c,e*c+i+r],[u,e*u+i+r]]},clampTiny:f,pathPolygon:function(t,e,n,r,i,a){return"M"+h(u(t,e,n,r),i,a).join("L")},pathPolygonAnnulus:function(t,e,n,r,i,a,o){var 
s,l;t=90||l>90&&c>=450?1:f<=0&&d<=0?0:Math.max(f,d),e=l<=180&&c>=180||l>180&&c>=540?-1:u>=0&&h>=0?0:Math.min(u,h),n=l<=270&&c>=270||l>270&&c>=630?-1:f>=0&&d>=0?0:Math.min(f,d),r=c>=360?1:u<=0&&h<=0?0:Math.max(u,h),[e,n,r,i]}(p),w=x[2]-x[0],_=x[3]-x[1],k=d/h,T=Math.abs(_/w);k>T?(g=h,b=(d-(v=h*T))/i.h/2,m=[s[0],s[1]],y=[f[0]+b,f[1]-b]):(v=d,b=(h-(g=d/T))/i.w/2,m=[s[0]+b,s[1]-b],y=[f[0],f[1]]),n.xLength2=g,n.yLength2=v,n.xDomain2=m,n.yDomain2=y;var M,A=n.xOffset2=i.l+i.w*m[0],S=n.yOffset2=i.t+i.h*(1-y[1]),E=n.radius=g/w,C=n.innerRadius=n.getHole(e)*E,P=n.cx=A-E*x[0],O=n.cy=S+E*x[3],L=n.cxx=P-A,I=n.cyy=O-S,D=a.side;"counterclockwise"===D?(M=D,D="top"):"clockwise"===D&&(M=D,D="bottom"),n.radialAxis=n.mockAxis(t,e,a,{_id:"x",side:D,_trueSide:M,domain:[C/i.w,E/i.w]}),n.angularAxis=n.mockAxis(t,e,o,{side:"right",domain:[0,Math.PI],autorange:!1}),n.doAutoRange(t,e),n.updateAngularAxis(t,e),n.updateRadialAxis(t,e),n.updateRadialAxisTitle(t,e),n.xaxis=n.mockCartesianAxis(t,e,{_id:"x",domain:m}),n.yaxis=n.mockCartesianAxis(t,e,{_id:"y",domain:y});var N=n.pathSubplot();n.clipPaths.forTraces.select("path").attr("d",N).attr("transform",l(L,I)),r.frontplot.attr("transform",l(A,S)).call(u.setClipUrl,n._hasClipOnAxisFalse?null:n.clipIds.forTraces,n.gd),r.bg.attr("d",N).attr("transform",l(P,O)).call(c.fill,e.bgcolor)},F.mockAxis=function(t,e,n,r){var i=o.extendFlat({},n,r);return p(i,e,t),i},F.mockCartesianAxis=function(t,e,n){var r=this,i=r.isSmith,a=n._id,s=o.extendFlat({type:"linear"},n);d(s,t);var l={x:[0,2],y:[1,3]};return s.setRange=function(){var t=r.sectorBBox,n=l[a],i=r.radialAxis._rl,o=(i[1]-i[0])/(1-r.getHole(e));s.range=[t[n[0]]*o,t[n[1]]*o]},s.isPtWithinRange="x"!==a||i?function(){return!0}:function(t){return r.isPtInside(t)},s.setRange(),s.setScale(),s},F.doAutoRange=function(t,e){var n=this,r=n.gd,i=n.radialAxis,a=n.getRadial(e);g(r,i);var o=i.range;a.range=o.slice(),a._input.range=o.slice(),i._rl=[i.r2l(o[0],null,"gregorian"),i.r2l(o[1],null,"gregorian")]},F.updateRadialAxis=function(t,e){var n=this,r=n.gd,i=n.layers,a=n.radius,u=n.innerRadius,f=n.cx,d=n.cy,p=n.getRadial(e),g=R(n.getSector(e)[0],360),v=n.radialAxis,m=u90&&g<=270&&(v.tickangle=180);var b=y?function(t){var e=I(n,P([t.x,0]));return l(e[0]-f,e[1]-d)}:function(t){return l(v.l2p(t.x)+u,0)},x=y?function(t){return L(n,t.x,-1/0,1/0)}:function(t){return n.pathArc(v.r2p(t.x)+u)},w=B(p);if(n.radialTickLayout!==w&&(i["radial-axis"].selectAll(".xtick").remove(),n.radialTickLayout=w),m){v.setScale();var _=0,k=y?(v.tickvals||[]).filter((function(t){return t>=0})).map((function(t){return h.tickText(v,t,!0,!1)})):h.calcTicks(v),T=y?k:h.clipEnds(v,k),M=h.getTickSigns(v)[2];y&&(("top"===v.ticks&&"bottom"===v.side||"bottom"===v.ticks&&"top"===v.side)&&(M=-M),"top"===v.ticks&&"top"===v.side&&(_=-v.ticklen),"bottom"===v.ticks&&"bottom"===v.side&&(_=v.ticklen)),h.drawTicks(r,v,{vals:k,layer:i["radial-axis"],path:h.makeTickPath(v,0,M),transFn:b,crisp:!1}),h.drawGrid(r,v,{vals:T,layer:i["radial-grid"],path:x,transFn:o.noop,crisp:!1}),h.drawLabels(r,v,{vals:k,layer:i["radial-axis"],transFn:b,labelFns:h.makeLabelFns(v,_)})}var A=n.radialAxisAngle=n.vangles?N(U(z(p.angle),n.vangles)):p.angle,S=l(f,d),E=S+s(-A);H(i["radial-axis"],m&&(p.showticklabels||p.ticks),{transform:E}),H(i["radial-grid"],m&&p.showgrid,{transform:y?"":S}),H(i["radial-line"].select("line"),m&&p.showline,{x1:y?-a:u,y1:0,x2:a,y2:0,transform:E}).attr("stroke-width",p.linewidth).call(c.stroke,p.linecolor)},F.updateRadialAxisTitle=function(t,e,n){if(!this.isSmith){var 
r=this,i=r.gd,a=r.radius,o=r.cx,s=r.cy,l=r.getRadial(e),c=r.id+"title",f=0;if(l.title){var h=u.bBox(r.layers["radial-axis"].node()).height,d=l.title.font.size,p=l.side;f="top"===p?d:"counterclockwise"===p?-(h+.4*d):h+.8*d}var g=void 0!==n?n:r.radialAxisAngle,v=z(g),m=Math.cos(v),y=Math.sin(v),x=o+a/2*m+f*y,w=s-a/2*y+f*m;r.layers["radial-axis-title"]=b.draw(i,c,{propContainer:l,propName:r.id+".radialaxis.title",placeholder:D(i,"Click to enter radial axis title"),attributes:{x:x,y:w,"text-anchor":"middle"},transform:{rotate:-g}})}},F.updateAngularAxis=function(t,e){var n=this,r=n.gd,i=n.layers,a=n.radius,u=n.innerRadius,f=n.cx,d=n.cy,p=n.getAngular(e),g=n.angularAxis,v=n.isSmith;v||(n.fillViewInitialKey("angularaxis.rotation",p.rotation),g.setGeometry(),g.setScale());var m=v?function(t){var e=I(n,P([0,t.x]));return Math.atan2(e[0]-f,e[1]-d)-Math.PI/2}:function(t){return g.t2g(t.x)};"linear"===g.type&&"radians"===g.thetaunit&&(g.tick0=N(g.tick0),g.dtick=N(g.dtick));var y=function(t){return l(f+a*Math.cos(t),d-a*Math.sin(t))},b=v?function(t){var e=I(n,P([0,t.x]));return l(e[0],e[1])}:function(t){return y(m(t))},x=v?function(t){var e=I(n,P([0,t.x])),r=Math.atan2(e[0]-f,e[1]-d)-Math.PI/2;return l(e[0],e[1])+s(-N(r))}:function(t){var e=m(t);return y(e)+s(-N(e))},w=v?function(t){return O(n,t.x,0,1/0)}:function(t){var e=m(t),n=Math.cos(e),r=Math.sin(e);return"M"+[f+u*n,d-u*r]+"L"+[f+a*n,d-a*r]},_=h.makeLabelFns(g,0).labelStandoff,k={xFn:function(t){var e=m(t);return Math.cos(e)*_},yFn:function(t){var e=m(t),n=Math.sin(e)>0?.2:1;return-Math.sin(e)*(_+t.fontSize*n)+Math.abs(Math.cos(e))*(t.fontSize*A)},anchorFn:function(t){var e=m(t),n=Math.cos(e);return Math.abs(n)<.1?"middle":n>0?"start":"end"},heightFn:function(t,e,n){var r=m(t);return-.5*(1+Math.sin(r))*n}},T=B(p);n.angularTickLayout!==T&&(i["angular-axis"].selectAll("."+g._id+"tick").remove(),n.angularTickLayout=T);var M,S=v?[1/0].concat(g.tickvals||[]).map((function(t){return h.tickText(g,t,!0,!1)})):h.calcTicks(g);if(v&&(S[0].text="\u221e",S[0].fontSize*=1.75),"linear"===e.gridshape?(M=S.map(m),o.angleDelta(M[0],M[1])<0&&(M=M.slice().reverse())):M=null,n.vangles=M,"category"===g.type&&(S=S.filter((function(t){return o.isAngleInsideSector(m(t),n.sectorInRad)}))),g.visible){var E="inside"===g.ticks?-1:1,C=(g.linewidth||1)/2;h.drawTicks(r,g,{vals:S,layer:i["angular-axis"],path:"M"+E*C+",0h"+E*g.ticklen,transFn:x,crisp:!1}),h.drawGrid(r,g,{vals:S,layer:i["angular-grid"],path:w,transFn:o.noop,crisp:!1}),h.drawLabels(r,g,{vals:S,layer:i["angular-axis"],repositionOnUpdate:!0,transFn:b,labelFns:k})}H(i["angular-line"].select("path"),p.showline,{d:n.pathSubplot(),transform:l(f,d)}).attr("stroke-width",p.linewidth).call(c.stroke,p.linecolor)},F.updateFx=function(t,e){this.gd._context.staticPlot||(!this.isSmith&&(this.updateAngularDrag(t),this.updateRadialDrag(t,e,0),this.updateRadialDrag(t,e,1)),this.updateHoverAndMainDrag(t))},F.updateHoverAndMainDrag=function(t){var e,n,s=this,c=s.isSmith,u=s.gd,f=s.layers,h=t._zoomlayer,d=S.MINZOOM,p=S.OFFEDGE,g=s.radius,b=s.innerRadius,k=s.cx,T=s.cy,M=s.cxx,A=s.cyy,C=s.sectorInRad,P=s.vangles,O=s.radialAxis,L=E.clampTiny,I=E.findXYatLength,D=E.findEnclosingVertexAngles,R=S.cornerHalfWidth,z=S.cornerLen/2,N=v.makeDragger(f,"path","maindrag",!1===t.dragmode?"none":"crosshair");r.select(N).attr("d",s.pathSubplot()).attr("transform",l(k,T)),N.onmousemove=function(t){y.hover(u,t,s.id),u._fullLayout._lasthover=N,u._fullLayout._hoversubplot=s.id},N.onmouseout=function(t){u._dragging||m.unhover(u,t)};var 
j,F,B,U,H,V,q,G,W,Y={element:N,gd:u,subplot:s.id,plotinfo:{id:s.id,xaxis:s.xaxis,yaxis:s.yaxis},xaxes:[s.xaxis],yaxes:[s.yaxis]};function $(t,e){return Math.sqrt(t*t+e*e)}function X(t,e){return $(t-M,e-A)}function K(t,e){return Math.atan2(A-e,t-M)}function Z(t,e){return[t*Math.cos(e),t*Math.sin(-e)]}function J(t,e){if(0===t)return s.pathSector(2*R);var n=z/t,r=e-n,i=e+n,a=Math.max(0,Math.min(t,g)),o=a-R,l=a+R;return"M"+Z(o,r)+"A"+[o,o]+" 0,0,0 "+Z(o,i)+"L"+Z(l,i)+"A"+[l,l]+" 0,0,1 "+Z(l,r)+"Z"}function Q(t,e,n){if(0===t)return s.pathSector(2*R);var r,i,a=Z(t,e),o=Z(t,n),l=L((a[0]+o[0])/2),c=L((a[1]+o[1])/2);if(l&&c){var u=c/l,f=-1/u,h=I(R,u,l,c);r=I(z,f,h[0][0],h[0][1]),i=I(z,f,h[1][0],h[1][1])}else{var d,p;c?(d=z,p=R):(d=R,p=z),r=[[l-d,c-p],[l+d,c-p]],i=[[l-d,c+p],[l+d,c+p]]}return"M"+r.join("L")+"L"+i.reverse().join("L")+"Z"}function tt(t,e){return e=Math.max(Math.min(e,g),b),td?(t-1&&1===t&&w(e,u,[s.xaxis],[s.yaxis],s.id,Y),n.indexOf("event")>-1&&y.click(u,e,s.id)}Y.prepFn=function(t,r,a){var l=u._fullLayout.dragmode,f=N.getBoundingClientRect();u._fullLayout._calcInverseTransform(u);var d=u._fullLayout._invTransform;e=u._fullLayout._invScaleX,n=u._fullLayout._invScaleY;var p=o.apply3DTransform(d)(r-f.left,a-f.top);if(j=p[0],F=p[1],P){var m=E.findPolygonOffset(g,C[0],C[1],P);j+=M+m[0],F+=A+m[1]}switch(l){case"zoom":Y.clickFn=st,c||(Y.moveFn=P?it:nt,Y.doneFn=at,function(){B=null,U=null,H=s.pathSubplot(),V=!1;var t=u._fullLayout[s.id];q=i(t.bgcolor).getLuminance(),(G=v.makeZoombox(h,q,k,T,H)).attr("fill-rule","evenodd"),W=v.makeCorners(h,k,T),_(u)}());break;case"select":case"lasso":x(t,r,a,Y,l)}},m.init(Y)},F.updateRadialDrag=function(t,e,n){var i=this,c=i.gd,u=i.layers,f=i.radius,h=i.innerRadius,d=i.cx,p=i.cy,g=i.radialAxis,y=S.radialDragBoxSize,b=y/2;if(g.visible){var x,w,k,A=z(i.radialAxisAngle),E=g._rl,C=E[0],P=E[1],O=E[n],L=.75*(E[1]-E[0])/(1-i.getHole(e))/f;n?(x=d+(f+b)*Math.cos(A),w=p-(f+b)*Math.sin(A),k="radialdrag"):(x=d+(h-b)*Math.cos(A),w=p-(h-b)*Math.sin(A),k="radialdrag-inner");var I,D,R,j=v.makeRectDragger(u,k,"crosshair",-b,-b,y,y),F={element:j,gd:c};!1===t.dragmode&&(F.dragmode=!1),H(r.select(j),g.visible&&h0===(n?R>C:Rr?function(t){return t<=0}:function(t){return t>=0};t.c2g=function(n){var r=t.c2l(n)-e;return(s(r)?r:0)+o},t.g2c=function(n){return t.l2c(n+e-o)},t.g2p=function(t){return t*a},t.c2p=function(e){return t.g2p(t.c2g(e))}}}(t,e);break;case"angularaxis":!function(t,e){var n=t.type;if("linear"===n){var i=t.d2c,s=t.c2d;t.d2c=function(t,e){return function(t,e){return"degrees"===e?a(t):t}(i(t),e)},t.c2d=function(t,e){return s(function(t,e){return"degrees"===e?o(t):t}(t,e))}}t.makeCalcdata=function(e,i){var a,o,s=e[i],l=e._length,c=function(n){return t.d2c(n,e.thetaunit)};if(s){if(r.isTypedArray(s)&&"linear"===n){if(l===s.length)return s;if(s.subarray)return s.subarray(0,l)}for(a=new Array(l),o=0;o0?1:0}function n(t){var e=t[0],n=t[1];if(!isFinite(e)||!isFinite(n))return[1,0];var r=(e+1)*(e+1)+n*n;return[(e*e+n*n-1)/r,2*n/r]}function r(t,e){var n=e[0],r=e[1];return[n*t.radius+t.cx,-r*t.radius+t.cy]}function i(t,e){return e*t.radius}t.exports={smith:n,reactanceArc:function(t,e,a,o){var s=r(t,n([a,e])),l=s[0],c=s[1],u=r(t,n([o,e])),f=u[0],h=u[1];if(0===e)return["M"+l+","+c,"L"+f+","+h].join(" ");var d=i(t,1/Math.abs(e));return["M"+l+","+c,"A"+d+","+d+" 0 0,"+(e<0?1:0)+" "+f+","+h].join(" ")},resistanceArc:function(t,a,o,s){var l=i(t,1/(a+1)),c=r(t,n([a,o])),u=c[0],f=c[1],h=r(t,n([a,s])),d=h[0],p=h[1];if(e(o)!==e(s)){var g=r(t,n([a,0]));return["M"+u+","+f,"A"+l+","+l+" 0 
0,"+(00){for(var r=[],i=0;i=u&&(h.min=0,p.min=0,v.min=0,t.aaxis&&delete t.aaxis.min,t.baxis&&delete t.baxis.min,t.caxis&&delete t.caxis.min)}function g(t,e,n,r){var i=h[e._name];function o(n,r){return a.coerce(t,e,i,n,r)}o("uirevision",r.uirevision),e.type="linear";var d=o("color"),p=d!==i.color.dflt?d:n.font.color,g=e._name.charAt(0).toUpperCase(),v="Component "+g,m=o("title.text",v);e._hovertitle=m===v?m:g,a.coerceFont(o,"title.font",{family:n.font.family,size:a.bigFont(n.font.size),color:p}),o("min"),u(t,e,o,"linear"),l(t,e,o,"linear"),s(t,e,o,"linear"),c(t,e,o,{outerTicks:!0}),o("showticklabels")&&(a.coerceFont(o,"tickfont",{family:n.font.family,size:n.font.size,color:p}),o("tickangle"),o("tickformat")),f(t,e,o,{dfltColor:d,bgColor:n.bgColor,blend:60,showLine:!0,showGrid:!0,noZeroLine:!0,attributes:i}),o("hoverformat"),o("layer")}t.exports=function(t,e,n){o(t,e,n,{type:"ternary",attributes:h,handleDefaults:p,font:e.font,paper_bgcolor:e.paper_bgcolor})}},64380:function(t,e,n){"use strict";var r=n(39898),i=n(84267),a=n(73972),o=n(71828),s=o.strTranslate,l=o._,c=n(7901),u=n(91424),f=n(21994),h=n(1426).extendFlat,d=n(74875),p=n(89298),g=n(28569),v=n(30211),m=n(64505),y=m.freeMode,b=m.rectMode,x=n(92998),w=n(47322).prepSelect,_=n(47322).selectOnClick,k=n(47322).clearOutline,T=n(47322).clearSelectionsCache,M=n(85555);function A(t,e){this.id=t.id,this.graphDiv=t.graphDiv,this.init(e),this.makeFramework(e),this.aTickLayout=null,this.bTickLayout=null,this.cTickLayout=null}t.exports=A;var S=A.prototype;S.init=function(t){this.container=t._ternarylayer,this.defs=t._defs,this.layoutId=t._uid,this.traceHash={},this.layers={}},S.plot=function(t,e){var n=this,r=e[n.id],i=e._size;n._hasClipOnAxisFalse=!1;for(var a=0;aE*x?i=(a=x)*E:a=(i=b)/E,o=m*i/b,l=y*a/x,n=e.l+e.w*g-i/2,r=e.t+e.h*(1-v)-a/2,d.x0=n,d.y0=r,d.w=i,d.h=a,d.sum=w,d.xaxis={type:"linear",range:[_+2*T-w,w-_-2*k],domain:[g-o/2,g+o/2],_id:"x"},f(d.xaxis,d.graphDiv._fullLayout),d.xaxis.setScale(),d.xaxis.isPtWithinRange=function(t){return t.a>=d.aaxis.range[0]&&t.a<=d.aaxis.range[1]&&t.b>=d.baxis.range[1]&&t.b<=d.baxis.range[0]&&t.c>=d.caxis.range[1]&&t.c<=d.caxis.range[0]},d.yaxis={type:"linear",range:[_,w-k-T],domain:[v-l/2,v+l/2],_id:"y"},f(d.yaxis,d.graphDiv._fullLayout),d.yaxis.setScale(),d.yaxis.isPtWithinRange=function(){return!0};var M=d.yaxis.domain[0],A=d.aaxis=h({},t.aaxis,{range:[_,w-k-T],side:"left",tickangle:(+t.aaxis.tickangle||0)-30,domain:[M,M+l*E],anchor:"free",position:0,_id:"y",_length:i});f(A,d.graphDiv._fullLayout),A.setScale();var S=d.baxis=h({},t.baxis,{range:[w-_-T,k],side:"bottom",domain:d.xaxis.domain,anchor:"free",position:0,_id:"x",_length:i});f(S,d.graphDiv._fullLayout),S.setScale();var C=d.caxis=h({},t.caxis,{range:[w-_-k,T],side:"right",tickangle:(+t.caxis.tickangle||0)+30,domain:[M,M+l*E],anchor:"free",position:0,_id:"y",_length:i});f(C,d.graphDiv._fullLayout),C.setScale();var P="M"+n+","+(r+a)+"h"+i+"l-"+i/2+",-"+a+"Z";d.clipDef.select("path").attr("d",P),d.layers.plotbg.select("path").attr("d",P);var O="M0,"+a+"h"+i+"l-"+i/2+",-"+a+"Z";d.clipDefRelative.select("path").attr("d",O);var L=s(n,r);d.plotContainer.selectAll(".scatterlayer,.maplayer").attr("transform",L),d.clipDefRelative.select("path").attr("transform",null);var I=s(n-S._offset,r+a);d.layers.baxis.attr("transform",I),d.layers.bgrid.attr("transform",I);var D=s(n+i/2,r)+"rotate(30)"+s(0,-A._offset);d.layers.aaxis.attr("transform",D),d.layers.agrid.attr("transform",D);var 
R=s(n+i/2,r)+"rotate(-30)"+s(0,-C._offset);d.layers.caxis.attr("transform",R),d.layers.cgrid.attr("transform",R),d.drawAxes(!0),d.layers.aline.select("path").attr("d",A.showline?"M"+n+","+(r+a)+"l"+i/2+",-"+a:"M0,0").call(c.stroke,A.linecolor||"#000").style("stroke-width",(A.linewidth||0)+"px"),d.layers.bline.select("path").attr("d",S.showline?"M"+n+","+(r+a)+"h"+i:"M0,0").call(c.stroke,S.linecolor||"#000").style("stroke-width",(S.linewidth||0)+"px"),d.layers.cline.select("path").attr("d",C.showline?"M"+(n+i/2)+","+r+"l"+i/2+","+a:"M0,0").call(c.stroke,C.linecolor||"#000").style("stroke-width",(C.linewidth||0)+"px"),d.graphDiv._context.staticPlot||d.initInteractions(),u.setClipUrl(d.layers.frontplot,d._hasClipOnAxisFalse?null:d.clipId,d.graphDiv)},S.drawAxes=function(t){var e=this,n=e.graphDiv,r=e.id.substr(7)+"title",i=e.layers,a=e.aaxis,o=e.baxis,s=e.caxis;if(e.drawAx(a),e.drawAx(o),e.drawAx(s),t){var c=Math.max(a.showticklabels?a.tickfont.size/2:0,(s.showticklabels?.75*s.tickfont.size:0)+("outside"===s.ticks?.87*s.ticklen:0)),u=(o.showticklabels?o.tickfont.size:0)+("outside"===o.ticks?o.ticklen:0)+3;i["a-title"]=x.draw(n,"a"+r,{propContainer:a,propName:e.id+".aaxis.title",placeholder:l(n,"Click to enter Component A title"),attributes:{x:e.x0+e.w/2,y:e.y0-a.title.font.size/3-c,"text-anchor":"middle"}}),i["b-title"]=x.draw(n,"b"+r,{propContainer:o,propName:e.id+".baxis.title",placeholder:l(n,"Click to enter Component B title"),attributes:{x:e.x0-u,y:e.y0+e.h+.83*o.title.font.size+u,"text-anchor":"middle"}}),i["c-title"]=x.draw(n,"c"+r,{propContainer:s,propName:e.id+".caxis.title",placeholder:l(n,"Click to enter Component C title"),attributes:{x:e.x0+e.w+u,y:e.y0+e.h+.83*s.title.font.size+u,"text-anchor":"middle"}})}},S.drawAx=function(t){var e,n=this,r=n.graphDiv,i=t._name,a=i.charAt(0),s=t._id,l=n.layers[i],c=a+"tickLayout",u=(e=t).ticks+String(e.ticklen)+String(e.showticklabels);n[c]!==u&&(l.selectAll("."+s+"tick").remove(),n[c]=u),t.setScale();var f=p.calcTicks(t),h=p.clipEnds(t,f),d=p.makeTransTickFn(t),g=p.getTickSigns(t)[2],v=o.deg2rad(30),m=g*(t.linewidth||1)/2,y=g*t.ticklen,b=n.w,x=n.h,w="b"===a?"M0,"+m+"l"+Math.sin(v)*y+","+Math.cos(v)*y:"M"+m+",0l"+Math.cos(v)*y+","+-Math.sin(v)*y,_={a:"M0,0l"+x+",-"+b/2,b:"M0,0l-"+b/2+",-"+x,c:"M0,0l-"+x+","+b/2}[a];p.drawTicks(r,t,{vals:"inside"===t.ticks?h:f,layer:l,path:w,transFn:d,crisp:!1}),p.drawGrid(r,t,{vals:h,layer:n.layers[a+"grid"],path:_,transFn:d,crisp:!1}),p.drawLabels(r,t,{vals:f,layer:l,transFn:d,labelFns:p.makeLabelFns(t,0,30)})};var C=M.MINZOOM/2+.87,P="m-0.87,.5h"+C+"v3h-"+(C+5.2)+"l"+(C/2+2.6)+",-"+(.87*C+4.5)+"l2.6,1.5l-"+C/2+","+.87*C+"Z",O="m0.87,.5h-"+C+"v3h"+(C+5.2)+"l-"+(C/2+2.6)+",-"+(.87*C+4.5)+"l-2.6,1.5l"+C/2+","+.87*C+"Z",L="m0,1l"+C/2+","+.87*C+"l2.6,-1.5l-"+(C/2+2.6)+",-"+(.87*C+4.5)+"l-"+(C/2+2.6)+","+(.87*C+4.5)+"l2.6,1.5l"+C/2+",-"+.87*C+"Z",I=!0;function D(t){r.select(t).selectAll(".zoombox,.js-zoombox-backdrop,.js-zoombox-menu,.zoombox-corners").remove()}S.clearOutline=function(){T(this.dragOptions),k(this.dragOptions.gd)},S.initInteractions=function(){var t,e,n,r,f,h,d,p,m,x,k,T,A=this,S=A.layers.plotbg.select("path").node(),C=A.graphDiv,R=C._fullLayout._zoomlayer;function z(t){var e={};return e[A.id+".aaxis.min"]=t.a,e[A.id+".baxis.min"]=t.b,e[A.id+".caxis.min"]=t.c,e}function N(t,e){var 
n=C._fullLayout.clickmode;D(C),2===t&&(C.emit("plotly_doubleclick",null),a.call("_guiRelayout",C,z({a:0,b:0,c:0}))),n.indexOf("select")>-1&&1===t&&_(e,C,[A.xaxis],[A.yaxis],A.id,A.dragOptions),n.indexOf("event")>-1&&v.click(C,e,A.id)}function j(t,e){return 1-e/A.h}function F(t,e){return 1-(t+(A.h-e)/Math.sqrt(3))/A.w}function B(t,e){return(t-(A.h-e)/Math.sqrt(3))/A.w}function U(i,a){var o=n+i*t,s=r+a*e,l=Math.max(0,Math.min(1,j(0,r),j(0,s))),c=Math.max(0,Math.min(1,F(n,r),F(o,s))),u=Math.max(0,Math.min(1,B(n,r),B(o,s))),g=(l/2+u)*A.w,v=(1-l/2-c)*A.w,y=(g+v)/2,b=v-g,w=(1-l)*A.h,_=w-b/E;b.2?"rgba(0,0,0,0.4)":"rgba(255,255,255,0.3)").duration(200),T.transition().style("opacity",1).duration(200),x=!0),C.emit("plotly_relayouting",z(d))}function H(){D(C),d!==f&&(a.call("_guiRelayout",C,z(d)),I&&C.data&&C._context.showTips&&(o.notifier(l(C,"Double-click to zoom back out"),"long"),I=!1))}function V(t,e){var n=t/A.xaxis._m,r=e/A.yaxis._m,i=[(d={a:f.a-r,b:f.b+(n+r)/2,c:f.c-(n-r)/2}).a,d.b,d.c].sort(o.sorterAsc),a=i.indexOf(d.a),l=i.indexOf(d.b),c=i.indexOf(d.c);i[0]<0&&(i[1]+i[0]/2<0?(i[2]+=i[0]+i[1],i[0]=i[1]=0):(i[2]+=i[0]/2,i[1]+=i[0]/2,i[0]=0),d={a:i[a],b:i[l],c:i[c]},e=(f.a-d.a)*A.yaxis._m,t=(f.c-d.c-f.b+d.b)*A.xaxis._m);var h=s(A.x0+t,A.y0+e);A.plotContainer.selectAll(".scatterlayer,.maplayer").attr("transform",h);var p=s(-t,-e);A.clipDefRelative.select("path").attr("transform",p),A.aaxis.range=[d.a,A.sum-d.b-d.c],A.baxis.range=[A.sum-d.a-d.c,d.b],A.caxis.range=[A.sum-d.a-d.b,d.c],A.drawAxes(!1),A._hasClipOnAxisFalse&&A.plotContainer.select(".scatterlayer").selectAll(".trace").call(u.hideOutsideRangePoints,A),C.emit("plotly_relayouting",z(d))}function q(){a.call("_guiRelayout",C,z(d))}this.dragOptions={element:S,gd:C,plotinfo:{id:A.id,domain:C._fullLayout[A.id].domain,xaxis:A.xaxis,yaxis:A.yaxis},subplot:A.id,prepFn:function(a,l,u){A.dragOptions.xaxes=[A.xaxis],A.dragOptions.yaxes=[A.yaxis],t=C._fullLayout._invScaleX,e=C._fullLayout._invScaleY;var g=A.dragOptions.dragmode=C._fullLayout.dragmode;y(g)?A.dragOptions.minDrag=1:A.dragOptions.minDrag=void 0,"zoom"===g?(A.dragOptions.moveFn=U,A.dragOptions.clickFn=N,A.dragOptions.doneFn=H,function(t,e,a){var l=S.getBoundingClientRect();n=e-l.left,r=a-l.top,C._fullLayout._calcInverseTransform(C);var u=C._fullLayout._invTransform,g=o.apply3DTransform(u)(n,r);n=g[0],r=g[1],f={a:A.aaxis.range[0],b:A.baxis.range[1],c:A.caxis.range[1]},d=f,h=A.aaxis.range[1]-f.a,p=i(A.graphDiv._fullLayout[A.id].bgcolor).getLuminance(),m="M0,"+A.h+"L"+A.w/2+", 0L"+A.w+","+A.h+"Z",x=!1,k=R.append("path").attr("class","zoombox").attr("transform",s(A.x0,A.y0)).style({fill:p>.2?"rgba(0,0,0,0)":"rgba(255,255,255,0)","stroke-width":0}).attr("d",m),T=R.append("path").attr("class","zoombox-corners").attr("transform",s(A.x0,A.y0)).style({fill:c.background,stroke:c.defaultLine,"stroke-width":1,opacity:0}).attr("d","M0,0Z"),A.clearOutline(C)}(0,l,u)):"pan"===g?(A.dragOptions.moveFn=V,A.dragOptions.clickFn=N,A.dragOptions.doneFn=q,f={a:A.aaxis.range[0],b:A.baxis.range[1],c:A.caxis.range[1]},d=f,A.clearOutline(C)):(b(g)||y(g))&&w(a,l,u,A.dragOptions,g)}},S.onmousemove=function(t){v.hover(C,t,A.id),C._fullLayout._lasthover=S,C._fullLayout._hoversubplot=A.id},S.onmouseout=function(t){C._dragging||g.unhover(C,t)},g.init(this.dragOptions)}},73972:function(t,e,n){"use strict";var r=n(47769),i=n(64213),a=n(75138),o=n(41965),s=n(24401).addStyleRule,l=n(1426),c=n(9012),u=n(10820),f=l.extendFlat,h=l.extendDeepAll;function d(t){var n=t.name,i=t.categories,a=t.meta;if(e.modules[n])r.log("Type 
"+n+" already registered");else{e.subplotsRegistry[t.basePlotModule.name]||function(t){var n=t.name;if(e.subplotsRegistry[n])r.log("Plot type "+n+" already registered.");else for(var i in m(t),e.subplotsRegistry[n]=t,e.componentsRegistry)x(i,t.name)}(t.basePlotModule);for(var o={},l=0;l-1&&(f[d[n]].title={text:""});for(n=0;n")?"":e.html(t).text()}));return e.remove(),n}(_)).replace(/&(?!\w+;|\#[0-9]+;| \#x[0-9A-F]+;)/g,"&")).replace(u,"'"),i.isIE()&&(_=(_=(_=_.replace(/"/gi,"'")).replace(/(\('#)([^']*)('\))/gi,'("#$2")')).replace(/(\\')/gi,'"')),_}},75341:function(t,e,n){"use strict";var r=n(71828);t.exports=function(t,e){for(var n=0;nf+c||!r(u))}for(var d=0;da))return e}return void 0!==n?n:t.dflt},e.coerceColor=function(t,e,n){return i(e).isValid()?e:void 0!==n?n:t.dflt},e.coerceEnumerated=function(t,e,n){return t.coerceNumber&&(e=+e),-1!==t.values.indexOf(e)?e:void 0!==n?n:t.dflt},e.getValue=function(t,e){var n;return Array.isArray(t)?e0?e+=n:u<0&&(e-=n)}return e}function D(t){var e=u,n=t.b,i=I(t);return r.inbox(n-e,i-e,w+(i-e)/(i-n)-1)}var R=t[f+"a"],z=t[h+"a"];g=Math.abs(R.r2c(R.range[1])-R.r2c(R.range[0]));var N=r.getDistanceFunction(i,d,p,(function(t){return(d(t)+p(t))/2}));if(r.getClosest(v,N,t),!1!==t.index&&v[t.index].p!==c){T||(C=function(t){return Math.min(M(t),t.p-y.bargroupwidth/2)},P=function(t){return Math.max(A(t),t.p+y.bargroupwidth/2)});var j=v[t.index],F=m.base?j.b+j.s:j.s;t[h+"0"]=t[h+"1"]=z.c2p(j[h],!0),t[h+"LabelVal"]=F;var B=y.extents[y.extents.round(j.p)];t[f+"0"]=R.c2p(b?C(j):B[0],!0),t[f+"1"]=R.c2p(b?P(j):B[1],!0);var U=void 0!==j.orig_p;return t[f+"LabelVal"]=U?j.orig_p:j.p,t.labelLabel=l(R,t[f+"LabelVal"],m[f+"hoverformat"]),t.valueLabel=l(z,t[h+"LabelVal"],m[h+"hoverformat"]),t.baseLabel=l(z,j.b,m[h+"hoverformat"]),t.spikeDistance=(function(t){var e=u,n=t.b,i=I(t);return r.inbox(n-e,i-e,_+(i-e)/(i-n)-1)}(j)+function(t){return O(M(t),A(t),_)}(j))/2,t[f+"Spike"]=R.c2p(j.p,!0),o(j,m,t),t.hovertemplate=m.hovertemplate,t}}function f(t,e){var n=e.mcc||t.marker.color,r=e.mlcc||t.marker.line.color,i=s(t,e);return a.opacity(n)?n:a.opacity(r)&&i?r:void 0}t.exports={hoverPoints:function(t,e,n,r,a){var o=u(t,e,n,r,a);if(o){var s=o.cd,l=s[0].trace,c=s[o.index];return o.color=f(l,c),i.getComponentMethod("errorbars","hoverInfo")(c,l,o),[o]}},hoverOnBars:u,getTraceColor:f}},60822:function(t,e,n){"use strict";t.exports={attributes:n(1486),layoutAttributes:n(43641),supplyDefaults:n(90769).supplyDefaults,crossTraceDefaults:n(90769).crossTraceDefaults,supplyLayoutDefaults:n(13957),calc:n(92290),crossTraceCalc:n(11661).crossTraceCalc,colorbar:n(4898),arraysToCalcdata:n(75341),plot:n(17295).plot,style:n(16688).style,styleOnSelect:n(16688).styleOnSelect,hoverPoints:n(95423).hoverPoints,eventData:n(58065),selectPoints:n(81974),moduleType:"trace",name:"bar",basePlotModule:n(93612),categories:["bar-like","cartesian","svg","bar","oriented","errorBarsOK","showLegend","zoomScale"],animatable:!0,meta:{}}},43641:function(t){"use strict";t.exports={barmode:{valType:"enumerated",values:["stack","group","overlay","relative"],dflt:"group",editType:"calc"},barnorm:{valType:"enumerated",values:["","fraction","percent"],dflt:"",editType:"calc"},bargap:{valType:"number",min:0,max:1,editType:"calc"},bargroupgap:{valType:"number",min:0,max:1,dflt:0,editType:"calc"}}},13957:function(t,e,n){"use strict";var r=n(73972),i=n(89298),a=n(71828),o=n(43641);t.exports=function(t,e,n){function s(n,r){return a.coerce(t,e,o,n,r)}for(var l=!1,c=!1,u=!1,f={},h=s("barmode"),d=0;d0}function 
S(t){return"auto"===t?0:t}function E(t,e){var n=Math.PI/180*e,r=Math.abs(Math.sin(n)),i=Math.abs(Math.cos(n));return{x:t.width*i+t.height*r,y:t.width*r+t.height*i}}function C(t,e,n,r,i,a){var o=!!a.isHorizontal,s=!!a.constrained,l=a.angle||0,c=a.anchor||"end",u="end"===c,f="start"===c,h=((a.leftToRight||0)+1)/2,d=1-h,p=i.width,g=i.height,v=Math.abs(e-t),m=Math.abs(r-n),y=v>2*w&&m>2*w?w:0;v-=2*y,m-=2*y;var b=S(l);"auto"!==l||p<=v&&g<=m||!(p>v||g>m)||(p>m||g>v)&&p.01?G:function(t,e,n){return n&&t===e?t:Math.abs(t-e)>=2?G(t):t>e?Math.ceil(t):Math.floor(t)};F=W(F,B,z),B=W(B,F,z),U=W(U,H,!z),H=W(H,U,!z)}var Y=M(a.ensureSingle(I,"path"),O,v,m);if(Y.style("vector-effect",L?"none":"non-scaling-stroke").attr("d",isNaN((B-F)*(H-U))||V&&t._context.staticPlot?"M0,0Z":"M"+F+","+U+"V"+H+"H"+B+"V"+U+"Z").call(l.setClipUrl,e.layerClipId,t),!O.uniformtext.mode&&N){var $=l.makePointStyleFns(f);l.singlePointStyle(c,Y,f,$,t)}!function(t,e,n,r,i,s,c,f,d,v,m){var _,k=e.xaxis,A=e.yaxis,P=t._fullLayout;function O(e,n,r){return a.ensureSingle(e,"text").text(n).attr({class:"bartext bartext-"+_,"text-anchor":"middle","data-notex":1}).call(l.font,r).call(o.convertToTspans,t)}var L=r[0].trace,I="h"===L.orientation,D=function(t,e,n,r,i){var o,s=e[0].trace,l=s.texttemplate;return o=l?function(t,e,n,r,i){var o=e[0].trace,s=a.castOption(o,n,"texttemplate");if(!s)return"";var l,c,f,h,d="histogram"===o.type,p="waterfall"===o.type,g="funnel"===o.type,v="h"===o.orientation;function m(t){return u(c,c.c2l(t),!0).text}function y(t){return u(h,h.c2l(t),!0).text}v?(l="y",c=i,f="x",h=r):(l="x",c=r,f="y",h=i);var b=e[n],w={};w.label=b.p,w.labelLabel=w[l+"Label"]=m(b.p);var _=a.castOption(o,b.i,"text");(0===_||_)&&(w.text=_),w.value=b.s,w.valueLabel=w[f+"Label"]=y(b.s);var k={};x(k,o,b.i),(d||void 0===k.x)&&(k.x=v?w.value:w.label),(d||void 0===k.y)&&(k.y=v?w.label:w.value),(d||void 0===k.xLabel)&&(k.xLabel=v?w.valueLabel:w.labelLabel),(d||void 0===k.yLabel)&&(k.yLabel=v?w.labelLabel:w.valueLabel),p&&(w.delta=+b.rawS||b.s,w.deltaLabel=y(w.delta),w.final=b.v,w.finalLabel=y(w.final),w.initial=w.final-w.delta,w.initialLabel=y(w.initial)),g&&(w.value=b.s,w.valueLabel=y(w.value),w.percentInitial=b.begR,w.percentInitialLabel=a.formatPercent(b.begR),w.percentPrevious=b.difR,w.percentPreviousLabel=a.formatPercent(b.difR),w.percentTotal=b.sumR,w.percenTotalLabel=a.formatPercent(b.sumR));var T=a.castOption(o,b.i,"customdata");return T&&(w.customdata=T),a.texttemplateString(s,w,t._d3locale,k,w,o._meta||{})}(t,e,n,r,i):s.textinfo?function(t,e,n,r){var i=t[0].trace,o="h"===i.orientation,s="waterfall"===i.type,l="funnel"===i.type;function c(t){return u(o?r:n,t,!0).text}function f(t){return u(o?n:r,+t,!0).text}var h,d=i.textinfo,p=t[e],g=d.split("+"),v=[],m=function(t){return-1!==g.indexOf(t)};if(m("label")&&v.push(c(t[e].p)),m("text")&&(0===(h=a.castOption(i,p.i,"text"))||h)&&v.push(h),s){var y=+p.rawS||p.s,b=p.v,x=b-y;m("initial")&&v.push(f(x)),m("delta")&&v.push(f(y)),m("final")&&v.push(f(b))}if(l){m("value")&&v.push(f(p.s));var w=0;m("percent initial")&&w++,m("percent previous")&&w++,m("percent total")&&w++;var _=w>1;m("percent initial")&&(h=a.formatPercent(p.begR),_&&(h+=" of initial"),v.push(h)),m("percent previous")&&(h=a.formatPercent(p.difR),_&&(h+=" of previous"),v.push(h)),m("percent total")&&(h=a.formatPercent(p.sumR),_&&(h+=" of total"),v.push(h))}return v.join("
")}(e,n,r,i):g.getValue(s.text,n),g.coerceString(y,o)}(P,r,i,k,A);_=function(t,e){var n=g.getValue(t.textposition,e);return g.coerceEnumerated(b,n)}(L,i);var R="stack"===v.mode||"relative"===v.mode,z=r[i],N=!R||z._outmost;if(D&&"none"!==_&&(!z.isBlank&&s!==c&&f!==d||"auto"!==_&&"inside"!==_)){var j=P.font,F=p.getBarColor(r[i],L),B=p.getInsideTextFont(L,i,j,F),U=p.getOutsideTextFont(L,i,j),H=n.datum();I?"log"===k.type&&H.s0<=0&&(s=k.range[0]0&&W>0&&(G<=$&&W<=X||G<=X&&W<=$||(I?$>=G*(X/W):X>=W*($/G)))?_="inside":(_="outside",V.remove(),V=null)):_="inside"),!V){var K=(V=O(n,D,Y=a.ensureUniformFontSize(t,"outside"===_?U:B))).attr("transform");if(V.attr("transform",""),G=(q=l.bBox(V.node())).width,W=q.height,V.attr("transform",K),G<=0||W<=0)return void V.remove()}var Z,J=L.textangle;Z="outside"===_?function(t,e,n,r,i,a){var o,s=!!a.isHorizontal,l=!!a.constrained,c=a.angle||0,u=i.width,f=i.height,h=Math.abs(e-t),d=Math.abs(r-n);o=s?d>2*w?w:0:h>2*w?w:0;var p=1;l&&(p=s?Math.min(1,d/f):Math.min(1,h/u));var g=S(c),v=E(i,g),m=(s?v.x:v.y)/2,y=(i.left+i.right)/2,b=(i.top+i.bottom)/2,x=(t+e)/2,_=(n+r)/2,k=0,M=0,A=s?T(e,t):T(n,r);return s?(x=e-A*o,k=A*m):(_=r+A*o,M=-A*m),{textX:y,textY:b,targetX:x,targetY:_,anchorX:k,anchorY:M,scale:p,rotate:g}}(s,c,f,d,q,{isHorizontal:I,constrained:"both"===L.constraintext||"outside"===L.constraintext,angle:J}):C(s,c,f,d,q,{isHorizontal:I,constrained:"both"===L.constraintext||"inside"===L.constraintext,angle:J,anchor:L.insidetextanchor}),Z.fontSize=Y.size,h("histogram"===L.type?"bar":L.type,Z,P),z.transform=Z;var Q=M(V,P,v,m);a.setTransormAndDisplay(Q,Z)}else n.select("text").remove()}(t,e,I,n,d,F,B,U,H,v,m),e.layerClipId&&l.hideOutsideRangePoint(c,I.select("text"),_,P,f.xcalendar,f.ycalendar)}));var U=!1===f.cliponaxis;l.setClipUrl(c,U?null:e.layerClipId,t)}));c.getComponentMethod("errorbars","plot")(t,I,e,v)},toMoveInsideBar:C}},81974:function(t){"use strict";function e(t,e,n,r,i){var a=e.c2p(r?t.s0:t.p0,!0),o=e.c2p(r?t.s1:t.p1,!0),s=n.c2p(r?t.p0:t.s0,!0),l=n.c2p(r?t.p1:t.s1,!0);return i?[(a+o)/2,(s+l)/2]:r?[o,(s+l)/2]:[(a+o)/2,l]}t.exports=function(t,n){var r,i=t.cd,a=t.xaxis,o=t.yaxis,s=i[0].trace,l="funnel"===s.type,c="h"===s.orientation,u=[];if(!1===n)for(r=0;r1||0===i.bargap&&0===i.bargroupgap&&!t[0].trace.marker.line.width)&&r.select(this).attr("shape-rendering","crispEdges")})),e.selectAll("g.points").each((function(e){p(r.select(this),e[0].trace,t)})),s.getComponentMethod("errorbars","style")(e)},styleTextPoints:g,styleOnSelect:function(t,e,n){var i=e[0].trace;i.selectedpoints?function(t,e,n){a.selectedPointStyle(t.selectAll("path"),e),function(t,e,n){t.each((function(t){var i,s=r.select(this);if(t.selected){i=o.ensureUniformFontSize(n,v(s,t,e,n));var l=e.selected.textfont&&e.selected.textfont.color;l&&(i.color=l),a.font(s,i)}else a.selectedTextStyle(s,e)}))}(t.selectAll("text"),e,n)}(n,i,t):(p(n,i,t),s.getComponentMethod("errorbars","style")(n))},getInsideTextFont:y,getOutsideTextFont:b,getBarColor:w,resizeText:l}},98340:function(t,e,n){"use strict";var r=n(7901),i=n(52075).hasColorscale,a=n(1586),o=n(71828).coercePattern;t.exports=function(t,e,n,s,l){var c=n("marker.color",s),u=i(t,"marker");u&&a(t,e,l,n,{prefix:"marker.",cLetter:"c"}),n("marker.line.color",r.defaultLine),i(t,"marker.line")&&a(t,e,l,n,{prefix:"marker.line.",cLetter:"c"}),n("marker.line.width"),n("marker.opacity"),o(n,"marker.pattern",c,u),n("selected.marker.color"),n("unselected.marker.color")}},72597:function(t,e,n){"use strict";var r=n(39898),i=n(71828);function 
a(t){return"_"+t+"Text_minsize"}t.exports={recordMinTextSize:function(t,e,n){if(n.uniformtext.mode){var r=a(t),i=n.uniformtext.minsize,o=e.scale*e.fontSize;e.hide=oh.range[1]&&(b+=Math.PI),r.getClosest(c,(function(t){return g(y,b,[t.rp0,t.rp1],[t.thetag0,t.thetag1],p)?v+Math.min(1,Math.abs(t.thetag1-t.thetag0)/m)-1+(t.rp1-y)/(t.rp1-t.rp0)-1:1/0}),t),!1!==t.index){var x=c[t.index];t.x0=t.x1=x.ct[0],t.y0=t.y1=x.ct[1];var w=i.extendFlat({},x,{r:x.s,theta:x.p});return o(x,u,t),s(w,u,f,t),t.hovertemplate=u.hovertemplate,t.color=a(u,x),t.xLabelVal=t.yLabelVal=void 0,x.s<0&&(t.idealAlign="left"),[t]}}},23381:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"barpolar",basePlotModule:n(23580),categories:["polar","bar","showLegend"],attributes:n(55023),layoutAttributes:n(40151),supplyDefaults:n(6135),supplyLayoutDefaults:n(19860),calc:n(74692).calc,crossTraceCalc:n(74692).crossTraceCalc,plot:n(60173),colorbar:n(4898),formatLabels:n(98608),style:n(16688).style,styleOnSelect:n(16688).styleOnSelect,hoverPoints:n(27379),selectPoints:n(81974),meta:{}}},40151:function(t){"use strict";t.exports={barmode:{valType:"enumerated",values:["stack","overlay"],dflt:"stack",editType:"calc"},bargap:{valType:"number",dflt:.1,min:0,max:1,editType:"calc"}}},19860:function(t,e,n){"use strict";var r=n(71828),i=n(40151);t.exports=function(t,e,n){var a,o={};function s(n,o){return r.coerce(t[a]||{},e[a],i,n,o)}for(var l=0;l0?(c=o,u=l):(c=l,u=o);var f=[s.findEnclosingVertexAngles(c,t.vangles)[0],(c+u)/2,s.findEnclosingVertexAngles(u,t.vangles)[1]];return s.pathPolygonAnnulus(r,i,c,u,f,e,n)}:function(t,r,i,o){return a.pathAnnulus(t,r,i,o,e,n)}}(e),p=e.layers.frontplot.select("g.barlayer");a.makeTraceGroups(p,n,"trace bars").each((function(){var n=r.select(this),s=a.ensureSingle(n,"g","points").selectAll("g.point").data(a.identity);s.enter().append("g").style("vector-effect",l?"none":"non-scaling-stroke").style("stroke-miterlimit",2).classed("point",!0),s.exit().remove(),s.each((function(t){var e,n=r.select(this),o=t.rp0=f.c2p(t.s0),s=t.rp1=f.c2p(t.s1),l=t.thetag0=h.c2g(t.p0),p=t.thetag1=h.c2g(t.p1);if(i(o)&&i(s)&&i(l)&&i(p)&&o!==s&&l!==p){var g=f.c2g(t.s1),v=(l+p)/2;t.ct=[c.c2p(g*Math.cos(v)),u.c2p(g*Math.sin(v))],e=d(o,s,l,p)}else e="M0,0Z";a.ensureSingle(n,"path").attr("d",e)})),o.setClipUrl(n,e._hasClipOnAxisFalse?e.clipIds.forTraces:null,t)}))}},53522:function(t,e,n){"use strict";var 
r=n(82196),i=n(1486),a=n(22399),o=n(12663).axisHoverFormat,s=n(5386).fF,l=n(1426).extendFlat,c=r.marker,u=c.line;t.exports={y:{valType:"data_array",editType:"calc+clearAxisTypes"},x:{valType:"data_array",editType:"calc+clearAxisTypes"},x0:{valType:"any",editType:"calc+clearAxisTypes"},y0:{valType:"any",editType:"calc+clearAxisTypes"},dx:{valType:"number",editType:"calc"},dy:{valType:"number",editType:"calc"},xperiod:r.xperiod,yperiod:r.yperiod,xperiod0:r.xperiod0,yperiod0:r.yperiod0,xperiodalignment:r.xperiodalignment,yperiodalignment:r.yperiodalignment,xhoverformat:o("x"),yhoverformat:o("y"),name:{valType:"string",editType:"calc+clearAxisTypes"},q1:{valType:"data_array",editType:"calc+clearAxisTypes"},median:{valType:"data_array",editType:"calc+clearAxisTypes"},q3:{valType:"data_array",editType:"calc+clearAxisTypes"},lowerfence:{valType:"data_array",editType:"calc"},upperfence:{valType:"data_array",editType:"calc"},notched:{valType:"boolean",editType:"calc"},notchwidth:{valType:"number",min:0,max:.5,dflt:.25,editType:"calc"},notchspan:{valType:"data_array",editType:"calc"},boxpoints:{valType:"enumerated",values:["all","outliers","suspectedoutliers",!1],editType:"calc"},jitter:{valType:"number",min:0,max:1,editType:"calc"},pointpos:{valType:"number",min:-2,max:2,editType:"calc"},boxmean:{valType:"enumerated",values:[!0,"sd",!1],editType:"calc"},mean:{valType:"data_array",editType:"calc"},sd:{valType:"data_array",editType:"calc"},orientation:{valType:"enumerated",values:["v","h"],editType:"calc+clearAxisTypes"},quartilemethod:{valType:"enumerated",values:["linear","exclusive","inclusive"],dflt:"linear",editType:"calc"},width:{valType:"number",min:0,dflt:0,editType:"calc"},marker:{outliercolor:{valType:"color",dflt:"rgba(0, 0, 0, 0)",editType:"style"},symbol:l({},c.symbol,{arrayOk:!1,editType:"plot"}),opacity:l({},c.opacity,{arrayOk:!1,dflt:1,editType:"style"}),angle:l({},c.angle,{arrayOk:!1,editType:"calc"}),size:l({},c.size,{arrayOk:!1,editType:"calc"}),color:l({},c.color,{arrayOk:!1,editType:"style"}),line:{color:l({},u.color,{arrayOk:!1,dflt:a.defaultLine,editType:"style"}),width:l({},u.width,{arrayOk:!1,dflt:0,editType:"style"}),outliercolor:{valType:"color",editType:"style"},outlierwidth:{valType:"number",min:0,dflt:1,editType:"style"},editType:"style"},editType:"plot"},line:{color:{valType:"color",editType:"style"},width:{valType:"number",min:0,dflt:2,editType:"style"},editType:"plot"},fillcolor:r.fillcolor,whiskerwidth:{valType:"number",min:0,max:1,dflt:.5,editType:"calc"},offsetgroup:i.offsetgroup,alignmentgroup:i.alignmentgroup,selected:{marker:r.selected.marker,editType:"style"},unselected:{marker:r.unselected.marker,editType:"style"},text:l({},r.text,{}),hovertext:l({},r.hovertext,{}),hovertemplate:s({}),hoveron:{valType:"flaglist",flags:["boxes","points"],dflt:"boxes+points",editType:"style"}}},48518:function(t,e,n){"use strict";var r=n(92770),i=n(89298),a=n(42973),o=n(71828),s=n(50606).BADNUM,l=o._;t.exports=function(t,e){var n,c,y,b,x,w,_,k=t._fullLayout,T=i.getFromId(t,e.xaxis||"x"),M=i.getFromId(t,e.yaxis||"y"),A=[],S="violin"===e.type?"_numViolins":"_numBoxes";"h"===e.orientation?(y=T,b="x",x=M,w="y",_=!!e.yperiodalignment):(y=M,b="y",x=T,w="x",_=!!e.xperiodalignment);var E,C,P,O,L,I,D=function(t,e,n,i){var s,l=e in t,c=e+"0"in t,u="d"+e in t;if(l||c&&u){var f=n.makeCalcdata(t,e);return[a(t,n,e,f).vals,f]}s=c?t[e+"0"]:"name"in t&&("category"===n.type||r(t.name)&&-1!==["linear","log"].indexOf(n.type)||o.isDateTime(t.name)&&"date"===n.type)?t.name:i;for(var 
h="multicategory"===n.type?n.r2c_just_indices(s):n.d2c(s,0,t[e+"calendar"]),d=t._length,p=new Array(d),g=0;gE.uf};if(e._hasPreCompStats){var U=e[b],H=function(t){return y.d2c((e[t]||[])[n])},V=1/0,q=-1/0;for(n=0;n=E.q1&&E.q3>=E.med){var W=H("lowerfence");E.lf=W!==s&&W<=E.q1?W:d(E,P,O);var Y=H("upperfence");E.uf=Y!==s&&Y>=E.q3?Y:p(E,P,O);var $=H("mean");E.mean=$!==s?$:O?o.mean(P,O):(E.q1+E.q3)/2;var X=H("sd");E.sd=$!==s&&X>=0?X:O?o.stdev(P,O,E.mean):E.q3-E.q1,E.lo=g(E),E.uo=v(E);var K=H("notchspan");K=K!==s&&K>0?K:m(E,O),E.ln=E.med-K,E.un=E.med+K;var Z=E.lf,J=E.uf;e.boxpoints&&P.length&&(Z=Math.min(Z,P[0]),J=Math.max(J,P[O-1])),e.notched&&(Z=Math.min(Z,E.ln),J=Math.max(J,E.un)),E.min=Z,E.max=J}else{var Q;o.warn(["Invalid input - make sure that q1 <= median <= q3","q1 = "+E.q1,"median = "+E.med,"q3 = "+E.q3].join("\n")),Q=E.med!==s?E.med:E.q1!==s?E.q3!==s?(E.q1+E.q3)/2:E.q1:E.q3!==s?E.q3:0,E.med=Q,E.q1=E.q3=Q,E.lf=E.uf=Q,E.mean=E.sd=Q,E.ln=E.un=Q,E.min=E.max=Q}V=Math.min(V,E.min),q=Math.max(q,E.max),E.pts2=C.filter(B),A.push(E)}}e._extremes[y._id]=i.findExtremes(y,[V,q],{padded:!0})}else{var tt=y.makeCalcdata(e,b),et=function(t,e){for(var n=t.length,r=new Array(n+1),i=0;i=0&&it0){var ut,ft;(E={}).pos=E[w]=j[n],C=E.pts=rt[n].sort(f),O=(P=E[b]=C.map(h)).length,E.min=P[0],E.max=P[O-1],E.mean=o.mean(P,O),E.sd=o.stdev(P,O,E.mean),E.med=o.interp(P,.5),O%2&&(lt||ct)?(lt?(ut=P.slice(0,O/2),ft=P.slice(O/2+1)):ct&&(ut=P.slice(0,O/2+1),ft=P.slice(O/2)),E.q1=o.interp(ut,.5),E.q3=o.interp(ft,.5)):(E.q1=o.interp(P,.25),E.q3=o.interp(P,.75)),E.lf=d(E,P,O),E.uf=p(E,P,O),E.lo=g(E),E.uo=v(E);var ht=m(E,O);E.ln=E.med-ht,E.un=E.med+ht,at=Math.min(at,E.ln),ot=Math.max(ot,E.un),E.pts2=C.filter(B),A.push(E)}e._extremes[y._id]=i.findExtremes(y,e.notched?tt.concat([at,ot]):tt,{padded:!0})}return function(t,e){if(o.isArrayOrTypedArray(e.selectedpoints))for(var n=0;n0?(A[0].t={num:k[S],dPos:F,posLetter:w,valLetter:b,labels:{med:l(t,"median:"),min:l(t,"min:"),q1:l(t,"q1:"),q3:l(t,"q3:"),max:l(t,"max:"),mean:"sd"===e.boxmean?l(t,"mean \xb1 \u03c3:"):l(t,"mean:"),lf:l(t,"lower fence:"),uf:l(t,"upper fence:")}},k[S]++,A):[{t:{empty:!0}}]};var c={text:"tx",hovertext:"htx"};function u(t,e,n){for(var r in c)o.isArrayOrTypedArray(e[r])&&(Array.isArray(n)?o.isArrayOrTypedArray(e[r][n[0]])&&(t[c[r]]=e[r][n[0]][n[1]]):t[c[r]]=e[r][n])}function f(t,e){return t.v-e.v}function h(t){return t.v}function d(t,e,n){return 0===n?t.q1:Math.min(t.q1,e[Math.min(o.findBin(2.5*t.q1-1.5*t.q3,e,!0)+1,n-1)])}function p(t,e,n){return 0===n?t.q3:Math.max(t.q3,e[Math.max(o.findBin(2.5*t.q3-1.5*t.q1,e),0)])}function g(t){return 4*t.q1-3*t.q3}function v(t){return 4*t.q3-3*t.q1}function m(t,e){return 0===e?0:1.57*(t.q3-t.q1)/Math.sqrt(e)}},37188:function(t,e,n){"use strict";var r=n(89298),i=n(71828),a=n(99082).getAxisGroup,o=["v","h"];function s(t,e,n,o){var s,l,c,u=e.calcdata,f=e._fullLayout,h=o._id,d=h.charAt(0),p=[],g=0;for(s=0;s1,x=1-f[t+"gap"],w=1-f[t+"groupgap"];for(s=0;s0){var q=E.pointpos,G=E.jitter,W=E.marker.size/2,Y=0;q+G>=0&&((Y=H*(q+G))>A?(V=!0,B=W,j=Y):Y>z&&(B=W,j=A)),Y<=A&&(j=A);var $=0;q-G<=0&&(($=-H*(q-G))>S?(V=!0,U=W,F=$):$>N&&(U=W,F=S)),$<=S&&(F=S)}else j=A,F=S;var X=new Array(c.length);for(l=0;l0?(v="v",m=b>0?Math.min(w,x):Math.min(x)):b>0?(v="h",m=Math.min(w)):m=0;if(m){e._length=m;var S=n("orientation",v);e._hasPreCompStats?"v"===S&&0===b?(n("x0",0),n("dx",1)):"h"===S&&0===y&&(n("y0",0),n("dy",1)):"v"===S&&0===b?n("x0"):"h"===S&&0===y&&n("y0"),i.getComponentMethod("calendars","handleTraceDefaults")(t,e,["x","y"],a)}else 
e.visible=!1}function f(t,e,n,i){var a=i.prefix,o=r.coerce2(t,e,c,"marker.outliercolor"),s=n("marker.line.outliercolor"),l="outliers";e._hasPreCompStats?l="all":(o||s)&&(l="suspectedoutliers");var u=n(a+"points",l);u?(n("jitter","all"===u?.3:0),n("pointpos","all"===u?-1.5:0),n("marker.symbol"),n("marker.opacity"),n("marker.size"),n("marker.angle"),n("marker.color",e.line.color),n("marker.line.color"),n("marker.line.width"),"suspectedoutliers"===u&&(n("marker.line.outliercolor",e.marker.color),n("marker.line.outlierwidth")),n("selected.marker.color"),n("unselected.marker.color"),n("selected.marker.size"),n("unselected.marker.size"),n("text"),n("hovertext")):delete e.marker;var f=n("hoveron");"all"!==f&&-1===f.indexOf("points")||n("hovertemplate"),r.coerceSelectionMarkerOpacity(e,n)}t.exports={supplyDefaults:function(t,e,n,i){function s(n,i){return r.coerce(t,e,c,n,i)}if(u(t,e,s,i),!1!==e.visible){o(t,e,i,s),s("xhoverformat"),s("yhoverformat");var l=e._hasPreCompStats;l&&(s("lowerfence"),s("upperfence")),s("line.color",(t.marker||{}).color||n),s("line.width"),s("fillcolor",a.addOpacity(e.line.color,.5));var h=!1;if(l){var d=s("mean"),p=s("sd");d&&d.length&&(h=!0,p&&p.length&&(h="sd"))}s("boxmean",h),s("whiskerwidth"),s("width"),s("quartilemethod");var g=!1;if(l){var v=s("notchspan");v&&v.length&&(g=!0)}else r.validate(t.notchwidth,c.notchwidth)&&(g=!0);s("notched",g)&&s("notchwidth"),f(t,e,s,{prefix:"box"})}},crossTraceDefaults:function(t,e){var n,i;function a(t){return r.coerce(i._input,i,c,t)}for(var o=0;ot.lo&&(w.so=!0)}return a}));p.enter().append("path").classed("point",!0),p.exit().remove(),p.call(a.translatePoints,l,c)}function u(t,e,n,a){var o,s,l=e.val,c=e.pos,u=!!c.rangebreaks,f=a.bPos,h=a.bPosPxOffset||0,d=n.boxmean||(n.meanline||{}).visible;Array.isArray(a.bdPos)?(o=a.bdPos[0],s=a.bdPos[1]):(o=a.bdPos,s=a.bdPos);var p=t.selectAll("path.mean").data("box"===n.type&&n.boxmean||"violin"===n.type&&n.box.visible&&n.meanline.visible?i.identity:[]);p.enter().append("path").attr("class","mean").style({fill:"none","vector-effect":"non-scaling-stroke"}),p.exit().remove(),p.each((function(t){var e=c.c2l(t.pos+f,!0),i=c.l2p(e-o)+h,a=c.l2p(e+s)+h,p=u?(i+a)/2:c.l2p(e)+h,g=l.c2p(t.mean,!0),v=l.c2p(t.mean-t.sd,!0),m=l.c2p(t.mean+t.sd,!0);"h"===n.orientation?r.select(this).attr("d","M"+g+","+i+"V"+a+("sd"===d?"m0,0L"+v+","+p+"L"+g+","+i+"L"+m+","+p+"Z":"")):r.select(this).attr("d","M"+i+","+g+"H"+a+("sd"===d?"m0,0L"+p+","+v+"L"+i+","+g+"L"+p+","+m+"Z":""))}))}t.exports={plot:function(t,e,n,a){var o=t._context.staticPlot,s=e.xaxis,f=e.yaxis;i.makeTraceGroups(a,n,"trace boxes").each((function(t){var e,n,i=r.select(this),a=t[0],h=a.t,d=a.trace;h.wdPos=h.bdPos*d.whiskerwidth,!0!==d.visible||h.empty?i.remove():("h"===d.orientation?(e=f,n=s):(e=s,n=f),l(i,{pos:e,val:n},d,h,o),c(i,{x:s,y:f},d,h),u(i,{pos:e,val:n},d,h))}))},plotBoxAndWhiskers:l,plotPoints:c,plotBoxMean:u}},24626:function(t){"use strict";t.exports=function(t,e){var n,r,i=t.cd,a=t.xaxis,o=t.yaxis,s=[];if(!1===e)for(n=0;n=10)return null;for(var n=1/0,a=-1/0,o=t.length,s=0;s0?Math.floor:Math.ceil,L=C>0?Math.ceil:Math.floor,I=C>0?Math.min:Math.max,D=C>0?Math.max:Math.min,R=O(S+P),z=L(E-P),N=[[f=A(S)]];for(a=R;a*C=0;i--)a[u-i]=t[f][i],o[u-i]=e[f][i];for(s.push({x:a,y:o,bicubic:l}),i=f,a=[],o=[];i>=0;i--)a[f-i]=t[i][0],o[f-i]=e[i][0];return s.push({x:a,y:o,bicubic:c}),s}},20347:function(t,e,n){"use strict";var r=n(89298),i=n(1426).extendFlat;t.exports=function(t,e,n){var 
a,o,s,l,c,u,f,h,d,p,g,v,m,y,b=t["_"+e],x=t[e+"axis"],w=x._gridlines=[],_=x._minorgridlines=[],k=x._boundarylines=[],T=t["_"+n],M=t[n+"axis"];"array"===x.tickmode&&(x.tickvals=b.slice());var A=t._xctrl,S=t._yctrl,E=A[0].length,C=A.length,P=t._a.length,O=t._b.length;r.prepTicks(x),"array"===x.tickmode&&delete x.tickvals;var L=x.smoothing?3:1;function I(r){var i,a,o,s,l,c,u,f,d,p,g,v,m=[],y=[],b={};if("b"===e)for(a=t.b2j(r),o=Math.floor(Math.max(0,Math.min(O-2,a))),s=a-o,b.length=O,b.crossLength=P,b.xy=function(e){return t.evalxy([],e,a)},b.dxy=function(e,n){return t.dxydi([],e,o,n,s)},i=0;i0&&(d=t.dxydi([],i-1,o,0,s),m.push(l[0]+d[0]/3),y.push(l[1]+d[1]/3),p=t.dxydi([],i-1,o,1,s),m.push(f[0]-p[0]/3),y.push(f[1]-p[1]/3)),m.push(f[0]),y.push(f[1]),l=f;else for(i=t.a2i(r),c=Math.floor(Math.max(0,Math.min(P-2,i))),u=i-c,b.length=P,b.crossLength=O,b.xy=function(e){return t.evalxy([],i,e)},b.dxy=function(e,n){return t.dxydj([],c,e,u,n)},a=0;a0&&(g=t.dxydj([],c,a-1,u,0),m.push(l[0]+g[0]/3),y.push(l[1]+g[1]/3),v=t.dxydj([],c,a-1,u,1),m.push(f[0]-v[0]/3),y.push(f[1]-v[1]/3)),m.push(f[0]),y.push(f[1]),l=f;return b.axisLetter=e,b.axis=x,b.crossAxis=M,b.value=r,b.constvar=n,b.index=h,b.x=m,b.y=y,b.smoothing=M.smoothing,b}function D(r){var i,a,o,s,l,c=[],u=[],f={};if(f.length=b.length,f.crossLength=T.length,"b"===e)for(o=Math.max(0,Math.min(O-2,r)),l=Math.min(1,Math.max(0,r-o)),f.xy=function(e){return t.evalxy([],e,r)},f.dxy=function(e,n){return t.dxydi([],e,o,n,l)},i=0;ib.length-1||w.push(i(D(o),{color:x.gridcolor,width:x.gridwidth,dash:x.griddash}));for(h=u;hb.length-1)&&!(g<0||g>b.length-1))for(v=b[s],m=b[g],a=0;ab[b.length-1]||_.push(i(I(p),{color:x.minorgridcolor,width:x.minorgridwidth,dash:x.minorgriddash}));x.startline&&k.push(i(D(0),{color:x.startlinecolor,width:x.startlinewidth})),x.endline&&k.push(i(D(b.length-1),{color:x.endlinecolor,width:x.endlinewidth}))}else{for(l=5e-15,u=(c=[Math.floor((b[b.length-1]-x.tick0)/x.dtick*(1+l)),Math.ceil((b[0]-x.tick0)/x.dtick/(1+l))].sort((function(t,e){return t-e})))[0],f=c[1],h=u;h<=f;h++)d=x.tick0+x.dtick*h,w.push(i(I(d),{color:x.gridcolor,width:x.gridwidth,dash:x.griddash}));for(h=u-1;hb[b.length-1]||_.push(i(I(p),{color:x.minorgridcolor,width:x.minorgridwidth,dash:x.minorgriddash}));x.startline&&k.push(i(I(b[0]),{color:x.startlinecolor,width:x.startlinewidth})),x.endline&&k.push(i(I(b[b.length-1]),{color:x.endlinecolor,width:x.endlinewidth}))}}},83311:function(t,e,n){"use strict";var r=n(89298),i=n(1426).extendFlat;t.exports=function(t,e){var n,a,o,s=e._labels=[],l=e._gridlines;for(n=0;ne.length&&(t=t.slice(0,e.length)):t=[],i=0;i90&&(d-=180,l=-l),{angle:d,flip:l,p:t.c2p(r,e,n),offsetMultplier:c}}},89740:function(t,e,n){"use strict";var r=n(39898),i=n(91424),a=n(27669),o=n(67961),s=n(11651),l=n(63893),c=n(71828),u=c.strRotate,f=c.strTranslate,h=n(18783);function d(t,e,n,s,l,c,u){var f="const-"+l+"-lines",h=n.selectAll("."+f).data(c);h.enter().append("path").classed(f,!0).style("vector-effect",u?"none":"non-scaling-stroke"),h.each((function(n){var s=n,l=s.x,c=s.y,u=a([],l,t.c2p),f=a([],c,e.c2p),h="M"+o(u,f,s.smoothing);r.select(this).attr("d",h).style("stroke-width",s.width).style("stroke",s.color).style("stroke-dasharray",i.dashStyle(s.dash,s.width)).style("fill","none")})),h.exit().remove()}function p(t,e,n,a,o,c,h,d){var p=c.selectAll("text."+d).data(h);p.enter().append("text").classed(d,!0);var g=0,v={};return p.each((function(o,c){var h;if("auto"===o.axis.tickangle)h=s(a,e,n,o.xy,o.dxy);else{var 
d=(o.axis.tickangle+180)*Math.PI/180;h=s(a,e,n,o.xy,[Math.cos(d),Math.sin(d)])}c||(v={angle:h.angle,flip:h.flip});var p=(o.endAnchor?-1:1)*h.flip,m=r.select(this).attr({"text-anchor":p>0?"start":"end","data-notex":1}).call(i.font,o.font).text(o.text).call(l.convertToTspans,t),y=i.bBox(this);m.attr("transform",f(h.p[0],h.p[1])+u(h.angle)+f(o.axis.labelpadding*p,.3*y.height)),g=Math.max(g,y.width+o.axis.labelpadding)})),p.exit().remove(),v.maxExtent=g,v}t.exports=function(t,e,n,i){var l=t._context.staticPlot,u=e.xaxis,f=e.yaxis,h=t._fullLayout._clips;c.makeTraceGroups(i,n,"trace").each((function(e){var n=r.select(this),i=e[0],g=i.trace,v=g.aaxis,y=g.baxis,b=c.ensureSingle(n,"g","minorlayer"),x=c.ensureSingle(n,"g","majorlayer"),w=c.ensureSingle(n,"g","boundarylayer"),_=c.ensureSingle(n,"g","labellayer");n.style("opacity",g.opacity),d(u,f,x,0,"a",v._gridlines,!0),d(u,f,x,0,"b",y._gridlines,!0),d(u,f,b,0,"a",v._minorgridlines,!0),d(u,f,b,0,"b",y._minorgridlines,!0),d(u,f,w,0,"a-boundary",v._boundarylines,l),d(u,f,w,0,"b-boundary",y._boundarylines,l);var k=p(t,u,f,g,0,_,v._labels,"a-label"),T=p(t,u,f,g,0,_,y._labels,"b-label");!function(t,e,n,r,i,a,o,l){var u,f,h,d,p=c.aggNums(Math.min,null,n.a),g=c.aggNums(Math.max,null,n.a),v=c.aggNums(Math.min,null,n.b),y=c.aggNums(Math.max,null,n.b);u=.5*(p+g),f=v,h=n.ab2xy(u,f,!0),d=n.dxyda_rough(u,f),void 0===o.angle&&c.extendFlat(o,s(n,i,a,h,n.dxydb_rough(u,f))),m(t,e,n,0,h,d,n.aaxis,i,a,o,"a-title"),u=p,f=.5*(v+y),h=n.ab2xy(u,f,!0),d=n.dxydb_rough(u,f),void 0===l.angle&&c.extendFlat(l,s(n,i,a,h,n.dxyda_rough(u,f))),m(t,e,n,0,h,d,n.baxis,i,a,l,"b-title")}(t,_,g,0,u,f,k,T),function(t,e,n,r,i){var s,l,u,f,h=n.select("#"+t._clipPathId);h.size()||(h=n.append("clipPath").classed("carpetclip",!0));var d=c.ensureSingle(h,"path","carpetboundary"),p=e.clipsegments,g=[];for(f=0;f90&&y<270,x=r.select(this);x.text(h.title.text).call(l.convertToTspans,t),b&&(w=(-l.lineCount(x)+v)*g*a-w),x.attr("transform",f(e.p[0],e.p[1])+u(e.angle)+f(0,w)).attr("text-anchor","middle").call(i.font,h.title.font)})),x.exit().remove()}},11435:function(t,e,n){"use strict";var r=n(35509),i=n(65888).findBin,a=n(45664),o=n(20349),s=n(54495),l=n(73057);t.exports=function(t){var e=t._a,n=t._b,c=e.length,u=n.length,f=t.aaxis,h=t.baxis,d=e[0],p=e[c-1],g=n[0],v=n[u-1],m=e[e.length-1]-e[0],y=n[n.length-1]-n[0],b=m*r.RELATIVE_CULL_TOLERANCE,x=y*r.RELATIVE_CULL_TOLERANCE;d-=b,p+=b,g-=x,v+=x,t.isVisible=function(t,e){return t>d&&tg&&ep||ev},t.setScale=function(){var e=t._x,n=t._y,r=a(t._xctrl,t._yctrl,e,n,f.smoothing,h.smoothing);t._xctrl=r[0],t._yctrl=r[1],t.evalxy=o([t._xctrl,t._yctrl],c,u,f.smoothing,h.smoothing),t.dxydi=s([t._xctrl,t._yctrl],f.smoothing,h.smoothing),t.dxydj=l([t._xctrl,t._yctrl],f.smoothing,h.smoothing)},t.i2a=function(t){var n=Math.max(0,Math.floor(t[0]),c-2),r=t[0]-n;return(1-r)*e[n]+r*e[n+1]},t.j2b=function(t){var e=Math.max(0,Math.floor(t[1]),c-2),r=t[1]-e;return(1-r)*n[e]+r*n[e+1]},t.ij2ab=function(e){return[t.i2a(e[0]),t.j2b(e[1])]},t.a2i=function(t){var n=Math.max(0,Math.min(i(t,e),c-2)),r=e[n],a=e[n+1];return Math.max(0,Math.min(c-1,n+(t-r)/(a-r)))},t.b2j=function(t){var e=Math.max(0,Math.min(i(t,n),u-2)),r=n[e],a=n[e+1];return Math.max(0,Math.min(u-1,e+(t-r)/(a-r)))},t.ab2ij=function(e){return[t.a2i(e[0]),t.b2j(e[1])]},t.i2c=function(e,n){return t.evalxy([],e,n)},t.ab2xy=function(r,i,a){if(!a&&(re[c-1]|in[u-1]))return[!1,!1];var o=t.a2i(r),s=t.b2j(i),l=t.evalxy([],o,s);if(a){var 
f,h,d,p,g=0,v=0,m=[];re[c-1]?(f=c-2,h=1,g=(r-e[c-1])/(e[c-1]-e[c-2])):h=o-(f=Math.max(0,Math.min(c-2,Math.floor(o)))),in[u-1]?(d=u-2,p=1,v=(i-n[u-1])/(n[u-1]-n[u-2])):p=s-(d=Math.max(0,Math.min(u-2,Math.floor(s)))),g&&(t.dxydi(m,f,d,h,p),l[0]+=m[0]*g,l[1]+=m[1]*g),v&&(t.dxydj(m,f,d,h,p),l[0]+=m[0]*v,l[1]+=m[1]*v)}return l},t.c2p=function(t,e,n){return[e.c2p(t[0]),n.c2p(t[1])]},t.p2x=function(t,e,n){return[e.p2c(t[0]),n.p2c(t[1])]},t.dadi=function(t){var n=Math.max(0,Math.min(e.length-2,t));return e[n+1]-e[n]},t.dbdj=function(t){var e=Math.max(0,Math.min(n.length-2,t));return n[e+1]-n[e]},t.dxyda=function(e,n,r,i){var a=t.dxydi(null,e,n,r,i),o=t.dadi(e,r);return[a[0]/o,a[1]/o]},t.dxydb=function(e,n,r,i){var a=t.dxydj(null,e,n,r,i),o=t.dbdj(n,i);return[a[0]/o,a[1]/o]},t.dxyda_rough=function(e,n,r){var i=m*(r||.1),a=t.ab2xy(e+i,n,!0),o=t.ab2xy(e-i,n,!0);return[.5*(a[0]-o[0])/i,.5*(a[1]-o[1])/i]},t.dxydb_rough=function(e,n,r){var i=y*(r||.1),a=t.ab2xy(e,n+i,!0),o=t.ab2xy(e,n-i,!0);return[.5*(a[0]-o[0])/i,.5*(a[1]-o[1])/i]},t.dpdx=function(t){return t._m},t.dpdy=function(t){return t._m}}},72505:function(t,e,n){"use strict";var r=n(71828);t.exports=function(t,e,n){var i,a,o,s=[],l=[],c=t[0].length,u=t.length;function f(e,n){var r,i=0,a=0;return e>0&&void 0!==(r=t[n][e-1])&&(a++,i+=r),e0&&void 0!==(r=t[n-1][e])&&(a++,i+=r),n0&&a0&&i1e-5);return r.log("Smoother converged to",T,"after",M,"iterations"),t}},19237:function(t,e,n){"use strict";var r=n(71828).isArray1D;t.exports=function(t,e,n){var i=n("x"),a=i&&i.length,o=n("y"),s=o&&o.length;if(!a&&!s)return!1;if(e._cheater=!i,a&&!r(i)||s&&!r(o))e._length=null;else{var l=a?i.length:1/0;s&&(l=Math.min(l,o.length)),e.a&&e.a.length&&(l=Math.min(l,e.a.length)),e.b&&e.b.length&&(l=Math.min(l,e.b.length)),e._length=l}return!0}},69568:function(t,e,n){"use strict";var r=n(5386).fF,i=n(19316),a=n(50693),o=n(9012),s=n(22399).defaultLine,l=n(1426).extendFlat,c=i.marker.line;t.exports=l({locations:{valType:"data_array",editType:"calc"},locationmode:i.locationmode,z:{valType:"data_array",editType:"calc"},geojson:l({},i.geojson,{}),featureidkey:i.featureidkey,text:l({},i.text,{}),hovertext:l({},i.hovertext,{}),marker:{line:{color:l({},c.color,{dflt:s}),width:l({},c.width,{dflt:1}),editType:"calc"},opacity:{valType:"number",arrayOk:!0,min:0,max:1,dflt:1,editType:"style"},editType:"calc"},selected:{marker:{opacity:i.selected.marker.opacity,editType:"plot"},editType:"plot"},unselected:{marker:{opacity:i.unselected.marker.opacity,editType:"plot"},editType:"plot"},hoverinfo:l({},o.hoverinfo,{editType:"calc",flags:["location","z","text","name"]}),hovertemplate:r(),showlegend:l({},o.showlegend,{dflt:!1})},a("",{cLetter:"z",editTypeOverride:"calc"}))},38675:function(t,e,n){"use strict";var r=n(92770),i=n(50606).BADNUM,a=n(78803),o=n(75225),s=n(66279);function l(t){return t&&"string"===typeof t}t.exports=function(t,e){var n,c=e._length,u=new Array(c);n=e.geojson?function(t){return l(t)||r(t)}:l;for(var f=0;f")}}(t,f,o),[t]}},51319:function(t,e,n){"use strict";t.exports={attributes:n(69568),supplyDefaults:n(61869),colorbar:n(61243),calc:n(38675),calcGeoJSON:n(99841).calcGeoJSON,plot:n(99841).plot,style:n(99636).style,styleOnSelect:n(99636).styleOnSelect,hoverPoints:n(42300),eventData:n(92069),selectPoints:n(81253),moduleType:"trace",name:"choropleth",basePlotModule:n(44622),categories:["geo","noOpacity","showLegend"],meta:{}}},99841:function(t,e,n){"use strict";var 
r=n(39898),i=n(71828),a=n(41327),o=n(90973).getTopojsonFeatures,s=n(71739).findExtremes,l=n(99636).style;t.exports={calcGeoJSON:function(t,e){for(var n=t[0].trace,r=e[n.geo],i=r._subplot,l=n.locationmode,c=n._length,u="geojson-id"===l?a.extractTraceFeature(t):o(n,i.topojson),f=[],h=[],d=0;d=0;r--){var i=n[r].id;if("string"===typeof i&&0===i.indexOf("water"))for(var a=r+1;a=0;n--)t.removeLayer(e[n][1])},s.dispose=function(){var t=this.subplot.map;this._removeLayers(),t.removeSource(this.sourceId)},t.exports=function(t,e){var n=e[0].trace,i=new o(t,n.uid),a=i.sourceId,s=r(e),l=i.below=t.belowLookup["trace-"+n.uid];return t.map.addSource(a,{type:"geojson",data:s.geojson}),i._addLayers(s,l),e[0].trace._glTrace=i,i}},12674:function(t,e,n){"use strict";var r=n(50693),i=n(12663).axisHoverFormat,a=n(5386).fF,o=n(2418),s=n(9012),l=n(1426).extendFlat,c={x:{valType:"data_array",editType:"calc+clearAxisTypes"},y:{valType:"data_array",editType:"calc+clearAxisTypes"},z:{valType:"data_array",editType:"calc+clearAxisTypes"},u:{valType:"data_array",editType:"calc"},v:{valType:"data_array",editType:"calc"},w:{valType:"data_array",editType:"calc"},sizemode:{valType:"enumerated",values:["scaled","absolute"],editType:"calc",dflt:"scaled"},sizeref:{valType:"number",editType:"calc",min:0},anchor:{valType:"enumerated",editType:"calc",values:["tip","tail","cm","center"],dflt:"cm"},text:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertext:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertemplate:a({editType:"calc"},{keys:["norm"]}),uhoverformat:i("u",1),vhoverformat:i("v",1),whoverformat:i("w",1),xhoverformat:i("x"),yhoverformat:i("y"),zhoverformat:i("z"),showlegend:l({},s.showlegend,{dflt:!1})};l(c,r("",{colorAttr:"u/v/w norm",showScaleDflt:!0,editTypeOverride:"calc"})),["opacity","lightposition","lighting"].forEach((function(t){c[t]=o[t]})),c.hoverinfo=l({},s.hoverinfo,{editType:"calc",flags:["x","y","z","u","v","w","norm","text","name"],dflt:"x+y+z+norm+text+name"}),c.transforms=void 0,t.exports=c},31371:function(t,e,n){"use strict";var r=n(78803);t.exports=function(t,e){for(var n=e.u,i=e.v,a=e.w,o=Math.min(e.x.length,e.y.length,e.z.length,n.length,i.length,a.length),s=-1/0,l=1/0,c=0;co.level||o.starts.length&&a===o.level)}break;case"constraint":if(r.prefixBoundary=!1,r.edgepaths.length)return;var s=r.x.length,l=r.y.length,c=-1/0,u=1/0;for(n=0;n":d>c&&(r.prefixBoundary=!0);break;case"<":(dc||r.starts.length&&h===u)&&(r.prefixBoundary=!0);break;case"][":f=Math.min(d[0],d[1]),h=Math.max(d[0],d[1]),fc&&(r.prefixBoundary=!0)}}}},90654:function(t,e,n){"use strict";var r=n(21081),i=n(86068),a=n(53572);t.exports={min:"zmin",max:"zmax",calc:function(t,e,n){var o=e.contours,s=e.line,l=o.size||1,c=o.coloring,u=i(e,{isColorbar:!0});if("heatmap"===c){var f=r.extractOpts(e);n._fillgradient=f.reversescale?r.flipScale(f.colorscale):f.colorscale,n._zrange=[f.min,f.max]}else"fill"===c&&(n._fillcolor=u);n._line={color:"lines"===c?u:s.color,width:!1!==o.showlines?s.width:0,dash:s.dash},n._levels={start:o.start,end:a(o),size:l}}}},36914:function(t){"use 
strict";t.exports={BOTTOMSTART:[1,9,13,104,713],TOPSTART:[4,6,7,104,713],LEFTSTART:[8,12,14,208,1114],RIGHTSTART:[2,3,11,208,1114],NEWDELTA:[null,[-1,0],[0,-1],[-1,0],[1,0],null,[0,-1],[-1,0],[0,1],[0,1],null,[0,1],[1,0],[1,0],[0,-1]],CHOOSESADDLE:{104:[4,1],208:[2,8],713:[7,13],1114:[11,14]},SADDLEREMAINDER:{1:4,2:8,4:1,7:13,8:2,11:14,13:7,14:11},LABELDISTANCE:2,LABELINCREASE:10,LABELMIN:3,LABELMAX:10,LABELOPTIMIZER:{EDGECOST:1,ANGLECOST:1,NEIGHBORCOST:5,SAMELEVELFACTOR:10,SAMELEVELDISTANCE:5,MAXCOST:100,INITIALSEARCHPOINTS:10,ITERATIONS:5}}},83179:function(t,e,n){"use strict";var r=n(92770),i=n(14523),a=n(7901),o=a.addOpacity,s=a.opacity,l=n(74808),c=l.CONSTRAINT_REDUCTION,u=l.COMPARISON_OPS2;t.exports=function(t,e,n,a,l,f){var h,d,p,g=e.contours,v=n("contours.operation");g._operation=c[v],function(t,e){var n;-1===u.indexOf(e.operation)?(t("contours.value",[0,1]),Array.isArray(e.value)?e.value.length>2?e.value=e.value.slice(2):0===e.length?e.value=[0,1]:e.length<2?(n=parseFloat(e.value[0]),e.value=[n,n+1]):e.value=[parseFloat(e.value[0]),parseFloat(e.value[1])]:r(e.value)&&(n=parseFloat(e.value),e.value=[n,n+1])):(t("contours.value",0),r(e.value)||(Array.isArray(e.value)?e.value=parseFloat(e.value[0]):e.value=0))}(n,g),"="===v?h=g.showlines=!0:(h=n("contours.showlines"),p=n("fillcolor",o((t.line||{}).color||l,.5))),h&&(d=n("line.color",p&&s(p)?o(e.fillcolor,1):l),n("line.width",2),n("line.dash")),n("line.smoothing"),i(n,a,d,f)}},64237:function(t,e,n){"use strict";var r=n(74808),i=n(92770);function a(t,e){var n,a=Array.isArray(e);function o(t){return i(t)?+t:null}return-1!==r.COMPARISON_OPS2.indexOf(t)?n=o(a?e[0]:e):-1!==r.INTERVAL_OPS.indexOf(t)?n=a?[o(e[0]),o(e[1])]:[o(e),o(e)]:-1!==r.SET_OPS.indexOf(t)&&(n=a?e.map(o):[o(e)]),n}function o(t){return function(e){e=a(t,e);var n=Math.min(e[0],e[1]),r=Math.max(e[0],e[1]);return{start:n,end:r,size:r-n}}}function s(t){return function(e){return{start:e=a(t,e),end:1/0,size:1/0}}}t.exports={"[]":o("[]"),"][":o("]["),">":s(">"),"<":s("<"),"=":s("=")}},67217:function(t){"use strict";t.exports=function(t,e,n,r){var i=r("contours.start"),a=r("contours.end"),o=!1===i||!1===a,s=n("contours.size");!(o?e.autocontour=!0:n("autocontour",!1))&&s||n("ncontours")}},84857:function(t,e,n){"use strict";var r=n(71828);function i(t){return r.extendFlat({},t,{edgepaths:r.extendDeep([],t.edgepaths),paths:r.extendDeep([],t.paths),starts:r.extendDeep([],t.starts)})}t.exports=function(t,e){var n,a,o,s=function(t){return t.reverse()},l=function(t){return t};switch(e){case"=":case"<":return t;case">":for(1!==t.length&&r.warn("Contour data invalid for the specified inequality operation."),a=t[0],n=0;n1e3){r.warn("Too many contours, clipping at 1000",t);break}return l}},53572:function(t){"use strict";t.exports=function(t){return t.end+t.size/1e6}},81696:function(t,e,n){"use strict";var r=n(71828),i=n(36914);function a(t,e,n,r){return Math.abs(t[0]-e[0])20&&e?208===t||1114===t?r=0===n[0]?1:-1:a=0===n[1]?1:-1:-1!==i.BOTTOMSTART.indexOf(t)?a=1:-1!==i.LEFTSTART.indexOf(t)?r=1:-1!==i.TOPSTART.indexOf(t)?a=-1:r=-1,[r,a]}(h,n,e),p=[l(t,e,[-d[0],-d[1]])],g=t.z.length,v=t.z[0].length,m=e.slice(),y=d.slice();for(u=0;u<1e4;u++){if(h>20?(h=i.CHOOSESADDLE[h][(d[0]||d[1])<0?0:1],t.crossings[f]=i.SADDLEREMAINDER[h]):delete t.crossings[f],!(d=i.NEWDELTA[h])){r.log("Found bad marching index:",h,e,t.level);break}p.push(l(t,e,d)),e[0]+=d[0],e[1]+=d[1],f=e.join(","),a(p[p.length-1],p[p.length-2],s,c)&&p.pop();var 
b=d[0]&&(e[0]<0||e[0]>v-2)||d[1]&&(e[1]<0||e[1]>g-2);if(e[0]===m[0]&&e[1]===m[1]&&d[0]===y[0]&&d[1]===y[1]||n&&b)break;h=t.crossings[f]}1e4===u&&r.log("Infinite loop in contour?");var x,w,_,k,T,M,A,S,E,C,P,O=a(p[0],p[p.length-1],s,c),L=0,I=.2*t.smoothing,D=[],R=0;for(u=1;u=R;u--)if((x=D[u])=R&&x+D[w]S&&E--,t.edgepaths[E]=P.concat(p,C));break}F||(t.edgepaths[S]=p.concat(C))}for(S=0;St?0:1)+(e[0][1]>t?0:2)+(e[1][1]>t?0:4)+(e[1][0]>t?0:8);return 5===n||10===n?t>(e[0][0]+e[0][1]+e[1][0]+e[1][1])/4?5===n?713:1114:5===n?104:208:15===n?0:n}t.exports=function(t){var e,n,a,o,s,l,c,u,f,h=t[0].z,d=h.length,p=h[0].length,g=2===d||2===p;for(n=0;n=0&&(r=y,s=l):Math.abs(n[1]-r[1])<.01?Math.abs(n[1]-y[1])<.01&&(y[0]-n[0])*(r[0]-y[0])>=0&&(r=y,s=l):i.log("endpt to newendpt is not vert. or horz.",n,r,y)}if(n=r,s>=0)break;f+="L"+r}if(s===t.edgepaths.length){i.log("unclosed perimeter path");break}h=s,(p=-1===d.indexOf(h))&&(h=d[0],f+="Z")}for(h=0;hr.center?r.right-s:s-r.left)/(u+Math.abs(Math.sin(c)*o)),d=(l>r.middle?r.bottom-l:l-r.top)/(Math.abs(f)+Math.cos(c)*o);if(h<1||d<1)return 1/0;var p=m.EDGECOST*(1/(h-1)+1/(d-1));p+=m.ANGLECOST*c*c;for(var g=s-u,v=l-f,y=s+u,b=l+f,x=0;x2*m.MAXCOST)break;d&&(s/=2),l=(o=c-s/2)+1.5*s}if(h<=m.MAXCOST)return u},e.addLabelData=function(t,e,n,r){var i=e.fontSize,a=e.width+i/3,o=Math.max(0,e.height-i/3),s=t.x,l=t.y,c=t.theta,u=Math.sin(c),f=Math.cos(c),h=function(t,e){return[s+t*f-e*u,l+t*u+e*f]},d=[h(-a/2,-o/2),h(-a/2,o/2),h(a/2,o/2),h(a/2,-o/2)];n.push({text:e.text,x:s,y:l,dy:e.dy,theta:c,level:e.level,width:a,height:o}),r.push(d)},e.drawLabels=function(t,e,n,a,o){var l=t.selectAll("text").data(e,(function(t){return t.text+","+t.x+","+t.y+","+t.theta}));if(l.exit().remove(),l.enter().append("text").attr({"data-notex":1,"text-anchor":"middle"}).each((function(t){var e=t.x+Math.sin(t.theta)*t.dy,i=t.y-Math.cos(t.theta)*t.dy;r.select(this).text(t.text).attr({x:e,y:i,transform:"rotate("+180*t.theta/Math.PI+" "+e+" "+i+")"}).call(s.convertToTspans,n)})),o){for(var c="",u=0;un.end&&(n.start=n.end=(n.start+n.end)/2),t._input.contours||(t._input.contours={}),i.extendFlat(t._input.contours,{start:n.start,end:n.end,size:n.size}),t._input.autocontour=!0}else if("constraint"!==n.type){var c,u=n.start,f=n.end,h=t._input.contours;u>f&&(n.start=h.start=f,f=n.end=h.end=u,u=n.start),n.size>0||(c=u===f?1:a(u,f,t.ncontours).dtick,h.size=n.size=c)}}},84426:function(t,e,n){"use strict";var r=n(39898),i=n(91424),a=n(70035),o=n(86068);t.exports=function(t){var e=r.select(t).selectAll("g.contour");e.style("opacity",(function(t){return t[0].trace.opacity})),e.each((function(t){var e=r.select(this),n=t[0].trace,a=n.contours,s=n.line,l=a.size||1,c=a.start,u="constraint"===a.type,f=!u&&"lines"===a.coloring,h=!u&&"fill"===a.coloring,d=f||h?o(n):null;e.selectAll("g.contourlevel").each((function(t){r.select(this).selectAll("path").call(i.lineGroupStyle,s.width,f?d(t.level):s.color,s.dash)}));var p=a.labelfont;if(e.selectAll("g.contourlabels text").each((function(t){i.font(r.select(this),{family:p.family,size:p.size,color:p.color||(f?d(t.level):s.color)})})),u)e.selectAll("g.contourfill path").style("fill",n.fillcolor);else if(h){var g;e.selectAll("g.contourfill path").style("fill",(function(t){return void 0===g&&(g=t.level),d(t.level+.5*l)})),void 0===g&&(g=c),e.selectAll("g.contourbg path").style("fill",d(g-.5*l))}})),a(t)}},8724:function(t,e,n){"use strict";var r=n(1586),i=n(14523);t.exports=function(t,e,n,a,o){var 
s,l=n("contours.coloring"),c="";"fill"===l&&(s=n("contours.showlines")),!1!==s&&("lines"!==l&&(c=n("line.color","#000")),n("line.width",.5),n("line.dash")),"none"!==l&&(!0!==t.showlegend&&(e.showlegend=!1),e._dfltShowLegend=!1,r(t,e,a,n,{prefix:"",cLetter:"z"})),n("line.smoothing"),i(n,a,c,o)}},88085:function(t,e,n){"use strict";var r=n(21606),i=n(70600),a=n(50693),o=n(1426).extendFlat,s=i.contours;t.exports=o({carpet:{valType:"string",editType:"calc"},z:r.z,a:r.x,a0:r.x0,da:r.dx,b:r.y,b0:r.y0,db:r.dy,text:r.text,hovertext:r.hovertext,transpose:r.transpose,atype:r.xtype,btype:r.ytype,fillcolor:i.fillcolor,autocontour:i.autocontour,ncontours:i.ncontours,contours:{type:s.type,start:s.start,end:s.end,size:s.size,coloring:{valType:"enumerated",values:["fill","lines","none"],dflt:"fill",editType:"calc"},showlines:s.showlines,showlabels:s.showlabels,labelfont:s.labelfont,labelformat:s.labelformat,operation:s.operation,value:s.value,editType:"calc",impliedEdits:{autocontour:!1}},line:{color:i.line.color,width:i.line.width,dash:i.line.dash,smoothing:i.line.smoothing,editType:"plot"},transforms:void 0},a("",{cLetter:"z",autoColorDflt:!1}))},59885:function(t,e,n){"use strict";var r=n(78803),i=n(71828),a=n(68296),o=n(4742),s=n(824),l=n(43907),c=n(70769),u=n(75005),f=n(22882),h=n(18670);t.exports=function(t,e){var n=e._carpetTrace=f(t,e);if(n&&n.visible&&"legendonly"!==n.visible){if(!e.a||!e.b){var d=t.data[n.index],p=t.data[e.index];p.a||(p.a=d.a),p.b||(p.b=d.b),u(p,e,e._defaultColor,t._fullLayout)}var g=function(t,e){var n,u,f,h,d,p,g,v=e._carpetTrace,m=v.aaxis,y=v.baxis;m._minDtick=0,y._minDtick=0,i.isArray1D(e.z)&&a(e,m,y,"a","b",["z"]),n=e._a=e._a||e.a,h=e._b=e._b||e.b,n=n?m.makeCalcdata(e,"_a"):[],h=h?y.makeCalcdata(e,"_b"):[],u=e.a0||0,f=e.da||1,d=e.b0||0,p=e.db||1,g=e._z=o(e._z||e.z,e.transpose),e._emptypoints=l(g),s(g,e._emptypoints);var b=i.maxRowLength(g),x="scaled"===e.xtype?"":n,w=c(e,x,u,f,b,m),_="scaled"===e.ytype?"":h,k=c(e,_,d,p,g.length,y),T={a:w,b:k,z:g};return"levels"===e.contours.type&&"none"!==e.contours.coloring&&r(t,e,{vals:g,containerStr:"",cLetter:"z"}),[T]}(t,e);return h(e,e._z),g}}},75005:function(t,e,n){"use strict";var r=n(71828),i=n(67684),a=n(88085),o=n(83179),s=n(67217),l=n(8724);t.exports=function(t,e,n,c){function u(n,i){return r.coerce(t,e,a,n,i)}if(u("carpet"),t.a&&t.b){if(!i(t,e,u,c,"a","b"))return void(e.visible=!1);u("text"),"constraint"===u("contours.type")?o(t,e,u,c,n,{hasHover:!1}):(s(t,e,u,(function(n){return r.coerce2(t,e,a,n)})),l(t,e,u,c,{hasHover:!1}))}else e._defaultColor=n,e._length=null}},93740:function(t,e,n){"use strict";t.exports={attributes:n(88085),supplyDefaults:n(75005),colorbar:n(90654),calc:n(59885),plot:n(51048),style:n(84426),moduleType:"trace",name:"contourcarpet",basePlotModule:n(93612),categories:["cartesian","svg","carpet","contour","symbols","showLegend","hasLines","carpetDependent","noHover","noSortingByValue"],meta:{}}},51048:function(t,e,n){"use strict";var r=n(39898),i=n(27669),a=n(67961),o=n(91424),s=n(71828),l=n(87678),c=n(81696),u=n(29854),f=n(36914),h=n(84857),d=n(87558),p=n(20083),g=n(22882),v=n(4536);function m(t,e,n){var r=t.getPointAtLength(e),i=t.getPointAtLength(n),a=i.x-r.x,o=i.y-r.y,s=Math.sqrt(a*a+o*o);return[a/s,o/s]}function y(t){var e=Math.sqrt(t[0]*t[0]+t[1]*t[1]);return[t[0]/e,t[1]/e]}function b(t,e){var n=Math.abs(t[0]*e[0]+t[1]*e[1]);return Math.sqrt(1-n*n)/n}t.exports=function(t,e,n,x){var w=e.xaxis,_=e.yaxis;s.makeTraceGroups(x,n,"contour").each((function(n){var 
x=r.select(this),k=n[0],T=k.trace,M=T._carpetTrace=g(t,T),A=t.calcdata[M.index][0];if(M.visible&&"legendonly"!==M.visible){var S=k.a,E=k.b,C=T.contours,P=d(C,e,k),O="constraint"===C.type,L=C._operation,I=O?"="===L?"lines":"fill":C.coloring,D=[[S[0],E[E.length-1]],[S[S.length-1],E[E.length-1]],[S[S.length-1],E[0]],[S[0],E[0]]];l(P);var R=1e-8*(S[S.length-1]-S[0]),z=1e-8*(E[E.length-1]-E[0]);c(P,R,z);var N,j,F,B,U=P;"constraint"===C.type&&(U=h(P,L)),function(t,e){var n,r,i,a,o,s,l,c,u;for(n=0;n=0;B--)N=A.clipsegments[B],j=i([],N.x,w.c2p),F=i([],N.y,_.c2p),j.reverse(),F.reverse(),H.push(a(j,F,N.bicubic));var V="M"+H.join("L")+"Z";!function(t,e,n,r,o,l){var c,u,f,h,d=s.ensureSingle(t,"g","contourbg").selectAll("path").data("fill"!==l||o?[]:[0]);d.enter().append("path"),d.exit().remove();var p=[];for(h=0;h=0&&(h=C,p=g):Math.abs(f[1]-h[1])<_?Math.abs(f[1]-C[1])<_&&(C[0]-f[0])*(h[0]-C[0])>=0&&(h=C,p=g):s.log("endpt to newendpt is not vert. or horz.",f,h,C)}if(p>=0)break;y+=S(f,h),f=h}if(p===e.edgepaths.length){s.log("unclosed perimeter path");break}u=p,(x=-1===b.indexOf(u))&&(u=b[0],y+=S(f,h)+"Z",f=null)}for(u=0;ug&&(r.max=g),r.len=r.max-r.min}function v(t,e){var n,r=0,o=.1;return(Math.abs(t[0]-l)0?+d[u]:0),f.push({type:"Feature",geometry:{type:"Point",coordinates:m},properties:y})}}var x=o.extractOpts(e),w=x.reversescale?o.flipScale(x.colorscale):x.colorscale,_=w[0][1],k=["interpolate",["linear"],["heatmap-density"],0,a.opacity(_)<1?_:a.addOpacity(_,0)];for(u=1;u=0;n--)t.removeLayer(e[n][1])},o.dispose=function(){var t=this.subplot.map;this._removeLayers(),t.removeSource(this.sourceId)},t.exports=function(t,e){var n=e[0].trace,i=new a(t,n.uid),o=i.sourceId,s=r(e),l=i.below=t.belowLookup["trace-"+n.uid];return t.map.addSource(o,{type:"geojson",data:s.geojson}),i._addLayers(s,l),i}},49789:function(t,e,n){"use strict";var r=n(71828);t.exports=function(t,e){for(var n=0;n"),l.color=function(t,e){var n=t.marker,i=e.mc||n.color,a=e.mlc||n.line.color,o=e.mlw||n.line.width;return r(i)?i:r(a)&&o?a:void 0}(u,h),[l]}}},51759:function(t,e,n){"use strict";t.exports={attributes:n(1285),layoutAttributes:n(10440),supplyDefaults:n(26199).supplyDefaults,crossTraceDefaults:n(26199).crossTraceDefaults,supplyLayoutDefaults:n(93138),calc:n(9532),crossTraceCalc:n(8984),plot:n(80461),style:n(68266).style,hoverPoints:n(63341),eventData:n(34598),selectPoints:n(81974),moduleType:"trace",name:"funnel",basePlotModule:n(93612),categories:["bar-like","cartesian","svg","oriented","showLegend","zoomScale"],meta:{}}},10440:function(t){"use strict";t.exports={funnelmode:{valType:"enumerated",values:["stack","group","overlay"],dflt:"stack",editType:"calc"},funnelgap:{valType:"number",min:0,max:1,editType:"calc"},funnelgroupgap:{valType:"number",min:0,max:1,dflt:0,editType:"calc"}}},93138:function(t,e,n){"use strict";var r=n(71828),i=n(10440);t.exports=function(t,e,n){var a=!1;function o(n,a){return r.coerce(t,e,i,n,a)}for(var s=0;s path").each((function(t){if(!t.isBlank){var e=s.marker;r.select(this).call(a.fill,t.mc||e.color).call(a.stroke,t.mlc||e.line.color).call(i.dashLine,e.line.dash,t.mlw||e.line.width).style("opacity",s.selectedpoints&&!t.selected?o:1)}})),c(n,s,t),n.selectAll(".regions").each((function(){r.select(this).selectAll("path").style("stroke-width",0).call(a.fill,s.connector.fillcolor)})),n.selectAll(".lines").each((function(){var t=s.connector.line;i.lineGroupStyle(r.select(this).selectAll("path"),t.width,t.color,t.dash)}))}))}}},86807:function(t,e,n){"use strict";var 
r=n(34e3),i=n(9012),a=n(27670).Y,o=n(5386).fF,s=n(5386).si,l=n(1426).extendFlat;t.exports={labels:r.labels,label0:r.label0,dlabel:r.dlabel,values:r.values,marker:{colors:r.marker.colors,line:{color:l({},r.marker.line.color,{dflt:null}),width:l({},r.marker.line.width,{dflt:1}),editType:"calc"},pattern:r.marker.pattern,editType:"calc"},text:r.text,hovertext:r.hovertext,scalegroup:l({},r.scalegroup,{}),textinfo:l({},r.textinfo,{flags:["label","text","value","percent"]}),texttemplate:s({editType:"plot"},{keys:["label","color","value","text","percent"]}),hoverinfo:l({},i.hoverinfo,{flags:["label","text","value","percent","name"]}),hovertemplate:o({},{keys:["label","color","value","text","percent"]}),textposition:l({},r.textposition,{values:["inside","none"],dflt:"inside"}),textfont:r.textfont,insidetextfont:r.insidetextfont,title:{text:r.title.text,font:r.title.font,position:l({},r.title.position,{values:["top left","top center","top right"],dflt:"top center"}),editType:"plot"},domain:a({name:"funnelarea",trace:!0,editType:"calc"}),aspectratio:{valType:"number",min:0,dflt:1,editType:"plot"},baseratio:{valType:"number",min:0,max:1,dflt:.333,editType:"plot"}}},6452:function(t,e,n){"use strict";var r=n(74875);e.name="funnelarea",e.plot=function(t,n,i,a){r.plotBasePlot(e.name,t,n,i,a)},e.clean=function(t,n,i,a){r.cleanBasePlot(e.name,t,n,i,a)}},89574:function(t,e,n){"use strict";var r=n(32354);t.exports={calc:function(t,e){return r.calc(t,e)},crossTraceCalc:function(t){r.crossTraceCalc(t,{type:"funnelarea"})}}},86282:function(t,e,n){"use strict";var r=n(71828),i=n(86807),a=n(27670).c,o=n(90769).handleText,s=n(37434).handleLabelsAndValues,l=n(37434).handleMarkerDefaults;t.exports=function(t,e,n,c){function u(n,a){return r.coerce(t,e,i,n,a)}var f=u("labels"),h=u("values"),d=s(f,h),p=d.len;if(e._hasLabels=d.hasLabels,e._hasValues=d.hasValues,!e._hasLabels&&e._hasValues&&(u("label0"),u("dlabel")),p){e._length=p,l(t,e,c,u),u("scalegroup");var g,v=u("text"),m=u("texttemplate");if(m||(g=u("textinfo",Array.isArray(v)?"text+percent":"percent")),u("hovertext"),u("hovertemplate"),m||g&&"none"!==g){var y=u("textposition");o(t,e,c,u,y,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1})}a(e,c,u),u("title.text")&&(u("title.position"),r.coerceFont(u,"title.font",c.font)),u("aspectratio"),u("baseratio")}else e.visible=!1}},10421:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"funnelarea",basePlotModule:n(6452),categories:["pie-like","funnelarea","showLegend"],attributes:n(86807),layoutAttributes:n(80097),supplyDefaults:n(86282),supplyLayoutDefaults:n(57402),calc:n(89574).calc,crossTraceCalc:n(89574).crossTraceCalc,plot:n(79187),style:n(71858),styleOne:n(63463),meta:{}}},80097:function(t,e,n){"use strict";var r=n(92774).hiddenlabels;t.exports={hiddenlabels:r,funnelareacolorway:{valType:"colorlist",editType:"calc"},extendfunnelareacolors:{valType:"boolean",dflt:!0,editType:"calc"}}},57402:function(t,e,n){"use strict";var r=n(71828),i=n(80097);t.exports=function(t,e){function n(n,a){return r.coerce(t,e,i,n,a)}n("hiddenlabels"),n("funnelareacolorway",e.colorway),n("extendfunnelareacolors")}},79187:function(t,e,n){"use strict";var 
r=n(39898),i=n(91424),a=n(71828),o=a.strScale,s=a.strTranslate,l=n(63893),c=n(17295).toMoveInsideBar,u=n(72597),f=u.recordMinTextSize,h=u.clearMinTextSize,d=n(53581),p=n(14575),g=p.attachFxHandlers,v=p.determineInsideTextFont,m=p.layoutAreas,y=p.prerenderTitles,b=p.positionTitleOutside,x=p.formatSliceLabel;function w(t,e){return"l"+(e[0]-t[0])+","+(e[1]-t[1])}function _(t,e){return[.5*(t[0]+e[0]),.5*(t[1]+e[1])]}t.exports=function(t,e){var n=t._context.staticPlot,u=t._fullLayout;h("funnelarea",u),y(e,t),m(e,u._size),a.makeTraceGroups(u._funnelarealayer,e,"trace").each((function(e){var h=r.select(this),p=e[0],m=p.trace;!function(t){if(t.length){var e=t[0],n=e.trace,r=n.aspectratio,i=n.baseratio;i>.999&&(i=.999);var a,o,s,l=Math.pow(i,2),c=e.vTotal,u=c,f=c*l/(1-l)/c,h=[];for(h.push(E()),o=t.length-1;o>-1;o--)if(!(s=t[o]).hidden){var d=s.v/u;f+=d,h.push(E())}var p=1/0,g=-1/0;for(o=0;o-1;o--)if(!(s=t[o]).hidden){var M=h[T+=1][0],A=h[T][1];s.TL=[-M,A],s.TR=[M,A],s.BL=w,s.BR=k,s.pxmid=_(s.TR,s.BR),w=s.TL,k=s.TR}}function S(){var t=Math.sqrt(f);return{x:t,y:-t}}function E(){var t=S();return[t.x,t.y]}}(e),h.each((function(){var h=r.select(this).selectAll("g.slice").data(e);h.enter().append("g").classed("slice",!0),h.exit().remove(),h.each((function(o,s){if(o.hidden)r.select(this).selectAll("path,g").remove();else{o.pointNumber=o.i,o.curveNumber=m.index;var h=p.cx,y=p.cy,b=r.select(this),_=b.selectAll("path.surface").data([o]);_.enter().append("path").classed("surface",!0).style({"pointer-events":n?"none":"all"}),b.call(g,t,e);var k="M"+(h+o.TR[0])+","+(y+o.TR[1])+w(o.TR,o.BR)+w(o.BR,o.BL)+w(o.BL,o.TL)+"Z";_.attr("d",k),x(t,o,p);var T=d.castOption(m.textposition,o.pts),M=b.selectAll("g.slicetext").data(o.text&&"none"!==T?[0]:[]);M.enter().append("g").classed("slicetext",!0),M.exit().remove(),M.each((function(){var n=a.ensureSingle(r.select(this),"text","",(function(t){t.attr("data-notex",1)})),d=a.ensureUniformFontSize(t,v(m,o,u.font));n.text(o.text).attr({class:"slicetext",transform:"","text-anchor":"middle"}).call(i.font,d).call(l.convertToTspans,t);var p,g,b,x=i.bBox(n.node()),w=Math.min(o.BL[1],o.BR[1])+y,_=Math.max(o.TL[1],o.TR[1])+y;g=Math.max(o.TL[0],o.BL[0])+h,b=Math.min(o.TR[0],o.BR[0])+h,(p=c(g,b,w,_,x,{isHorizontal:!0,constrained:!0,angle:0,anchor:"middle"})).fontSize=d.size,f(m.type,p,u),e[s].transform=p,a.setTransormAndDisplay(n,p)}))}}));var y=r.select(this).selectAll("g.titletext").data(m.title.text?[0]:[]);y.enter().append("g").classed("titletext",!0),y.exit().remove(),y.each((function(){var e=a.ensureSingle(r.select(this),"text","",(function(t){t.attr("data-notex",1)})),n=m.title.text;m._meta&&(n=a.templateString(n,m._meta)),e.text(n).attr({class:"titletext",transform:"","text-anchor":"middle"}).call(i.font,m.title.font).call(l.convertToTspans,t);var c=b(p,u._size);e.attr("transform",s(c.x,c.y)+o(Math.min(1,c.scale))+s(c.tx,c.ty))}))}))}))}},71858:function(t,e,n){"use strict";var r=n(39898),i=n(63463),a=n(72597).resizeText;t.exports=function(t){var e=t._fullLayout._funnelarealayer.selectAll(".trace");a(t,e,"funnelarea"),e.each((function(e){var n=e[0].trace,a=r.select(this);a.style({opacity:n.opacity}),a.selectAll("path.surface").each((function(e){r.select(this).call(i,e,n,t)}))}))}},21606:function(t,e,n){"use strict";var 
r=n(82196),i=n(9012),a=n(41940),o=n(12663).axisHoverFormat,s=n(5386).fF,l=n(5386).si,c=n(50693),u=n(1426).extendFlat;t.exports=u({z:{valType:"data_array",editType:"calc"},x:u({},r.x,{impliedEdits:{xtype:"array"}}),x0:u({},r.x0,{impliedEdits:{xtype:"scaled"}}),dx:u({},r.dx,{impliedEdits:{xtype:"scaled"}}),y:u({},r.y,{impliedEdits:{ytype:"array"}}),y0:u({},r.y0,{impliedEdits:{ytype:"scaled"}}),dy:u({},r.dy,{impliedEdits:{ytype:"scaled"}}),xperiod:u({},r.xperiod,{impliedEdits:{xtype:"scaled"}}),yperiod:u({},r.yperiod,{impliedEdits:{ytype:"scaled"}}),xperiod0:u({},r.xperiod0,{impliedEdits:{xtype:"scaled"}}),yperiod0:u({},r.yperiod0,{impliedEdits:{ytype:"scaled"}}),xperiodalignment:u({},r.xperiodalignment,{impliedEdits:{xtype:"scaled"}}),yperiodalignment:u({},r.yperiodalignment,{impliedEdits:{ytype:"scaled"}}),text:{valType:"data_array",editType:"calc"},hovertext:{valType:"data_array",editType:"calc"},transpose:{valType:"boolean",dflt:!1,editType:"calc"},xtype:{valType:"enumerated",values:["array","scaled"],editType:"calc+clearAxisTypes"},ytype:{valType:"enumerated",values:["array","scaled"],editType:"calc+clearAxisTypes"},zsmooth:{valType:"enumerated",values:["fast","best",!1],dflt:!1,editType:"calc"},hoverongaps:{valType:"boolean",dflt:!0,editType:"none"},connectgaps:{valType:"boolean",editType:"calc"},xgap:{valType:"number",dflt:0,min:0,editType:"plot"},ygap:{valType:"number",dflt:0,min:0,editType:"plot"},xhoverformat:o("x"),yhoverformat:o("y"),zhoverformat:o("z",1),hovertemplate:s(),texttemplate:l({arrayOk:!1,editType:"plot"},{keys:["x","y","z","text"]}),textfont:a({editType:"plot",autoSize:!0,autoColor:!0,colorEditType:"style"}),showlegend:u({},i.showlegend,{dflt:!1})},{transforms:void 0},c("",{cLetter:"z",autoColorDflt:!1}))},90757:function(t,e,n){"use strict";var r=n(73972),i=n(71828),a=n(89298),o=n(42973),s=n(17562),l=n(78803),c=n(68296),u=n(4742),f=n(824),h=n(43907),d=n(70769),p=n(50606).BADNUM;function g(t){for(var e=[],n=t.length,r=0;r1){var e=(t[t.length-1]-t[0])/(t.length-1),n=Math.abs(e/100);for(T=0;Tn)return!1}return!0}(A.rangebreaks||S.rangebreaks)&&(k=function(t,e,n){for(var r=[],i=-1,a=0;a=0;o--)(s=((f[[(n=(a=h[o])[0])-1,i=a[1]]]||g)[2]+(f[[n+1,i]]||g)[2]+(f[[n,i-1]]||g)[2]+(f[[n,i+1]]||g)[2])/20)&&(l[a]=[n,i,s],h.splice(o,1),c=!0);if(!c)throw"findEmpties iterated with no new neighbors";for(a in l)f[a]=l[a],u.push(l[a])}return u.sort((function(t,e){return e[2]-t[2]}))}},46248:function(t,e,n){"use strict";var r=n(30211),i=n(71828),a=n(89298),o=n(21081).extractOpts;t.exports=function(t,e,n,s,l){l||(l={});var c,u,f,h,d=l.isContour,p=t.cd[0],g=p.trace,v=t.xa,m=t.ya,y=p.x,b=p.y,x=p.z,w=p.xCenter,_=p.yCenter,k=p.zmask,T=g.zhoverformat,M=y,A=b;if(!1!==t.index){try{f=Math.round(t.index[1]),h=Math.round(t.index[0])}catch(F){return void i.error("Error hovering on heatmap, pointNumber must be [row,col], found:",t.index)}if(f<0||f>=x[0].length||h<0||h>x.length)return}else{if(r.inbox(e-y[0],e-y[y.length-1],0)>0||r.inbox(n-b[0],n-b[b.length-1],0)>0)return;if(d){var S;for(M=[2*y[0]-y[1]],S=1;Sg&&(m=Math.max(m,Math.abs(t[a][o]-p)/(v-g))))}return m}t.exports=function(t,e){var n,i=1;for(o(t,e),n=0;n.01;n++)i=o(t,e,a(i));return i>.01&&r.log("interp2d didn't converge quickly",i),t}},58623:function(t,e,n){"use strict";var r=n(71828);t.exports=function(t,e){t("texttemplate");var n=r.extendFlat({},e.font,{color:"auto",size:"auto"});r.coerceFont(t,"textfont",n)}},70769:function(t,e,n){"use strict";var r=n(73972),i=n(71828).isArrayOrTypedArray;t.exports=function(t,e,n,a,o,s){var 
l,c,u,f=[],h=r.traceIs(t,"contour"),d=r.traceIs(t,"histogram"),p=r.traceIs(t,"gl2d");if(i(e)&&e.length>1&&!d&&"category"!==s.type){var g=e.length;if(!(g<=o))return h?e.slice(0,o):e.slice(0,o+1);if(h||p)f=e.slice(0,o);else if(1===o)f=[e[0]-.5,e[0]+.5];else{for(f=[1.5*e[0]-.5*e[1]],u=1;u0;)T=M.c2p(F[P]),P--;for(T0;)C=A.c2p(B[P]),P--;C=M._length||T<=0||E>=A._length||C<=0)return I.selectAll("image").data([]).exit().remove(),void x(I);"fast"===X?(Z=W,J=G):(Z=Q,J=tt);var et=document.createElement("canvas");et.width=Z,et.height=J;var nt,rt,it=et.getContext("2d"),at=d(R,{noNumericCheck:!0,returnArray:!0});"fast"===X?(nt=Y?function(t){return W-1-t}:l.identity,rt=$?function(t){return G-1-t}:l.identity):(nt=function(t){return l.constrain(Math.round(M.c2p(F[t])-n),0,Q)},rt=function(t){return l.constrain(Math.round(A.c2p(B[t])-E),0,tt)});var ot,st,lt,ct,ut=rt(0),ft=[ut,ut],ht=Y?0:1,dt=$?0:1,pt=0,gt=0,vt=0,mt=0;function yt(t,e){if(void 0!==t){var n=at(t);return n[0]=Math.round(n[0]),n[1]=Math.round(n[1]),n[2]=Math.round(n[2]),pt+=e,gt+=n[0]*e,vt+=n[1]*e,mt+=n[2]*e,n}return[0,0,0,0]}function bt(t,e,n,r){var i=t[n.bin0];if(void 0===i)return yt(void 0,1);var a,o=t[n.bin1],s=e[n.bin0],l=e[n.bin1],c=o-i||0,u=s-i||0;return a=void 0===o?void 0===l?0:void 0===s?2*(l-i):2*(2*l-s-i)/3:void 0===l?void 0===s?0:2*(2*i-o-s)/3:void 0===s?2*(2*l-o-i)/3:l+i-o-s,yt(i+n.frac*c+r.frac*(u+n.frac*a))}if("default"!==X){var xt,wt=0;try{xt=new Uint8Array(Z*J*4)}catch(me){xt=new Array(Z*J*4)}if("smooth"===X){var _t,kt,Tt,Mt=U||F,At=H||B,St=new Array(Mt.length),Et=new Array(At.length),Ct=new Array(Q),Pt=U?_:w,Ot=H?_:w;for(P=0;PXt||Xt>A._length))for(O=Gt;OZt||Zt>M._length)){var Jt=u({x:Kt,y:$t},R,t._fullLayout);Jt.x=Kt,Jt.y=$t;var Qt=D.z[P][O];void 0===Qt?(Jt.z="",Jt.zLabel=""):(Jt.z=Qt,Jt.zLabel=s.tickText(Ut,Qt,"hover").text);var te=D.text&&D.text[P]&&D.text[P][O];void 0!==te&&!1!==te||(te=""),Jt.text=te;var ee=l.texttemplateString(Ft,Jt,t._fullLayout._d3locale,Jt,R._meta||{});if(ee){var ne=ee.split("
"),re=ne.length,ie=0;for(L=0;L0&&(a=!0);for(var l=0;la){var o=a-n[t];return n[t]=a,o}}return 0},max:function(t,e,n,i){var a=i[e];if(r(a)){if(a=Number(a),!r(n[t]))return n[t]=a,a;if(n[t]c?t>o?t>1.1*i?i:t>1.1*a?a:o:t>s?s:t>l?l:c:Math.pow(10,Math.floor(Math.log(t)/Math.LN10))}function d(t,e,n,r,a,s){if(r&&t>o){var l=p(e,a,s),c=p(n,a,s),u=t===i?0:1;return l[u]!==c[u]}return Math.floor(n/t)-Math.floor(e/t)>.1}function p(t,e,n){var r=e.c2d(t,i,n).split("-");return""===r[0]&&(r.unshift(),r[0]="-"+r[0]),r}t.exports=function(t,e,n,r,a){var s,l,c=-1.1*e,h=-.1*e,d=t-h,p=n[0],g=n[1],v=Math.min(f(p+h,p+d,r,a),f(g+h,g+d,r,a)),m=Math.min(f(p+c,p+h,r,a),f(g+c,g+h,r,a));if(v>m&&mo){var y=s===i?1:6,b=s===i?"M12":"M1";return function(e,n){var o=r.c2d(e,i,a),s=o.indexOf("-",y);s>0&&(o=o.substr(0,s));var c=r.d2c(o,0,a);if(cn.r2l(j)&&(B=o.tickIncrement(B,x.size,!0,d)),D.start=n.l2r(B),N||i.nestedProperty(e,m+".start").set(D.start)}var U=x.end,H=n.r2l(I.end),V=void 0!==H;if((x.endFound||V)&&H!==n.r2l(U)){var q=V?H:i.aggNums(Math.max,null,p);D.end=n.l2r(q),V||i.nestedProperty(e,m+".start").set(D.end)}var G="autobin"+s;return!1===e._input[G]&&(e._input[m]=i.extendFlat({},e[m]||{}),delete e._input[G],delete e[G]),[D,p]}t.exports={calc:function(t,e){var n,a,d,p,g=[],v=[],m="h"===e.orientation,y=o.getFromId(t,m?e.yaxis:e.xaxis),b=m?"y":"x",x={x:"y",y:"x"}[b],w=e[b+"calendar"],_=e.cumulative,k=h(t,e,y,b),T=k[0],M=k[1],A="string"===typeof T.size,S=[],E=A?S:T,C=[],P=[],O=[],L=0,I=e.histnorm,D=e.histfunc,R=-1!==I.indexOf("density");_.enabled&&R&&(I=I.replace(/ ?density$/,""),R=!1);var z,N="max"===D||"min"===D?null:0,j=l.count,F=c[I],B=!1,U=function(t){return y.r2c(t,0,w)};for(i.isArrayOrTypedArray(e[x])&&"count"!==D&&(z=e[x],B="avg"===D,j=l[D]),n=U(T.start),d=U(T.end)+(n-o.tickIncrement(n,T.size,!1,w))/1e6;n=0&&p=0;r--)s(r);else if("increasing"===e){for(r=1;r=0;r--)t[r]+=t[r+1];"exclude"===n&&(t.push(0),t.shift())}}(v,_.direction,_.currentbin);var Z=Math.min(g.length,v.length),J=[],Q=0,tt=Z-1;for(n=0;n=Q;n--)if(v[n]){tt=n;break}for(n=Q;n<=tt;n++)if(r(g[n])&&r(v[n])){var et={p:g[n],s:v[n],b:0};_.enabled||(et.pts=O[n],W?et.ph0=et.ph1=O[n].length?M[O[n][0]]:g[n]:(e._computePh=!0,et.ph0=q(S[n]),et.ph1=q(S[n+1],!0))),J.push(et)}return 1===J.length&&(J[0].width1=o.tickIncrement(J[0].p,T.size,!1,w)-J[0].p),s(J,e),i.isArrayOrTypedArray(e.selectedpoints)&&i.tagSelected(J,e,X),J},calcAllAutoBins:h}},72406:function(t){"use strict";t.exports={eventDataKeys:["binNumber"]}},82222:function(t,e,n){"use strict";var r=n(71828),i=n(41675),a=n(73972).traceIs,o=n(26125),s=r.nestedProperty,l=n(99082).getAxisGroup,c=[{aStr:{x:"xbins.start",y:"ybins.start"},name:"start"},{aStr:{x:"xbins.end",y:"ybins.end"},name:"end"},{aStr:{x:"xbins.size",y:"ybins.size"},name:"size"},{aStr:{x:"nbinsx",y:"nbinsy"},name:"nbins"}],u=["x","y"];t.exports=function(t,e){var n,f,h,d,p,g,v,m=e._histogramBinOpts={},y=[],b={},x=[];function w(t,e){return r.coerce(n._input,n,n._module.attributes,t,e)}function _(t){return"v"===t.orientation?"x":"y"}function k(t,n,a){var o=t.uid+"__"+a;n||(n=o);var s=function(t,n){return i.getFromTrace({_fullLayout:e},t,n).type}(t,a),l=t[a+"calendar"]||"",c=m[n],u=!0;c&&(s===c.axType&&l===c.calendar?(u=!1,c.traces.push(t),c.dirs.push(a)):(n=o,s!==c.axType&&r.warn(["Attempted to group the bins of trace",t.index,"set on a","type:"+s,"axis","with bins on","type:"+c.axType,"axis."].join(" ")),l!==c.calendar&&r.warn(["Attempted to group the bins of trace",t.index,"set with a",l,"calendar","with bins",c.calendar?"on a "+c.calendar+" calendar":"w/o 
a set calendar"].join(" ")))),u&&(m[n]={traces:[t],dirs:[a],axType:s,calendar:t[a+"calendar"]||""}),t["_"+a+"bingroup"]=n}for(p=0;pS&&k.splice(S,k.length-S),A.length>S&&A.splice(S,A.length-S);var E=[],C=[],P=[],O="string"===typeof _.size,L="string"===typeof M.size,I=[],D=[],R=O?I:_,z=L?D:M,N=0,j=[],F=[],B=e.histnorm,U=e.histfunc,H=-1!==B.indexOf("density"),V="max"===U||"min"===U?null:0,q=a.count,G=o[B],W=!1,Y=[],$=[],X="z"in e?e.z:"marker"in e&&Array.isArray(e.marker.color)?e.marker.color:"";X&&"count"!==U&&(W="avg"===U,q=a[U]);var K=_.size,Z=b(_.start),J=b(_.end)+(Z-i.tickIncrement(Z,K,!1,m))/1e6;for(n=Z;n=0&&d=0&&p-1,flipY:P.tiling.flip.indexOf("y")>-1,orientation:P.tiling.orientation,pad:{inner:P.tiling.pad},maxDepth:P._maxDepth}).descendants(),R=1/0,z=-1/0;D.forEach((function(t){var e=t.depth;e>=P._maxDepth?(t.x0=t.x1=(t.x0+t.x1)/2,t.y0=t.y1=(t.y0+t.y1)/2):(R=Math.min(R,e),z=Math.max(z,e))})),p=p.data(D,u.getPtId),P._maxVisibleLayers=isFinite(z)?z-R+1:0,p.enter().append("g").classed("slice",!0),T(p,d,{},[v,m],x),p.order();var N=null;if(k&&S){var j=u.getPtId(S);p.each((function(t){null===N&&u.getPtId(t)===j&&(N={x0:t.x0,x1:t.x1,y0:t.y0,y1:t.y1})}))}var F=function(){return N||{x0:0,x1:v,y0:0,y1:m}},B=p;return k&&(B=B.transition().each("end",(function(){var e=r.select(this);u.setSliceCursor(e,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})}))),B.each((function(s){s._x0=y(s.x0),s._x1=y(s.x1),s._y0=b(s.y0),s._y1=b(s.y1),s._hoverX=y(s.x1-P.tiling.pad),s._hoverY=b(I?s.y1-P.tiling.pad/2:s.y0+P.tiling.pad/2);var p=r.select(this),g=i.ensureSingle(p,"path","surface",(function(t){t.style("pointer-events",E?"none":"all")}));k?g.transition().attrTween("d",(function(t){var e=M(t,d,F(),[v,m],{orientation:P.tiling.orientation,flipX:P.tiling.flip.indexOf("x")>-1,flipY:P.tiling.flip.indexOf("y")>-1});return function(t){return x(e(t))}})):g.attr("d",x),p.call(f,n,t,e,{styleOne:l,eventDataKeys:c.eventDataKeys,transitionTime:c.CLICK_TRANSITION_TIME,transitionEasing:c.CLICK_TRANSITION_EASING}).call(u.setSliceCursor,t,{isTransitioning:t._transitioning}),g.call(l,s,P,t,{hovered:!1}),s.x0===s.x1||s.y0===s.y1?s._text="":s._text=h(s,n,P,e,C)||"";var T=i.ensureSingle(p,"g","slicetext"),S=i.ensureSingle(T,"text","",(function(t){t.attr("data-notex",1)})),D=i.ensureUniformFontSize(t,u.determineTextFont(P,s,C.font));S.text(s._text||" ").classed("slicetext",!0).attr("text-anchor",L?"end":O?"start":"middle").call(a.font,D).call(o.convertToTspans,t),s.textBB=a.bBox(S.node()),s.transform=w(s,{fontSize:D.size}),s.transform.fontSize=D.size,k?S.transition().attrTween("transform",(function(t){var e=A(t,d,F(),[v,m]);return function(t){return _(e(t))}})):S.attr("transform",_(s))})),N}},69816:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"icicle",basePlotModule:n(96346),categories:[],animatable:!0,attributes:n(46291),layoutAttributes:n(92894),supplyDefaults:n(56524),supplyLayoutDefaults:n(21070),calc:n(46584).y,crossTraceCalc:n(46584).T,plot:n(85596),style:n(82454).style,colorbar:n(4898),meta:{}}},92894:function(t){"use strict";t.exports={iciclecolorway:{valType:"colorlist",editType:"calc"},extendiciclecolors:{valType:"boolean",dflt:!0,editType:"calc"}}},21070:function(t,e,n){"use strict";var r=n(71828),i=n(92894);t.exports=function(t,e){function n(n,a){return r.coerce(t,e,i,n,a)}n("iciclecolorway",e.colorway),n("extendiciclecolors")}},21538:function(t,e,n){"use strict";var r=n(674),i=n(14102);t.exports=function(t,e,n){var 
a=n.flipX,o=n.flipY,s="h"===n.orientation,l=n.maxDepth,c=e[0],u=e[1];l&&(c=(t.height+1)*e[0]/Math.min(t.height+1,l),u=(t.height+1)*e[1]/Math.min(t.height+1,l));var f=r.partition().padding(n.pad.inner).size(s?[e[1],c]:[e[0],u])(t);return(s||a||o)&&i(f,e,{swapXY:s,flipX:a,flipY:o}),f}},85596:function(t,e,n){"use strict";var r=n(80694),i=n(90666);t.exports=function(t,e,n,a){return r(t,e,n,a,{type:"icicle",drawDescendants:i})}},82454:function(t,e,n){"use strict";var r=n(39898),i=n(7901),a=n(71828),o=n(72597).resizeText,s=n(43467);function l(t,e,n,r){var o=e.data.data,l=!e.children,c=o.i,u=a.castOption(n,c,"marker.line.color")||i.defaultLine,f=a.castOption(n,c,"marker.line.width")||0;t.call(s,e,n,r).style("stroke-width",f).call(i.stroke,u).style("opacity",l?n.leaf.opacity:null)}t.exports={style:function(t){var e=t._fullLayout._iciclelayer.selectAll(".trace");o(t,e,"icicle"),e.each((function(e){var n=r.select(this),i=e[0].trace;n.style("opacity",i.opacity),n.selectAll("path.surface").each((function(e){r.select(this).call(l,e,i,t)}))}))},styleOne:l}},17230:function(t,e,n){"use strict";for(var r=n(9012),i=n(5386).fF,a=n(1426).extendFlat,o=n(51877).colormodel,s=["rgb","rgba","rgba256","hsl","hsla"],l=[],c=[],u=0;u0||r.inbox(n-o.y0,n-(o.y0+o.h*s.dy),0)>0)){var u,f=Math.floor((e-o.x0)/s.dx),h=Math.floor(Math.abs(n-o.y0)/s.dy);if(s._hasZ?u=o.z[h][f]:s._hasSource&&(u=s._canvas.el.getContext("2d",{willReadFrequently:!0}).getImageData(f,h,1,1).data),u){var d,p=o.hi||s.hoverinfo;if(p){var g=p.split("+");-1!==g.indexOf("all")&&(g=["color"]),-1!==g.indexOf("color")&&(d=!0)}var v,m=a.colormodel[s.colormodel],y=m.colormodel||s.colormodel,b=y.length,x=s._scaler(u),w=m.suffix,_=[];(s.hovertemplate||d)&&(_.push("["+[x[0]+w[0],x[1]+w[1],x[2]+w[2]].join(", ")),4===b&&_.push(", "+x[3]+w[3]),_.push("]"),_=_.join(""),t.extraText=y.toUpperCase()+": "+_),Array.isArray(s.hovertext)&&Array.isArray(s.hovertext[h])?v=s.hovertext[h][f]:Array.isArray(s.text)&&Array.isArray(s.text[h])&&(v=s.text[h][f]);var k=c.c2p(o.y0+(h+.5)*s.dy),T=o.x0+(f+.5)*s.dx,M=o.y0+(h+.5)*s.dy,A="["+u.slice(0,s.colormodel.length).join(", ")+"]";return[i.extendFlat(t,{index:[h,f],x0:l.c2p(o.x0+f*s.dx),x1:l.c2p(o.x0+(f+1)*s.dx),y0:k,y1:k,color:x,xVal:T,xLabelVal:T,yVal:M,yLabelVal:M,zLabelVal:A,text:v,hovertemplateLabels:{zLabel:A,colorLabel:_,"color[0]Label":x[0]+w[0],"color[1]Label":x[1]+w[1],"color[2]Label":x[2]+w[2],"color[3]Label":x[3]+w[3]}})]}}}},94507:function(t,e,n){"use strict";t.exports={attributes:n(17230),supplyDefaults:n(13245),calc:n(71113),plot:n(60775),style:n(12826),hoverPoints:n(28749),eventData:n(30835),moduleType:"trace",name:"image",basePlotModule:n(93612),categories:["cartesian","svg","2dMap","noSortingByValue"],animatable:!1,meta:{}}},60775:function(t,e,n){"use strict";var r=n(39898),i=n(71828),a=i.strTranslate,o=n(77922),s=n(51877),l=n(3883),c=n(32396).STYLE;t.exports=function(t,e,n,u){var f=e.xaxis,h=e.yaxis,d=!t._context._exportedPlot&&l();i.makeTraceGroups(u,n,"im").each((function(e){var n=r.select(this),l=e[0],u=l.trace,p=("fast"===u.zsmooth||!1===u.zsmooth&&d)&&!u._hasZ&&u._hasSource&&"linear"===f.type&&"linear"===h.type;u._realImage=p;var g,v,m,y,b,x,w=l.z,_=l.x0,k=l.y0,T=l.w,M=l.h,A=u.dx,S=u.dy;for(x=0;void 0===g&&x0;)v=f.c2p(_+x*A),x--;for(x=0;void 0===y&&x0;)b=h.c2p(k+x*S),x--;vI[0];if(D||R){var z=g+E/2,N=y+C/2;O+="transform:"+a(z+"px",N+"px")+"scale("+(D?-1:1)+","+(R?-1:1)+")"+a(-z+"px",-N+"px")+";"}}P.attr("style",O);var j=new Promise((function(t){if(u._hasZ)t();else 
if(u._hasSource)if(u._canvas&&u._canvas.el.width===T&&u._canvas.el.height===M&&u._canvas.source===u.source)t();else{var e=document.createElement("canvas");e.width=T,e.height=M;var n=e.getContext("2d",{willReadFrequently:!0});u._image=u._image||new Image;var r=u._image;r.onload=function(){n.drawImage(r,0,0),u._canvas={el:e,source:u.source},t()},r.setAttribute("src",u.source)}})).then((function(){var t,e;if(u._hasZ)e=F((function(t,e){return w[e][t]})),t=e.toDataURL("image/png");else if(u._hasSource)if(p)t=u.source;else{var n=u._canvas.el.getContext("2d",{willReadFrequently:!0}).getImageData(0,0,T,M).data;e=F((function(t,e){var r=4*(e*T+t);return[n[r],n[r+1],n[r+2],n[r+3]]})),t=e.toDataURL("image/png")}P.attr({"xlink:href":t,height:C,width:E,x:g,y:y})}));t._promises.push(j)}function F(t){var e=document.createElement("canvas");e.width=E,e.height=C;var n,r=e.getContext("2d",{willReadFrequently:!0}),a=function(t){return i.constrain(Math.round(f.c2p(_+t*A)-g),0,E)},o=function(t){return i.constrain(Math.round(h.c2p(k+t*S)-y),0,C)},c=s.colormodel[u.colormodel],d=c.colormodel||u.colormodel,p=c.fmt;for(x=0;x0}function k(t){t.each((function(t){y.stroke(r.select(this),t.line.color)})).each((function(t){y.fill(r.select(this),t.color)})).style("stroke-width",(function(t){return t.line.width}))}function T(t,e,n){var r=t._fullLayout,i=o.extendFlat({type:"linear",ticks:"outside",range:n,showline:!0},e),a={type:"linear",_id:"x"+e._id},s={letter:"x",font:r.font,noHover:!0,noTickson:!0};function l(t,e){return o.coerce(i,a,m,t,e)}return g(i,a,l,s,r),v(i,a,l,s),a}function M(t,e,n){return[Math.min(e/t.width,n/t.height),t,e+"x"+n]}function A(t,e,n,i){var a=document.createElementNS("http://www.w3.org/2000/svg","text"),o=r.select(a);return o.text(t).attr("x",0).attr("y",0).attr("text-anchor",n).attr("data-unformatted",t).call(d.convertToTspans,i).call(f.font,e),f.bBox(o.node())}function S(t,e,n,r,i,a){var s="_cache"+e;t[s]&&t[s].key===i||(t[s]={key:i,value:n});var l=o.aggNums(a,null,[t[s].value,r],2);return t[s].value=l,l}t.exports=function(t,e,n,g){var v,m=t._fullLayout;_(n)&&g&&(v=g()),o.makeTraceGroups(m._indicatorlayer,e,"trace").each((function(e){var g,E,C,P,O,L=e[0].trace,I=r.select(this),D=L._hasGauge,R=L._isAngular,z=L._isBullet,N=L.domain,j={w:m._size.w*(N.x[1]-N.x[0]),h:m._size.h*(N.y[1]-N.y[0]),l:m._size.l+m._size.w*N.x[0],r:m._size.r+m._size.w*(1-N.x[1]),t:m._size.t+m._size.h*(1-N.y[1]),b:m._size.b+m._size.h*N.y[0]},F=j.l+j.w/2,B=j.t+j.h/2,U=Math.min(j.w/2,j.h),H=h.innerRadius*U,V=L.align||"center";if(E=B,D){if(R&&(g=F,E=B+U/2,C=function(t){return function(t,e){var n=Math.sqrt(t.width/2*(t.width/2)+t.height*t.height),r=e/n;return[r,t,e]}(t,.9*H)}),z){var q=h.bulletPadding,G=1-h.bulletNumberDomainSize+q;g=j.l+(G+(1-G)*x[V])*j.w,C=function(t){return M(t,(h.bulletNumberDomainSize-q)*j.w,j.h)}}}else g=j.l+x[V]*j.w,C=function(t){return M(t,j.w,j.h)};!function(t,e,n,i){var c,u,h,g=n[0].trace,v=i.numbersX,m=i.numbersY,k=g.align||"center",M=b[k],E=i.transitionOpts,C=i.onComplete,P=o.ensureSingle(e,"g","numbers"),O=[];g._hasNumber&&O.push("number"),g._hasDelta&&(O.push("delta"),"left"===g.delta.position&&O.reverse());var L=P.selectAll("text").data(O);function I(e,n,r,i){if(!e.match("s")||r>=0===i>=0||n(r).slice(-1).match(w)||n(i).slice(-1).match(w))return n;var a=e.slice().replace("s","f").replace(/\d+/,(function(t){return parseInt(t)-1})),o=T(t,{tickformat:a});return function(t){return Math.abs(t)<1?p.tickText(o,t).text:n(t)}}function D(){var 
e=T(t,{tickformat:g.number.valueformat},g._range);e.setScale(),p.prepTicks(e);var i=function(t){return p.tickText(e,t).text},o=g.number.suffix,s=g.number.prefix,l=P.select("text.number");function u(){var e="number"===typeof n[0].y?s+i(n[0].y)+o:"-";l.text(e).call(f.font,g.number.font).call(d.convertToTspans,t)}return _(E)?l.transition().duration(E.duration).ease(E.easing).each("end",(function(){u(),C&&C()})).each("interrupt",(function(){u(),C&&C()})).attrTween("text",(function(){var t=r.select(this),e=a(n[0].lastY,n[0].y);g._lastValue=n[0].y;var l=I(g.number.valueformat,i,n[0].lastY,n[0].y);return function(n){t.text(s+l(e(n))+o)}})):u(),c=A(s+i(n[0].y)+o,g.number.font,M,t),l}function R(){var e=T(t,{tickformat:g.delta.valueformat},g._range);e.setScale(),p.prepTicks(e);var i=function(t){return p.tickText(e,t).text},o=g.delta.suffix,s=g.delta.prefix,l=function(t){return g.delta.relative?t.relativeDelta:t.delta},c=function(t,e){return 0===t||"number"!==typeof t||isNaN(t)?"-":(t>0?g.delta.increasing.symbol:g.delta.decreasing.symbol)+s+e(t)+o},h=function(t){return t.delta>=0?g.delta.increasing.color:g.delta.decreasing.color};void 0===g._deltaLastValue&&(g._deltaLastValue=l(n[0]));var v=P.select("text.delta");function m(){v.text(c(l(n[0]),i)).call(y.fill,h(n[0])).call(d.convertToTspans,t)}return v.call(f.font,g.delta.font).call(y.fill,h({delta:g._deltaLastValue})),_(E)?v.transition().duration(E.duration).ease(E.easing).tween("text",(function(){var t=r.select(this),e=l(n[0]),o=g._deltaLastValue,s=I(g.delta.valueformat,i,o,e),u=a(o,e);return g._deltaLastValue=e,function(e){t.text(c(u(e),s)),t.call(y.fill,h({delta:u(e)}))}})).each("end",(function(){m(),C&&C()})).each("interrupt",(function(){m(),C&&C()})):m(),u=A(c(l(n[0]),i),g.delta.font,M,t),v}L.enter().append("text"),L.attr("text-anchor",(function(){return M})).attr("class",(function(t){return t})).attr("x",null).attr("y",null).attr("dx",null).attr("dy",null),L.exit().remove();var z,N=g.mode+g.align;if(g._hasDelta&&(z=R(),N+=g.delta.position+g.delta.font.size+g.delta.font.family+g.delta.valueformat,N+=g.delta.increasing.symbol+g.delta.decreasing.symbol,h=u),g._hasNumber&&(D(),N+=g.number.font.size+g.number.font.family+g.number.valueformat+g.number.suffix+g.number.prefix,h=c),g._hasDelta&&g._hasNumber){var j,F,B=[(c.left+c.right)/2,(c.top+c.bottom)/2],U=[(u.left+u.right)/2,(u.top+u.bottom)/2],H=.75*g.delta.font.size;"left"===g.delta.position&&(j=S(g,"deltaPos",0,-1*(c.width*x[g.align]+u.width*(1-x[g.align])+H),N,Math.min),F=B[1]-U[1],h={width:c.width+u.width+H,height:Math.max(c.height,u.height),left:u.left+j,right:c.right,top:Math.min(c.top,u.top+F),bottom:Math.max(c.bottom,u.bottom+F)}),"right"===g.delta.position&&(j=S(g,"deltaPos",0,c.width*(1-x[g.align])+u.width*x[g.align]+H,N,Math.max),F=B[1]-U[1],h={width:c.width+u.width+H,height:Math.max(c.height,u.height),left:c.left,right:u.right+j,top:Math.min(c.top,u.top+F),bottom:Math.max(c.bottom,u.bottom+F)}),"bottom"===g.delta.position&&(j=null,F=u.height,h={width:Math.max(c.width,u.width),height:c.height+u.height,left:Math.min(c.left,u.left),right:Math.max(c.right,u.right),top:c.bottom-c.height,bottom:c.bottom+u.height}),"top"===g.delta.position&&(j=null,F=c.top,h={width:Math.max(c.width,u.width),height:c.height+u.height,left:Math.min(c.left,u.left),right:Math.max(c.right,u.right),top:c.bottom-c.height-u.height,bottom:c.bottom}),z.attr({dx:j,dy:F})}(g._hasNumber||g._hasDelta)&&P.attr("transform",(function(){var t=i.numbersScaler(h);N+=t[2];var 
e,n=S(g,"numbersScale",1,t[0],N,Math.min);g._scaleNumbers||(n=1),e=g._isAngular?m-n*h.bottom:m-n*(h.top+h.bottom)/2,g._numbersTop=n*h.top+e;var r=h[k];"center"===k&&(r=(h.left+h.right)/2);var a=v-n*r;return a=S(g,"numbersTranslate",0,a,N,Math.max),l(a,e)+s(n)}))}(t,I,e,{numbersX:g,numbersY:E,numbersScaler:C,transitionOpts:n,onComplete:v}),D&&(P={range:L.gauge.axis.range,color:L.gauge.bgcolor,line:{color:L.gauge.bordercolor,width:0},thickness:1},O={range:L.gauge.axis.range,color:"rgba(0, 0, 0, 0)",line:{color:L.gauge.bordercolor,width:L.gauge.borderwidth},thickness:1});var W=I.selectAll("g.angular").data(R?e:[]);W.exit().remove();var Y=I.selectAll("g.angularaxis").data(R?e:[]);Y.exit().remove(),R&&function(t,e,n,a){var o,s,f,h,d=n[0].trace,g=a.size,v=a.radius,m=a.innerRadius,y=a.gaugeBg,b=a.gaugeOutline,x=[g.l+g.w/2,g.t+g.h/2+v/2],w=a.gauge,M=a.layer,A=a.transitionOpts,S=a.onComplete,E=Math.PI/2;function C(t){var e=d.gauge.axis.range[0],n=(t-e)/(d.gauge.axis.range[1]-e)*Math.PI-E;return n<-E?-E:n>E?E:n}function P(t){return r.svg.arc().innerRadius((m+v)/2-t/2*(v-m)).outerRadius((m+v)/2+t/2*(v-m)).startAngle(-E)}function O(t){t.attr("d",(function(t){return P(t.thickness).startAngle(C(t.range[0])).endAngle(C(t.range[1]))()}))}w.enter().append("g").classed("angular",!0),w.attr("transform",l(x[0],x[1])),M.enter().append("g").classed("angularaxis",!0).classed("crisp",!0),M.selectAll("g.xangularaxistick,path,text").remove(),(o=T(t,d.gauge.axis)).type="linear",o.range=d.gauge.axis.range,o._id="xangularaxis",o.ticklabeloverflow="allow",o.setScale();var L=function(t){return(o.range[0]-t.x)/(o.range[1]-o.range[0])*Math.PI+Math.PI},I={},D=p.makeLabelFns(o,0).labelStandoff;I.xFn=function(t){var e=L(t);return Math.cos(e)*D},I.yFn=function(t){var e=L(t),n=Math.sin(e)>0?.2:1;return-Math.sin(e)*(D+t.fontSize*n)+Math.abs(Math.cos(e))*(t.fontSize*u)},I.anchorFn=function(t){var e=L(t),n=Math.cos(e);return Math.abs(n)<.1?"middle":n>0?"start":"end"},I.heightFn=function(t,e,n){var r=L(t);return-.5*(1+Math.sin(r))*n};var R=function(t){return l(x[0]+v*Math.cos(t),x[1]-v*Math.sin(t))};f=function(t){return R(L(t))};var z=function(t){var e=L(t);return R(e)+"rotate("+-c(e)+")"};if(s=p.calcTicks(o),h=p.getTickSigns(o)[2],o.visible){h="inside"===o.ticks?-1:1;var N=(o.linewidth||1)/2;p.drawTicks(t,o,{vals:s,layer:M,path:"M"+h*N+",0h"+h*o.ticklen,transFn:z}),p.drawLabels(t,o,{vals:s,layer:M,transFn:f,labelFns:I})}var j=[y].concat(d.gauge.steps),F=w.selectAll("g.bg-arc").data(j);F.enter().append("g").classed("bg-arc",!0).append("path"),F.select("path").call(O).call(k),F.exit().remove();var B=P(d.gauge.bar.thickness),U=w.selectAll("g.value-arc").data([d.gauge.bar]);U.enter().append("g").classed("value-arc",!0).append("path");var H,V,q,G=U.select("path");_(A)?(G.transition().duration(A.duration).ease(A.easing).each("end",(function(){S&&S()})).each("interrupt",(function(){S&&S()})).attrTween("d",(H=B,V=C(n[0].lastY),q=C(n[0].y),function(){var t=i(V,q);return function(e){return H.endAngle(t(e))()}})),d._lastValue=n[0].y):G.attr("d","number"===typeof n[0].y?B.endAngle(C(n[0].y)):"M0,0Z"),G.call(k),U.exit().remove(),j=[];var W=d.gauge.threshold.value;(W||0===W)&&j.push({range:[W,W],color:d.gauge.threshold.color,line:{color:d.gauge.threshold.line.color,width:d.gauge.threshold.line.width},thickness:d.gauge.threshold.thickness});var Y=w.selectAll("g.threshold-arc").data(j);Y.enter().append("g").classed("threshold-arc",!0).append("path"),Y.select("path").call(O).call(k),Y.exit().remove();var 
$=w.selectAll("g.gauge-outline").data([b]);$.enter().append("g").classed("gauge-outline",!0).append("path"),$.select("path").call(O).call(k),$.exit().remove()}(t,0,e,{radius:U,innerRadius:H,gauge:W,layer:Y,size:j,gaugeBg:P,gaugeOutline:O,transitionOpts:n,onComplete:v});var $=I.selectAll("g.bullet").data(z?e:[]);$.exit().remove();var X=I.selectAll("g.bulletaxis").data(z?e:[]);X.exit().remove(),z&&function(t,e,n,r){var i,a,o,s,c,u=n[0].trace,f=r.gauge,d=r.layer,g=r.gaugeBg,v=r.gaugeOutline,m=r.size,b=u.domain,x=r.transitionOpts,w=r.onComplete;f.enter().append("g").classed("bullet",!0),f.attr("transform",l(m.l,m.t)),d.enter().append("g").classed("bulletaxis",!0).classed("crisp",!0),d.selectAll("g.xbulletaxistick,path,text").remove();var M=m.h,A=u.gauge.bar.thickness*M,S=b.x[0],E=b.x[0]+(b.x[1]-b.x[0])*(u._hasNumber||u._hasDelta?1-h.bulletNumberDomainSize:1);function C(t){t.attr("width",(function(t){return Math.max(0,i.c2p(t.range[1])-i.c2p(t.range[0]))})).attr("x",(function(t){return i.c2p(t.range[0])})).attr("y",(function(t){return.5*(1-t.thickness)*M})).attr("height",(function(t){return t.thickness*M}))}(i=T(t,u.gauge.axis))._id="xbulletaxis",i.domain=[S,E],i.setScale(),a=p.calcTicks(i),o=p.makeTransTickFn(i),s=p.getTickSigns(i)[2],c=m.t+m.h,i.visible&&(p.drawTicks(t,i,{vals:"inside"===i.ticks?p.clipEnds(i,a):a,layer:d,path:p.makeTickPath(i,c,s),transFn:o}),p.drawLabels(t,i,{vals:a,layer:d,transFn:o,labelFns:p.makeLabelFns(i,c)}));var P=[g].concat(u.gauge.steps),O=f.selectAll("g.bg-bullet").data(P);O.enter().append("g").classed("bg-bullet",!0).append("rect"),O.select("rect").call(C).call(k),O.exit().remove();var L=f.selectAll("g.value-bullet").data([u.gauge.bar]);L.enter().append("g").classed("value-bullet",!0).append("rect"),L.select("rect").attr("height",A).attr("y",(M-A)/2).call(k),_(x)?L.select("rect").transition().duration(x.duration).ease(x.easing).each("end",(function(){w&&w()})).each("interrupt",(function(){w&&w()})).attr("width",Math.max(0,i.c2p(Math.min(u.gauge.axis.range[1],n[0].y)))):L.select("rect").attr("width","number"===typeof n[0].y?Math.max(0,i.c2p(Math.min(u.gauge.axis.range[1],n[0].y))):0),L.exit().remove();var I=n.filter((function(){return u.gauge.threshold.value||0===u.gauge.threshold.value})),D=f.selectAll("g.threshold-bullet").data(I);D.enter().append("g").classed("threshold-bullet",!0).append("line"),D.select("line").attr("x1",i.c2p(u.gauge.threshold.value)).attr("x2",i.c2p(u.gauge.threshold.value)).attr("y1",(1-u.gauge.threshold.thickness)/2*M).attr("y2",(1-(1-u.gauge.threshold.thickness)/2)*M).call(y.stroke,u.gauge.threshold.line.color).style("stroke-width",u.gauge.threshold.line.width),D.exit().remove();var R=f.selectAll("g.gauge-outline").data([v]);R.enter().append("g").classed("gauge-outline",!0).append("rect"),R.select("rect").call(C).call(k),R.exit().remove()}(t,0,e,{gauge:$,layer:X,size:j,gaugeBg:P,gaugeOutline:O,transitionOpts:n,onComplete:v});var K=I.selectAll("text.title").data(e);K.exit().remove(),K.enter().append("text").classed("title",!0),K.attr("text-anchor",(function(){return z?b.right:b[L.title.align]})).text(L.title.text).call(f.font,L.title.font).call(d.convertToTspans,t),K.attr("transform",(function(){var t,e=j.l+j.w*x[L.title.align],n=h.titlePadding,r=f.bBox(K.node());return D?(R&&(t=L.gauge.axis.visible?f.bBox(Y.node()).top-n-r.bottom:j.t+j.h/2-U/2-r.bottom-n),z&&(t=E-(r.top+r.bottom)/2,e=j.l-h.bulletPadding*j.w)):t=L._numbersTop-n-r.bottom,l(e,t)}))}))}},16249:function(t,e,n){"use strict";var 
r=n(50693),i=n(12663).axisHoverFormat,a=n(5386).fF,o=n(2418),s=n(9012),l=n(1426).extendFlat,c=n(30962).overrideAll,u=t.exports=c(l({x:{valType:"data_array"},y:{valType:"data_array"},z:{valType:"data_array"},value:{valType:"data_array"},isomin:{valType:"number"},isomax:{valType:"number"},surface:{show:{valType:"boolean",dflt:!0},count:{valType:"integer",dflt:2,min:1},fill:{valType:"number",min:0,max:1,dflt:1},pattern:{valType:"flaglist",flags:["A","B","C","D","E"],extras:["all","odd","even"],dflt:"all"}},spaceframe:{show:{valType:"boolean",dflt:!1},fill:{valType:"number",min:0,max:1,dflt:.15}},slices:{x:{show:{valType:"boolean",dflt:!1},locations:{valType:"data_array",dflt:[]},fill:{valType:"number",min:0,max:1,dflt:1}},y:{show:{valType:"boolean",dflt:!1},locations:{valType:"data_array",dflt:[]},fill:{valType:"number",min:0,max:1,dflt:1}},z:{show:{valType:"boolean",dflt:!1},locations:{valType:"data_array",dflt:[]},fill:{valType:"number",min:0,max:1,dflt:1}}},caps:{x:{show:{valType:"boolean",dflt:!0},fill:{valType:"number",min:0,max:1,dflt:1}},y:{show:{valType:"boolean",dflt:!0},fill:{valType:"number",min:0,max:1,dflt:1}},z:{show:{valType:"boolean",dflt:!0},fill:{valType:"number",min:0,max:1,dflt:1}}},text:{valType:"string",dflt:"",arrayOk:!0},hovertext:{valType:"string",dflt:"",arrayOk:!0},hovertemplate:a(),xhoverformat:i("x"),yhoverformat:i("y"),zhoverformat:i("z"),valuehoverformat:i("value",1),showlegend:l({},s.showlegend,{dflt:!1})},r("",{colorAttr:"`value`",showScaleDflt:!0,editTypeOverride:"calc"}),{opacity:o.opacity,lightposition:o.lightposition,lighting:o.lighting,flatshading:o.flatshading,contour:o.contour,hoverinfo:l({},s.hoverinfo)}),"calc","nested");u.flatshading.dflt=!0,u.lighting.facenormalsepsilon.dflt=0,u.x.editType=u.y.editType=u.z.editType=u.value.editType="calc+clearAxisTypes",u.transforms=void 0},56959:function(t,e,n){"use strict";var r=n(78803),i=n(88489).processGrid,a=n(88489).filter;t.exports=function(t,e){e._len=Math.min(e.x.length,e.y.length,e.z.length,e.value.length),e._x=a(e.x,e._len),e._y=a(e.y,e._len),e._z=a(e.z,e._len),e._value=a(e.value,e._len);var n=i(e);e._gridFill=n.fill,e._Xs=n.Xs,e._Ys=n.Ys,e._Zs=n.Zs,e._len=n.len;for(var o=1/0,s=-1/0,l=0;l0;n--){var r=Math.min(e[n],e[n-1]),i=Math.max(e[n],e[n-1]);if(i>r&&r-1}function z(t,e){return null===t?e:t}function N(e,n,r){P();var i,a,o,l=[n],c=[r];if(s>=1)l=[n],c=[r];else if(s>0){var u=function(t,e){var n=t[0],r=t[1],i=t[2],a=function(t,e,n){for(var r=[],i=0;i-1?r[d]:C(p,g,m);h[d]=b>-1?b:L(p,g,m,z(e,y))}i=h[0],a=h[1],o=h[2],t._meshI.push(i),t._meshJ.push(a),t._meshK.push(o),++v}}function j(t,e,n,r){var i=t[3];ir&&(i=r);for(var a=(t[3]-i)/(t[3]-e[3]+1e-9),o=[],s=0;s<4;s++)o[s]=(1-a)*t[s]+a*e[s];return o}function F(t,e,n){return t>=e&&t<=n}function B(t){var e=.001*(E-S);return t>=S-e&&t<=E+e}function U(e){for(var n=[],r=0;r<4;r++){var i=e[r];n.push([t._x[i],t._y[i],t._z[i],t._value[i]])}return n}var H=3;function V(t,e,n,r,i,a){a||(a=1),n=[-1,-1,-1];var o=!1,s=[F(e[0][3],r,i),F(e[1][3],r,i),F(e[2][3],r,i)];if(!s[0]&&!s[1]&&!s[2])return!1;var l=function(t,e,n){return B(e[0][3])&&B(e[1][3])&&B(e[2][3])?(N(t,e,n),!0):aMath.abs(k-A)?[M,k]:[k,A];tt(n,C[0],C[1])}}var P=[[Math.min(S,A),Math.max(S,A)],[Math.min(M,E),Math.max(M,E)]];["x","y","z"].forEach((function(n){for(var r=[],i=0;i0&&(f.push(p.id),"x"===n?h.push([p.distRatio,0,0]):"y"===n?h.push([0,p.distRatio,0]):h.push([0,0,p.distRatio]))}else 
u=it(1,"x"===n?x-1:"y"===n?w-1:_-1);f.length>0&&(r[a]="x"===n?et(e,f,o,s,h,r[a]):"y"===n?nt(e,f,o,s,h,r[a]):rt(e,f,o,s,h,r[a]),a++),u.length>0&&(r[a]="x"===n?K(e,u,o,s,r[a]):"y"===n?Z(e,u,o,s,r[a]):J(e,u,o,s,r[a]),a++)}var g=t.caps[n];g.show&&g.fill&&(D(g.fill),r[a]="x"===n?K(e,[0,x-1],o,s,r[a]):"y"===n?Z(e,[0,w-1],o,s,r[a]):J(e,[0,_-1],o,s,r[a]),a++)}})),0===v&&O(),t._meshX=r,t._meshY=i,t._meshZ=a,t._meshIntensity=o,t._Xs=m,t._Ys=y,t._Zs=b}(),t}t.exports={findNearestOnAxis:l,generateIsoMeshes:h,createIsosurfaceTrace:function(t,e){var n=t.glplot.gl,i=r({gl:n}),a=new c(t,i,e.uid);return i._trace=a,a.update(e),t.glplot.add(i),a}}},82738:function(t,e,n){"use strict";var r=n(71828),i=n(73972),a=n(16249),o=n(1586);function s(t,e,n,r,a){var s=a("isomin"),l=a("isomax");void 0!==l&&null!==l&&void 0!==s&&null!==s&&s>l&&(e.isomin=null,e.isomax=null);var c=a("x"),u=a("y"),f=a("z"),h=a("value");c&&c.length&&u&&u.length&&f&&f.length&&h&&h.length?(i.getComponentMethod("calendars","handleTraceDefaults")(t,e,["x","y","z"],r),a("valuehoverformat"),["x","y","z"].forEach((function(t){a(t+"hoverformat");var e="caps."+t;a(e+".show")&&a(e+".fill");var n="slices."+t;a(n+".show")&&(a(n+".fill"),a(n+".locations"))})),a("spaceframe.show")&&a("spaceframe.fill"),a("surface.show")&&(a("surface.count"),a("surface.fill"),a("surface.pattern")),a("contour.show")&&(a("contour.color"),a("contour.width")),["text","hovertext","hovertemplate","lighting.ambient","lighting.diffuse","lighting.specular","lighting.roughness","lighting.fresnel","lighting.vertexnormalsepsilon","lighting.facenormalsepsilon","lightposition.x","lightposition.y","lightposition.z","flatshading","opacity"].forEach((function(t){a(t)})),o(t,e,r,a,{prefix:"",cLetter:"c"}),e._length=null):e.visible=!1}t.exports={supplyDefaults:function(t,e,n,i){s(t,e,0,i,(function(n,i){return r.coerce(t,e,a,n,i)}))},supplyIsoDefaults:s}},64943:function(t,e,n){"use strict";t.exports={attributes:n(16249),supplyDefaults:n(82738).supplyDefaults,calc:n(56959),colorbar:{min:"cmin",max:"cmax"},plot:n(22674).createIsosurfaceTrace,moduleType:"trace",name:"isosurface",basePlotModule:n(58547),categories:["gl3d","showLegend"],meta:{}}},2418:function(t,e,n){"use strict";var r=n(50693),i=n(12663).axisHoverFormat,a=n(5386).fF,o=n(54532),s=n(9012),l=n(1426).extendFlat;t.exports=l({x:{valType:"data_array",editType:"calc+clearAxisTypes"},y:{valType:"data_array",editType:"calc+clearAxisTypes"},z:{valType:"data_array",editType:"calc+clearAxisTypes"},i:{valType:"data_array",editType:"calc"},j:{valType:"data_array",editType:"calc"},k:{valType:"data_array",editType:"calc"},text:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertext:{valType:"string",dflt:"",arrayOk:!0,editType:"calc"},hovertemplate:a({editType:"calc"}),xhoverformat:i("x"),yhoverformat:i("y"),zhoverformat:i("z"),delaunayaxis:{valType:"enumerated",values:["x","y","z"],dflt:"z",editType:"calc"},alphahull:{valType:"number",dflt:-1,editType:"calc"},intensity:{valType:"data_array",editType:"calc"},intensitymode:{valType:"enumerated",values:["vertex","cell"],dflt:"vertex",editType:"calc"},color:{valType:"color",editType:"calc"},vertexcolor:{valType:"data_array",editType:"calc"},facecolor:{valType:"data_array",editType:"calc"},transforms:void 
0},r("",{colorAttr:"`intensity`",showScaleDflt:!0,editTypeOverride:"calc"}),{opacity:o.opacity,flatshading:{valType:"boolean",dflt:!1,editType:"calc"},contour:{show:l({},o.contours.x.show,{}),color:o.contours.x.color,width:o.contours.x.width,editType:"calc"},lightposition:{x:l({},o.lightposition.x,{dflt:1e5}),y:l({},o.lightposition.y,{dflt:1e5}),z:l({},o.lightposition.z,{dflt:0}),editType:"calc"},lighting:l({vertexnormalsepsilon:{valType:"number",min:0,max:1,dflt:1e-12,editType:"calc"},facenormalsepsilon:{valType:"number",min:0,max:1,dflt:1e-6,editType:"calc"},editType:"calc"},o.lighting),hoverinfo:l({},s.hoverinfo,{editType:"calc"}),showlegend:l({},s.showlegend,{dflt:!1})})},82932:function(t,e,n){"use strict";var r=n(78803);t.exports=function(t,e){e.intensity&&r(t,e,{vals:e.intensity,containerStr:"",cLetter:"c"})}},91134:function(t,e,n){"use strict";var r=n(9330).gl_mesh3d,i=n(9330).delaunay_triangulate,a=n(9330).alpha_shape,o=n(9330).convex_hull,s=n(81697).parseColorScale,l=n(78614),c=n(21081).extractOpts,u=n(90060);function f(t,e,n){this.scene=t,this.uid=n,this.mesh=e,this.name="",this.color="#fff",this.data=null,this.showContour=!1}var h=f.prototype;function d(t){for(var e=[],n=t.length,r=0;r=e-.5)return!1;return!0}h.handlePick=function(t){if(t.object===this.mesh){var e=t.index=t.data.index;t.data._cellCenter?t.traceCoordinate=t.data.dataCoordinate:t.traceCoordinate=[this.data.x[e],this.data.y[e],this.data.z[e]];var n=this.data.hovertext||this.data.text;return Array.isArray(n)&&void 0!==n[e]?t.textLabel=n[e]:n&&(t.textLabel=n),!0}},h.update=function(t){var e=this.scene,n=e.fullSceneLayout;this.data=t;var r,f=t.x.length,h=u(p(n.xaxis,t.x,e.dataScale[0],t.xcalendar),p(n.yaxis,t.y,e.dataScale[1],t.ycalendar),p(n.zaxis,t.z,e.dataScale[2],t.zcalendar));if(t.i&&t.j&&t.k){if(t.i.length!==t.j.length||t.j.length!==t.k.length||!v(t.i,f)||!v(t.j,f)||!v(t.k,f))return;r=u(g(t.i),g(t.j),g(t.k))}else r=0===t.alphahull?o(h):t.alphahull>0?a(t.alphahull,h):function(t,e){for(var n=["x","y","z"].indexOf(t),r=[],a=e.length,o=0;om):v=M>_,m=M;var A=c(_,k,T,M);A.pos=w,A.yc=(_+M)/2,A.i=x,A.dir=v?"increasing":"decreasing",A.x=A.pos,A.y=[T,k],y&&(A.orig_p=n[x]),p&&(A.tx=e.text[x]),g&&(A.htx=e.hovertext[x]),b.push(A)}else b.push({pos:w,empty:!0})}return e._extremes[l._id]=a.findExtremes(l,r.concat(h,f),{padded:!0}),b.length&&(b[0].t={labels:{open:i(t,"open:")+" ",high:i(t,"high:")+" ",low:i(t,"low:")+" ",close:i(t,"close:")+" "}}),b}t.exports={calc:function(t,e){var n=a.getFromId(t,e.xaxis),i=a.getFromId(t,e.yaxis),s=function(t,e,n){var i=n._minDiff;if(!i){var a,s=t._fullData,l=[];for(i=1/0,a=0;a"+c.labels[b]+r.hoverLabelText(s,x,l.yhoverformat):((y=i.extendFlat({},h)).y0=y.y1=w,y.yLabelVal=x,y.yLabel=c.labels[b]+r.hoverLabelText(s,x,l.yhoverformat),y.name="",f.push(y),v[x]=y)}return f}function h(t,e,n,i){var a=t.cd,o=t.ya,l=a[0].trace,f=a[0].t,h=u(t,e,n,i);if(!h)return[];var d=a[h.index],p=h.index=d.i,g=d.dir;function v(t){return f.labels[t]+r.hoverLabelText(o,l[t][p],l.yhoverformat)}var m=d.hi||l.hoverinfo,y=m.split("+"),b="all"===m,x=b||-1!==y.indexOf("y"),w=b||-1!==y.indexOf("text"),_=x?[v("open"),v("high"),v("low"),v("close")+" "+c[g]]:[];return w&&s(d,l,_),h.extraText=_.join("
"),h.y0=h.y1=o.c2p(d.yc,!0),[h]}t.exports={hoverPoints:function(t,e,n,r){return t.cd[0].trace.hoverlabel.split?f(t,e,n,r):h(t,e,n,r)},hoverSplit:f,hoverOnPoints:h}},54186:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"ohlc",basePlotModule:n(93612),categories:["cartesian","svg","showLegend"],meta:{},attributes:n(2522),supplyDefaults:n(16169),calc:n(3485).calc,plot:n(72314),style:n(53101),hoverPoints:n(66449).hoverPoints,selectPoints:n(67324)}},14555:function(t,e,n){"use strict";var r=n(73972),i=n(71828);t.exports=function(t,e,n,a){var o=n("x"),s=n("open"),l=n("high"),c=n("low"),u=n("close");if(n("hoverlabel.split"),r.getComponentMethod("calendars","handleTraceDefaults")(t,e,["x"],a),s&&l&&c&&u){var f=Math.min(s.length,l.length,c.length,u.length);return o&&(f=Math.min(f,i.minRowLength(o))),e._length=f,f}}},72314:function(t,e,n){"use strict";var r=n(39898),i=n(71828);t.exports=function(t,e,n,a){var o=e.yaxis,s=e.xaxis,l=!!s.rangebreaks;i.makeTraceGroups(a,n,"trace ohlc").each((function(t){var e=r.select(this),n=t[0],a=n.t;if(!0!==n.trace.visible||a.empty)e.remove();else{var c=a.tickLen,u=e.selectAll("path").data(i.identity);u.enter().append("path"),u.exit().remove(),u.attr("d",(function(t){if(t.empty)return"M0,0Z";var e=s.c2p(t.pos-c,!0),n=s.c2p(t.pos+c,!0),r=l?(e+n)/2:s.c2p(t.pos,!0);return"M"+e+","+o.c2p(t.o,!0)+"H"+r+"M"+r+","+o.c2p(t.h,!0)+"V"+o.c2p(t.l,!0)+"M"+n+","+o.c2p(t.c,!0)+"H"+r}))}}))}},67324:function(t){"use strict";t.exports=function(t,e){var n,r=t.cd,i=t.xaxis,a=t.yaxis,o=[],s=r[0].t.bPos||0;if(!1===e)for(n=0;n=t.length)return!1;if(void 0!==e[t[n]])return!1;e[t[n]]=!0}return!0}(n))for(e=0;e0;c&&(o="array");var u=n("categoryorder",o);"array"===u?(n("categoryarray"),n("ticktext")):(delete t.categoryarray,delete t.ticktext),c||"array"!==u||(e.categoryorder="trace")}}t.exports=function(t,e,n,f){function h(n,i){return r.coerce(t,e,l,n,i)}var d=s(t,e,{name:"dimensions",handleItemDefaults:u}),p=function(t,e,n,o,s){s("line.shape"),s("line.hovertemplate");var l=s("line.color",o.colorway[0]);if(i(t,"line")&&r.isArrayOrTypedArray(l)){if(l.length)return s("line.colorscale"),a(t,e,o,s,{prefix:"line.",cLetter:"c"}),l.length;e.line.color=n}return 1/0}(t,e,n,f,h);o(e,f,h),Array.isArray(d)&&d.length||(e.visible=!1),c(e,d,"values",p),h("hoveron"),h("hovertemplate"),h("arrangement"),h("bundlecolors"),h("sortpaths"),h("counts");var g={family:f.font.family,size:Math.round(f.font.size),color:f.font.color};r.coerceFont(h,"labelfont",g);var v={family:f.font.family,size:Math.round(f.font.size/1.2),color:f.font.color};r.coerceFont(h,"tickfont",v)}},94873:function(t,e,n){"use strict";t.exports={attributes:n(99506),supplyDefaults:n(14647),calc:n(28699),plot:n(45784),colorbar:{container:"line",min:"cmin",max:"cmax"},moduleType:"trace",name:"parcats",basePlotModule:n(27677),categories:["noOpacity"],meta:{}}},45460:function(t,e,n){"use strict";var r=n(39898),i=n(81684).k4,a=n(72391),o=n(30211),s=n(71828),l=s.strTranslate,c=n(91424),u=n(84267),f=n(63893);function h(t,e,n,i){var a=e._context.staticPlot,o=t.map(N.bind(0,e,n)),u=i.selectAll("g.parcatslayer").data([null]);u.enter().append("g").attr("class","parcatslayer").style("pointer-events",a?"none":"all");var h=u.selectAll("g.trace.parcats").data(o,d),y=h.enter().append("g").attr("class","trace parcats");h.attr("transform",(function(t){return l(t.x,t.y)})),y.append("g").attr("class","paths");var b=h.select("g.paths").selectAll("path.path").data((function(t){return t.paths}),d);b.attr("fill",(function(t){return t.model.color}));var 
_=b.enter().append("path").attr("class","path").attr("stroke-opacity",0).attr("fill",(function(t){return t.model.color})).attr("fill-opacity",0);w(_),b.attr("d",(function(t){return t.svgD})),_.empty()||b.sort(g),b.exit().remove(),b.on("mouseover",v).on("mouseout",m).on("click",x),y.append("g").attr("class","dimensions");var M=h.select("g.dimensions").selectAll("g.dimension").data((function(t){return t.dimensions}),d);M.enter().append("g").attr("class","dimension"),M.attr("transform",(function(t){return l(t.x,0)})),M.exit().remove();var A=M.selectAll("g.category").data((function(t){return t.categories}),d),S=A.enter().append("g").attr("class","category");A.attr("transform",(function(t){return l(0,t.y)})),S.append("rect").attr("class","catrect").attr("pointer-events","none"),A.select("rect.catrect").attr("fill","none").attr("width",(function(t){return t.width})).attr("height",(function(t){return t.height})),k(S);var E=A.selectAll("rect.bandrect").data((function(t){return t.bands}),d);E.each((function(){s.raiseToTop(this)})),E.attr("fill",(function(t){return t.color}));var D=E.enter().append("rect").attr("class","bandrect").attr("stroke-opacity",0).attr("fill",(function(t){return t.color})).attr("fill-opacity",0);E.attr("fill",(function(t){return t.color})).attr("width",(function(t){return t.width})).attr("height",(function(t){return t.height})).attr("y",(function(t){return t.y})).attr("cursor",(function(t){return"fixed"===t.parcatsViewModel.arrangement?"default":"perpendicular"===t.parcatsViewModel.arrangement?"ns-resize":"move"})),T(D),E.exit().remove(),S.append("text").attr("class","catlabel").attr("pointer-events","none");var R=e._fullLayout.paper_bgcolor;A.select("text.catlabel").attr("text-anchor",(function(t){return p(t)?"start":"end"})).attr("alignment-baseline","middle").style("text-shadow",f.makeTextShadow(R)).style("fill","rgb(0, 0, 0)").attr("x",(function(t){return p(t)?t.width+5:-5})).attr("y",(function(t){return t.height/2})).text((function(t){return t.model.categoryLabel})).each((function(t){c.font(r.select(this),t.parcatsViewModel.categorylabelfont),f.convertToTspans(r.select(this),e)})),S.append("text").attr("class","dimlabel"),A.select("text.dimlabel").attr("text-anchor","middle").attr("alignment-baseline","baseline").attr("cursor",(function(t){return"fixed"===t.parcatsViewModel.arrangement?"default":"ew-resize"})).attr("x",(function(t){return t.width/2})).attr("y",-5).text((function(t,e){return 0===e?t.parcatsViewModel.model.dimensions[t.model.dimensionInd].dimensionLabel:null})).each((function(t){c.font(r.select(this),t.parcatsViewModel.labelfont)})),A.selectAll("rect.bandrect").on("mouseover",C).on("mouseout",P),A.exit().remove(),M.call(r.behavior.drag().origin((function(t){return{x:t.x,y:0}})).on("dragstart",O).on("drag",L).on("dragend",I)),h.each((function(t){t.traceSelection=r.select(this),t.pathSelection=r.select(this).selectAll("g.paths").selectAll("path.path"),t.dimensionSelection=r.select(this).selectAll("g.dimensions").selectAll("g.dimension")})),h.exit().remove()}function d(t){return t.key}function p(t){var e=t.parcatsViewModel.dimensions.length,n=t.parcatsViewModel.dimensions[e-1].model.dimensionInd;return t.model.dimensionInd===n}function g(t,e){return t.model.rawColor>e.model.rawColor?1:t.model.rawColor"),C=r.mouse(f)[0];o.loneHover({trace:h,x:x-p.left+g.left,y:w-p.top+g.top,text:E,color:t.model.color,borderColor:"black",fontFamily:'Monaco, "Courier New", 
monospace',fontSize:10,fontColor:k,idealAlign:C1&&h.displayInd===f.dimensions.length-1?(i=c.left,a="left"):(i=c.left+c.width,a="right");var g=u.model.count,v=u.model.categoryLabel,m=g/u.parcatsViewModel.model.count,y={countLabel:g,categoryLabel:v,probabilityLabel:m.toFixed(3)},b=[];-1!==u.parcatsViewModel.hoverinfoItems.indexOf("count")&&b.push(["Count:",y.countLabel].join(" ")),-1!==u.parcatsViewModel.hoverinfoItems.indexOf("probability")&&b.push(["P("+y.categoryLabel+"):",y.probabilityLabel].join(" "));var x=b.join("
");return{trace:d,x:o*(i-e.left),y:s*(p-e.top),text:x,color:"lightgray",borderColor:"black",fontFamily:'Monaco, "Courier New", monospace',fontSize:12,fontColor:"black",idealAlign:a,hovertemplate:d.hovertemplate,hovertemplateLabels:y,eventData:[{data:d._input,fullData:d,count:g,category:v,probability:m}]}}function C(t){if(!t.parcatsViewModel.dragDimension&&-1===t.parcatsViewModel.hoverinfoItems.indexOf("skip")){if(r.mouse(this)[1]<-1)return;var e,n=t.parcatsViewModel.graphDiv,i=n._fullLayout,a=i._paperdiv.node().getBoundingClientRect(),l=t.parcatsViewModel.hoveron,c=this;"color"===l?(function(t){var e=r.select(t).datum(),n=M(e);_(n),n.each((function(){s.raiseToTop(this)})),r.select(t.parentNode).selectAll("rect.bandrect").filter((function(t){return t.color===e.color})).each((function(){s.raiseToTop(this),r.select(this).attr("stroke","black").attr("stroke-width",1.5)}))}(c),S(c,"plotly_hover",r.event)):(function(t){r.select(t.parentNode).selectAll("rect.bandrect").each((function(t){var e=M(t);_(e),e.each((function(){s.raiseToTop(this)}))})),r.select(t.parentNode).select("rect.catrect").attr("stroke","black").attr("stroke-width",2.5)}(c),A(c,"plotly_hover",r.event)),-1===t.parcatsViewModel.hoverinfoItems.indexOf("none")&&("category"===l?e=E(n,a,c):"color"===l?e=function(t,e,n){t._fullLayout._calcInverseTransform(t);var i,a,o=t._fullLayout._invScaleX,s=t._fullLayout._invScaleY,l=n.getBoundingClientRect(),c=r.select(n).datum(),f=c.categoryViewModel,h=f.parcatsViewModel,d=h.model.dimensions[f.model.dimensionInd],p=h.trace,g=l.y+l.height/2;h.dimensions.length>1&&d.displayInd===h.dimensions.length-1?(i=l.left,a="left"):(i=l.left+l.width,a="right");var v=f.model.categoryLabel,m=c.parcatsViewModel.model.count,y=0;c.categoryViewModel.bands.forEach((function(t){t.color===c.color&&(y+=t.count)}));var b=f.model.count,x=0;h.pathSelection.each((function(t){t.model.color===c.color&&(x+=t.model.count)}));var w=y/m,_=y/x,k=y/b,T={countLabel:m,categoryLabel:v,probabilityLabel:w.toFixed(3)},M=[];-1!==f.parcatsViewModel.hoverinfoItems.indexOf("count")&&M.push(["Count:",T.countLabel].join(" ")),-1!==f.parcatsViewModel.hoverinfoItems.indexOf("probability")&&(M.push("P(color \u2229 "+v+"): "+T.probabilityLabel),M.push("P("+v+" | color): "+_.toFixed(3)),M.push("P(color | "+v+"): "+k.toFixed(3)));var A=M.join("
"),S=u.mostReadable(c.color,["black","white"]);return{trace:p,x:o*(i-e.left),y:s*(g-e.top),text:A,color:c.color,borderColor:"black",fontFamily:'Monaco, "Courier New", monospace',fontColor:S,fontSize:10,idealAlign:a,hovertemplate:p.hovertemplate,hovertemplateLabels:T,eventData:[{data:p._input,fullData:p,category:v,count:m,probability:w,categorycount:b,colorcount:x,bandcolorcount:y}]}}(n,a,c):"dimension"===l&&(e=function(t,e,n){var i=[];return r.select(n.parentNode.parentNode).selectAll("g.category").select("rect.catrect").each((function(){i.push(E(t,e,this))})),i}(n,a,c)),e&&o.loneHover(e,{container:i._hoverlayer.node(),outerContainer:i._paper.node(),gd:n}))}}function P(t){var e=t.parcatsViewModel;e.dragDimension||(w(e.pathSelection),k(e.dimensionSelection.selectAll("g.category")),T(e.dimensionSelection.selectAll("g.category").selectAll("rect.bandrect")),o.loneUnhover(e.graphDiv._fullLayout._hoverlayer.node()),e.pathSelection.sort(g),-1!==e.hoverinfoItems.indexOf("skip"))||("color"===t.parcatsViewModel.hoveron?S(this,"plotly_unhover",r.event):A(this,"plotly_unhover",r.event))}function O(t){"fixed"!==t.parcatsViewModel.arrangement&&(t.dragDimensionDisplayInd=t.model.displayInd,t.initialDragDimensionDisplayInds=t.parcatsViewModel.model.dimensions.map((function(t){return t.displayInd})),t.dragHasMoved=!1,t.dragCategoryDisplayInd=null,r.select(this).selectAll("g.category").select("rect.catrect").each((function(e){var n=r.mouse(this)[0],i=r.mouse(this)[1];-2<=n&&n<=e.width+2&&-2<=i&&i<=e.height+2&&(t.dragCategoryDisplayInd=e.model.displayInd,t.initialDragCategoryDisplayInds=t.model.categories.map((function(t){return t.displayInd})),e.model.dragY=e.y,s.raiseToTop(this.parentNode),r.select(this.parentNode).selectAll("rect.bandrect").each((function(e){e.yf.y+f.height/2&&(o.model.displayInd=f.model.displayInd,f.model.displayInd=l),t.dragCategoryDisplayInd=o.model.displayInd}if(null===t.dragCategoryDisplayInd||"freeform"===t.parcatsViewModel.arrangement){a.model.dragX=r.event.x;var h=t.parcatsViewModel.dimensions[n],d=t.parcatsViewModel.dimensions[i];void 0!==h&&a.model.dragXd.x&&(a.model.displayInd=d.model.displayInd,d.model.displayInd=t.dragDimensionDisplayInd),t.dragDimensionDisplayInd=a.model.displayInd}B(t.parcatsViewModel),F(t.parcatsViewModel),z(t.parcatsViewModel),R(t.parcatsViewModel)}}function I(t){if("fixed"!==t.parcatsViewModel.arrangement&&null!==t.dragDimensionDisplayInd){r.select(this).selectAll("text").attr("font-weight","normal");var e={},n=D(t.parcatsViewModel),i=t.parcatsViewModel.model.dimensions.map((function(t){return t.displayInd})),o=t.initialDragDimensionDisplayInds.some((function(t,e){return t!==i[e]}));o&&i.forEach((function(n,r){var i=t.parcatsViewModel.model.dimensions[r].containerInd;e["dimensions["+i+"].displayindex"]=n}));var s=!1;if(null!==t.dragCategoryDisplayInd){var l=t.model.categories.map((function(t){return t.displayInd}));if(s=t.initialDragCategoryDisplayInds.some((function(t,e){return t!==l[e]}))){var c=t.model.categories.slice().sort((function(t,e){return t.displayInd-e.displayInd})),u=c.map((function(t){return t.categoryValue})),f=c.map((function(t){return 
t.categoryLabel}));e["dimensions["+t.model.containerInd+"].categoryarray"]=[u],e["dimensions["+t.model.containerInd+"].ticktext"]=[f],e["dimensions["+t.model.containerInd+"].categoryorder"]="array"}}-1===t.parcatsViewModel.hoverinfoItems.indexOf("skip")&&!t.dragHasMoved&&t.potentialClickBand&&("color"===t.parcatsViewModel.hoveron?S(t.potentialClickBand,"plotly_click",r.event.sourceEvent):A(t.potentialClickBand,"plotly_click",r.event.sourceEvent)),t.model.dragX=null,null!==t.dragCategoryDisplayInd&&(t.parcatsViewModel.dimensions[t.dragDimensionDisplayInd].categories[t.dragCategoryDisplayInd].model.dragY=null,t.dragCategoryDisplayInd=null),t.dragDimensionDisplayInd=null,t.parcatsViewModel.dragDimension=null,t.dragHasMoved=null,t.potentialClickBand=null,B(t.parcatsViewModel),F(t.parcatsViewModel),r.transition().duration(300).ease("cubic-in-out").each((function(){z(t.parcatsViewModel,!0),R(t.parcatsViewModel,!0)})).each("end",(function(){(o||s)&&a.restyle(t.parcatsViewModel.graphDiv,e,[n])}))}}function D(t){for(var e,n=t.graphDiv._fullData,r=0;r=0;s--)u+="C"+c[s]+","+(e[s+1]+r)+" "+l[s]+","+(e[s]+r)+" "+(t[s]+n[s])+","+(e[s]+r),u+="l-"+n[s]+",0 ";return u+="Z"}function F(t){var e=t.dimensions,n=t.model,r=e.map((function(t){return t.categories.map((function(t){return t.y}))})),i=t.model.dimensions.map((function(t){return t.categories.map((function(t){return t.displayInd}))})),a=t.model.dimensions.map((function(t){return t.displayInd})),o=t.dimensions.map((function(t){return t.model.dimensionInd})),s=e.map((function(t){return t.x})),l=e.map((function(t){return t.width})),c=[];for(var u in n.paths)n.paths.hasOwnProperty(u)&&c.push(n.paths[u]);function f(t){var e=t.categoryInds.map((function(t,e){return i[e][t]}));return o.map((function(t){return e[t]}))}c.sort((function(e,n){var r=f(e),i=f(n);return"backward"===t.sortpaths&&(r.reverse(),i.reverse()),r.push(e.valueInds[0]),i.push(n.valueInds[0]),t.bundlecolors&&(r.unshift(e.rawColor),i.unshift(n.rawColor)),ri?1:0}));for(var h=new Array(c.length),d=e[0].model.count,p=e[0].categories.map((function(t){return t.height})).reduce((function(t,e){return t+e})),g=0;g0?p*(m.count/d):0;for(var y,b=new Array(r.length),x=0;x1?(t.width-80-16)/(r-1):0)*i;var a,o,s,l,c,u=[],f=t.model.maxCats,h=e.categories.length,d=e.count,p=t.height-8*(f-1),g=8*(f-h)/2,v=e.categories.map((function(t){return{displayInd:t.displayInd,categoryInd:t.categoryInd}}));for(v.sort((function(t,e){return t.displayInd-e.displayInd})),c=0;c0?o.count/d*p:0,s={key:o.valueInds[0],model:o,width:16,height:a,y:null!==o.dragY?o.dragY:g,bands:[],parcatsViewModel:t},g=g+a+8,u.push(s);return{key:e.dimensionInd,x:null!==e.dragX?e.dragX:n,y:0,width:16,model:e,categories:u,parcatsViewModel:t,dragCategoryDisplayInd:null,dragDimensionDisplayInd:null,initialDragDimensionDisplayInds:null,initialDragCategoryDisplayInds:null,dragHasMoved:null,potentialClickBand:null}}t.exports=function(t,e,n,r){h(n,t,r,e)}},45784:function(t,e,n){"use strict";var r=n(45460);t.exports=function(t,e,n,i){var a=t._fullLayout,o=a._paper,s=a._size;r(t,o,e,{width:s.w,height:s.h,margin:{t:s.t,r:s.r,b:s.b,l:s.l}},n,i)}},73362:function(t,e,n){"use strict";var 
r=n(50693),i=n(13838),a=n(41940),o=n(27670).Y,s=n(1426).extendFlat,l=n(44467).templatedArray;t.exports={domain:o({name:"parcoords",trace:!0,editType:"plot"}),labelangle:{valType:"angle",dflt:0,editType:"plot"},labelside:{valType:"enumerated",values:["top","bottom"],dflt:"top",editType:"plot"},labelfont:a({editType:"plot"}),tickfont:a({editType:"plot"}),rangefont:a({editType:"plot"}),dimensions:l("dimension",{label:{valType:"string",editType:"plot"},tickvals:s({},i.tickvals,{editType:"plot"}),ticktext:s({},i.ticktext,{editType:"plot"}),tickformat:s({},i.tickformat,{editType:"plot"}),visible:{valType:"boolean",dflt:!0,editType:"plot"},range:{valType:"info_array",items:[{valType:"number",editType:"plot"},{valType:"number",editType:"plot"}],editType:"plot"},constraintrange:{valType:"info_array",freeLength:!0,dimensions:"1-2",items:[{valType:"any",editType:"plot"},{valType:"any",editType:"plot"}],editType:"plot"},multiselect:{valType:"boolean",dflt:!0,editType:"plot"},values:{valType:"data_array",editType:"calc"},editType:"calc"}),line:s({editType:"calc"},r("line",{colorscaleDflt:"Viridis",autoColorDflt:!1,editTypeOverride:"calc"})),unselected:{line:{color:{valType:"color",dflt:"#7f7f7f",editType:"plot"},opacity:{valType:"number",min:0,max:1,dflt:"auto",editType:"plot"},editType:"plot"},editType:"plot"}}},57920:function(t,e,n){"use strict";var r=n(25706),i=n(39898),a=n(28984).keyFun,o=n(28984).repeat,s=n(71828).sorterAsc,l=n(71828).strTranslate,c=r.bar.snapRatio;function u(t,e){return t*(1-c)+e*c}var f=r.bar.snapClose;function h(t,e){return t*(1-f)+e*f}function d(t,e,n,r){if(function(t,e){for(var n=0;n=e[n][0]&&t<=e[n][1])return!0;return!1}(n,r))return n;var i=t?-1:1,a=0,o=e.length-1;if(i<0){var s=a;a=o,o=s}for(var l=e[a],c=l,f=a;i*fe){h=n;break}}if(a=u,isNaN(a)&&(a=isNaN(f)||isNaN(h)?isNaN(f)?h:f:e-c[f][1]t[1]+n||e=.9*t[1]+.1*t[0]?"n":e<=.9*t[0]+.1*t[1]?"s":"ns"}(p,e);g&&(o.interval=l[a],o.intervalPix=p,o.region=g)}}if(t.ordinal&&!o.region){var v=t.unitTickvals,y=t.unitToPaddedPx.invert(e);for(n=0;n=b[0]&&y<=b[1]){o.clickableOrdinalRange=b;break}}}return o}function _(t,e){i.event.sourceEvent.stopPropagation();var n=e.height-i.mouse(t)[1]-2*r.verticalPadding,a=e.brush.svgBrush;a.wasDragged=!0,a._dragging=!0,a.grabbingBar?a.newExtent=[n-a.grabPoint,n+a.barLength-a.grabPoint].map(e.unitToPaddedPx.invert):a.newExtent=[a.startExtent,e.unitToPaddedPx.invert(n)].sort(s),e.brush.filterSpecified=!0,a.extent=a.stayingIntervals.concat([a.newExtent]),a.brushCallback(e),x(t.parentNode)}function k(t,e){var n=w(e,e.height-i.mouse(t)[1]-2*r.verticalPadding),a="crosshair";n.clickableOrdinalRange?a="pointer":n.region&&(a=n.region+"-resize"),i.select(document.body).style("cursor",a)}function T(t){t.on("mousemove",(function(t){i.event.preventDefault(),t.parent.inBrushDrag||k(this,t)})).on("mouseleave",(function(t){t.parent.inBrushDrag||y()})).call(i.behavior.drag().on("dragstart",(function(t){!function(t,e){i.event.sourceEvent.stopPropagation();var n=e.height-i.mouse(t)[1]-2*r.verticalPadding,a=e.unitToPaddedPx.invert(n),o=e.brush,s=w(e,n),l=s.interval,c=o.svgBrush;if(c.wasDragged=!1,c.grabbingBar="ns"===s.region,c.grabbingBar){var u=l.map(e.unitToPaddedPx);c.grabPoint=n-u[0]-r.verticalPadding,c.barLength=u[1]-u[0]}c.clickableOrdinalRange=s.clickableOrdinalRange,c.stayingIntervals=e.multiselect&&o.filterSpecified?o.filter.getConsolidated():[],l&&(c.stayingIntervals=c.stayingIntervals.filter((function(t){return 
t[0]!==l[0]&&t[1]!==l[1]}))),c.startExtent=s.region?l["s"===s.region?1:0]:a,e.parent.inBrushDrag=!0,c.brushStartCallback()}(this,t)})).on("drag",(function(t){_(this,t)})).on("dragend",(function(t){!function(t,e){var n=e.brush,r=n.filter,a=n.svgBrush;a._dragging||(k(t,e),_(t,e),e.brush.svgBrush.wasDragged=!1),a._dragging=!1,i.event.sourceEvent.stopPropagation();var o=a.grabbingBar;if(a.grabbingBar=!1,a.grabLocation=void 0,e.parent.inBrushDrag=!1,y(),!a.wasDragged)return a.wasDragged=void 0,a.clickableOrdinalRange?n.filterSpecified&&e.multiselect?a.extent.push(a.clickableOrdinalRange):(a.extent=[a.clickableOrdinalRange],n.filterSpecified=!0):o?(a.extent=a.stayingIntervals,0===a.extent.length&&A(n)):A(n),a.brushCallback(e),x(t.parentNode),void a.brushEndCallback(n.filterSpecified?r.getConsolidated():[]);var s=function(){r.set(r.getConsolidated())};if(e.ordinal){var l=e.unitTickvals;l[l.length-1]a.newExtent[0];a.extent=a.stayingIntervals.concat(c?[a.newExtent]:[]),a.extent.length||A(n),a.brushCallback(e),c?x(t.parentNode,s):(s(),x(t.parentNode))}else s();a.brushEndCallback(n.filterSpecified?r.getConsolidated():[])}(this,t)})))}function M(t,e){return t[0]-e[0]}function A(t){t.filterSpecified=!1,t.svgBrush.extent=[[-1/0,1/0]]}function S(t){for(var e,n=t.slice(),r=[],i=n.shift();i;){for(e=i.slice();(i=n.shift())&&i[0]<=e[1];)e[1]=Math.max(e[1],i[1]);r.push(e)}return 1===r.length&&r[0][0]>r[0][1]&&(r=[]),r}t.exports={makeBrush:function(t,e,n,r,i,a){var o,l=function(){var t,e,n=[];return{set:function(r){1===(n=r.map((function(t){return t.slice().sort(s)})).sort(M)).length&&n[0][0]===-1/0&&n[0][1]===1/0&&(n=[[0,-1]]),t=S(n),e=n.reduce((function(t,e){return[Math.min(t[0],e[0]),Math.max(t[1],e[1])]}),[1/0,-1/0])},get:function(){return n.slice()},getConsolidated:function(){return t},getBounds:function(){return e}}}();return l.set(n),{filter:l,filterSpecified:e,svgBrush:{extent:[],brushStartCallback:r,brushCallback:(o=i,function(t){var e=t.brush,n=function(t){return t.svgBrush.extent.map((function(t){return t.slice()}))}(e),r=n.slice();e.filter.set(r),o()}),brushEndCallback:a}}},ensureAxisBrush:function(t,e,n){var i=t.selectAll("."+r.cn.axisBrush).data(o,a);i.enter().append("g").classed(r.cn.axisBrush,!0),function(t,e,n){var i=n._context.staticPlot,a=t.selectAll(".background").data(o);a.enter().append("rect").classed("background",!0).call(p).call(g).style("pointer-events",i?"none":"auto").attr("transform",l(0,r.verticalPadding)),a.call(T).attr("height",(function(t){return t.height-r.verticalPadding}));var s=t.selectAll(".highlight-shadow").data(o);s.enter().append("line").classed("highlight-shadow",!0).attr("x",-r.bar.width/2).attr("stroke-width",r.bar.width+r.bar.strokeWidth).attr("stroke",e).attr("opacity",r.bar.strokeOpacity).attr("stroke-linecap","butt"),s.attr("y1",(function(t){return t.height})).call(b);var c=t.selectAll(".highlight").data(o);c.enter().append("line").classed("highlight",!0).attr("x",-r.bar.width/2).attr("stroke-width",r.bar.width-r.bar.strokeWidth).attr("stroke",r.bar.fillColor).attr("opacity",r.bar.fillOpacity).attr("stroke-linecap","butt"),c.attr("y1",(function(t){return t.height})).call(b)}(i,e,n)},cleanRanges:function(t,e){if(Array.isArray(t[0])?(t=t.map((function(t){return t.sort(s)})),t=e.multiselect?S(t.sort(M)):[t[0]]):t=[t.sort(s)],e.tickvals){var n=e.tickvals.slice().sort(s);if(!(t=t.map((function(t){var e=[d(0,n,t[0],[]),d(1,n,t[1],[])];if(e[1]>e[0])return e})).filter((function(t){return t}))).length)return}return t.length>1?t:t[0]}}},71791:function(t,e,n){"use 
strict";t.exports={attributes:n(73362),supplyDefaults:n(3633),calc:n(24639),colorbar:{container:"line",min:"cmin",max:"cmax"},moduleType:"trace",name:"parcoords",basePlotModule:n(49351),categories:["gl","regl","noOpacity","noHover"],meta:{}}},49351:function(t,e,n){"use strict";var r=n(39898),i=n(27659).a0,a=n(21341),o=n(77922);e.name="parcoords",e.plot=function(t){var e=i(t.calcdata,"parcoords")[0];e.length&&a(t,e)},e.clean=function(t,e,n,r){var i=r._has&&r._has("parcoords"),a=e._has&&e._has("parcoords");i&&!a&&(r._paperdiv.selectAll(".parcoords").remove(),r._glimages.selectAll("*").remove())},e.toSVG=function(t){var e=t._fullLayout._glimages,n=r.select(t).selectAll(".svg-container");n.filter((function(t,e){return e===n.size()-1})).selectAll(".gl-canvas-context, .gl-canvas-focus").each((function(){var t=this,n=t.toDataURL("image/png");e.append("svg:image").attr({xmlns:o.svg,"xlink:href":n,preserveAspectRatio:"none",x:0,y:0,width:t.style.width,height:t.style.height})})),window.setTimeout((function(){r.selectAll("#filterBarPattern").attr("id","filterBarPattern")}),60)}},24639:function(t,e,n){"use strict";var r=n(71828).isArrayOrTypedArray,i=n(21081),a=n(28984).wrap;t.exports=function(t,e){var n,o;return i.hasColorscale(e,"line")&&r(e.line.color)?(n=e.line.color,o=i.extractOpts(e.line).colorscale,i.calc(t,e,{vals:n,containerStr:"line",cLetter:"c"})):(n=function(t){for(var e=new Array(t),n=0;nf&&(r.log("parcoords traces support up to "+f+" dimensions at the moment"),p.splice(f));var g=s(t,e,{name:"dimensions",layout:l,handleItemDefaults:d}),v=function(t,e,n,o,s){var l=s("line.color",n);if(i(t,"line")&&r.isArrayOrTypedArray(l)){if(l.length)return s("line.colorscale"),a(t,e,o,s,{prefix:"line.",cLetter:"c"}),l.length;e.line.color=n}return 1/0}(t,e,n,l,u);o(e,l,u),Array.isArray(g)&&g.length||(e.visible=!1),h(e,g,"values",v);var m={family:l.font.family,size:Math.round(l.font.size/1.2),color:l.font.color};r.coerceFont(u,"labelfont",m),r.coerceFont(u,"tickfont",m),r.coerceFont(u,"rangefont",m),u("labelangle"),u("labelside"),u("unselected.line.color"),u("unselected.line.opacity")}},1602:function(t,e,n){"use strict";var r=n(71828).isTypedArray;e.convertTypedArray=function(t){return r(t)?Array.prototype.slice.call(t):t},e.isOrdinal=function(t){return!!t.tickvals},e.isVisible=function(t){return t.visible||!("visible"in t)}},67618:function(t,e,n){"use strict";var r=n(71791);r.plot=n(21341),t.exports=r},83398:function(t,e,n){"use strict";var r=n(56068),i=r(["precision highp float;\n#define GLSLIFY 1\n\nvarying vec4 fragColor;\n\nattribute vec4 p01_04, p05_08, p09_12, p13_16,\n p17_20, p21_24, p25_28, p29_32,\n p33_36, p37_40, p41_44, p45_48,\n p49_52, p53_56, p57_60, colors;\n\nuniform mat4 dim0A, dim1A, dim0B, dim1B, dim0C, dim1C, dim0D, dim1D,\n loA, hiA, loB, hiB, loC, hiC, loD, hiD;\n\nuniform vec2 resolution, viewBoxPos, viewBoxSize;\nuniform float maskHeight;\nuniform float drwLayer; // 0: context, 1: focus, 2: pick\nuniform vec4 contextColor;\nuniform sampler2D maskTexture, palette;\n\nbool isPick = (drwLayer > 1.5);\nbool isContext = (drwLayer < 0.5);\n\nconst vec4 ZEROS = vec4(0.0, 0.0, 0.0, 0.0);\nconst vec4 UNITS = vec4(1.0, 1.0, 1.0, 1.0);\n\nfloat val(mat4 p, mat4 v) {\n return dot(matrixCompMult(p, v) * UNITS, UNITS);\n}\n\nfloat axisY(float ratio, mat4 A, mat4 B, mat4 C, mat4 D) {\n float y1 = val(A, dim0A) + val(B, dim0B) + val(C, dim0C) + val(D, dim0D);\n float y2 = val(A, dim1A) + val(B, dim1B) + val(C, dim1C) + val(D, dim1D);\n return y1 * (1.0 - ratio) + y2 * ratio;\n}\n\nint iMod(int 
a, int b) {\n return a - b * (a / b);\n}\n\nbool fOutside(float p, float lo, float hi) {\n return (lo < hi) && (lo > p || p > hi);\n}\n\nbool vOutside(vec4 p, vec4 lo, vec4 hi) {\n return (\n fOutside(p[0], lo[0], hi[0]) ||\n fOutside(p[1], lo[1], hi[1]) ||\n fOutside(p[2], lo[2], hi[2]) ||\n fOutside(p[3], lo[3], hi[3])\n );\n}\n\nbool mOutside(mat4 p, mat4 lo, mat4 hi) {\n return (\n vOutside(p[0], lo[0], hi[0]) ||\n vOutside(p[1], lo[1], hi[1]) ||\n vOutside(p[2], lo[2], hi[2]) ||\n vOutside(p[3], lo[3], hi[3])\n );\n}\n\nbool outsideBoundingBox(mat4 A, mat4 B, mat4 C, mat4 D) {\n return mOutside(A, loA, hiA) ||\n mOutside(B, loB, hiB) ||\n mOutside(C, loC, hiC) ||\n mOutside(D, loD, hiD);\n}\n\nbool outsideRasterMask(mat4 A, mat4 B, mat4 C, mat4 D) {\n mat4 pnts[4];\n pnts[0] = A;\n pnts[1] = B;\n pnts[2] = C;\n pnts[3] = D;\n\n for(int i = 0; i < 4; ++i) {\n for(int j = 0; j < 4; ++j) {\n for(int k = 0; k < 4; ++k) {\n if(0 == iMod(\n int(255.0 * texture2D(maskTexture,\n vec2(\n (float(i * 2 + j / 2) + 0.5) / 8.0,\n (pnts[i][j][k] * (maskHeight - 1.0) + 1.0) / maskHeight\n ))[3]\n ) / int(pow(2.0, float(iMod(j * 4 + k, 8)))),\n 2\n )) return true;\n }\n }\n }\n return false;\n}\n\nvec4 position(bool isContext, float v, mat4 A, mat4 B, mat4 C, mat4 D) {\n float x = 0.5 * sign(v) + 0.5;\n float y = axisY(x, A, B, C, D);\n float z = 1.0 - abs(v);\n\n z += isContext ? 0.0 : 2.0 * float(\n outsideBoundingBox(A, B, C, D) ||\n outsideRasterMask(A, B, C, D)\n );\n\n return vec4(\n 2.0 * (vec2(x, y) * viewBoxSize + viewBoxPos) / resolution - 1.0,\n z,\n 1.0\n );\n}\n\nvoid main() {\n mat4 A = mat4(p01_04, p05_08, p09_12, p13_16);\n mat4 B = mat4(p17_20, p21_24, p25_28, p29_32);\n mat4 C = mat4(p33_36, p37_40, p41_44, p45_48);\n mat4 D = mat4(p49_52, p53_56, p57_60, ZEROS);\n\n float v = colors[3];\n\n gl_Position = position(isContext, v, A, B, C, D);\n\n fragColor =\n isContext ? vec4(contextColor) :\n isPick ? 
vec4(colors.rgb, 1.0) : texture2D(palette, vec2(abs(v), 0.5));\n}\n"]),a=r(["precision highp float;\n#define GLSLIFY 1\n\nvarying vec4 fragColor;\n\nvoid main() {\n gl_FragColor = fragColor;\n}\n"]),o=n(25706).maxDimensionCount,s=n(71828),l=1e-6,c=2048,u=new Uint8Array(4),f=new Uint8Array(4),h={shape:[256,1],format:"rgba",type:"uint8",mag:"nearest",min:"nearest"};function d(t,e,n,r,i){var a=t._gl;a.enable(a.SCISSOR_TEST),a.scissor(e,n,r,i),t.clear({color:[0,0,0,0],depth:1})}function p(t,e,n,r,i,a){var o=a.key;n.drawCompleted||(function(t){t.read({x:0,y:0,width:1,height:1,data:u})}(t),n.drawCompleted=!0),function s(l){var c=Math.min(r,i-l*r);0===l&&(window.cancelAnimationFrame(n.currentRafs[o]),delete n.currentRafs[o],d(t,a.scissorX,a.scissorY,a.scissorWidth,a.viewBoxSize[1])),n.clearOnly||(a.count=2*c,a.offset=2*l*r,e(a),l*r+c>>8*e)%256/255}function m(t,e,n){for(var r=new Array(8*e),i=0,a=0;af&&(f=t[i].dim1.canvasX,o=i);0===s&&d(M,0,0,n.canvasWidth,n.canvasHeight);var h=function(t){var e,n,r,i=[[],[]];for(r=0;r<64;r++){var a=!t&&ro._length&&(S=S.slice(0,o._length));var C,P=o.tickvals;function O(t,e){return{val:t,text:C[e]}}function L(t,e){return t.val-e.val}if(Array.isArray(P)&&P.length){C=o.ticktext,Array.isArray(C)&&C.length?C.length>P.length?C=C.slice(0,P.length):P.length>C.length&&(P=P.slice(0,C.length)):C=P.map(a(o.tickformat));for(var I=1;I=n||l>=i)return;var c=t.lineLayer.readPixel(s,i-1-l),u=0!==c[3],f=u?c[2]+256*(c[1]+256*c[0]):null,h={x:s,y:l,clientX:e.clientX,clientY:e.clientY,dataIndex:t.model.key,curveNumber:f};f!==N&&(u?a.hover(h):a.unhover&&a.unhover(h),N=f)}})),z.style("opacity",(function(t){return t.pick?0:1})),d.style("background","rgba(255, 255, 255, 0)");var j=d.selectAll("."+y.cn.parcoords).data(S,p);j.exit().remove(),j.enter().append("g").classed(y.cn.parcoords,!0).style("shape-rendering","crispEdges").style("pointer-events","none"),j.attr("transform",(function(t){return c(t.model.translateX,t.model.translateY)}));var F=j.selectAll("."+y.cn.parcoordsControlView).data(g,p);F.enter().append("g").classed(y.cn.parcoordsControlView,!0),F.attr("transform",(function(t){return c(t.model.pad.l,t.model.pad.t)}));var B=F.selectAll("."+y.cn.yAxis).data((function(t){return t.dimensions}),p);B.enter().append("g").classed(y.cn.yAxis,!0),F.each((function(t){I(B,t,k)})),z.each((function(t){if(t.viewModel){!t.lineLayer||a?t.lineLayer=x(this,t):t.lineLayer.update(t),(t.key||0===t.key)&&(t.viewModel[t.key]=t.lineLayer);var e=!t.context||a;t.lineLayer.render(t.viewModel.panels,e)}})),B.attr("transform",(function(t){return c(t.xScale(t.xIndex),0)})),B.call(r.behavior.drag().origin((function(t){return t})).on("drag",(function(t){var e=t.parent;A.linePickActive(!1),t.x=Math.max(-y.overdrag,Math.min(t.model.width+y.overdrag,r.event.x)),t.canvasX=t.x*t.model.canvasPixelRatio,B.sort((function(t,e){return t.x-e.x})).each((function(e,n){e.xIndex=n,e.x=t===e?e.x:e.xScale(e.xIndex),e.canvasX=e.x*e.model.canvasPixelRatio})),I(B,e,k),B.filter((function(e){return 0!==Math.abs(t.xIndex-e.xIndex)})).attr("transform",(function(t){return c(t.xScale(t.xIndex),0)})),r.select(this).attr("transform",c(t.x,0)),B.each((function(n,r,i){i===t.parent.key&&(e.dimensions[r]=n)})),e.contextLayer&&e.contextLayer.render(e.panels,!1,!E(e)),e.focusLayer.render&&e.focusLayer.render(e.panels)})).on("dragend",(function(t){var e=t.parent;t.x=t.xScale(t.xIndex),t.canvasX=t.x*t.model.canvasPixelRatio,I(B,e,k),r.select(this).attr("transform",(function(t){return 
c(t.x,0)})),e.contextLayer&&e.contextLayer.render(e.panels,!1,!E(e)),e.focusLayer&&e.focusLayer.render(e.panels),e.pickLayer&&e.pickLayer.render(e.panels,!0),A.linePickActive(!0),a&&a.axesMoved&&a.axesMoved(e.key,e.dimensions.map((function(t){return t.crossfilterDimensionIndex})))}))),B.exit().remove();var U=B.selectAll("."+y.cn.axisOverlays).data(g,p);U.enter().append("g").classed(y.cn.axisOverlays,!0),U.selectAll("."+y.cn.axis).remove();var H=U.selectAll("."+y.cn.axis).data(g,p);H.enter().append("g").classed(y.cn.axis,!0),H.each((function(t){var e=t.model.height/t.model.tickDistance,n=t.domainScale,i=n.domain();r.select(this).call(r.svg.axis().orient("left").tickSize(4).outerTickSize(2).ticks(e,t.tickFormat).tickValues(t.ordinal?i:null).tickFormat((function(e){return m.isOrdinal(t)?e:D(t.model.dimensions[t.visibleIndex],e)})).scale(n)),f.font(H.selectAll("text"),t.model.tickFont)})),H.selectAll(".domain, .tick>line").attr("fill","none").attr("stroke","black").attr("stroke-opacity",.25).attr("stroke-width","1px"),H.selectAll("text").style("text-shadow",u.makeTextShadow(M)).style("cursor","default");var V=U.selectAll("."+y.cn.axisHeading).data(g,p);V.enter().append("g").classed(y.cn.axisHeading,!0);var q=V.selectAll("."+y.cn.axisTitle).data(g,p);q.enter().append("text").classed(y.cn.axisTitle,!0).attr("text-anchor","middle").style("cursor","ew-resize").style("pointer-events",o?"none":"auto"),q.text((function(t){return t.label})).each((function(e){var n=r.select(this);f.font(n,e.model.labelFont),u.convertToTspans(n,t)})).attr("transform",(function(t){var e=L(t.model.labelAngle,t.model.labelSide),n=y.axisTitleOffset;return(e.dir>0?"":c(0,2*n+t.model.height))+l(e.degrees)+c(-n*e.dx,-n*e.dy)})).attr("text-anchor",(function(t){var e=L(t.model.labelAngle,t.model.labelSide);return 2*Math.abs(e.dx)>Math.abs(e.dy)?e.dir*e.dx<0?"start":"end":"middle"}));var G=U.selectAll("."+y.cn.axisExtent).data(g,p);G.enter().append("g").classed(y.cn.axisExtent,!0);var W=G.selectAll("."+y.cn.axisExtentTop).data(g,p);W.enter().append("g").classed(y.cn.axisExtentTop,!0),W.attr("transform",c(0,-y.axisExtentOffset));var Y=W.selectAll("."+y.cn.axisExtentTopText).data(g,p);Y.enter().append("text").classed(y.cn.axisExtentTopText,!0).call(O),Y.text((function(t){return R(t,!0)})).each((function(t){f.font(r.select(this),t.model.rangeFont)}));var $=G.selectAll("."+y.cn.axisExtentBottom).data(g,p);$.enter().append("g").classed(y.cn.axisExtentBottom,!0),$.attr("transform",(function(t){return c(0,t.model.height+y.axisExtentOffset)}));var X=$.selectAll("."+y.cn.axisExtentBottomText).data(g,p);X.enter().append("text").classed(y.cn.axisExtentBottomText,!0).attr("dy","0.75em").call(O),X.text((function(t){return R(t,!1)})).each((function(t){f.font(r.select(this),t.model.rangeFont)})),b.ensureAxisBrush(U,M,t)}},21341:function(t,e,n){"use strict";var r=n(17171),i=n(79749),a=n(1602).isVisible,o={};function s(t,e,n){var r=e.indexOf(n),i=t.indexOf(r);return-1===i&&(i+=e.length),i}(t.exports=function(t,e){var n=t._fullLayout;if(i(t,[],o)){var l={},c={},u={},f={},h=n._size;e.forEach((function(e,n){var r=e[0].trace;u[n]=r.index;var i=f[n]=r._fullInput.index;l[n]=t.data[i].dimensions,c[n]=t.data[i].dimensions.slice()})),r(t,e,{width:h.w,height:h.h,margin:{t:h.t,r:h.r,b:h.b,l:h.l}},{filterChanged:function(e,r,i){var a=c[e][r],o=i.map((function(t){return t.slice()})),s="dimensions["+r+"].constraintrange",l=n._tracePreGUI[t._fullData[u[e]]._fullInput.uid];if(void 0===l[s]){var h=a.constraintrange;l[s]=h||null}var 
d=t._fullData[u[e]].dimensions[r];o.length?(1===o.length&&(o=o[0]),a.constraintrange=o,d.constraintrange=o.slice(),o=[o]):(delete a.constraintrange,delete d.constraintrange,o=null);var p={};p[s]=o,t.emit("plotly_restyle",[p,[f[e]]])},hover:function(e){t.emit("plotly_hover",e)},unhover:function(e){t.emit("plotly_unhover",e)},axesMoved:function(e,n){var r=function(t,e){return function(n,r){return s(t,e,n)-s(t,e,r)}}(n,c[e].filter(a));l[e].sort(r),c[e].filter((function(t){return!a(t)})).sort((function(t){return c[e].indexOf(t)})).forEach((function(t){l[e].splice(l[e].indexOf(t),1),l[e].splice(c[e].indexOf(t),0,t)})),t.emit("plotly_restyle",[{dimensions:[l[e]]},[f[e]]])}})}}).reglPrecompiled=o},34e3:function(t,e,n){"use strict";var r=n(9012),i=n(27670).Y,a=n(41940),o=n(22399),s=n(5386).fF,l=n(5386).si,c=n(1426).extendFlat,u=n(79952).u,f=a({editType:"plot",arrayOk:!0,colorEditType:"plot"});t.exports={labels:{valType:"data_array",editType:"calc"},label0:{valType:"number",dflt:0,editType:"calc"},dlabel:{valType:"number",dflt:1,editType:"calc"},values:{valType:"data_array",editType:"calc"},marker:{colors:{valType:"data_array",editType:"calc"},line:{color:{valType:"color",dflt:o.defaultLine,arrayOk:!0,editType:"style"},width:{valType:"number",min:0,dflt:0,arrayOk:!0,editType:"style"},editType:"calc"},pattern:u,editType:"calc"},text:{valType:"data_array",editType:"plot"},hovertext:{valType:"string",dflt:"",arrayOk:!0,editType:"style"},scalegroup:{valType:"string",dflt:"",editType:"calc"},textinfo:{valType:"flaglist",flags:["label","text","value","percent"],extras:["none"],editType:"calc"},hoverinfo:c({},r.hoverinfo,{flags:["label","text","value","percent","name"]}),hovertemplate:s({},{keys:["label","color","value","percent","text"]}),texttemplate:l({editType:"plot"},{keys:["label","color","value","percent","text"]}),textposition:{valType:"enumerated",values:["inside","outside","auto","none"],dflt:"auto",arrayOk:!0,editType:"plot"},textfont:c({},f,{}),insidetextorientation:{valType:"enumerated",values:["horizontal","radial","tangential","auto"],dflt:"auto",editType:"plot"},insidetextfont:c({},f,{}),outsidetextfont:c({},f,{}),automargin:{valType:"boolean",dflt:!1,editType:"plot"},title:{text:{valType:"string",dflt:"",editType:"plot"},font:c({},f,{}),position:{valType:"enumerated",values:["top left","top center","top right","middle center","bottom left","bottom center","bottom right"],editType:"plot"},editType:"plot"},domain:i({name:"pie",trace:!0,editType:"calc"}),hole:{valType:"number",min:0,max:1,dflt:0,editType:"calc"},sort:{valType:"boolean",dflt:!0,editType:"calc"},direction:{valType:"enumerated",values:["clockwise","counterclockwise"],dflt:"counterclockwise",editType:"calc"},rotation:{valType:"angle",dflt:0,editType:"calc"},pull:{valType:"number",min:0,max:1,dflt:0,arrayOk:!0,editType:"calc"},_deprecated:{title:{valType:"string",dflt:"",editType:"calc"},titlefont:c({},f,{}),titleposition:{valType:"enumerated",values:["top left","top center","top right","middle center","bottom left","bottom center","bottom right"],editType:"calc"}}}},13584:function(t,e,n){"use strict";var r=n(74875);e.name="pie",e.plot=function(t,n,i,a){r.plotBasePlot(e.name,t,n,i,a)},e.clean=function(t,n,i,a){r.cleanBasePlot(e.name,t,n,i,a)}},32354:function(t,e,n){"use strict";var r=n(92770),i=n(84267),a=n(7901),o={};function s(t){return function(e,n){return!!e&&!!(e=i(e)).isValid()&&(e=a.addOpacity(e,e.getAlpha()),t[n]||(t[n]=e),e)}}function l(t,e){var 
n,r=JSON.stringify(t),a=e[r];if(!a){for(a=t.slice(),n=0;n=0})),("funnelarea"===e.type?m:e.sort)&&a.sort((function(t,e){return e.v-t.v})),a[0]&&(a[0].vTotal=v),a},crossTraceCalc:function(t,e){var n=(e||{}).type;n||(n="pie");var r=t._fullLayout,i=t.calcdata,a=r[n+"colorway"],s=r["_"+n+"colormap"];r["extend"+n+"colors"]&&(a=l(a,o));for(var c=0,u=0;u0){s=!0;break}}s||(o=0)}return{hasLabels:n,hasValues:a,len:o}}function u(t,e,n,r,i){r("marker.line.width")&&r("marker.line.color",i?void 0:n.paper_bgcolor);var a=r("marker.colors");l(r,"marker.pattern",a),t.marker&&!e.marker.pattern.fgcolor&&(e.marker.pattern.fgcolor=t.marker.colors),e.marker.pattern.bgcolor||(e.marker.pattern.bgcolor=n.paper_bgcolor)}t.exports={handleLabelsAndValues:c,handleMarkerDefaults:u,supplyDefaults:function(t,e,n,r){function l(n,r){return i.coerce(t,e,a,n,r)}var f=c(l("labels"),l("values")),h=f.len;if(e._hasLabels=f.hasLabels,e._hasValues=f.hasValues,!e._hasLabels&&e._hasValues&&(l("label0"),l("dlabel")),h){e._length=h,u(t,e,r,l,!0),l("scalegroup");var d,p=l("text"),g=l("texttemplate");if(g||(d=l("textinfo",Array.isArray(p)?"text+percent":"percent")),l("hovertext"),l("hovertemplate"),g||d&&"none"!==d){var v=l("textposition");s(t,e,r,l,v,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),(Array.isArray(v)||"auto"===v||"outside"===v)&&l("automargin"),("inside"===v||"auto"===v||Array.isArray(v))&&l("insidetextorientation")}o(e,r,l);var m=l("hole");if(l("title.text")){var y=l("title.position",m?"middle center":"top center");m||"middle center"!==y||(e.title.position="top center"),i.coerceFont(l,"title.font",r.font)}l("sort"),l("direction"),l("rotation"),l("pull")}else e.visible=!1}}},20007:function(t,e,n){"use strict";var r=n(23469).appendArrayMultiPointValues;t.exports=function(t,e){var n={curveNumber:e.index,pointNumbers:t.pts,data:e._input,fullData:e,label:t.label,color:t.color,value:t.v,percent:t.percent,text:t.text,bbox:t.bbox,v:t.v};return 1===t.pts.length&&(n.pointNumber=n.i=t.pts[0]),r(n,e,t.pts),"funnelarea"===e.type&&(delete n.v,delete n.i),n}},22209:function(t,e,n){"use strict";var r=n(91424),i=n(7901);t.exports=function(t,e,n,a){var o=n.marker.pattern;o&&o.shape?r.pointStyle(t,n,a,e):i.fill(t,e.color)}},53581:function(t,e,n){"use strict";var r=n(71828);function i(t){return-1!==t.indexOf("e")?t.replace(/[.]?0+e/,"e"):-1!==t.indexOf(".")?t.replace(/[.]?0+$/,""):t}e.formatPiePercent=function(t,e){var n=i((100*t).toPrecision(3));return r.numSeparate(n,e)+"%"},e.formatPieValue=function(t,e){var n=i(t.toPrecision(10));return r.numSeparate(n,e)},e.getFirstFilled=function(t,e){if(Array.isArray(t))for(var n=0;n"),name:f.hovertemplate||-1!==h.indexOf("name")?f.name:void 0,idealAlign:t.pxmid[0]<0?"left":"right",color:v.castOption(w.bgcolor,t.pts)||t.color,borderColor:v.castOption(w.bordercolor,t.pts),fontFamily:v.castOption(_.family,t.pts),fontSize:v.castOption(_.size,t.pts),fontColor:v.castOption(_.color,t.pts),nameLength:v.castOption(w.namelength,t.pts),textAlign:v.castOption(w.align,t.pts),hovertemplate:v.castOption(f.hovertemplate,t.pts),hovertemplateLabels:t,eventData:[m(t,f)]},{container:n._hoverlayer.node(),outerContainer:n._paper.node(),gd:e,inOut_bbox:k}),t.bbox=k[0],c._hasHoverLabel=!0}c._hasHoverEvent=!0,e.emit("plotly_hover",{points:[m(t,f)],event:r.event})}})),t.on("mouseout",(function(t){var 
n=e._fullLayout,i=e._fullData[c.index],o=r.select(this).datum();c._hasHoverEvent&&(t.originalEvent=r.event,e.emit("plotly_unhover",{points:[m(o,i)],event:r.event}),c._hasHoverEvent=!1),c._hasHoverLabel&&(a.loneUnhover(n._hoverlayer.node()),c._hasHoverLabel=!1)})),t.on("click",(function(t){var n=e._fullLayout,i=e._fullData[c.index];e._dragging||!1===n.hovermode||(e._hoverdata=[m(t,i)],a.click(e,r.event))}))}function x(t,e,n){var r=v.castOption(t.insidetextfont.color,e.pts);!r&&t._input.textfont&&(r=v.castOption(t._input.textfont.color,e.pts));var i=v.castOption(t.insidetextfont.family,e.pts)||v.castOption(t.textfont.family,e.pts)||n.family,a=v.castOption(t.insidetextfont.size,e.pts)||v.castOption(t.textfont.size,e.pts)||n.size;return{color:r||o.contrast(e.color),family:i,size:a}}function w(t,e){for(var n,r,i=0;ie&&e>r||n=-4;v-=2)m(Math.PI*v,"tan");for(v=4;v>=-4;v-=2)m(Math.PI*(v+1),"tan")}if(f||d){for(v=4;v>=-4;v-=2)m(Math.PI*(v+1.5),"rad");for(v=4;v>=-4;v-=2)m(Math.PI*(v+.5),"rad")}}if(s||p||f){var y=Math.sqrt(t.width*t.width+t.height*t.height);if((a={scale:i*r*2/y,rCenter:1-i,rotate:0}).textPosAngle=(e.startangle+e.stopangle)/2,a.scale>=1)return a;g.push(a)}(p||d)&&((a=k(t,r,o,l,c)).textPosAngle=(e.startangle+e.stopangle)/2,g.push(a)),(p||h)&&((a=T(t,r,o,l,c)).textPosAngle=(e.startangle+e.stopangle)/2,g.push(a));for(var b=0,x=0,w=0;w=1)break}return g[b]}function k(t,e,n,r,i){e=Math.max(0,e-2*g);var a=t.width/t.height,o=S(a,r,e,n);return{scale:2*o/t.height,rCenter:M(a,o/e),rotate:A(i)}}function T(t,e,n,r,i){e=Math.max(0,e-2*g);var a=t.height/t.width,o=S(a,r,e,n);return{scale:2*o/t.width,rCenter:M(a,o/e),rotate:A(i+Math.PI/2)}}function M(t,e){return Math.cos(e)-t*e}function A(t){return(180/Math.PI*t+720)%180-90}function S(t,e,n,r){var i=t+1/(2*Math.tan(e));return n*Math.min(1/(Math.sqrt(i*i+.5)+i),r/(Math.sqrt(t*t+r/2)+t))}function E(t,e){return t.v!==e.vTotal||e.trace.hole?Math.min(1/(1+1/Math.sin(t.halfangle)),t.ring/2):1}function C(t,e){var n=e.pxmid[0],r=e.pxmid[1],i=t.width/2,a=t.height/2;return n<0&&(i*=-1),r<0&&(a*=-1),{scale:1,rCenter:1,rotate:0,x:i+Math.abs(a)*(i>0?1:-1)/2,y:a/(1+n*n/(r*r)),outside:!0}}function P(t,e){var n,r,i,a=t.trace,o={x:t.cx,y:t.cy},s={tx:0,ty:0};s.ty+=a.title.font.size,i=L(a),-1!==a.title.position.indexOf("top")?(o.y-=(1+i)*t.r,s.ty-=t.titleBox.height):-1!==a.title.position.indexOf("bottom")&&(o.y+=(1+i)*t.r);var l,c,u=(l=t.r,c=t.trace.aspectratio,l/(void 0===c?1:c)),f=e.w*(a.domain.x[1]-a.domain.x[0])/2;return-1!==a.title.position.indexOf("left")?(f+=u,o.x-=(1+i)*u,s.tx+=t.titleBox.width/2):-1!==a.title.position.indexOf("center")?f*=2:-1!==a.title.position.indexOf("right")&&(f+=u,o.x+=(1+i)*u,s.tx-=t.titleBox.width/2),n=f/t.titleBox.width,r=O(t,e)/t.titleBox.height,{x:o.x,y:o.y,scale:Math.min(n,r),tx:s.tx,ty:s.ty}}function O(t,e){var n=t.trace,r=e.h*(n.domain.y[1]-n.domain.y[0]);return Math.min(t.titleBox.height,r/2)}function L(t){var e,n=t.pull;if(!n)return 0;if(Array.isArray(n))for(n=0,e=0;en&&(n=t.pull[e]);return n}function I(t,e){for(var n=[],r=0;r1?u=(c=n.r)/i.aspectratio:c=(u=n.r)*i.aspectratio,l=(c*=(1+i.baseratio)/2)*u}o=Math.min(o,l/n.vTotal)}for(r=0;r")}if(a){var b=l.castOption(i,e.i,"texttemplate");if(b){var 
x=function(t){return{label:t.label,value:t.v,valueLabel:v.formatPieValue(t.v,r.separators),percent:t.v/n.vTotal,percentLabel:v.formatPiePercent(t.v/n.vTotal,r.separators),color:t.color,text:t.text,customdata:l.castOption(i,t.i,"customdata")}}(e),w=v.getFirstFilled(i.text,e.pts);(y(w)||""===w)&&(x.text=w),e.text=l.texttemplateString(b,x,t._fullLayout._d3locale,x,i._meta||{})}else e.text=""}}function z(t,e){var n=t.rotate*Math.PI/180,r=Math.cos(n),i=Math.sin(n),a=(e.left+e.right)/2,o=(e.top+e.bottom)/2;t.textX=a*r-o*i,t.textY=a*i+o*r,t.noCenter=!0}t.exports={plot:function(t,e){var n=t._context.staticPlot,a=t._fullLayout,h=a._size;p("pie",a),w(e,t),I(e,h);var g=l.makeTraceGroups(a._pielayer,e,"trace").each((function(e){var p=r.select(this),g=e[0],m=g.trace;!function(t){var e,n,r,i=t[0],a=i.r,o=i.trace,s=v.getRotationAngle(o.rotation),l=2*Math.PI/i.vTotal,c="px0",u="px1";if("counterclockwise"===o.direction){for(e=0;ei.vTotal/2?1:0,n.halfangle=Math.PI*Math.min(n.v/i.vTotal,.5),n.ring=1-o.hole,n.rInscribed=E(n,i))}(e),p.attr("stroke-linejoin","round"),p.each((function(){var y=r.select(this).selectAll("g.slice").data(e);y.enter().append("g").classed("slice",!0),y.exit().remove();var w=[[[],[]],[[],[]]],k=!1;y.each((function(i,o){if(i.hidden)r.select(this).selectAll("path,g").remove();else{i.pointNumber=i.i,i.curveNumber=m.index,w[i.pxmid[1]<0?0:1][i.pxmid[0]<0?0:1].push(i);var c=g.cx,u=g.cy,h=r.select(this),p=h.selectAll("path.surface").data([i]);if(p.enter().append("path").classed("surface",!0).style({"pointer-events":n?"none":"all"}),h.call(b,t,e),m.pull){var y=+v.castOption(m.pull,i.pts)||0;y>0&&(c+=y*i.pxmid[0],u+=y*i.pxmid[1])}i.cxFinal=c,i.cyFinal=u;var T=m.hole;if(i.v===g.vTotal){var M="M"+(c+i.px0[0])+","+(u+i.px0[1])+O(i.px0,i.pxmid,!0,1)+O(i.pxmid,i.px0,!0,1)+"Z";T?p.attr("d","M"+(c+T*i.px0[0])+","+(u+T*i.px0[1])+O(i.px0,i.pxmid,!1,T)+O(i.pxmid,i.px0,!1,T)+"Z"+M):p.attr("d",M)}else{var A=O(i.px0,i.px1,!0,1);if(T){var S=1-T;p.attr("d","M"+(c+T*i.px1[0])+","+(u+T*i.px1[1])+O(i.px1,i.px0,!1,T)+"l"+S*i.px0[0]+","+S*i.px0[1]+A+"Z")}else p.attr("d","M"+c+","+u+"l"+i.px0[0]+","+i.px0[1]+A+"Z")}R(t,i,g);var E=v.castOption(m.textposition,i.pts),P=h.selectAll("g.slicetext").data(i.text&&"none"!==E?[0]:[]);P.enter().append("g").classed("slicetext",!0),P.exit().remove(),P.each((function(){var n=l.ensureSingle(r.select(this),"text","",(function(t){t.attr("data-notex",1)})),h=l.ensureUniformFontSize(t,"outside"===E?function(t,e,n){var r=v.castOption(t.outsidetextfont.color,e.pts)||v.castOption(t.textfont.color,e.pts)||n.color,i=v.castOption(t.outsidetextfont.family,e.pts)||v.castOption(t.textfont.family,e.pts)||n.family,a=v.castOption(t.outsidetextfont.size,e.pts)||v.castOption(t.textfont.size,e.pts)||n.size;return{color:r,family:i,size:a}}(m,i,a.font):x(m,i,a.font));n.text(i.text).attr({class:"slicetext",transform:"","text-anchor":"middle"}).call(s.font,h).call(f.convertToTspans,t);var p,y=s.bBox(n.node());if("outside"===E)p=C(y,i);else if(p=_(y,i,g),"auto"===E&&p.scale<1){var b=l.ensureUniformFontSize(t,m.outsidetextfont);n.call(s.font,b),p=C(y=s.bBox(n.node()),i)}var w=p.textPosAngle,T=void 0===w?i.pxmid:D(g.r,w);if(p.targetX=c+T[0]*p.rCenter+(p.x||0),p.targetY=u+T[1]*p.rCenter+(p.y||0),z(p,y),p.outside){var M=p.targetY;i.yLabelMin=M-y.height/2,i.yLabelMid=M,i.yLabelMax=M+y.height/2,i.labelExtraX=0,i.labelExtraY=0,k=!0}p.fontSize=h.size,d(m.type,p,a),e[o].transform=p,l.setTransormAndDisplay(n,p)}))}function O(t,e,n,r){var a=r*(e[0]-t[0]),o=r*(e[1]-t[1]);return"a"+r*g.r+","+r*g.r+" 0 
"+i.largeArc+(n?" 1 ":" 0 ")+a+","+o}}));var T=r.select(this).selectAll("g.titletext").data(m.title.text?[0]:[]);if(T.enter().append("g").classed("titletext",!0),T.exit().remove(),T.each((function(){var e,n=l.ensureSingle(r.select(this),"text","",(function(t){t.attr("data-notex",1)})),i=m.title.text;m._meta&&(i=l.templateString(i,m._meta)),n.text(i).attr({class:"titletext",transform:"","text-anchor":"middle"}).call(s.font,m.title.font).call(f.convertToTspans,t),e="middle center"===m.title.position?function(t){var e=Math.sqrt(t.titleBox.width*t.titleBox.width+t.titleBox.height*t.titleBox.height);return{x:t.cx,y:t.cy,scale:t.trace.hole*t.r*2/e,tx:0,ty:-t.titleBox.height/2+t.trace.title.font.size}}(g):P(g,h),n.attr("transform",u(e.x,e.y)+c(Math.min(1,e.scale))+u(e.tx,e.ty))})),k&&function(t,e){var n,r,i,a,o,s,l,c,u,f,h,d,p;function g(t,e){return t.pxmid[1]-e.pxmid[1]}function m(t,e){return e.pxmid[1]-t.pxmid[1]}function y(t,n){n||(n={});var i,c,u,h,d=n.labelExtraY+(r?n.yLabelMax:n.yLabelMin),p=r?t.yLabelMin:t.yLabelMax,g=r?t.yLabelMax:t.yLabelMin,m=t.cyFinal+o(t.px0[1],t.px1[1]),y=d-p;if(y*l>0&&(t.labelExtraY=y),Array.isArray(e.pull))for(c=0;c=(v.castOption(e.pull,u.pts)||0)||((t.pxmid[1]-u.pxmid[1])*l>0?(y=u.cyFinal+o(u.px0[1],u.px1[1])-p-t.labelExtraY)*l>0&&(t.labelExtraY+=y):(g+t.labelExtraY-m)*l>0&&(i=3*s*Math.abs(c-f.indexOf(t)),(h=u.cxFinal+a(u.px0[0],u.px1[0])+i-(t.cxFinal+t.pxmid[0])-t.labelExtraX)*s>0&&(t.labelExtraX+=h)))}for(r=0;r<2;r++)for(i=r?g:m,o=r?Math.max:Math.min,l=r?1:-1,n=0;n<2;n++){for(a=n?Math.max:Math.min,s=n?1:-1,(c=t[r][n]).sort(i),u=t[1-r][n],f=u.concat(c),d=[],h=0;hMath.abs(f)?s+="l"+f*t.pxmid[0]/t.pxmid[1]+","+f+"H"+(a+t.labelExtraX+c):s+="l"+t.labelExtraX+","+u+"v"+(f-u)+"h"+c}else s+="V"+(t.yLabelMid+t.labelExtraY)+"h"+c;l.ensureSingle(n,"path","textline").call(o.stroke,e.outsidetextfont.color).attr({"stroke-width":Math.min(2,e.outsidetextfont.size/8),d:s,fill:"none"})}else n.select("path.textline").remove()}))}(y,m),k&&m.automargin){var M=s.bBox(p.node()),A=m.domain,S=h.w*(A.x[1]-A.x[0]),E=h.h*(A.y[1]-A.y[0]),O=(.5*S-g.r)/h.w,L=(.5*E-g.r)/h.h;i.autoMargin(t,"pie."+m.uid+".automargin",{xl:A.x[0]-O,xr:A.x[1]+O,yb:A.y[0]-L,yt:A.y[1]+L,l:Math.max(g.cx-g.r-M.left,0),r:Math.max(M.right-(g.cx+g.r),0),b:Math.max(M.bottom-(g.cy+g.r),0),t:Math.max(g.cy-g.r-M.top,0),pad:5})}}))}));setTimeout((function(){g.selectAll("tspan").each((function(){var t=r.select(this);t.attr("dy")&&t.attr("dy",t.attr("dy"))}))}),0)},formatSliceLabel:R,transformInsideText:_,determineInsideTextFont:x,positionTitleOutside:P,prerenderTitles:w,layoutAreas:I,attachFxHandlers:b,computeTransform:z}},68357:function(t,e,n){"use strict";var r=n(39898),i=n(63463),a=n(72597).resizeText;t.exports=function(t){var e=t._fullLayout._pielayer.selectAll(".trace");a(t,e,"pie"),e.each((function(e){var n=e[0].trace,a=r.select(this);a.style({opacity:n.opacity}),a.selectAll("path.surface").each((function(e){r.select(this).call(i,e,n,t)}))}))}},63463:function(t,e,n){"use strict";var r=n(7901),i=n(53581).castOption,a=n(22209);t.exports=function(t,e,n,o){var s=n.marker.line,l=i(s.color,e.pts)||r.defaultLine,c=i(s.width,e.pts)||0;t.call(a,e,n,o).style("stroke-width",c).call(r.stroke,l)}},10959:function(t,e,n){"use strict";var 
r=n(82196);t.exports={x:r.x,y:r.y,xy:{valType:"data_array",editType:"calc"},indices:{valType:"data_array",editType:"calc"},xbounds:{valType:"data_array",editType:"calc"},ybounds:{valType:"data_array",editType:"calc"},text:r.text,marker:{color:{valType:"color",arrayOk:!1,editType:"calc"},opacity:{valType:"number",min:0,max:1,dflt:1,arrayOk:!1,editType:"calc"},blend:{valType:"boolean",dflt:null,editType:"calc"},sizemin:{valType:"number",min:.1,max:2,dflt:.5,editType:"calc"},sizemax:{valType:"number",min:.1,dflt:20,editType:"calc"},border:{color:{valType:"color",arrayOk:!1,editType:"calc"},arearatio:{valType:"number",min:0,max:1,dflt:0,editType:"calc"},editType:"calc"},editType:"calc"},transforms:void 0}},42743:function(t,e,n){"use strict";var r=n(9330).gl_pointcloud2d,i=n(78614),a=n(71739).findExtremes,o=n(34603);function s(t,e){this.scene=t,this.uid=e,this.type="pointcloud",this.pickXData=[],this.pickYData=[],this.xData=[],this.yData=[],this.textLabels=[],this.color="rgb(0, 0, 0)",this.name="",this.hoverinfo="all",this.idToIndex=new Int32Array(0),this.bounds=[0,0,0,0],this.pointcloudOptions={positions:new Float32Array(0),idToIndex:this.idToIndex,sizemin:.5,sizemax:12,color:[0,0,0,1],areaRatio:1,borderColor:[0,0,0,1]},this.pointcloud=r(t.glplot,this.pointcloudOptions),this.pointcloud._trace=this}var l=s.prototype;l.handlePick=function(t){var e=this.idToIndex[t.pointId];return{trace:this,dataCoord:t.dataCoord,traceCoord:this.pickXYData?[this.pickXYData[2*e],this.pickXYData[2*e+1]]:[this.pickXData[e],this.pickYData[e]],textLabel:Array.isArray(this.textLabels)?this.textLabels[e]:this.textLabels,color:this.color,name:this.name,pointIndex:e,hoverinfo:this.hoverinfo}},l.update=function(t){this.index=t.index,this.textLabels=t.text,this.name=t.name,this.hoverinfo=t.hoverinfo,this.bounds=[1/0,1/0,-1/0,-1/0],this.updateFast(t),this.color=o(t,{})},l.updateFast=function(t){var e,n,r,o,s,l,c=this.xData=this.pickXData=t.x,u=this.yData=this.pickYData=t.y,f=this.pickXYData=t.xy,h=t.xbounds&&t.ybounds,d=t.indices,p=this.bounds;if(f){if(r=f,e=f.length>>>1,h)p[0]=t.xbounds[0],p[2]=t.xbounds[1],p[1]=t.ybounds[0],p[3]=t.ybounds[1];else for(l=0;lp[2]&&(p[2]=o),sp[3]&&(p[3]=s);if(d)n=d;else for(n=new Int32Array(e),l=0;lp[2]&&(p[2]=o),sp[3]&&(p[3]=s);this.idToIndex=n,this.pointcloudOptions.idToIndex=n,this.pointcloudOptions.positions=r;var g=i(t.marker.color),v=i(t.marker.border.color),m=t.opacity*t.marker.opacity;g[3]*=m,this.pointcloudOptions.color=g;var y=t.marker.blend;null===y&&(y=c.length<100||u.length<100),this.pointcloudOptions.blend=y,v[3]*=m,this.pointcloudOptions.borderColor=v;var b=t.marker.sizemin,x=Math.max(t.marker.sizemax,t.marker.sizemin);this.pointcloudOptions.sizeMin=b,this.pointcloudOptions.sizeMax=x,this.pointcloudOptions.areaRatio=t.marker.border.arearatio,this.pointcloud.update(this.pointcloudOptions);var w=this.scene.xaxis,_=this.scene.yaxis,k=x/2||.5;t._extremes[w._id]=a(w,[p[0],p[2]],{ppad:k}),t._extremes[_._id]=a(_,[p[1],p[3]],{ppad:k})},l.dispose=function(){this.pointcloud.dispose()},t.exports=function(t,e){var n=new s(t,e.uid);return n.update(e),n}},33876:function(t,e,n){"use strict";var r=n(71828),i=n(10959);t.exports=function(t,e,n){function a(n,a){return r.coerce(t,e,i,n,a)}a("x"),a("y"),a("xbounds"),a("ybounds"),t.xy&&t.xy instanceof Float32Array&&(e.xy=t.xy),t.indices&&t.indices instanceof 
Int32Array&&(e.indices=t.indices),a("text"),a("marker.color",n),a("marker.opacity"),a("marker.blend"),a("marker.sizemin"),a("marker.sizemax"),a("marker.border.color",n),a("marker.border.arearatio"),e._length=null}},20593:function(t,e,n){"use strict";["*pointcloud* trace is deprecated!","Please consider switching to the *scattergl* trace type."].join(" "),t.exports={attributes:n(10959),supplyDefaults:n(33876),calc:n(36563),plot:n(42743),moduleType:"trace",name:"pointcloud",basePlotModule:n(4796),categories:["gl","gl2d","showLegend"],meta:{}}},39953:function(t,e,n){"use strict";var r=n(41940),i=n(9012),a=n(22399),o=n(77914),s=n(27670).Y,l=n(5386).fF,c=n(50693),u=n(44467).templatedArray,f=n(12663).descriptionOnlyNumbers,h=n(1426).extendFlat,d=n(30962).overrideAll;(t.exports=d({hoverinfo:h({},i.hoverinfo,{flags:[],arrayOk:!1}),hoverlabel:o.hoverlabel,domain:s({name:"sankey",trace:!0}),orientation:{valType:"enumerated",values:["v","h"],dflt:"h"},valueformat:{valType:"string",dflt:".3s",description:f("value")},valuesuffix:{valType:"string",dflt:""},arrangement:{valType:"enumerated",values:["snap","perpendicular","freeform","fixed"],dflt:"snap"},textfont:r({}),customdata:void 0,node:{label:{valType:"data_array",dflt:[]},groups:{valType:"info_array",impliedEdits:{x:[],y:[]},dimensions:2,freeLength:!0,dflt:[],items:{valType:"number",editType:"calc"}},x:{valType:"data_array",dflt:[]},y:{valType:"data_array",dflt:[]},color:{valType:"color",arrayOk:!0},customdata:{valType:"data_array",editType:"calc"},line:{color:{valType:"color",dflt:a.defaultLine,arrayOk:!0},width:{valType:"number",min:0,dflt:.5,arrayOk:!0}},pad:{valType:"number",arrayOk:!1,min:0,dflt:20},thickness:{valType:"number",arrayOk:!1,min:1,dflt:20},hoverinfo:{valType:"enumerated",values:["all","none","skip"],dflt:"all"},hoverlabel:o.hoverlabel,hovertemplate:l({},{keys:["value","label"]})},link:{arrowlen:{valType:"number",min:0,dflt:0},label:{valType:"data_array",dflt:[]},color:{valType:"color",arrayOk:!0},customdata:{valType:"data_array",editType:"calc"},line:{color:{valType:"color",dflt:a.defaultLine,arrayOk:!0},width:{valType:"number",min:0,dflt:0,arrayOk:!0}},source:{valType:"data_array",dflt:[]},target:{valType:"data_array",dflt:[]},value:{valType:"data_array",dflt:[]},hoverinfo:{valType:"enumerated",values:["all","none","skip"],dflt:"all"},hoverlabel:o.hoverlabel,hovertemplate:l({},{keys:["value","label"]}),colorscales:u("concentrationscales",{editType:"calc",label:{valType:"string",editType:"calc",dflt:""},cmax:{valType:"number",editType:"calc",dflt:1},cmin:{valType:"number",editType:"calc",dflt:0},colorscale:h(c().colorscale,{dflt:[[0,"white"],[1,"black"]]})})}},"calc","nested")).transforms=void 0},75536:function(t,e,n){"use strict";var r=n(30962).overrideAll,i=n(27659).a0,a=n(60436),o=n(528),s=n(6964),l=n(28569),c=n(47322).prepSelect,u=n(71828),f=n(73972),h="sankey";function d(t,e){var n=t._fullData[e],r=t._fullLayout,i=r.dragmode,a="pan"===r.dragmode?"move":"crosshair",o=n._bgRect;if(o&&"pan"!==i&&"zoom"!==i){s(o,a);var h={_id:"x",c2p:u.identity,_offset:n._sankey.translateX,_length:n._sankey.width},d={_id:"y",c2p:u.identity,_offset:n._sankey.translateY,_length:n._sankey.height},p={gd:t,element:o.node(),plotinfo:{id:e,xaxis:h,yaxis:d,fillRangeItems:u.noop},subplot:e,xaxes:[h],yaxes:[d],doneFnCompleted:function(n){var r,i=t._fullData[e],a=i.node.groups.slice(),o=[];function s(t){for(var e=i._sankey.graph.nodes,n=0;ny&&(y=a.source[e]),a.target[e]>y&&(y=a.target[e]);var b,x=y+1;t.node._count=x;var 
w=t.node.groups,_={};for(e=0;e0&&s(E,x)&&s(C,x)&&(!_.hasOwnProperty(E)||!_.hasOwnProperty(C)||_[E]!==_[C])){_.hasOwnProperty(C)&&(C=_[C]),_.hasOwnProperty(E)&&(E=_[E]),C=+C,h[E=+E]=h[C]=!0;var P="";a.label&&a.label[e]&&(P=a.label[e]);var O=null;P&&d.hasOwnProperty(P)&&(O=d[P]),c.push({pointNumber:e,label:P,color:u?a.color[e]:a.color,customdata:f?a.customdata[e]:a.customdata,concentrationscale:O,source:E,target:C,value:+S}),A.source.push(E),A.target.push(C)}}var L=x+w.length,I=o(n.color),D=o(n.customdata),R=[];for(e=0;ex-1,childrenNodes:[],pointNumber:e,label:z,color:I?n.color[e]:n.color,customdata:D?n.customdata[e]:n.customdata})}var N=!1;return function(t,e,n){for(var a=i.init2dArray(t,0),o=0;o1}))}(L,A.source,A.target)&&(N=!0),{circular:N,links:c,nodes:R,groups:w,groupLookup:_}}t.exports=function(t,e){var n=c(e);return a({circular:n.circular,_nodes:n.nodes,_links:n.links,_groups:n.groups,_groupLookup:n.groupLookup})}},85247:function(t){"use strict";t.exports={nodeTextOffsetHorizontal:4,nodeTextOffsetVertical:3,nodePadAcross:10,sankeyIterations:50,forceIterations:5,forceTicksPerFrame:10,duration:500,ease:"linear",cn:{sankey:"sankey",sankeyLinks:"sankey-links",sankeyLink:"sankey-link",sankeyNodeSet:"sankey-node-set",sankeyNode:"sankey-node",nodeRect:"node-rect",nodeLabel:"node-label"}}},26857:function(t,e,n){"use strict";var r=n(71828),i=n(39953),a=n(7901),o=n(84267),s=n(27670).c,l=n(38048),c=n(44467),u=n(85501);function f(t,e){function n(n,a){return r.coerce(t,e,i.link.colorscales,n,a)}n("label"),n("cmin"),n("cmax"),n("colorscale")}t.exports=function(t,e,n,h){function d(n,a){return r.coerce(t,e,i,n,a)}var p=r.extendDeep(h.hoverlabel,t.hoverlabel),g=t.node,v=c.newContainer(e,"node");function m(t,e){return r.coerce(g,v,i.node,t,e)}m("label"),m("groups"),m("x"),m("y"),m("pad"),m("thickness"),m("line.color"),m("line.width"),m("hoverinfo",t.hoverinfo),l(g,v,m,p),m("hovertemplate");var y=h.colorway;m("color",v.label.map((function(t,e){return a.addOpacity(function(t){return y[t%y.length]}(e),.8)}))),m("customdata");var b=t.link||{},x=c.newContainer(e,"link");function w(t,e){return r.coerce(b,x,i.link,t,e)}w("label"),w("arrowlen"),w("source"),w("target"),w("value"),w("line.color"),w("line.width"),w("hoverinfo",t.hoverinfo),l(b,x,w,p),w("hovertemplate");var _,k=o(h.paper_bgcolor).getLuminance()<.333?"rgba(255, 255, 255, 0.6)":"rgba(0, 0, 0, 0.2)";w("color",r.repeat(k,x.value.length)),w("customdata"),u(b,x,{name:"colorscales",handleItemDefaults:f}),s(e,h,d),d("orientation"),d("valueformat"),d("valuesuffix"),v.x.length&&v.y.length&&(_="freeform"),d("arrangement",_),r.coerceFont(d,"textfont",r.extendFlat({},h.font)),e._length=null}},29396:function(t,e,n){"use strict";t.exports={attributes:n(39953),supplyDefaults:n(26857),calc:n(92930),plot:n(60436),moduleType:"trace",name:"sankey",basePlotModule:n(75536),selectPoints:n(84564),categories:["noOpacity"],meta:{}}},60436:function(t,e,n){"use strict";var r=n(39898),i=n(71828),a=i.numberFormat,o=n(3393),s=n(30211),l=n(7901),c=n(85247).cn,u=i._;function f(t){return""!==t}function h(t,e){return t.filter((function(t){return t.key===e.traceId}))}function d(t,e){r.select(t).select("path").style("fill-opacity",e),r.select(t).select("rect").style("fill-opacity",e)}function p(t){r.select(t).select("text.name").style("fill","black")}function g(t){return function(e){return-1!==t.node.sourceLinks.indexOf(e.link)||-1!==t.node.targetLinks.indexOf(e.link)}}function v(t){return 
function(e){return-1!==e.node.sourceLinks.indexOf(t.link)||-1!==e.node.targetLinks.indexOf(t.link)}}function m(t,e,n){e&&n&&h(n,e).selectAll("."+c.sankeyLink).filter(g(e)).call(b.bind(0,e,n,!1))}function y(t,e,n){e&&n&&h(n,e).selectAll("."+c.sankeyLink).filter(g(e)).call(x.bind(0,e,n,!1))}function b(t,e,n,r){var i=r.datum().link.label;r.style("fill-opacity",(function(t){if(!t.link.concentrationscale)return.4})),i&&h(e,t).selectAll("."+c.sankeyLink).filter((function(t){return t.link.label===i})).style("fill-opacity",(function(t){if(!t.link.concentrationscale)return.4})),n&&h(e,t).selectAll("."+c.sankeyNode).filter(v(t)).call(m)}function x(t,e,n,r){var i=r.datum().link.label;r.style("fill-opacity",(function(t){return t.tinyColorAlpha})),i&&h(e,t).selectAll("."+c.sankeyLink).filter((function(t){return t.link.label===i})).style("fill-opacity",(function(t){return t.tinyColorAlpha})),n&&h(e,t).selectAll(c.sankeyNode).filter(v(t)).call(y)}function w(t,e){var n=t.hoverlabel||{},r=i.nestedProperty(n,e).get();return!Array.isArray(r)&&r}t.exports=function(t,e){for(var n=t._fullLayout,i=n._paper,h=n._size,g=0;g"),color:w(o,"bgcolor")||l.addOpacity(g.color,1),borderColor:w(o,"bordercolor"),fontFamily:w(o,"font.family"),fontSize:w(o,"font.size"),fontColor:w(o,"font.color"),nameLength:w(o,"namelength"),textAlign:w(o,"align"),idealAlign:r.event.x"),color:w(o,"bgcolor")||i.tinyColorHue,borderColor:w(o,"bordercolor"),fontFamily:w(o,"font.family"),fontSize:w(o,"font.size"),fontColor:w(o,"font.color"),nameLength:w(o,"namelength"),textAlign:w(o,"align"),idealAlign:"left",hovertemplate:o.hovertemplate,hovertemplateLabels:y,eventData:[i.node]},{container:n._hoverlayer.node(),outerContainer:n._paper.node(),gd:t});d(_,.85),p(_)}}},unhover:function(e,i,a){!1!==t._fullLayout.hovermode&&(r.select(e).call(y,i,a),"skip"!==i.node.trace.node.hoverinfo&&(i.node.fullData=i.node.trace,t.emit("plotly_unhover",{event:r.event,points:[i.node]})),s.loneUnhover(n._hoverlayer.node()))},select:function(e,n,i){var a=n.node;a.originalEvent=r.event,t._hoverdata=[a],r.select(e).call(y,n,i),s.click(t,{target:!0})}}})}},3393:function(t,e,n){"use strict";var r=n(49887),i=n(81684).k4,a=n(39898),o=n(30838),s=n(86781),l=n(85247),c=n(84267),u=n(7901),f=n(91424),h=n(71828),d=h.strTranslate,p=h.strRotate,g=n(28984),v=g.keyFun,m=g.repeat,y=g.unwrap,b=n(63893),x=n(73972),w=n(18783),_=w.CAP_SHIFT,k=w.LINE_SPACING;function T(t,e,n){var r,i=y(e),a=i.trace,u=a.domain,f="h"===a.orientation,d=a.node.pad,p=a.node.thickness,g=t.width*(u.x[1]-u.x[0]),v=t.height*(u.y[1]-u.y[0]),m=i._nodes,b=i._links,x=i.circular;(r=x?s.sankeyCircular().circularLinkGap(0):o.sankey()).iterations(l.sankeyIterations).size(f?[g,v]:[v,g]).nodeWidth(p).nodePadding(d).nodeId((function(t){return t.pointNumber})).nodes(m).links(b);var w,_,k,T=r();for(var M in r.nodePadding()=i||(n=i-e.y0)>1e-6&&(e.y0+=n,e.y1+=n),i=e.y1+d}))}(function(t){var e,n,r=t.map((function(t,e){return{x0:t.x0,index:e}})).sort((function(t,e){return t.x0-e.x0})),i=[],a=-1,o=-1/0;for(w=0;wo+p&&(a+=1,e=s.x0),o=s.x0,i[a]||(i[a]=[]),i[a].push(s),n=e-s.x0,s.x0+=n,s.x1+=n}return 
i}(m=T.nodes)),r.update(T)}return{circular:x,key:n,trace:a,guid:h.randstr(),horizontal:f,width:g,height:v,nodePad:a.node.pad,nodeLineColor:a.node.line.color,nodeLineWidth:a.node.line.width,linkLineColor:a.link.line.color,linkLineWidth:a.link.line.width,linkArrowLength:a.link.arrowlen,valueFormat:a.valueformat,valueSuffix:a.valuesuffix,textFont:a.textfont,translateX:u.x[0]*t.width+t.margin.l,translateY:t.height-u.y[1]*t.height+t.margin.t,dragParallel:f?v:g,dragPerpendicular:f?g:v,arrangement:a.arrangement,sankey:r,graph:T,forceLayouts:{},interactionState:{dragInProgress:!1,hovered:!1}}}function M(t,e,n){var r=c(e.color),i=e.source.label+"|"+e.target.label+"__"+n;return e.trace=t.trace,e.curveNumber=t.trace.index,{circular:t.circular,key:i,traceId:t.key,pointNumber:e.pointNumber,link:e,tinyColorHue:u.tinyRGB(r),tinyColorAlpha:r.getAlpha(),linkPath:A,linkLineColor:t.linkLineColor,linkLineWidth:t.linkLineWidth,linkArrowLength:t.linkArrowLength,valueFormat:t.valueFormat,valueSuffix:t.valueSuffix,sankey:t.sankey,parent:t,interactionState:t.interactionState,flow:e.flow}}function A(){return function(t){var e=t.linkArrowLength;if(t.link.circular)return function(t,e){var n=t.width/2,r=t.circularPathData;return"top"===t.circularLinkType?"M "+(r.targetX-e)+" "+(r.targetY+n)+" L"+(r.rightInnerExtent-e)+" "+(r.targetY+n)+"A"+(r.rightLargeArcRadius+n)+" "+(r.rightSmallArcRadius+n)+" 0 0 1 "+(r.rightFullExtent-n-e)+" "+(r.targetY-r.rightSmallArcRadius)+"L"+(r.rightFullExtent-n-e)+" "+r.verticalRightInnerExtent+"A"+(r.rightLargeArcRadius+n)+" "+(r.rightLargeArcRadius+n)+" 0 0 1 "+(r.rightInnerExtent-e)+" "+(r.verticalFullExtent-n)+"L"+r.leftInnerExtent+" "+(r.verticalFullExtent-n)+"A"+(r.leftLargeArcRadius+n)+" "+(r.leftLargeArcRadius+n)+" 0 0 1 "+(r.leftFullExtent+n)+" "+r.verticalLeftInnerExtent+"L"+(r.leftFullExtent+n)+" "+(r.sourceY-r.leftSmallArcRadius)+"A"+(r.leftLargeArcRadius+n)+" "+(r.leftSmallArcRadius+n)+" 0 0 1 "+r.leftInnerExtent+" "+(r.sourceY+n)+"L"+r.sourceX+" "+(r.sourceY+n)+"L"+r.sourceX+" "+(r.sourceY-n)+"L"+r.leftInnerExtent+" "+(r.sourceY-n)+"A"+(r.leftLargeArcRadius-n)+" "+(r.leftSmallArcRadius-n)+" 0 0 0 "+(r.leftFullExtent-n)+" "+(r.sourceY-r.leftSmallArcRadius)+"L"+(r.leftFullExtent-n)+" "+r.verticalLeftInnerExtent+"A"+(r.leftLargeArcRadius-n)+" "+(r.leftLargeArcRadius-n)+" 0 0 0 "+r.leftInnerExtent+" "+(r.verticalFullExtent+n)+"L"+(r.rightInnerExtent-e)+" "+(r.verticalFullExtent+n)+"A"+(r.rightLargeArcRadius-n)+" "+(r.rightLargeArcRadius-n)+" 0 0 0 "+(r.rightFullExtent+n-e)+" "+r.verticalRightInnerExtent+"L"+(r.rightFullExtent+n-e)+" "+(r.targetY-r.rightSmallArcRadius)+"A"+(r.rightLargeArcRadius-n)+" "+(r.rightSmallArcRadius-n)+" 0 0 0 "+(r.rightInnerExtent-e)+" "+(r.targetY-n)+"L"+(r.targetX-e)+" "+(r.targetY-n)+(e>0?"L"+r.targetX+" "+r.targetY:"")+"Z":"M "+(r.targetX-e)+" "+(r.targetY-n)+" L"+(r.rightInnerExtent-e)+" "+(r.targetY-n)+"A"+(r.rightLargeArcRadius+n)+" "+(r.rightSmallArcRadius+n)+" 0 0 0 "+(r.rightFullExtent-n-e)+" "+(r.targetY+r.rightSmallArcRadius)+"L"+(r.rightFullExtent-n-e)+" "+r.verticalRightInnerExtent+"A"+(r.rightLargeArcRadius+n)+" "+(r.rightLargeArcRadius+n)+" 0 0 0 "+(r.rightInnerExtent-e)+" "+(r.verticalFullExtent+n)+"L"+r.leftInnerExtent+" "+(r.verticalFullExtent+n)+"A"+(r.leftLargeArcRadius+n)+" "+(r.leftLargeArcRadius+n)+" 0 0 0 "+(r.leftFullExtent+n)+" "+r.verticalLeftInnerExtent+"L"+(r.leftFullExtent+n)+" "+(r.sourceY+r.leftSmallArcRadius)+"A"+(r.leftLargeArcRadius+n)+" "+(r.leftSmallArcRadius+n)+" 0 0 0 "+r.leftInnerExtent+" 
"+(r.sourceY-n)+"L"+r.sourceX+" "+(r.sourceY-n)+"L"+r.sourceX+" "+(r.sourceY+n)+"L"+r.leftInnerExtent+" "+(r.sourceY+n)+"A"+(r.leftLargeArcRadius-n)+" "+(r.leftSmallArcRadius-n)+" 0 0 1 "+(r.leftFullExtent-n)+" "+(r.sourceY+r.leftSmallArcRadius)+"L"+(r.leftFullExtent-n)+" "+r.verticalLeftInnerExtent+"A"+(r.leftLargeArcRadius-n)+" "+(r.leftLargeArcRadius-n)+" 0 0 1 "+r.leftInnerExtent+" "+(r.verticalFullExtent-n)+"L"+(r.rightInnerExtent-e)+" "+(r.verticalFullExtent-n)+"A"+(r.rightLargeArcRadius-n)+" "+(r.rightLargeArcRadius-n)+" 0 0 1 "+(r.rightFullExtent+n-e)+" "+r.verticalRightInnerExtent+"L"+(r.rightFullExtent+n-e)+" "+(r.targetY+r.rightSmallArcRadius)+"A"+(r.rightLargeArcRadius-n)+" "+(r.rightSmallArcRadius-n)+" 0 0 1 "+(r.rightInnerExtent-e)+" "+(r.targetY+n)+"L"+(r.targetX-e)+" "+(r.targetY+n)+(e>0?"L"+r.targetX+" "+r.targetY:"")+"Z"}(t.link,e);var n=Math.abs((t.link.target.x0-t.link.source.x1)/2);e>n&&(e=n);var r=t.link.source.x1,a=t.link.target.x0-e,o=i(r,a),s=o(.5),l=o(.5),c=t.link.y0-t.link.width/2,u=t.link.y0+t.link.width/2,f=t.link.y1-t.link.width/2,h=t.link.y1+t.link.width/2,d="M"+r+","+c,p="C"+s+","+c+" "+l+","+f+" "+a+","+f,g="C"+l+","+h+" "+s+","+u+" "+r+","+u,v=e>0?"L"+(a+e)+","+(f+t.link.width/2):"";return d+p+(v+="L"+a+","+h)+g+"Z"}}function S(t,e){var n=c(e.color),r=l.nodePadAcross,i=t.nodePad/2;e.dx=e.x1-e.x0,e.dy=e.y1-e.y0;var a=e.dx,o=Math.max(.5,e.dy),s="node_"+e.pointNumber;return e.group&&(s=h.randstr()),e.trace=t.trace,e.curveNumber=t.trace.index,{index:e.pointNumber,key:s,partOfGroup:e.partOfGroup||!1,group:e.group,traceId:t.key,trace:t.trace,node:e,nodePad:t.nodePad,nodeLineColor:t.nodeLineColor,nodeLineWidth:t.nodeLineWidth,textFont:t.textFont,size:t.horizontal?t.height:t.width,visibleWidth:Math.ceil(a),visibleHeight:o,zoneX:-r,zoneY:-i,zoneWidth:a+2*r,zoneHeight:o+2*i,labelY:t.horizontal?e.dy/2+1:e.dx/2+1,left:1===e.originalLayer,sizeAcross:t.width,forceLayouts:t.forceLayouts,horizontal:t.horizontal,darkBackground:n.getBrightness()<=128,tinyColorHue:u.tinyRGB(n),tinyColorAlpha:n.getAlpha(),valueFormat:t.valueFormat,valueSuffix:t.valueSuffix,sankey:t.sankey,graph:t.graph,arrangement:t.arrangement,uniqueNodeLabelPathId:[t.guid,t.key,s].join("_"),interactionState:t.interactionState,figure:t}}function E(t){t.attr("transform",(function(t){return d(t.node.x0.toFixed(3),t.node.y0.toFixed(3))}))}function C(t){t.call(E)}function P(t,e){t.call(C),e.attr("d",A())}function O(t){t.attr("width",(function(t){return t.node.x1-t.node.x0})).attr("height",(function(t){return t.visibleHeight}))}function L(t){return t.link.width>1||t.linkLineWidth>0}function I(t){return d(t.translateX,t.translateY)+(t.horizontal?"matrix(1 0 0 1 0 0)":"matrix(0 1 1 0 0 0)")}function D(t,e,n){t.on(".basic",null).on("mouseover.basic",(function(t){t.interactionState.dragInProgress||t.partOfGroup||(n.hover(this,t,e),t.interactionState.hovered=[this,t])})).on("mousemove.basic",(function(t){t.interactionState.dragInProgress||t.partOfGroup||(n.follow(this,t),t.interactionState.hovered=[this,t])})).on("mouseout.basic",(function(t){t.interactionState.dragInProgress||t.partOfGroup||(n.unhover(this,t,e),t.interactionState.hovered=!1)})).on("click.basic",(function(t){t.interactionState.hovered&&(n.unhover(this,t,e),t.interactionState.hovered=!1),t.interactionState.dragInProgress||t.partOfGroup||n.select(this,t,e)}))}function R(t,e,n,i){var 
o=a.behavior.drag().origin((function(t){return{x:t.node.x0+t.visibleWidth/2,y:t.node.y0+t.visibleHeight/2}})).on("dragstart",(function(a){if("fixed"!==a.arrangement&&(h.ensureSingle(i._fullLayout._infolayer,"g","dragcover",(function(t){i._fullLayout._dragCover=t})),h.raiseToTop(this),a.interactionState.dragInProgress=a.node,N(a.node),a.interactionState.hovered&&(n.nodeEvents.unhover.apply(0,a.interactionState.hovered),a.interactionState.hovered=!1),"snap"===a.arrangement)){var o=a.traceId+"|"+a.key;a.forceLayouts[o]?a.forceLayouts[o].alpha(1):function(t,e,n,i){!function(t){for(var e=0;e0&&r.forceLayouts[e].alpha(0)}}(0,e,a,n)).stop()}(0,o,a),function(t,e,n,r,i){window.requestAnimationFrame((function a(){var o;for(o=0;o0)window.requestAnimationFrame(a);else{var s=n.node.originalX;n.node.x0=s-n.visibleWidth/2,n.node.x1=s+n.visibleWidth/2,z(n,i)}}))}(t,e,a,o,i)}})).on("drag",(function(n){if("fixed"!==n.arrangement){var r=a.event.x,i=a.event.y;"snap"===n.arrangement?(n.node.x0=r-n.visibleWidth/2,n.node.x1=r+n.visibleWidth/2,n.node.y0=i-n.visibleHeight/2,n.node.y1=i+n.visibleHeight/2):("freeform"===n.arrangement&&(n.node.x0=r-n.visibleWidth/2,n.node.x1=r+n.visibleWidth/2),i=Math.max(0,Math.min(n.size-n.visibleHeight/2,i)),n.node.y0=i-n.visibleHeight/2,n.node.y1=i+n.visibleHeight/2),N(n.node),"snap"!==n.arrangement&&(n.sankey.update(n.graph),P(t.filter(j(n)),e))}})).on("dragend",(function(t){if("fixed"!==t.arrangement){t.interactionState.dragInProgress=!1;for(var e=0;el&&P[m].gap;)m--;for(b=P[m].s,g=P.length-1;g>m;g--)P[g].s=b;for(;lS[f]&&f=0;f--){var h=t[f];if("scatter"===h.type&&h.xaxis===c.xaxis&&h.yaxis===c.yaxis){h.opacity=void 0;break}}}}}},17438:function(t,e,n){"use strict";var r=n(71828),i=n(73972),a=n(82196),o=n(47581),s=n(34098),l=n(67513),c=n(73927),u=n(565),f=n(49508),h=n(11058),d=n(94039),p=n(82410),g=n(28908),v=n(71828).coercePattern;t.exports=function(t,e,n,m){function y(n,i){return r.coerce(t,e,a,n,i)}var b=l(t,e,m,y);if(b||(e.visible=!1),e.visible){c(t,e,m,y),y("xhoverformat"),y("yhoverformat");var x=u(t,e,m,y);"group"===m.scattermode&&void 0===e.orientation&&y("orientation","v");var w=!x&&b=Math.min(e,n)&&p<=Math.max(e,n)?0:1/0}var r=Math.max(3,t.mrc||0),i=1-1/r,a=Math.abs(h.c2p(t.x)-p);return a=Math.min(e,n)&&g<=Math.max(e,n)?0:1/0}var r=Math.max(3,t.mrc||0),i=1-1/r,a=Math.abs(d.c2p(t.y)-g);return aY!==(F=D[L][1])>=Y&&(z=D[L-1][0],N=D[L][0],F-j&&(R=z+(N-z)*(Y-j)/(F-j),V=Math.min(V,R),q=Math.max(q,R)));V=Math.max(V,0),q=Math.min(q,h._length);var $=s.defaultLine;return s.opacity(f.fillcolor)?$=f.fillcolor:s.opacity((f.line||{}).color)&&($=f.line.color),r.extendFlat(t,{distance:t.maxHoverDistance,x0:V,x1:q,y0:Y,y1:Y,color:$,hovertemplate:!1}),delete t.index,f.text&&!Array.isArray(f.text)?t.text=String(f.text):t.text=f.name,[t]}}}},67368:function(t,e,n){"use strict";var r=n(34098);t.exports={hasLines:r.hasLines,hasMarkers:r.hasMarkers,hasText:r.hasText,isBubble:r.isBubble,attributes:n(82196),layoutAttributes:n(21479),supplyDefaults:n(17438),crossTraceDefaults:n(34936),supplyLayoutDefaults:n(79334),calc:n(47761).calc,crossTraceCalc:n(72626),arraysToCalcdata:n(75225),plot:n(32663),colorbar:n(4898),formatLabels:n(8225),style:n(16296).style,styleOnSelect:n(16296).styleOnSelect,hoverPoints:n(33720),selectPoints:n(98002),animatable:!0,moduleType:"trace",name:"scatter",basePlotModule:n(93612),categories:["cartesian","svg","symbols","errorBarsOK","showLegend","scatter-like","zoomScale"],meta:{}}},21479:function(t){"use 
strict";t.exports={scattermode:{valType:"enumerated",values:["group","overlay"],dflt:"overlay",editType:"calc"},scattergap:{valType:"number",min:0,max:1,editType:"calc"}}},79334:function(t,e,n){"use strict";var r=n(71828),i=n(21479);t.exports=function(t,e){var n,a,o="group"===e.barmode;"group"===e.scattermode&&(n="scattergap",a=o?e.bargap:.2,r.coerce(t,e,i,n,a))}},11058:function(t,e,n){"use strict";var r=n(71828).isArrayOrTypedArray,i=n(52075).hasColorscale,a=n(1586);t.exports=function(t,e,n,o,s,l){l||(l={});var c=(t.marker||{}).color;s("line.color",n),i(t,"line")?a(t,e,o,s,{prefix:"line.",cLetter:"c"}):s("line.color",!r(c)&&c||n),s("line.width"),l.noDash||s("line.dash"),l.backoff&&s("line.backoff")}},34621:function(t,e,n){"use strict";var r=n(91424),i=n(50606),a=i.BADNUM,o=i.LOG_CLIP,s=o+.5,l=o-.5,c=n(71828),u=c.segmentsIntersect,f=c.constrain,h=n(47581);t.exports=function(t,e){var n,i,o,d,p,g,v,m,y,b,x,w,_,k,T,M,A,S,E=e.trace||{},C=e.xaxis,P=e.yaxis,O="log"===C.type,L="log"===P.type,I=C._length,D=P._length,R=e.backoff,z=E.marker,N=e.connectGaps,j=e.baseTolerance,F=e.shape,B="linear"===F,U=E.fill&&"none"!==E.fill,H=[],V=h.minTolerance,q=t.length,G=new Array(q),W=0;function Y(n){var r=t[n];if(!r)return!1;var i=e.linearized?C.l2p(r.x):C.c2p(r.x),o=e.linearized?P.l2p(r.y):P.c2p(r.y);if(i===a){if(O&&(i=C.c2p(r.x,!0)),i===a)return!1;L&&o===a&&(i*=Math.abs(C._m*D*(C._m>0?s:l)/(P._m*I*(P._m>0?s:l)))),i*=1e3}if(o===a){if(L&&(o=P.c2p(r.y,!0)),o===a)return!1;o*=1e3}return[i,o]}function $(t,e,n,r){var i=n-t,a=r-e,o=.5-t,s=.5-e,l=i*i+a*a,c=i*o+a*s;if(c>0&&cot||t[1]lt)return[f(t[0],at,ot),f(t[1],st,lt)]}function ft(t,e){return t[0]===e[0]&&(t[0]===at||t[0]===ot)||t[1]===e[1]&&(t[1]===st||t[1]===lt)||void 0}function ht(t,e,n){return function(r,i){var a=ut(r),o=ut(i),s=[];if(a&&o&&ft(a,o))return s;a&&s.push(a),o&&s.push(o);var l=2*c.constrain((r[t]+i[t])/2,e,n)-((a||r)[t]+(o||i)[t]);return l&&((a&&o?l>0===a[t]>o[t]?a:o:a||o)[t]+=l),s}}function dt(t){var e=t[0],n=t[1],r=e===G[W-1][0],i=n===G[W-1][1];if(!r||!i)if(W>1){var a=e===G[W-2][0],o=n===G[W-2][1];r&&(e===at||e===ot)&&a?o?W--:G[W-1]=t:i&&(n===st||n===lt)&&o?a?W--:G[W-1]=t:G[W++]=t}else G[W++]=t}function pt(t){G[W-1][0]!==t[0]&&G[W-1][1]!==t[1]&&dt([Q,tt]),dt(t),et=null,Q=tt=0}"linear"===F||"spline"===F?rt=function(t,e){for(var n=[],r=0,i=0;i<4;i++){var a=ct[i],o=u(t[0],t[1],e[0],e[1],a[0],a[1],a[2],a[3]);o&&(!r||Math.abs(o.x-n[0][0])>1||Math.abs(o.y-n[0][1])>1)&&(o=[o.x,o.y],r&&K(o,t)ot?ot:0,J=e[1]lt?lt:0,Z||J){if(W)if(et){var r=rt(et,e);r.length>1&&(pt(r[0]),G[W++]=r[1])}else nt=rt(G[W-1],e)[0],G[W++]=nt;else G[W++]=[Z||e[0],J||e[1]];var i=G[W-1];Z&&J&&(i[0]!==Z||i[1]!==J)?(et&&(Q!==Z&&tt!==J?dt(Q&&tt?function(t,e){var n=e[0]-t[0],r=(e[1]-t[1])/n;return(t[1]*e[0]-e[1]*t[0])/n>0?[r>0?at:ot,lt]:[r>0?ot:at,st]}(et,e):[Q||Z,tt||J]):Q&&tt&&dt([Q,tt])),dt([Z,J])):Q-Z&&tt-J&&dt([Z||Q,J||tt]),et=e,Q=Z,tt=J}else et&&pt(rt(et,e)[0]),G[W++]=e}for(n=0;nX(g,mt))break;o=g,(_=y[0]*m[0]+y[1]*m[1])>x?(x=_,d=g,v=!1):_=t.length||!g)break;vt(g),i=g}}else vt(d)}et&&dt([Q||et[0],tt||et[1]]),H.push(G.slice(0,W))}var yt=F.slice(F.length-1);if(R&&"h"!==yt&&"v"!==yt){for(var bt=!1,xt=-1,wt=[],_t=0;_t=0?l=d:(l=d=h,h++),l0?Math.max(n,a):0}}},4898:function(t){"use strict";t.exports={container:"marker",min:"cmin",max:"cmax"}},49508:function(t,e,n){"use strict";var r=n(7901),i=n(52075).hasColorscale,a=n(1586),o=n(34098);t.exports=function(t,e,n,s,l,c){var 
u=o.isBubble(t),f=(t.line||{}).color;c=c||{},f&&(n=f),l("marker.symbol"),l("marker.opacity",u?.7:1),l("marker.size"),c.noAngle||(l("marker.angle"),c.noAngleRef||l("marker.angleref"),c.noStandOff||l("marker.standoff")),l("marker.color",n),i(t,"marker")&&a(t,e,s,l,{prefix:"marker.",cLetter:"c"}),c.noSelect||(l("selected.marker.color"),l("unselected.marker.color"),l("selected.marker.size"),l("unselected.marker.size")),c.noLine||(l("marker.line.color",f&&!Array.isArray(f)&&e.marker.color!==f?f:u?r.background:r.defaultLine),i(t,"marker.line")&&a(t,e,s,l,{prefix:"marker.line.",cLetter:"c"}),l("marker.line.width",u?1:0)),u&&(l("marker.sizeref"),l("marker.sizemin"),l("marker.sizemode")),c.gradient&&"none"!==l("marker.gradient.type")&&l("marker.gradient.color")}},73927:function(t,e,n){"use strict";var r=n(71828).dateTick0,i=n(50606).ONEWEEK;function a(t,e){return r(e,t%i===0?1:0)}t.exports=function(t,e,n,r,i){if(i||(i={x:!0,y:!0}),i.x){var o=r("xperiod");o&&(r("xperiod0",a(o,e.xcalendar)),r("xperiodalignment"))}if(i.y){var s=r("yperiod");s&&(r("yperiod0",a(s,e.ycalendar)),r("yperiodalignment"))}}},32663:function(t,e,n){"use strict";var r=n(39898),i=n(73972),a=n(71828),o=a.ensureSingle,s=a.identity,l=n(91424),c=n(34098),u=n(34621),f=n(68687),h=n(61082).tester;function d(t,e,n,f,d,p,g){var v,m=t._context.staticPlot;!function(t,e,n,i,o){var s=n.xaxis,l=n.yaxis,u=r.extent(a.simpleMap(s.range,s.r2c)),f=r.extent(a.simpleMap(l.range,l.r2c)),h=i[0].trace;if(c.hasMarkers(h)){var d=h.marker.maxdisplayed;if(0!==d){var p=i.filter((function(t){return t.x>=u[0]&&t.x<=u[1]&&t.y>=f[0]&&t.y<=f[1]})),g=Math.ceil(p.length/d),v=0;o.forEach((function(t,n){var r=t[0].trace;c.hasMarkers(r)&&r.marker.maxdisplayed>0&&n0;function b(t){return y?t.transition():t}var x=n.xaxis,w=n.yaxis,_=f[0].trace,k=_.line,T=r.select(p),M=o(T,"g","errorbars"),A=o(T,"g","lines"),S=o(T,"g","points"),E=o(T,"g","text");if(i.getComponentMethod("errorbars","plot")(t,M,n,g),!0===_.visible){var C,P;b(T).style("opacity",_.opacity);var O=_.fill.charAt(_.fill.length-1);"x"!==O&&"y"!==O&&(O=""),f[0][n.isRangePlot?"nodeRangePlot3":"node3"]=T;var L,I,D="",R=[],z=_._prevtrace;z&&(D=z._prevRevpath||"",P=z._nextFill,R=z._polygons);var N,j,F,B,U,H,V,q="",G="",W=[],Y=a.noop;if(C=_._ownFill,c.hasLines(_)||"none"!==_.fill){for(P&&P.datum(f),-1!==["hv","vh","hvh","vhv"].indexOf(k.shape)?(N=l.steps(k.shape),j=l.steps(k.shape.split("").reverse().join(""))):N=j="spline"===k.shape?function(t){var e=t[t.length-1];return t.length>1&&t[0][0]===e[0]&&t[0][1]===e[1]?l.smoothclosed(t.slice(1),k.smoothing):l.smoothopen(t,k.smoothing)}:function(t){return"M"+t.join("L")},F=function(t){return j(t.reverse())},W=u(f,{xaxis:x,yaxis:w,trace:_,connectGaps:_.connectgaps,baseTolerance:Math.max(k.width||1,3)/4,shape:k.shape,backoff:k.backoff,simplify:k.simplify,fill:_.fill}),V=_._polygons=new Array(W.length),v=0;v0,v=f(t,e,n);(u=i.selectAll("g.trace").data(v,(function(t){return t[0].trace.uid}))).enter().append("g").attr("class",(function(t){return"trace scatter trace"+t[0].trace.uid})).style("stroke-miterlimit",2),u.order(),function(t,e,n){e.each((function(e){var i=o(r.select(this),"g","fills");l.setClipUrl(i,n.layerClipId,t);var a=e[0].trace,c=[];a._ownfill&&c.push("_ownFill"),a._nexttrace&&c.push("_nextFill");var 
u=i.selectAll("g").data(c,s);u.enter().append("g"),u.exit().each((function(t){a[t]=null})).remove(),u.order().each((function(t){a[t]=o(r.select(this),"path","js-fill")}))}))}(t,u,e),g?(c&&(h=c()),r.transition().duration(a.duration).ease(a.easing).each("end",(function(){h&&h()})).each("interrupt",(function(){h&&h()})).each((function(){i.selectAll("g.trace").each((function(n,r){d(t,r,e,n,v,this,a)}))}))):u.each((function(n,r){d(t,r,e,n,v,this,a)})),p&&u.exit().remove(),i.selectAll("path:not([d])").remove()}},98002:function(t,e,n){"use strict";var r=n(34098);t.exports=function(t,e){var n,i,a,o,s=t.cd,l=t.xaxis,c=t.yaxis,u=[],f=s[0].trace;if(!r.hasMarkers(f)&&!r.hasText(f))return[];if(!1===e)for(n=0;n0){var h=i.c2l(u);i._lowerLogErrorBound||(i._lowerLogErrorBound=h),i._lowerErrorBound=Math.min(i._lowerLogErrorBound,h)}}else o[s]=[-l[0]*n,l[1]*n]}return o}t.exports=function(t,e,n){var r=[i(t.x,t.error_x,e[0],n.xaxis),i(t.y,t.error_y,e[1],n.yaxis),i(t.z,t.error_z,e[2],n.zaxis)],a=function(t){for(var e=0;e-1?-1:t.indexOf("right")>-1?1:0}function x(t){return null===t||void 0===t?0:t.indexOf("top")>-1?-1:t.indexOf("bottom")>-1?1:0}function w(t,e){return e(4*t)}function _(t){return d[t]}function k(t,e,n,r,i){var a=null;if(l.isArrayOrTypedArray(t)){a=[];for(var o=0;o=0){var g=function(t,e,n){var r,i=(n+1)%3,a=(n+2)%3,o=[],l=[];for(r=0;r=0&&f("surfacecolor",d||p);for(var g=["x","y","z"],v=0;v<3;++v){var m="projection."+g[v];f(m+".show")&&(f(m+".opacity"),f(m+".scale"))}var y=r.getComponentMethod("errorbars","supplyDefaults");y(t,e,d||p||n,{axis:"z"}),y(t,e,d||p||n,{axis:"y",inherit:"z"}),y(t,e,d||p||n,{axis:"x",inherit:"z"})}else e.visible=!1}},13551:function(t,e,n){"use strict";t.exports={plot:n(58925),attributes:n(44542),markerSymbols:n(87381),supplyDefaults:n(21428),colorbar:[{container:"marker",min:"cmin",max:"cmax"},{container:"line",min:"cmin",max:"cmax"}],calc:n(36563),moduleType:"trace",name:"scatter3d",basePlotModule:n(58547),categories:["gl3d","symbols","showLegend","scatter-like"],meta:{}}},97001:function(t,e,n){"use strict";var r=n(82196),i=n(9012),a=n(5386).fF,o=n(5386).si,s=n(50693),l=n(1426).extendFlat,c=r.marker,u=r.line,f=c.line;t.exports={carpet:{valType:"string",editType:"calc"},a:{valType:"data_array",editType:"calc"},b:{valType:"data_array",editType:"calc"},mode:l({},r.mode,{dflt:"markers"}),text:l({},r.text,{}),texttemplate:o({editType:"plot"},{keys:["a","b","text"]}),hovertext:l({},r.hovertext,{}),line:{color:u.color,width:u.width,dash:u.dash,backoff:u.backoff,shape:l({},u.shape,{values:["linear","spline"]}),smoothing:u.smoothing,editType:"calc"},connectgaps:r.connectgaps,fill:l({},r.fill,{values:["none","toself","tonext"],dflt:"none"}),fillcolor:r.fillcolor,marker:l({symbol:c.symbol,opacity:c.opacity,maxdisplayed:c.maxdisplayed,angle:c.angle,angleref:c.angleref,standoff:c.standoff,size:c.size,sizeref:c.sizeref,sizemin:c.sizemin,sizemode:c.sizemode,line:l({width:f.width,editType:"calc"},s("marker.line")),gradient:c.gradient,editType:"calc"},s("marker")),textfont:r.textfont,textposition:r.textposition,selected:r.selected,unselected:r.unselected,hoverinfo:l({},i.hoverinfo,{flags:["a","b","text","name"]}),hoveron:r.hoveron,hovertemplate:a()}},34618:function(t,e,n){"use strict";var r=n(92770),i=n(36922),a=n(75225),o=n(66279),s=n(47761).calcMarkerSize,l=n(22882);t.exports=function(t,e){var n=e._carpetTrace=l(t,e);if(n&&n.visible&&"legendonly"!==n.visible){var c;e.xaxis=n.xaxis,e.yaxis=n.yaxis;var u,f,h=e._length,d=new Array(h),p=!1;for(c=0;c")}return o}function y(t,e){var 
n;n=t.labelprefix&&t.labelprefix.length>0?t.labelprefix.replace(/ = $/,""):t._hovertitle,v.push(n+": "+e.toFixed(3)+t.labelsuffix)}}},46858:function(t,e,n){"use strict";t.exports={attributes:n(97001),supplyDefaults:n(98965),colorbar:n(4898),formatLabels:n(48953),calc:n(34618),plot:n(1913),style:n(16296).style,styleOnSelect:n(16296).styleOnSelect,hoverPoints:n(22931),selectPoints:n(98002),eventData:n(16165),moduleType:"trace",name:"scattercarpet",basePlotModule:n(93612),categories:["svg","carpet","symbols","showLegend","carpetDependent","zoomScale"],meta:{}}},1913:function(t,e,n){"use strict";var r=n(32663),i=n(89298),a=n(91424);t.exports=function(t,e,n,o){var s,l,c,u=n[0][0].carpet,f=i.getFromId(t,u.xaxis||"x"),h=i.getFromId(t,u.yaxis||"y"),d={xaxis:f,yaxis:h,plot:e.plot};for(s=0;s")}function d(t){return t+"\xb0"}}(c,g,t,l[0].t.labels),t.hovertemplate=c.hovertemplate,[t]}}},17988:function(t,e,n){"use strict";t.exports={attributes:n(19316),supplyDefaults:n(10659),colorbar:n(4898),formatLabels:n(82719),calc:n(84622),calcGeoJSON:n(89171).calcGeoJSON,plot:n(89171).plot,style:n(33095),styleOnSelect:n(16296).styleOnSelect,hoverPoints:n(14977),eventData:n(84084),selectPoints:n(20548),moduleType:"trace",name:"scattergeo",basePlotModule:n(44622),categories:["geo","symbols","showLegend","scatter-like"],meta:{}}},89171:function(t,e,n){"use strict";var r=n(39898),i=n(71828),a=n(90973).getTopojsonFeatures,o=n(18214),s=n(41327),l=n(71739).findExtremes,c=n(50606).BADNUM,u=n(47761).calcMarkerSize,f=n(34098),h=n(33095);t.exports={calcGeoJSON:function(t,e){var n,r,i=t[0].trace,o=e[i.geo],f=o._subplot,h=i._length;if(Array.isArray(i.locations)){var d=i.locationmode,p="geojson-id"===d?s.extractTraceFeature(t):a(i,f.topojson);for(n=0;n=v,_=2*x,k={},T=l.makeCalcdata(e,"x"),M=y.makeCalcdata(e,"y"),A=s(e,l,"x",T),S=s(e,y,"y",M),E=A.vals,C=S.vals;e._x=E,e._y=C,e.xperiodalignment&&(e._origX=T,e._xStarts=A.starts,e._xEnds=A.ends),e.yperiodalignment&&(e._origY=M,e._yStarts=S.starts,e._yEnds=S.ends);var P=new Array(_),O=new Array(x);for(n=0;n1&&i.extendFlat(s.line,d.linePositions(t,n,r)),s.errorX||s.errorY){var l=d.errorBarPositions(t,n,r,a,o);s.errorX&&i.extendFlat(s.errorX,l.x),s.errorY&&i.extendFlat(s.errorY,l.y)}return s.text&&(i.extendFlat(s.text,{positions:r},d.textPosition(t,n,s.text,s.marker)),i.extendFlat(s.textSel,{positions:r},d.textPosition(t,n,s.text,s.markerSel)),i.extendFlat(s.textUnsel,{positions:r},d.textPosition(t,n,s.text,s.markerUnsel))),s}(t,0,e,P,E,C),D=p(t,b);return f(o,e),w?I.marker&&(L=I.marker.sizeAvg||Math.max(I.marker.size,3)):L=c(e,x),u(t,e,l,y,E,C,L),I.errorX&&m(e,l,I.errorX),I.errorY&&m(e,y,I.errorY),I.fill&&!D.fill2d&&(D.fill2d=!0),I.marker&&!D.scatter2d&&(D.scatter2d=!0),I.line&&!D.line2d&&(D.line2d=!0),!I.errorX&&!I.errorY||D.error2d||(D.error2d=!0),I.text&&!D.glText&&(D.glText=!0),I.marker&&(I.marker.snap=x),D.lineOptions.push(I.line),D.errorXOptions.push(I.errorX),D.errorYOptions.push(I.errorY),D.fillOptions.push(I.fill),D.markerOptions.push(I.marker),D.markerSelectedOptions.push(I.markerSel),D.markerUnselectedOptions.push(I.markerUnsel),D.textOptions.push(I.text),D.textSelectedOptions.push(I.textSel),D.textUnselectedOptions.push(I.textUnsel),D.selectBatch.push([]),D.unselectBatch.push([]),k._scene=D,k.index=D.count,k.x=E,k.y=C,k.positions=P,D.count++,[{x:!1,y:!1,t:k,trace:e}]}},78232:function(t){"use 
strict";t.exports={TOO_MANY_POINTS:1e5,SYMBOL_SDF_SIZE:200,SYMBOL_SIZE:20,SYMBOL_STROKE:1,DOT_RE:/-dot/,OPEN_RE:/-open/,DASHES:{solid:[1],dot:[1,1],dash:[4,1],longdash:[8,1],dashdot:[4,1,1,1],longdashdot:[8,1,1,1]}}},19635:function(t,e,n){"use strict";var r=n(92770),i=n(82019),a=n(25075),o=n(73972),s=n(71828),l=n(91424),c=n(41675),u=n(81697).formatColor,f=n(34098),h=n(39984),d=n(68645),p=n(78232),g=n(37822).DESELECTDIM,v={start:1,left:1,end:-1,right:-1,middle:0,center:0,bottom:1,top:-1},m=n(23469).appendArrayPointValue;function y(t,e){var n,i=t._fullLayout,a=e._length,o=e.textfont,l=e.textposition,c=Array.isArray(l)?l:[l],u=o.color,f=o.size,h=o.family,d={},p=t._context.plotGlPixelRatio,g=e.texttemplate;if(g){d.text=[];var v=i._d3locale,y=Array.isArray(g),b=y?Math.min(g.length,a):a,x=y?function(t){return g[t]}:function(){return g};for(n=0;np.TOO_MANY_POINTS||f.hasMarkers(e)?"rect":"round";if(c&&e.connectgaps){var h=r[0],d=r[1];for(i=0;i1?l[i]:l[0]:l,p=Array.isArray(c)?c.length>1?c[i]:c[0]:c,g=v[d],m=v[p],y=u?u/.8+1:0,b=-m*y-.5*m;o.offset[i]=[g*y/h,b/h]}}return o}}},47148:function(t,e,n){"use strict";var r=n(71828),i=n(73972),a=n(68645),o=n(42341),s=n(47581),l=n(34098),c=n(67513),u=n(73927),f=n(49508),h=n(11058),d=n(28908),p=n(82410);t.exports=function(t,e,n,g){function v(n,i){return r.coerce(t,e,o,n,i)}var m=!!t.marker&&a.isOpenSymbol(t.marker.symbol),y=l.isBubble(t),b=c(t,e,g,v);if(b){u(t,e,g,v),v("xhoverformat"),v("yhoverformat");var x=b100},e.isDotSymbol=function(t){return"string"===typeof t?r.DOT_RE.test(t):t>200}},20794:function(t,e,n){"use strict";var r=n(73972),i=n(71828),a=n(34603);function o(t,e,n,o){var s=t.xa,l=t.ya,c=t.distance,u=t.dxy,f=t.index,h={pointNumber:f,x:e[f],y:n[f]};h.tx=Array.isArray(o.text)?o.text[f]:o.text,h.htx=Array.isArray(o.hovertext)?o.hovertext[f]:o.hovertext,h.data=Array.isArray(o.customdata)?o.customdata[f]:o.customdata,h.tp=Array.isArray(o.textposition)?o.textposition[f]:o.textposition;var d=o.textfont;d&&(h.ts=i.isArrayOrTypedArray(d.size)?d.size[f]:d.size,h.tc=Array.isArray(d.color)?d.color[f]:d.color,h.tf=Array.isArray(d.family)?d.family[f]:d.family);var p=o.marker;p&&(h.ms=i.isArrayOrTypedArray(p.size)?p.size[f]:p.size,h.mo=i.isArrayOrTypedArray(p.opacity)?p.opacity[f]:p.opacity,h.mx=i.isArrayOrTypedArray(p.symbol)?p.symbol[f]:p.symbol,h.ma=i.isArrayOrTypedArray(p.angle)?p.angle[f]:p.angle,h.mc=i.isArrayOrTypedArray(p.color)?p.color[f]:p.color);var g=p&&p.line;g&&(h.mlc=Array.isArray(g.color)?g.color[f]:g.color,h.mlw=i.isArrayOrTypedArray(g.width)?g.width[f]:g.width);var v=p&&p.gradient;v&&"none"!==v.type&&(h.mgt=Array.isArray(v.type)?v.type[f]:v.type,h.mgc=Array.isArray(v.color)?v.color[f]:v.color);var m=s.c2p(h.x,!0),y=l.c2p(h.y,!0),b=h.mrc||1,x=o.hoverlabel;x&&(h.hbg=Array.isArray(x.bgcolor)?x.bgcolor[f]:x.bgcolor,h.hbc=Array.isArray(x.bordercolor)?x.bordercolor[f]:x.bordercolor,h.hts=i.isArrayOrTypedArray(x.font.size)?x.font.size[f]:x.font.size,h.htc=Array.isArray(x.font.color)?x.font.color[f]:x.font.color,h.htf=Array.isArray(x.font.family)?x.font.family[f]:x.font.family,h.hnl=i.isArrayOrTypedArray(x.namelength)?x.namelength[f]:x.namelength);var w=o.hoverinfo;w&&(h.hi=Array.isArray(w)?w[f]:w);var _=o.hovertemplate;_&&(h.ht=Array.isArray(_)?_[f]:_);var k={};k[t.index]=h;var T=o._origX,M=o._origY,A=i.extendFlat({},t,{color:a(o,h),x0:m-b,x1:m+b,xLabelVal:T?T[f]:h.x,y0:y-b,y1:y+b,yLabelVal:M?M[f]:h.y,cd:k,distance:c,spikeDistance:u,hovertemplate:h.ht});return 
h.htx?A.text=h.htx:h.tx?A.text=h.tx:o.text&&(A.text=o.text),i.fillText(h,o,A),r.getComponentMethod("errorbars","hoverInfo")(h,o,A),A}t.exports={hoverPoints:function(t,e,n,r){var i,a,s,l,c,u,f,h,d,p,g=t.cd,v=g[0].t,m=g[0].trace,y=t.xa,b=t.ya,x=v.x,w=v.y,_=y.c2p(e),k=b.c2p(n),T=t.distance;if(v.tree){var M=y.p2c(_-T),A=y.p2c(_+T),S=b.p2c(k-T),E=b.p2c(k+T);i="x"===r?v.tree.range(Math.min(M,A),Math.min(b._rl[0],b._rl[1]),Math.max(M,A),Math.max(b._rl[0],b._rl[1])):v.tree.range(Math.min(M,A),Math.min(S,E),Math.max(M,A),Math.max(S,E))}else i=v.ids;var C=T;if("x"===r){var P=!!m.xperiodalignment,O=!!m.yperiodalignment;for(u=0;u=Math.min(L,I)&&_<=Math.max(L,I)?0:1/0}if(f=Math.min(D,R)&&k<=Math.max(D,R)?0:1/0}p=Math.sqrt(f*f+h*h),s=i[u]}}}else for(u=i.length-1;u>-1;u--)l=x[a=i[u]],c=w[a],f=y.c2p(l)-_,h=b.c2p(c)-k,(d=Math.sqrt(f*f+h*h))y.glText.length){var k=w-y.glText.length;for(g=0;gn&&(isNaN(e[r])||isNaN(e[r+1]));)r-=2;t.positions=e.slice(n,r+2)}return t})),y.line2d.update(y.lineOptions)),y.error2d){var M=(y.errorXOptions||[]).concat(y.errorYOptions||[]);y.error2d.update(M)}y.scatter2d&&y.scatter2d.update(y.markerOptions),y.fillOrder=s.repeat(null,w),y.fill2d&&(y.fillOptions=y.fillOptions.map((function(t,e){var r=n[e];if(t&&r&&r[0]&&r[0].trace){var i,a,o=r[0],s=o.trace,l=o.t,c=y.lineOptions[e],u=[];s._ownfill&&u.push(e),s._nexttrace&&u.push(e+1),u.length&&(y.fillOrder[e]=u);var f,h,d=[],p=c&&c.positions||l.positions;if("tozeroy"===s.fill){for(f=0;ff&&isNaN(p[h+1]);)h-=2;0!==p[f+1]&&(d=[p[f],0]),d=d.concat(p.slice(f,h+2)),0!==p[h+1]&&(d=d.concat([p[h],0]))}else if("tozerox"===s.fill){for(f=0;ff&&isNaN(p[h]);)h-=2;0!==p[f]&&(d=[0,p[f+1]]),d=d.concat(p.slice(f,h+2)),0!==p[h]&&(d=d.concat([0,p[h+1]]))}else if("toself"===s.fill||"tonext"===s.fill){for(d=[],i=0,t.splitNull=!0,a=0;a-1;for(g=0;g")}function u(t){return t+"\xb0"}}t.exports={hoverPoints:function(t,e,n){var o=t.cd,u=o[0].trace,f=t.xa,h=t.ya,d=t.subplot,p=[],g=l+u.uid+"-circle",v=u.cluster&&u.cluster.enabled;if(v){var m=d.map.queryRenderedFeatures(null,{layers:[g]});p=m.map((function(t){return t.id}))}var y=360*(e>=0?Math.floor((e+180)/360):Math.ceil((e-180)/360)),b=e-y;if(r.getClosest(o,(function(t){var e=t.lonlat;if(e[0]===s)return 1/0;if(v&&-1===p.indexOf(t.i+1))return 1/0;var r=i.modHalf(e[0],360),a=e[1],o=d.project([r,a]),l=o.x-f.c2p([b,a]),c=o.y-h.c2p([r,n]),u=Math.max(3,t.mrc||0);return Math.max(Math.sqrt(l*l+c*c)-u,1-3/u)}),t),!1!==t.index){var x=o[t.index],w=x.lonlat,_=[i.modHalf(w[0],360)+y,w[1]],k=f.c2p(_),T=h.c2p(_),M=x.mrc||1;t.x0=k-M,t.x1=k+M,t.y0=T-M,t.y1=T+M;var A={};A[u.subplot]={_subplot:d};var S=u._module.formatLabels(x,u,A);return t.lonLabel=S.lonLabel,t.latLabel=S.latLabel,t.color=a(u,x),t.extraText=c(u,x,o[0].t.labels),t.hovertemplate=u.hovertemplate,[t]}},getExtraText:c}},20467:function(t,e,n){"use strict";t.exports={attributes:n(99181),supplyDefaults:n(76645),colorbar:n(4898),formatLabels:n(15636),calc:n(84622),plot:n(86951),hoverPoints:n(28178).hoverPoints,eventData:n(53353),selectPoints:n(86387),styleOnSelect:function(t,e){e&&e[0].trace._glTrace.update(e)},moduleType:"trace",name:"scattermapbox",basePlotModule:n(50101),categories:["mapbox","gl","symbols","showLegend","scatter-like"],meta:{}}},86951:function(t,e,n){"use strict";var r=n(71828),i=n(15790),a=n(77734).traceLayerPrefix,o={cluster:["cluster","clusterCount","circle"],nonCluster:["fill","line","circle","symbol"]};function 
s(t,e,n,r){this.type="scattermapbox",this.subplot=t,this.uid=e,this.clusterEnabled=n,this.isHidden=r,this.sourceIds={fill:"source-"+e+"-fill",line:"source-"+e+"-line",circle:"source-"+e+"-circle",symbol:"source-"+e+"-symbol",cluster:"source-"+e+"-circle",clusterCount:"source-"+e+"-circle"},this.layerIds={fill:a+e+"-fill",line:a+e+"-line",circle:a+e+"-circle",symbol:a+e+"-symbol",cluster:a+e+"-cluster",clusterCount:a+e+"-cluster-count"},this.below=null}var l=s.prototype;l.addSource=function(t,e,n){var i={type:"geojson",data:e.geojson};n&&n.enabled&&r.extendFlat(i,{cluster:!0,clusterMaxZoom:n.maxzoom});var a=this.subplot.map.getSource(this.sourceIds[t]);a?a.setData(e.geojson):this.subplot.map.addSource(this.sourceIds[t],i)},l.setSourceData=function(t,e){this.subplot.map.getSource(this.sourceIds[t]).setData(e.geojson)},l.addLayer=function(t,e,n){var r={type:e.type,id:this.layerIds[t],source:this.sourceIds[t],layout:e.layout,paint:e.paint};e.filter&&(r.filter=e.filter);for(var i,a=this.layerIds[t],o=this.subplot.getMapLayers(),s=0;s=0;n--){var i=e[n];r.removeLayer(u.layerIds[i])}t||r.removeSource(u.sourceIds.circle)}(t):function(t){for(var e=o.nonCluster,n=e.length-1;n>=0;n--){var i=e[n];r.removeLayer(u.layerIds[i]),t||r.removeSource(u.sourceIds[i])}}(t)}function h(t){l?function(t){t||u.addSource("circle",a.circle,e.cluster);for(var n=o.cluster,r=0;r=0;n--){var r=e[n];t.removeLayer(this.layerIds[r]),t.removeSource(this.sourceIds[r])}},t.exports=function(t,e){var n,r,a,l=e[0].trace,c=l.cluster&&l.cluster.enabled,u=!0!==l.visible,f=new s(t,l.uid,c,u),h=i(t.gd,e),d=f.below=t.belowLookup["trace-"+l.uid];if(c)for(f.addSource("circle",h.circle,l.cluster),n=0;n")}}t.exports={hoverPoints:function(t,e,n,a){var o=r(t,e,n,a);if(o&&!1!==o[0].index){var s=o[0];if(void 0===s.index)return o;var l=t.subplot,c=s.cd[s.index],u=s.trace;if(l.isPtInside(c))return s.xLabelVal=void 0,s.yLabelVal=void 0,i(c,u,l,s),s.hovertemplate=u.hovertemplate,o}},makeHoverPointText:i}},91271:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"scatterpolar",basePlotModule:n(23580),categories:["polar","symbols","showLegend","scatter-like"],attributes:n(81245),supplyDefaults:n(22184).supplyDefaults,colorbar:n(4898),formatLabels:n(98608),calc:n(26442),plot:n(45162),style:n(16296).style,styleOnSelect:n(16296).styleOnSelect,hoverPoints:n(59150).hoverPoints,selectPoints:n(98002),meta:{}}},45162:function(t,e,n){"use strict";var r=n(32663),i=n(50606).BADNUM;t.exports=function(t,e,n){for(var 
a=e.layers.frontplot.select("g.scatterlayer"),o=e.xaxis,s=e.yaxis,l={xaxis:o,yaxis:s,plot:e.framework,layerClipId:e._hasClipOnAxisFalse?e.clipIds.forTraces:null},c=e.radialAxis,u=e.angularAxis,f=0;f=c&&(y.marker.cluster=p.tree),y.marker&&(y.markerSel.positions=y.markerUnsel.positions=y.marker.positions=w),y.line&&w.length>1&&l.extendFlat(y.line,s.linePositions(t,d,w)),y.text&&(l.extendFlat(y.text,{positions:w},s.textPosition(t,d,y.text,y.marker)),l.extendFlat(y.textSel,{positions:w},s.textPosition(t,d,y.text,y.markerSel)),l.extendFlat(y.textUnsel,{positions:w},s.textPosition(t,d,y.text,y.markerUnsel))),y.fill&&!h.fill2d&&(h.fill2d=!0),y.marker&&!h.scatter2d&&(h.scatter2d=!0),y.line&&!h.line2d&&(h.line2d=!0),y.text&&!h.glText&&(h.glText=!0),h.lineOptions.push(y.line),h.fillOptions.push(y.fill),h.markerOptions.push(y.marker),h.markerSelectedOptions.push(y.markerSel),h.markerUnselectedOptions.push(y.markerUnsel),h.textOptions.push(y.text),h.textSelectedOptions.push(y.textSel),h.textUnselectedOptions.push(y.textUnsel),h.selectBatch.push([]),h.unselectBatch.push([]),p.x=_,p.y=k,p.rawx=_,p.rawy=k,p.r=v,p.theta=m,p.positions=w,p._scene=h,p.index=h.count,h.count++}})),a(t,e,n)}},t.exports.reglPrecompiled={}},48300:function(t,e,n){"use strict";var r=n(5386).fF,i=n(5386).si,a=n(1426).extendFlat,o=n(82196),s=n(9012),l=o.line;t.exports={mode:o.mode,real:{valType:"data_array",editType:"calc+clearAxisTypes"},imag:{valType:"data_array",editType:"calc+clearAxisTypes"},text:o.text,texttemplate:i({editType:"plot"},{keys:["real","imag","text"]}),hovertext:o.hovertext,line:{color:l.color,width:l.width,dash:l.dash,backoff:l.backoff,shape:a({},l.shape,{values:["linear","spline"]}),smoothing:l.smoothing,editType:"calc"},connectgaps:o.connectgaps,marker:o.marker,cliponaxis:a({},o.cliponaxis,{dflt:!1}),textposition:o.textposition,textfont:o.textfont,fill:a({},o.fill,{values:["none","toself","tonext"],dflt:"none"}),fillcolor:o.fillcolor,hoverinfo:a({},s.hoverinfo,{flags:["real","imag","text","name"]}),hoveron:o.hoveron,hovertemplate:r(),selected:o.selected,unselected:o.unselected}},30621:function(t,e,n){"use strict";var r=n(92770),i=n(50606).BADNUM,a=n(36922),o=n(75225),s=n(66279),l=n(47761).calcMarkerSize;t.exports=function(t,e){for(var n=t._fullLayout,c=e.subplot,u=n[c].realaxis,f=n[c].imaginaryaxis,h=u.makeCalcdata(e,"real"),d=f.makeCalcdata(e,"imag"),p=e._length,g=new Array(p),v=0;v")}}t.exports={hoverPoints:function(t,e,n,a){var o=r(t,e,n,a);if(o&&!1!==o[0].index){var s=o[0];if(void 0===s.index)return o;var l=t.subplot,c=s.cd[s.index],u=s.trace;if(l.isPtInside(c))return s.xLabelVal=void 0,s.yLabelVal=void 0,i(c,u,l,s),s.hovertemplate=u.hovertemplate,o}},makeHoverPointText:i}},85956:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"scattersmith",basePlotModule:n(7504),categories:["smith","symbols","showLegend","scatter-like"],attributes:n(48300),supplyDefaults:n(65269),colorbar:n(4898),formatLabels:n(62047),calc:n(30621),plot:n(12480),style:n(16296).style,styleOnSelect:n(16296).styleOnSelect,hoverPoints:n(11350).hoverPoints,selectPoints:n(98002),meta:{}}},12480:function(t,e,n){"use strict";var r=n(32663),i=n(50606).BADNUM,a=n(23893).smith;t.exports=function(t,e,n){for(var o=e.layers.frontplot.select("g.scatterlayer"),s=e.xaxis,l=e.yaxis,c={xaxis:s,yaxis:l,plot:e.framework,layerClipId:e._hasClipOnAxisFalse?e.clipIds.forTraces:null},u=0;u"),o.hovertemplate=h.hovertemplate,a}function b(t,e){m.push(t._hovertitle+": "+e)}}},52979:function(t,e,n){"use 
strict";t.exports={attributes:n(50413),supplyDefaults:n(46008),colorbar:n(4898),formatLabels:n(93645),calc:n(54337),plot:n(7507),style:n(16296).style,styleOnSelect:n(16296).styleOnSelect,hoverPoints:n(47250),selectPoints:n(98002),eventData:n(4524),moduleType:"trace",name:"scatterternary",basePlotModule:n(61639),categories:["ternary","symbols","showLegend","scatter-like"],meta:{}}},7507:function(t,e,n){"use strict";var r=n(32663);t.exports=function(t,e,n){var i=e.plotContainer;i.select(".scatterlayer").selectAll("*").remove();for(var a=e.xaxis,o=e.yaxis,s={xaxis:a,yaxis:o,plot:i,layerClipId:e._hasClipOnAxisFalse?e.clipIdRelative:null},l=e.layers.frontplot.select("g.scatterlayer"),c=0;ch?x.sizeAvg||Math.max(x.size,3):a(e,b),d=0;da&&l||i-1,O=!0;if(o(b)||d.selectedpoints||P){var L=d._length;if(d.selectedpoints){g.selectBatch=d.selectedpoints;var I=d.selectedpoints,D={};for(l=0;l1&&(u=g[y-1],h=v[y-1],p=m[y-1]),e=0;eu?"-":"+")+"x")).replace("y",(f>h?"-":"+")+"y")).replace("z",(d>p?"-":"+")+"z");var C=function(){y=0,A=[],S=[],E=[]};(!y||y2?t.slice(1,e-1):2===e?[(t[0]+t[1])/2]:t}function d(t){var e=t.length;return 1===e?[.5,.5]:[t[1]-t[0],t[e-1]-t[e-2]]}function p(t,e){var n=t.fullSceneLayout,i=t.dataScale,u=e._len,f={};function p(t,e){var r=n[e],o=i[c[e]];return a.simpleMap(t,(function(t){return r.d2l(t)*o}))}if(f.vectors=l(p(e._u,"xaxis"),p(e._v,"yaxis"),p(e._w,"zaxis"),u),!u)return{positions:[],cells:[]};var g=p(e._Xs,"xaxis"),v=p(e._Ys,"yaxis"),m=p(e._Zs,"zaxis");if(f.meshgrid=[g,v,m],f.gridFill=e._gridFill,e._slen)f.startingPositions=l(p(e._startsX,"xaxis"),p(e._startsY,"yaxis"),p(e._startsZ,"zaxis"));else{for(var y=v[0],b=h(g),x=h(m),w=new Array(b.length*x.length),_=0,k=0;k=0};y?(n=Math.min(m.length,x.length),l=function(t){return A(m[t])&&S(t)},f=function(t){return String(m[t])}):(n=Math.min(b.length,x.length),l=function(t){return A(b[t])&&S(t)},f=function(t){return String(b[t])}),_&&(n=Math.min(n,w.length));for(var E=0;E1){for(var L=a.randstr(),I=0;I=0){e.i=s.i;var u=n.marker;u.pattern&&u.colors&&u.pattern.shape||(u.color=c,e.color=c),r.pointStyle(t,n,a,e)}else i.fill(t,c)}},83523:function(t,e,n){"use strict";var r=n(39898),i=n(73972),a=n(23469).appendArrayPointValue,o=n(30211),s=n(71828),l=n(11086),c=n(2791),u=n(53581).formatPieValue;function f(t,e,n){for(var r=t.data.data,i={curveNumber:e.index,pointNumber:r.i,data:e._input,fullData:e},o=0;o"),name:M||D("name")?y.name:void 0,color:T("hoverlabel.bgcolor")||b.color,borderColor:T("hoverlabel.bordercolor"),fontFamily:T("hoverlabel.font.family"),fontSize:T("hoverlabel.font.size"),fontColor:T("hoverlabel.font.color"),nameLength:T("hoverlabel.namelength"),textAlign:T("hoverlabel.align"),hovertemplate:M,hovertemplateLabels:O,eventData:l};v&&(N.x0=E-i.rInscribed*i.rpx1,N.x1=E+i.rInscribed*i.rpx1,N.idealAlign=i.pxmid[0]<0?"left":"right"),m&&(N.x=E,N.idealAlign=E<0?"left":"right");var j=[];o.loneHover(N,{container:a._hoverlayer.node(),outerContainer:a._paper.node(),gd:n,inOut_bbox:j}),l[0].bbox=j[0],p._hasHoverLabel=!0}if(m){var F=t.select("path.surface");h.styleOne(F,i,y,n,{hovered:!0})}p._hasHoverEvent=!0,n.emit("plotly_hover",{points:l||[f(i,y,h.eventDataKeys)],event:r.event})}})),t.on("mouseout",(function(e){var i=n._fullLayout,a=n._fullData[p.index],s=r.select(this).datum();if(p._hasHoverEvent&&(e.originalEvent=r.event,n.emit("plotly_unhover",{points:[f(s,a,h.eventDataKeys)],event:r.event}),p._hasHoverEvent=!1),p._hasHoverLabel&&(o.loneUnhover(i._hoverlayer.node()),p._hasHoverLabel=!1),m){var 
l=t.select("path.surface");h.styleOne(l,s,a,n,{hovered:!1})}})),t.on("click",(function(t){var e=n._fullLayout,a=n._fullData[p.index],s=v&&(c.isHierarchyRoot(t)||c.isLeaf(t)),u=c.getPtId(t),d=c.isEntry(t)?c.findEntryWithChild(g,u):c.findEntryWithLevel(g,u),m=c.getPtId(d),y={points:[f(t,a,h.eventDataKeys)],event:r.event};s||(y.nextLevel=m);var b=l.triggerHandler(n,"plotly_"+p.type+"click",y);if(!1!==b&&e.hovermode&&(n._hoverdata=[f(t,a,h.eventDataKeys)],o.click(n,r.event)),!s&&!1!==b&&!n._dragging&&!n._transitioning){i.call("_storeDirectGUIEdit",a,e._tracePreGUI[a.uid],{level:a.level});var x={data:[{level:m}],traces:[p.index]},w={frame:{redraw:!1,duration:h.transitionTime},transition:{duration:h.transitionTime,easing:h.transitionEasing},mode:"immediate",fromcurrent:!0};o.loneUnhover(e._hoverlayer.node()),i.call("animate",n,x,w)}}))}},2791:function(t,e,n){"use strict";var r=n(71828),i=n(7901),a=n(6964),o=n(53581);function s(t){return t.data.data.pid}e.findEntryWithLevel=function(t,n){var r;return n&&t.eachAfter((function(t){if(e.getPtId(t)===n)return r=t.copy()})),r||t},e.findEntryWithChild=function(t,n){var r;return t.eachAfter((function(t){for(var i=t.children||[],a=0;a0)},e.getMaxDepth=function(t){return t.maxdepth>=0?t.maxdepth:1/0},e.isHeader=function(t,n){return!(e.isLeaf(t)||t.depth===n._maxDepth-1)},e.getParent=function(t,n){return e.findEntryWithLevel(t,s(n))},e.listPath=function(t,n){var r=t.parent;if(!r)return[];var i=n?[r.data[n]]:[r];return e.listPath(r,n).concat(i)},e.getPath=function(t){return e.listPath(t,"label").join("/")+"/"},e.formatValue=o.formatPieValue,e.formatPercent=function(t,e){var n=r.formatPercent(t,0);return"0%"===n&&(n=o.formatPiePercent(t,e)),n}},87619:function(t,e,n){"use strict";t.exports={moduleType:"trace",name:"sunburst",basePlotModule:n(66888),categories:[],animatable:!0,attributes:n(57564),layoutAttributes:n(2654),supplyDefaults:n(17094),supplyLayoutDefaults:n(57034),calc:n(52147).calc,crossTraceCalc:n(52147).crossTraceCalc,plot:n(24714).plot,style:n(29969).style,colorbar:n(4898),meta:{}}},2654:function(t){"use strict";t.exports={sunburstcolorway:{valType:"colorlist",editType:"calc"},extendsunburstcolors:{valType:"boolean",dflt:!0,editType:"calc"}}},57034:function(t,e,n){"use strict";var r=n(71828),i=n(2654);t.exports=function(t,e){function n(n,a){return r.coerce(t,e,i,n,a)}n("sunburstcolorway",e.colorway),n("extendsunburstcolors")}},24714:function(t,e,n){"use strict";var r=n(39898),i=n(674),a=n(81684).sX,o=n(91424),s=n(71828),l=n(63893),c=n(72597),u=c.recordMinTextSize,f=c.clearMinTextSize,h=n(14575),d=n(53581).getRotationAngle,p=h.computeTransform,g=h.transformInsideText,v=n(29969).styleOne,m=n(16688).resizeText,y=n(83523),b=n(7055),x=n(2791);function w(t,n,c,f){var h=t._context.staticPlot,m=t._fullLayout,w=!m.uniformtext.mode&&x.hasTransition(f),k=r.select(c).selectAll("g.slice"),T=n[0],M=T.trace,A=T.hierarchy,S=x.findEntryWithLevel(A,M.level),E=x.getMaxDepth(M),C=m._size,P=M.domain,O=C.w*(P.x[1]-P.x[0]),L=C.h*(P.y[1]-P.y[0]),I=.5*Math.min(O,L),D=T.cx=C.l+C.w*(P.x[1]+P.x[0])/2,R=T.cy=C.t+C.h*(1-P.y[0])-L/2;if(!S)return k.remove();var z=null,N={};w&&k.each((function(t){N[x.getPtId(t)]={rpx0:t.rpx0,rpx1:t.rpx1,x0:t.x0,x1:t.x1,transform:t.transform},!z&&x.isEntry(t)&&(z=t)}));var j=function(t){return i.partition().size([2*Math.PI,t.height+1])(t)}(S).descendants(),F=S.height+1,B=0,U=E;T.hasMultipleRoots&&x.isHierarchyRoot(S)&&(j=j.slice(1),F-=1,B=1,U+=1),j=j.filter((function(t){return t.y1<=U}));var 
H=d(M.rotation);H&&j.forEach((function(t){t.x0+=H,t.x1+=H}));var V=Math.min(F,E),q=function(t){return(t-B)/V*I},G=function(t,e){return[t*Math.cos(e),-t*Math.sin(e)]},W=function(t){return s.pathAnnulus(t.rpx0,t.rpx1,t.x0,t.x1,D,R)},Y=function(t){return D+_(t)[0]*(t.transform.rCenter||0)+(t.transform.x||0)},$=function(t){return R+_(t)[1]*(t.transform.rCenter||0)+(t.transform.y||0)};(k=k.data(j,x.getPtId)).enter().append("g").classed("slice",!0),w?k.exit().transition().each((function(){var t=r.select(this);t.select("path.surface").transition().attrTween("d",(function(t){var e=function(t){var e,n=x.getPtId(t),r=N[n],i=N[x.getPtId(S)];if(i){var o=(t.x1>i.x1?2*Math.PI:0)+H;e=t.rpx1X?2*Math.PI:0)+H;e={x0:i,x1:i}}else e={rpx0:I,rpx1:I},s.extendFlat(e,J(t));else e={rpx0:0,rpx1:0};else e={x0:H,x1:H};return a(e,r)}(t);return function(t){return W(e(t))}})):f.attr("d",W),c.call(y,S,t,n,{eventDataKeys:b.eventDataKeys,transitionTime:b.CLICK_TRANSITION_TIME,transitionEasing:b.CLICK_TRANSITION_EASING}).call(x.setSliceCursor,t,{hideOnRoot:!0,hideOnLeaves:!0,isTransitioning:t._transitioning}),f.call(v,i,M,t);var d=s.ensureSingle(c,"g","slicetext"),_=s.ensureSingle(d,"text","",(function(t){t.attr("data-notex",1)})),k=s.ensureUniformFontSize(t,x.determineTextFont(M,i,m.font));_.text(e.formatSliceLabel(i,S,M,n,m)).classed("slicetext",!0).attr("text-anchor","middle").call(o.font,k).call(l.convertToTspans,t);var A=o.bBox(_.node());i.transform=g(A,i,T),i.transform.targetX=Y(i),i.transform.targetY=$(i);var E=function(t,e){var n=t.transform;return p(n,e),n.fontSize=k.size,u(M.type,n,m),s.getTextTransform(n)};w?_.transition().attrTween("transform",(function(t){var e=function(t){var e,n=N[x.getPtId(t)],r=t.transform;if(n)e=n;else if(e={rpx1:t.rpx1,transform:{textPosAngle:r.textPosAngle,scale:0,rotate:r.rotate,rCenter:r.rCenter,x:r.x,y:r.y}},z)if(t.parent)if(X){var i=t.x1>X?2*Math.PI:0;e.x0=e.x1=i}else s.extendFlat(e,J(t));else e.x0=e.x1=H;else e.x0=e.x1=H;var o=a(e.transform.textPosAngle,t.transform.textPosAngle),l=a(e.rpx1,t.rpx1),c=a(e.x0,t.x0),f=a(e.x1,t.x1),h=a(e.transform.scale,r.scale),d=a(e.transform.rotate,r.rotate),p=0===r.rCenter?3:0===e.transform.rCenter?1/3:1,g=a(e.transform.rCenter,r.rCenter),v=function(t){return g(Math.pow(t,p))};return function(t){var e=l(t),n=c(t),i=f(t),a=v(t),s={pxmid:G(e,(n+i)/2),rpx1:e,transform:{textPosAngle:o(t),rCenter:a,x:r.x,y:r.y}};return u(M.type,r,m),{transform:{targetX:Y(s),targetY:$(s),scale:h(t),rotate:d(t),rCenter:a}}}}(t);return function(t){return E(e(t),A)}})):_.attr("transform",E(i,A))}))}function _(t){return e=t.rpx1,n=t.transform.textPosAngle,[e*Math.sin(n),-e*Math.cos(n)];var e,n}e.plot=function(t,e,n,i){var a,o,s=t._fullLayout,l=s._sunburstlayer,c=!n,u=!s.uniformtext.mode&&x.hasTransition(n);f("sunburst",s),(a=l.selectAll("g.trace.sunburst").data(e,(function(t){return t[0].trace.uid}))).enter().append("g").classed("trace",!0).classed("sunburst",!0).attr("stroke-linejoin","round"),a.order(),u?(i&&(o=i()),r.transition().duration(n.duration).ease(n.easing).each("end",(function(){o&&o()})).each("interrupt",(function(){o&&o()})).each((function(){l.selectAll("g.trace").each((function(e){w(t,e,this,n)}))}))):(a.each((function(e){w(t,e,this,n)})),s.uniformtext.mode&&m(t,s._sunburstlayer.selectAll(".trace"),"sunburst")),c&&a.exit().remove()},e.formatSliceLabel=function(t,e,n,r,i){var a=n.texttemplate,o=n.textinfo;if(!a&&(!o||"none"===o))return"";var l=i.separators,c=r[0],u=t.data.data,f=c.hierarchy,h=x.isHierarchyRoot(t),d=x.getParent(f,t),p=x.getValue(t);if(!a){var 
g,v=o.split("+"),m=function(t){return-1!==v.indexOf(t)},y=[];if(m("label")&&u.label&&y.push(u.label),u.hasOwnProperty("v")&&m("value")&&y.push(x.formatValue(u.v,l)),!h){m("current path")&&y.push(x.getPath(t.data));var b=0;m("percent parent")&&b++,m("percent entry")&&b++,m("percent root")&&b++;var w=b>1;if(b){var _,k=function(t){g=x.formatPercent(_,l),w&&(g+=" of "+t),y.push(g)};m("percent parent")&&!h&&(_=p/x.getValue(d),k("parent")),m("percent entry")&&(_=p/x.getValue(e),k("entry")),m("percent root")&&(_=p/x.getValue(f),k("root"))}}return m("text")&&(g=s.castOption(n,u.i,"text"),s.isValidTextValue(g)&&y.push(g)),y.join("
")}var T=s.castOption(n,u.i,"texttemplate");if(!T)return"";var M={};u.label&&(M.label=u.label),u.hasOwnProperty("v")&&(M.value=u.v,M.valueLabel=x.formatValue(u.v,l)),M.currentPath=x.getPath(t.data),h||(M.percentParent=p/x.getValue(d),M.percentParentLabel=x.formatPercent(M.percentParent,l),M.parent=x.getPtLabel(d)),M.percentEntry=p/x.getValue(e),M.percentEntryLabel=x.formatPercent(M.percentEntry,l),M.entry=x.getPtLabel(e),M.percentRoot=p/x.getValue(f),M.percentRootLabel=x.formatPercent(M.percentRoot,l),M.root=x.getPtLabel(f),u.hasOwnProperty("color")&&(M.color=u.color);var A=s.castOption(n,u.i,"text");return(s.isValidTextValue(A)||""===A)&&(M.text=A),M.customdata=s.castOption(n,u.i,"customdata"),s.texttemplateString(T,M,i._d3locale,M,n._meta||{})}},29969:function(t,e,n){"use strict";var r=n(39898),i=n(7901),a=n(71828),o=n(72597).resizeText,s=n(43467);function l(t,e,n,r){var o=e.data.data,l=!e.children,c=o.i,u=a.castOption(n,c,"marker.line.color")||i.defaultLine,f=a.castOption(n,c,"marker.line.width")||0;t.call(s,e,n,r).style("stroke-width",f).call(i.stroke,u).style("opacity",l?n.leaf.opacity:null)}t.exports={style:function(t){var e=t._fullLayout._sunburstlayer.selectAll(".trace");o(t,e,"sunburst"),e.each((function(e){var n=r.select(this),i=e[0].trace;n.style("opacity",i.opacity),n.selectAll("path.surface").each((function(e){r.select(this).call(l,e,i,t)}))}))},styleOne:l}},54532:function(t,e,n){"use strict";var r=n(7901),i=n(50693),a=n(12663).axisHoverFormat,o=n(5386).fF,s=n(9012),l=n(1426).extendFlat,c=n(30962).overrideAll;function u(t){return{show:{valType:"boolean",dflt:!1},start:{valType:"number",dflt:null,editType:"plot"},end:{valType:"number",dflt:null,editType:"plot"},size:{valType:"number",dflt:null,min:0,editType:"plot"},project:{x:{valType:"boolean",dflt:!1},y:{valType:"boolean",dflt:!1},z:{valType:"boolean",dflt:!1}},color:{valType:"color",dflt:r.defaultLine},usecolormap:{valType:"boolean",dflt:!1},width:{valType:"number",min:1,max:16,dflt:2},highlight:{valType:"boolean",dflt:!0},highlightcolor:{valType:"color",dflt:r.defaultLine},highlightwidth:{valType:"number",min:1,max:16,dflt:2}}}var f=t.exports=c(l({z:{valType:"data_array"},x:{valType:"data_array"},y:{valType:"data_array"},text:{valType:"string",dflt:"",arrayOk:!0},hovertext:{valType:"string",dflt:"",arrayOk:!0},hovertemplate:o(),xhoverformat:a("x"),yhoverformat:a("y"),zhoverformat:a("z"),connectgaps:{valType:"boolean",dflt:!1,editType:"calc"},surfacecolor:{valType:"data_array"}},i("",{colorAttr:"z or surfacecolor",showScaleDflt:!0,autoColorDflt:!1,editTypeOverride:"calc"}),{contours:{x:u(),y:u(),z:u()},hidesurface:{valType:"boolean",dflt:!1},lightposition:{x:{valType:"number",min:-1e5,max:1e5,dflt:10},y:{valType:"number",min:-1e5,max:1e5,dflt:1e4},z:{valType:"number",min:-1e5,max:1e5,dflt:0}},lighting:{ambient:{valType:"number",min:0,max:1,dflt:.8},diffuse:{valType:"number",min:0,max:1,dflt:.8},specular:{valType:"number",min:0,max:2,dflt:.05},roughness:{valType:"number",min:0,max:1,dflt:.5},fresnel:{valType:"number",min:0,max:5,dflt:.2}},opacity:{valType:"number",min:0,max:1,dflt:1},opacityscale:{valType:"any",editType:"calc"},_deprecated:{zauto:l({},i.zauto,{}),zmin:l({},i.zmin,{}),zmax:l({},i.zmax,{})},hoverinfo:l({},s.hoverinfo),showlegend:l({},s.showlegend,{dflt:!1})}),"calc","nested");f.x.editType=f.y.editType=f.z.editType="calc+clearAxisTypes",f.transforms=void 0},18396:function(t,e,n){"use strict";var 
r=n(78803);t.exports=function(t,e){e.surfacecolor?r(t,e,{vals:e.surfacecolor,containerStr:"",cLetter:"c"}):r(t,e,{vals:e.z,containerStr:"",cLetter:"c"})}},43768:function(t,e,n){"use strict";var r=n(9330).gl_surface3d,i=n(9330).ndarray,a=n(9330).ndarray_linear_interpolate.d2,o=n(824),s=n(43907),l=n(71828).isArrayOrTypedArray,c=n(81697).parseColorScale,u=n(78614),f=n(21081).extractOpts;function h(t,e,n){this.scene=t,this.uid=n,this.surface=e,this.data=null,this.showContour=[!1,!1,!1],this.contourStart=[null,null,null],this.contourEnd=[null,null,null],this.contourSize=[0,0,0],this.minValues=[1/0,1/0,1/0],this.maxValues=[-1/0,-1/0,-1/0],this.dataScaleX=1,this.dataScaleY=1,this.refineData=!0,this.objectOffset=[0,0,0]}var d=h.prototype;d.getXat=function(t,e,n,r){var i=l(this.data.x)?l(this.data.x[0])?this.data.x[e][t]:this.data.x[t]:t;return void 0===n?i:r.d2l(i,0,n)},d.getYat=function(t,e,n,r){var i=l(this.data.y)?l(this.data.y[0])?this.data.y[e][t]:this.data.y[e]:e;return void 0===n?i:r.d2l(i,0,n)},d.getZat=function(t,e,n,r){var i=this.data.z[e][t];return null===i&&this.data.connectgaps&&this.data._interpolatedZ&&(i=this.data._interpolatedZ[e][t]),void 0===n?i:r.d2l(i,0,n)},d.handlePick=function(t){if(t.object===this.surface){var e=(t.data.index[0]-1)/this.dataScaleX-1,n=(t.data.index[1]-1)/this.dataScaleY-1,r=Math.max(Math.min(Math.round(e),this.data.z[0].length-1),0),i=Math.max(Math.min(Math.round(n),this.data._ylength-1),0);t.index=[r,i],t.traceCoordinate=[this.getXat(r,i),this.getYat(r,i),this.getZat(r,i)],t.dataCoordinate=[this.getXat(r,i,this.data.xcalendar,this.scene.fullSceneLayout.xaxis),this.getYat(r,i,this.data.ycalendar,this.scene.fullSceneLayout.yaxis),this.getZat(r,i,this.data.zcalendar,this.scene.fullSceneLayout.zaxis)];for(var a=0;a<3;a++){var o=t.dataCoordinate[a];null!==o&&void 0!==o&&(t.dataCoordinate[a]*=this.scene.dataScale[a])}var s=this.data.hovertext||this.data.text;return Array.isArray(s)&&s[i]&&void 0!==s[i][r]?t.textLabel=s[i][r]:t.textLabel=s||"",t.data.dataCoordinate=t.dataCoordinate.slice(),this.surface.highlight(t.data),this.scene.glplot.spikes.position=t.dataCoordinate,!0}};var 
p=[2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,2927,2939,2953,2957,2963,2969,2971,2999];function g(t,e){if(t0){n=p[r];break}return n}function y(t,e){if(!(t<1||e<1)){for(var n=v(t),r=v(e),i=1,a=0;aw;)n--,n/=m(n),++n1?r:1},d.refineCoords=function(t){for(var e=this.dataScaleX,n=this.dataScaleY,r=t[0].shape[0],a=t[0].shape[1],o=0|Math.floor(t[0].shape[0]*e+1),s=0|Math.floor(t[0].shape[1]*n+1),l=1+r+1,c=1+a+1,u=i(new Float32Array(l*c),[l,c]),f=[1/e,0,0,0,1/n,0,0,0,1],h=0;h0&&null!==this.contourStart[t]&&null!==this.contourEnd[t]&&this.contourEnd[t]>this.contourStart[t]))for(i[t]=!0,e=this.contourStart[t];ea&&(this.minValues[e]=a),this.maxValues[e]",maxDimensionCount:60,overdrag:45,releaseTransitionDuration:120,releaseTransitionEase:"cubic-out",scrollbarCaptureWidth:18,scrollbarHideDelay:1e3,scrollbarHideDuration:1e3,scrollbarOffset:5,scrollbarWidth:8,transitionDuration:100,transitionEase:"cubic-out",uplift:5,wrapSpacer:" ",wrapSplitCharacter:" ",cn:{table:"table",tableControlView:"table-control-view",scrollBackground:"scroll-background",yColumn:"y-column",columnBlock:"column-block",scrollAreaClip:"scroll-area-clip",scrollAreaClipRect:"scroll-area-clip-rect",columnBoundary:"column-boundary",columnBoundaryClippath:"column-boundary-clippath",columnBoundaryRect:"column-boundary-rect",columnCells:"column-cells",columnCell:"column-cell",cellRect:"cell-rect",cellText:"cell-text",cellTextHolder:"cell-text-holder",scrollbarKit:"scrollbar-kit",scrollbar:"scrollbar",scrollbarSlider:"scrollbar-slider",scrollbarGlyph:"scrollbar-glyph",scrollbarCaptureZone:"scrollbar-capture-zone"}}},51018:function(t,e,n){"use strict";var r=n(49850),i=n(1426).extendFlat,a=n(92770);function 
o(t){if(Array.isArray(t)){for(var e=0,n=0;n=e||c===t.length-1)&&(r[i]=o,o.key=l++,o.firstRowIndex=s,o.lastRowIndex=c,o={firstRowIndex:null,lastRowIndex:null,rows:[]},i+=a,s=c+1,a=0);return r}t.exports=function(t,e){var n=l(e.cells.values),d=function(t){return t.slice(e.header.values.length,t.length)},p=l(e.header.values);p.length&&!p[0].length&&(p[0]=[""],p=l(p));var g=p.concat(d(n).map((function(){return c((p[0]||[""]).length)}))),v=e.domain,m=Math.floor(t._fullLayout._size.w*(v.x[1]-v.x[0])),y=Math.floor(t._fullLayout._size.h*(v.y[1]-v.y[0])),b=e.header.values.length?g[0].map((function(){return e.header.height})):[r.emptyHeaderHeight],x=n.length?n[0].map((function(){return e.cells.height})):[],w=b.reduce(s,0),_=h(x,y-w+r.uplift),k=f(h(b,w),[]),T=f(_,k),M={},A=e._fullInput.columnorder.concat(d(n.map((function(t,e){return e})))),S=g.map((function(t,n){var r=Array.isArray(e.columnwidth)?e.columnwidth[Math.min(n,e.columnwidth.length-1)]:e.columnwidth;return a(r)?Number(r):1})),E=S.reduce(s,0);S=S.map((function(t){return t/E*m}));var C=Math.max(o(e.header.line.width),o(e.cells.line.width)),P={key:e.uid+t._context.staticPlot,translateX:v.x[0]*t._fullLayout._size.w,translateY:t._fullLayout._size.h*(1-v.y[1]),size:t._fullLayout._size,width:m,maxLineWidth:C,height:y,columnOrder:A,groupHeight:y,rowBlocks:T,headerRowBlocks:k,scrollY:0,cells:i({},e.cells,{values:n}),headerCells:i({},e.header,{values:g}),gdColumns:g.map((function(t){return t[0]})),gdColumnsOriginalOrder:g.map((function(t){return t[0]})),prevPages:[0,0],scrollbarState:{scrollbarScrollInProgress:!1},columns:g.map((function(t,e){var n=M[t];return M[t]=(n||0)+1,{key:t+"__"+M[t],label:t,specIndex:e,xIndex:A[e],xScale:u,x:void 0,calcdata:void 0,columnWidth:S[e]}}))};return P.columns.forEach((function(t){t.calcdata=P,t.x=u(t)})),P}},56269:function(t,e,n){"use strict";var r=n(1426).extendFlat;e.splitToPanels=function(t){var e=[0,0],n=r({},t,{key:"header",type:"header",page:0,prevPages:e,currentRepaint:[null,null],dragHandle:!0,values:t.calcdata.headerCells.values[t.specIndex],rowBlocks:t.calcdata.headerRowBlocks,calcdata:r({},t.calcdata,{cells:t.calcdata.headerCells})});return[r({},t,{key:"cells1",type:"cells",page:0,prevPages:e,currentRepaint:[null,null],dragHandle:!1,values:t.calcdata.cells.values[t.specIndex],rowBlocks:t.calcdata.rowBlocks}),r({},t,{key:"cells2",type:"cells",page:1,prevPages:e,currentRepaint:[null,null],dragHandle:!1,values:t.calcdata.cells.values[t.specIndex],rowBlocks:t.calcdata.rowBlocks}),n]},e.splitToCells=function(t){var e=function(t){var e=t.rowBlocks[t.page],n=e?e.rows[0].rowIndex:0,r=e?n+e.rows.length:0;return[n,r]}(t);return(t.values||[]).slice(e[0],e[1]).map((function(n,r){return{keyWithinBlock:r+("string"===typeof n&&n.match(/[<$&> ]/)?"_keybuster_"+Math.random():""),key:e[0]+r,column:t,calcdata:t.calcdata,page:t.page,rowBlocks:t.rowBlocks,value:n}}))}},39754:function(t,e,n){"use strict";var r=n(71828),i=n(44464),a=n(27670).c;t.exports=function(t,e,n,o){function s(n,a){return r.coerce(t,e,i,n,a)}a(e,o,s),s("columnwidth"),s("header.values"),s("header.format"),s("header.align"),s("header.prefix"),s("header.suffix"),s("header.height"),s("header.line.width"),s("header.line.color"),s("header.fill.color"),r.coerceFont(s,"header.font",r.extendFlat({},o.font)),function(t,e){for(var n=t.columnorder||[],r=t.header.values.length,i=n.slice(0,r),a=i.slice().sort((function(t,e){return t-e})),o=i.map((function(t){return a.indexOf(t)})),s=o.length;s/i),l=!o||s;t.mayHaveMarkup=o&&i.match(/[<&>]/);var 
c,u="string"===typeof(c=i)&&c.match(r.latexCheck);t.latex=u;var f,h,d=u?"":k(t.calcdata.cells.prefix,e,n)||"",p=u?"":k(t.calcdata.cells.suffix,e,n)||"",g=u?null:k(t.calcdata.cells.format,e,n)||null,v=d+(g?a(g)(t.value):t.value)+p;if(t.wrappingNeeded=!t.wrapped&&!l&&!u&&(f=_(v)),t.cellHeightMayIncrease=s||u||t.mayHaveMarkup||(void 0===f?_(v):f),t.needsConvertToTspans=t.mayHaveMarkup||t.wrappingNeeded||t.latex,t.wrappingNeeded){var m=(" "===r.wrapSplitCharacter?v.replace(/i&&r.push(a),i+=l}return r}(i,l,s);1===c.length&&(c[0]===i.length-1?c.unshift(c[0]-1):c.push(c[0]+1)),c[0]%2&&c.reverse(),e.each((function(t,e){t.page=c[e],t.scrollY=l})),e.attr("transform",(function(t){var e=R(t.rowBlocks,t.page)-t.scrollY;return u(0,e)})),t&&(P(t,n,e,c,r.prevPages,r,0),P(t,n,e,c,r.prevPages,r,1),b(n,t))}}function C(t,e,n,a){return function(o){var s=o.calcdata?o.calcdata:o,l=e.filter((function(t){return s.key===t.key})),c=n||s.scrollbarState.dragMultiplier,u=s.scrollY;s.scrollY=void 0===a?s.scrollY+c*i.event.dy:a;var f=l.selectAll("."+r.cn.yColumn).selectAll("."+r.cn.columnBlock).filter(M);return E(t,f,l),s.scrollY===u}}function P(t,e,n,r,i,a,o){r[o]!==i[o]&&(clearTimeout(a.currentRepaint[o]),a.currentRepaint[o]=setTimeout((function(){var a=n.filter((function(t,e){return e===o&&r[e]!==i[e]}));x(t,e,a,n),i[o]=r[o]})))}function O(t,e,n,a){return function(){var o=i.select(e.parentNode);o.each((function(t){var e=t.fragments;o.selectAll("tspan.line").each((function(t,n){e[n].width=this.getComputedTextLength()}));var n,i,a=e[e.length-1].width,s=e.slice(0,-1),l=[],c=0,u=t.column.columnWidth-2*r.cellPad;for(t.value="";s.length;)c+(i=(n=s.shift()).width+a)>u&&(t.value+=l.join(r.wrapSpacer)+r.lineBreaker,l=[],c=0),l.push(n.text),c+=i;c&&(t.value+=l.join(r.wrapSpacer)),t.wrapped=!0})),o.selectAll("tspan.line").remove(),w(o.select("."+r.cn.cellText),n,t,a),i.select(e.parentNode.parentNode).call(D)}}function L(t,e,n,a,o){return function(){if(!o.settledY){var s=i.select(e.parentNode),l=j(o),c=o.key-l.firstRowIndex,f=l.rows[c].rowHeight,h=o.cellHeightMayIncrease?e.parentNode.getBoundingClientRect().height+2*r.cellPad:f,d=Math.max(h,f);d-l.rows[c].rowHeight&&(l.rows[c].rowHeight=d,t.selectAll("."+r.cn.columnCell).call(D),E(null,t.filter(M),0),b(n,a,!0)),s.attr("transform",(function(){var t=this,e=t.parentNode.getBoundingClientRect(),n=i.select(t.parentNode).select("."+r.cn.cellRect).node().getBoundingClientRect(),a=t.transform.baseVal.consolidate(),s=n.top-e.top+(a?a.matrix.f:r.cellPad);return u(I(o,i.select(t.parentNode).select("."+r.cn.cellTextHolder).node().getBoundingClientRect().width),s)})),o.settledY=!0}}}function I(t,e){switch(t.align){case"left":default:return r.cellPad;case"right":return t.column.columnWidth-(e||0)-r.cellPad;case"center":return(t.column.columnWidth-(e||0))/2}}function D(t){t.attr("transform",(function(t){var e=t.rowBlocks[0].auxiliaryBlocks.reduce((function(t,e){return t+z(e,1/0)}),0),n=z(j(t),t.key);return u(0,n+e)})).selectAll("."+r.cn.cellRect).attr("height",(function(t){return(e=j(t),n=t.key,e.rows[n-e.firstRowIndex]).rowHeight;var e,n}))}function R(t,e){for(var n=0,r=e-1;r>=0;r--)n+=N(t[r]);return n}function z(t,e){for(var 
n=0,r=0;r","<","|","/","\\"],dflt:">",editType:"plot"},thickness:{valType:"number",min:12,editType:"plot"},textfont:u({},s.textfont,{}),editType:"calc"},text:s.text,textinfo:l.textinfo,texttemplate:i({editType:"plot"},{keys:c.eventDataKeys.concat(["label","value"])}),hovertext:s.hovertext,hoverinfo:l.hoverinfo,hovertemplate:r({},{keys:c.eventDataKeys}),textfont:s.textfont,insidetextfont:s.insidetextfont,outsidetextfont:u({},s.outsidetextfont,{}),textposition:{valType:"enumerated",values:["top left","top center","top right","middle left","middle center","middle right","bottom left","bottom center","bottom right"],dflt:"top left",editType:"plot"},sort:s.sort,root:l.root,domain:o({name:"treemap",trace:!0,editType:"calc"})}},78018:function(t,e,n){"use strict";var r=n(74875);e.name="treemap",e.plot=function(t,n,i,a){r.plotBasePlot(e.name,t,n,i,a)},e.clean=function(t,n,i,a){r.cleanBasePlot(e.name,t,n,i,a)}},65039:function(t,e,n){"use strict";var r=n(52147);e.y=function(t,e){return r.calc(t,e)},e.T=function(t){return r._runCrossTraceCalc("treemap",t)}},43473:function(t){"use strict";t.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:"poly",eventDataKeys:["currentPath","root","entry","percentRoot","percentEntry","percentParent"],gapWithPathbar:1}},91174:function(t,e,n){"use strict";var r=n(71828),i=n(45802),a=n(7901),o=n(27670).c,s=n(90769).handleText,l=n(97313).TEXTPAD,c=n(37434).handleMarkerDefaults,u=n(21081),f=u.hasColorscale,h=u.handleDefaults;t.exports=function(t,e,n,u){function d(n,a){return r.coerce(t,e,i,n,a)}var p=d("labels"),g=d("parents");if(p&&p.length&&g&&g.length){var v=d("values");v&&v.length?d("branchvalues"):d("count"),d("level"),d("maxdepth"),"squarify"===d("tiling.packing")&&d("tiling.squarifyratio"),d("tiling.flip"),d("tiling.pad");var m=d("text");d("texttemplate"),e.texttemplate||d("textinfo",Array.isArray(m)?"text+label":"label"),d("hovertext"),d("hovertemplate");var y=d("pathbar.visible");s(t,e,u,d,"auto",{hasPathbar:y,moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),d("textposition");var b=-1!==e.textposition.indexOf("bottom");c(t,e,u,d),(e._hasColorscale=f(t,"marker","colors")||(t.marker||{}).coloraxis)?h(t,e,u,d,{prefix:"marker.",cLetter:"c"}):d("marker.depthfade",!(e.marker.colors||[]).length);var x=2*e.textfont.size;d("marker.pad.t",b?x/4:x),d("marker.pad.l",x/4),d("marker.pad.r",x/4),d("marker.pad.b",b?x:x/4),d("marker.cornerradius"),e._hovered={marker:{line:{width:2,color:a.contrast(u.paper_bgcolor)}}},y&&(d("pathbar.thickness",e.pathbar.textfont.size+2*l),d("pathbar.side"),d("pathbar.edgeshape")),d("sort"),d("root.color"),o(e,u,d),e._length=null}else e.visible=!1}},80694:function(t,e,n){"use strict";var r=n(39898),i=n(2791),a=n(72597).clearMinTextSize,o=n(16688).resizeText,s=n(46650);t.exports=function(t,e,n,l,c){var u,f,h=c.type,d=c.drawDescendants,p=t._fullLayout,g=p["_"+h+"layer"],v=!n;a(h,p),(u=g.selectAll("g.trace."+h).data(e,(function(t){return t[0].trace.uid}))).enter().append("g").classed("trace",!0).classed(h,!0),u.order(),!p.uniformtext.mode&&i.hasTransition(n)?(l&&(f=l()),r.transition().duration(n.duration).ease(n.easing).each("end",(function(){f&&f()})).each("interrupt",(function(){f&&f()})).each((function(){g.selectAll("g.trace").each((function(e){s(t,e,this,n,d)}))}))):(u.each((function(e){s(t,e,this,n,d)})),p.uniformtext.mode&&o(t,g.selectAll(".trace"),h)),v&&u.exit().remove()}},66209:function(t,e,n){"use strict";var 
r=n(39898),i=n(71828),a=n(91424),o=n(63893),s=n(37210),l=n(96362).styleOne,c=n(43473),u=n(2791),f=n(83523),h=!0;t.exports=function(t,e,n,d,p){var g=p.barDifY,v=p.width,m=p.height,y=p.viewX,b=p.viewY,x=p.pathSlice,w=p.toMoveInsideSlice,_=p.strTransform,k=p.hasTransition,T=p.handleSlicesExit,M=p.makeUpdateSliceInterpolator,A=p.makeUpdateTextInterpolator,S={},E=t._context.staticPlot,C=t._fullLayout,P=e[0],O=P.trace,L=P.hierarchy,I=v/O._entryDepth,D=u.listPath(n.data,"id"),R=s(L.copy(),[v,m],{packing:"dice",pad:{inner:0,top:0,left:0,right:0,bottom:0}}).descendants();(R=R.filter((function(t){var e=D.indexOf(t.data.id);return-1!==e&&(t.x0=I*e,t.x1=I*(e+1),t.y0=g,t.y1=g+m,t.onPathbar=!0,!0)}))).reverse(),(d=d.data(R,u.getPtId)).enter().append("g").classed("pathbar",!0),T(d,h,S,[v,m],x),d.order();var z=d;k&&(z=z.transition().each("end",(function(){var e=r.select(this);u.setSliceCursor(e,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:!1})}))),z.each((function(s){s._x0=y(s.x0),s._x1=y(s.x1),s._y0=b(s.y0),s._y1=b(s.y1),s._hoverX=y(s.x1-Math.min(v,m)/2),s._hoverY=b(s.y1-m/2);var d=r.select(this),p=i.ensureSingle(d,"path","surface",(function(t){t.style("pointer-events",E?"none":"all")}));k?p.transition().attrTween("d",(function(t){var e=M(t,h,S,[v,m]);return function(t){return x(e(t))}})):p.attr("d",x),d.call(f,n,t,e,{styleOne:l,eventDataKeys:c.eventDataKeys,transitionTime:c.CLICK_TRANSITION_TIME,transitionEasing:c.CLICK_TRANSITION_EASING}).call(u.setSliceCursor,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:t._transitioning}),p.call(l,s,O,t,{hovered:!1}),s._text=(u.getPtLabel(s)||"").split("
").join(" ")||"";var g=i.ensureSingle(d,"g","slicetext"),T=i.ensureSingle(g,"text","",(function(t){t.attr("data-notex",1)})),P=i.ensureUniformFontSize(t,u.determineTextFont(O,s,C.font,{onPathbar:!0}));T.text(s._text||" ").classed("slicetext",!0).attr("text-anchor","start").call(a.font,P).call(o.convertToTspans,t),s.textBB=a.bBox(T.node()),s.transform=w(s,{fontSize:P.size,onPathbar:!0}),s.transform.fontSize=P.size,k?T.transition().attrTween("transform",(function(t){var e=A(t,h,S,[v,m]);return function(t){return _(e(t))}})):T.attr("transform",_(s))}))}},52583:function(t,e,n){"use strict";var r=n(39898),i=n(71828),a=n(91424),o=n(63893),s=n(37210),l=n(96362).styleOne,c=n(43473),u=n(2791),f=n(83523),h=n(24714).formatSliceLabel,d=!1;t.exports=function(t,e,n,p,g){var v=g.width,m=g.height,y=g.viewX,b=g.viewY,x=g.pathSlice,w=g.toMoveInsideSlice,_=g.strTransform,k=g.hasTransition,T=g.handleSlicesExit,M=g.makeUpdateSliceInterpolator,A=g.makeUpdateTextInterpolator,S=g.prevEntry,E=t._context.staticPlot,C=t._fullLayout,P=e[0].trace,O=-1!==P.textposition.indexOf("left"),L=-1!==P.textposition.indexOf("right"),I=-1!==P.textposition.indexOf("bottom"),D=!I&&!P.marker.pad.t||I&&!P.marker.pad.b,R=s(n,[v,m],{packing:P.tiling.packing,squarifyratio:P.tiling.squarifyratio,flipX:P.tiling.flip.indexOf("x")>-1,flipY:P.tiling.flip.indexOf("y")>-1,pad:{inner:P.tiling.pad,top:P.marker.pad.t,left:P.marker.pad.l,right:P.marker.pad.r,bottom:P.marker.pad.b}}).descendants(),z=1/0,N=-1/0;R.forEach((function(t){var e=t.depth;e>=P._maxDepth?(t.x0=t.x1=(t.x0+t.x1)/2,t.y0=t.y1=(t.y0+t.y1)/2):(z=Math.min(z,e),N=Math.max(N,e))})),p=p.data(R,u.getPtId),P._maxVisibleLayers=isFinite(N)?N-z+1:0,p.enter().append("g").classed("slice",!0),T(p,d,{},[v,m],x),p.order();var j=null;if(k&&S){var F=u.getPtId(S);p.each((function(t){null===j&&u.getPtId(t)===F&&(j={x0:t.x0,x1:t.x1,y0:t.y0,y1:t.y1})}))}var B=function(){return j||{x0:0,x1:v,y0:0,y1:m}},U=p;return k&&(U=U.transition().each("end",(function(){var e=r.select(this);u.setSliceCursor(e,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})}))),U.each((function(s){var p=u.isHeader(s,P);s._x0=y(s.x0),s._x1=y(s.x1),s._y0=b(s.y0),s._y1=b(s.y1),s._hoverX=y(s.x1-P.marker.pad.r),s._hoverY=b(I?s.y1-P.marker.pad.b/2:s.y0+P.marker.pad.t/2);var g=r.select(this),T=i.ensureSingle(g,"path","surface",(function(t){t.style("pointer-events",E?"none":"all")}));k?T.transition().attrTween("d",(function(t){var e=M(t,d,B(),[v,m]);return function(t){return x(e(t))}})):T.attr("d",x),g.call(f,n,t,e,{styleOne:l,eventDataKeys:c.eventDataKeys,transitionTime:c.CLICK_TRANSITION_TIME,transitionEasing:c.CLICK_TRANSITION_EASING}).call(u.setSliceCursor,t,{isTransitioning:t._transitioning}),T.call(l,s,P,t,{hovered:!1}),s.x0===s.x1||s.y0===s.y1?s._text="":s._text=p?D?"":u.getPtLabel(s)||"":h(s,n,P,e,C)||"";var S=i.ensureSingle(g,"g","slicetext"),R=i.ensureSingle(S,"text","",(function(t){t.attr("data-notex",1)})),z=i.ensureUniformFontSize(t,u.determineTextFont(P,s,C.font));R.text(s._text||" ").classed("slicetext",!0).attr("text-anchor",L?"end":O||p?"start":"middle").call(a.font,z).call(o.convertToTspans,t),s.textBB=a.bBox(R.node()),s.transform=w(s,{fontSize:z.size,isHeader:p}),s.transform.fontSize=z.size,k?R.transition().attrTween("transform",(function(t){var e=A(t,d,B(),[v,m]);return function(t){return _(e(t))}})):R.attr("transform",_(s))})),j}},14102:function(t){"use strict";t.exports=function t(e,n,r){var 
i;r.swapXY&&(i=e.x0,e.x0=e.y0,e.y0=i,i=e.x1,e.x1=e.y1,e.y1=i),r.flipX&&(i=e.x0,e.x0=n[0]-e.x1,e.x1=n[0]-i),r.flipY&&(i=e.y0,e.y0=n[1]-e.y1,e.y1=n[1]-i);var a=e.children;if(a)for(var o=0;o-1?P+I:-(L+I):0,R={x0:O,x1:O,y0:D,y1:D+L},z=function(t,e,n){var r=m.tiling.pad,i=function(t){return t-r<=e.x0},a=function(t){return t+r>=e.x1},o=function(t){return t-r<=e.y0},s=function(t){return t+r>=e.y1};return t.x0===e.x0&&t.x1===e.x1&&t.y0===e.y0&&t.y1===e.y1?{x0:t.x0,x1:t.x1,y0:t.y0,y1:t.y1}:{x0:i(t.x0-r)?0:a(t.x0-r)?n[0]:t.x0,x1:i(t.x1+r)?0:a(t.x1+r)?n[0]:t.x1,y0:o(t.y0-r)?0:s(t.y0-r)?n[1]:t.y0,y1:o(t.y1+r)?0:s(t.y1+r)?n[1]:t.y1}},N=null,j={},F={},B=null,U=function(t,e){return e?j[h(t)]:F[h(t)]},H=function(t,e,n,r){if(e)return j[h(b)]||R;var i=F[m.level]||n;return function(t){return t.data.depth-x.data.depth=(r-=(y?et:et.r)-s)){var d=(n+r)/2;n=d,r=d}var p;ot?i<(p=a-(y?et:et.b))&&p"===tt?(l.x-=a,c.x-=a,u.x-=a,f.x-=a):"/"===tt?(u.x-=a,f.x-=a,o.x-=a/2,s.x-=a/2):"\\"===tt?(l.x-=a,c.x-=a,o.x-=a/2,s.x-=a/2):"<"===tt&&(o.x-=a,s.x-=a),Q(l),Q(f),Q(o),Q(c),Q(u),Q(s),"M"+Z(l.x,l.y)+"L"+Z(c.x,c.y)+"L"+Z(s.x,s.y)+"L"+Z(u.x,u.y)+"L"+Z(f.x,f.y)+"L"+Z(o.x,o.y)+"Z"},toMoveInsideSlice:st,makeUpdateSliceInterpolator:ct,makeUpdateTextInterpolator:ut,handleSlicesExit:ft,hasTransition:M,strTransform:ht}):_.remove()}},96362:function(t,e,n){"use strict";var r=n(39898),i=n(7901),a=n(71828),o=n(2791),s=n(72597).resizeText,l=n(43467);function c(t,e,n,r,s){var c,u,f=(s||{}).hovered,h=e.data.data,d=h.i,p=h.color,g=o.isHierarchyRoot(e),v=1;if(f)c=n._hovered.marker.line.color,u=n._hovered.marker.line.width;else if(g&&p===n.root.color)v=100,c="rgba(0,0,0,0)",u=0;else if(c=a.castOption(n,d,"marker.line.color")||i.defaultLine,u=a.castOption(n,d,"marker.line.width")||0,!n._hasColorscale&&!e.onPathbar){var m=n.marker.depthfade;if(m){var y,b=i.combine(i.addOpacity(n._backgroundColor,.75),p);if(!0===m){var x=o.getMaxDepth(n);y=isFinite(x)?o.isLeaf(e)?0:n._maxVisibleLayers-(e.data.depth-n._entryDepth):e.data.height+1}else y=e.data.depth-n._entryDepth,n._atRootLevel||y++;if(y>0)for(var w=0;w0){var x,w,_,k,T,M=t.xa,A=t.ya;"h"===p.orientation?(T=e,x="y",_=A,w="x",k=M):(T=n,x="x",_=M,w="y",k=A);var S=d[t.index];if(T>=S.span[0]&&T<=S.span[1]){var E=i.extendFlat({},t),C=k.c2p(T,!0),P=s.getKdeValue(S,p,T),O=s.getPositionOnKdePath(S,p,C),L=_._offset,I=_._length;E[x+"0"]=O[0],E[x+"1"]=O[1],E[w+"0"]=E[w+"1"]=C,E[w+"Label"]=w+": "+a.hoverLabelText(k,T,p[w+"hoverformat"])+", "+d[0].t.labels.kde+" "+P.toFixed(3);for(var D=0,R=0;R")),u.color=function(t,e){var n=t[e.dir].marker,r=n.color,a=n.line.color,o=n.line.width;return i(r)?r:i(a)&&o?a:void 0}(h,v),[u]}function T(t){return r(g,t,h[p+"hoverformat"])}}},19990:function(t,e,n){"use strict";t.exports={attributes:n(43037),layoutAttributes:n(13494),supplyDefaults:n(83266).supplyDefaults,crossTraceDefaults:n(83266).crossTraceDefaults,supplyLayoutDefaults:n(5176),calc:n(52752),crossTraceCalc:n(70766),plot:n(30436),style:n(55750).style,hoverPoints:n(61326),eventData:n(58593),selectPoints:n(81974),moduleType:"trace",name:"waterfall",basePlotModule:n(93612),categories:["bar-like","cartesian","svg","oriented","showLegend","zoomScale"],meta:{}}},13494:function(t){"use strict";t.exports={waterfallmode:{valType:"enumerated",values:["group","overlay"],dflt:"group",editType:"calc"},waterfallgap:{valType:"number",min:0,max:1,editType:"calc"},waterfallgroupgap:{valType:"number",min:0,max:1,dflt:0,editType:"calc"}}},5176:function(t,e,n){"use strict";var r=n(71828),i=n(13494);t.exports=function(t,e,n){var a=!1;function 
o(n,a){return r.coerce(t,e,i,n,a)}for(var s=0;s0&&(v+=h?"M"+f[0]+","+p[1]+"V"+p[0]:"M"+f[1]+","+p[0]+"H"+f[0]),"between"!==d&&(n.isSum||s path").each((function(t){if(!t.isBlank){var e=s[t.dir].marker;r.select(this).call(a.fill,e.color).call(a.stroke,e.line.color).call(i.dashLine,e.line.dash,e.line.width).style("opacity",s.selectedpoints&&!t.selected?o:1)}})),c(n,s,t),n.selectAll(".lines").each((function(){var t=s.connector.line;i.lineGroupStyle(r.select(this).selectAll("path"),t.width,t.color,t.dash)}))}))}}},82887:function(t,e,n){"use strict";var r=n(89298),i=n(71828),a=n(86281),o=n(79344).p,s=n(50606).BADNUM;e.moduleType="transform",e.name="aggregate";var l=e.attributes={enabled:{valType:"boolean",dflt:!0,editType:"calc"},groups:{valType:"string",strict:!0,noBlank:!0,arrayOk:!0,dflt:"x",editType:"calc"},aggregations:{_isLinkedToArray:"aggregation",target:{valType:"string",editType:"calc"},func:{valType:"enumerated",values:["count","sum","avg","median","mode","rms","stddev","min","max","first","last","change","range"],dflt:"first",editType:"calc"},funcmode:{valType:"enumerated",values:["sample","population"],dflt:"sample",editType:"calc"},enabled:{valType:"boolean",dflt:!0,editType:"calc"},editType:"calc"},editType:"calc"},c=l.aggregations;function u(t,e,n,a){if(a.enabled){for(var o=a.target,l=i.nestedProperty(e,o),c=l.get(),u=function(t,e){var n=t.func,r=e.d2c,a=e.c2d;switch(n){case"count":return f;case"first":return h;case"last":return d;case"sum":return function(t,e){for(var n=0,i=0;ii&&(i=u,o=c)}}return i?a(o):s};case"rms":return function(t,e){for(var n=0,i=0,o=0;o":return function(t){return h(t)>s};case">=":return function(t){return h(t)>=s};case"[]":return function(t){var e=h(t);return e>=s[0]&&e<=s[1]};case"()":return function(t){var e=h(t);return e>s[0]&&e=s[0]&&es[0]&&e<=s[1]};case"][":return function(t){var e=h(t);return e<=s[0]||e>=s[1]};case")(":return function(t){var e=h(t);return es[1]};case"](":return function(t){var e=h(t);return e<=s[0]||e>s[1]};case")[":return function(t){var e=h(t);return e=s[1]};case"{}":return function(t){return-1!==s.indexOf(h(t))};case"}{":return function(t){return-1===s.indexOf(h(t))}}}(n,a.getDataToCoordFunc(t,e,s,i),h),b={},x={},w=0;p?(v=function(t){b[t.astr]=r.extendDeep([],t.get()),t.set(new Array(f))},m=function(t,e){var n=b[t.astr][e];t.get()[e]=n}):(v=function(t){b[t.astr]=r.extendDeep([],t.get()),t.set([])},m=function(t,e){var n=b[t.astr][e];t.get().push(n)}),T(v);for(var _=o(e.transforms,n),k=0;k1?"%{group} (%{trace})":"%{group}");var l=t.styles,c=o.styles=[];if(l)for(a=0;ah)throw new RangeError('The value "'+t+'" is invalid for option "size"');var e=new Uint8Array(t);return Object.setPrototypeOf(e,p.prototype),e}function p(t,e,n){if("number"===typeof t){if("string"===typeof e)throw new TypeError('The "string" argument must be of type string. Received type number');return m(t)}return g(t,e,n)}function g(t,e,n){if("string"===typeof t)return function(t,e){if("string"===typeof e&&""!==e||(e="utf8"),!p.isEncoding(e))throw new TypeError("Unknown encoding: "+e);var n=0|w(t,e),r=d(n),i=r.write(t,e);return i!==n&&(r=r.slice(0,i)),r}(t,e);if(ArrayBuffer.isView(t))return function(t){if(et(t,Uint8Array)){var e=new Uint8Array(t);return b(e.buffer,e.byteOffset,e.byteLength)}return y(t)}(t);if(null==t)throw new TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. 
Received type "+l(t));if(et(t,ArrayBuffer)||t&&et(t.buffer,ArrayBuffer))return b(t,e,n);if("undefined"!==typeof SharedArrayBuffer&&(et(t,SharedArrayBuffer)||t&&et(t.buffer,SharedArrayBuffer)))return b(t,e,n);if("number"===typeof t)throw new TypeError('The "value" argument must not be of type number. Received type number');var r=t.valueOf&&t.valueOf();if(null!=r&&r!==t)return p.from(r,e,n);var i=function(t){if(p.isBuffer(t)){var e=0|x(t.length),n=d(e);return 0===n.length||t.copy(n,0,0,e),n}return void 0!==t.length?"number"!==typeof t.length||nt(t.length)?d(0):y(t):"Buffer"===t.type&&Array.isArray(t.data)?y(t.data):void 0}(t);if(i)return i;if("undefined"!==typeof Symbol&&null!=Symbol.toPrimitive&&"function"===typeof t[Symbol.toPrimitive])return p.from(t[Symbol.toPrimitive]("string"),e,n);throw new TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type "+l(t))}function v(t){if("number"!==typeof t)throw new TypeError('"size" argument must be of type number');if(t<0)throw new RangeError('The value "'+t+'" is invalid for option "size"')}function m(t){return v(t),d(t<0?0:0|x(t))}function y(t){for(var e=t.length<0?0:0|x(t.length),n=d(e),r=0;r=h)throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+h.toString(16)+" bytes");return 0|t}function w(t,e){if(p.isBuffer(t))return t.length;if(ArrayBuffer.isView(t)||et(t,ArrayBuffer))return t.byteLength;if("string"!==typeof t)throw new TypeError('The "string" argument must be one of type string, Buffer, or ArrayBuffer. Received type '+l(t));var n=t.length,r=arguments.length>2&&!0===arguments[2];if(!r&&0===n)return 0;for(var i=!1;;)switch(e){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":return J(t).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return Q(t).length;default:if(i)return r?-1:J(t).length;e=(""+e).toLowerCase(),i=!0}}function _(t,e,n){var r=!1;if((void 0===e||e<0)&&(e=0),e>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(e>>>=0))return"";for(t||(t="utf8");;)switch(t){case"hex":return z(this,e,n);case"utf8":case"utf-8":return L(this,e,n);case"ascii":return D(this,e,n);case"latin1":case"binary":return R(this,e,n);case"base64":return O(this,e,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return N(this,e,n);default:if(r)throw new TypeError("Unknown encoding: "+t);t=(t+"").toLowerCase(),r=!0}}function k(t,e,n){var r=t[e];t[e]=t[n],t[n]=r}function T(t,e,n,r,i){if(0===t.length)return-1;if("string"===typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),nt(n=+n)&&(n=i?0:t.length-1),n<0&&(n=t.length+n),n>=t.length){if(i)return-1;n=t.length-1}else if(n<0){if(!i)return-1;n=0}if("string"===typeof e&&(e=p.from(e,r)),p.isBuffer(e))return 0===e.length?-1:M(t,e,n,r,i);if("number"===typeof e)return e&=255,"function"===typeof Uint8Array.prototype.indexOf?i?Uint8Array.prototype.indexOf.call(t,e,n):Uint8Array.prototype.lastIndexOf.call(t,e,n):M(t,[e],n,r,i);throw new TypeError("val must be string, number or Buffer")}function M(t,e,n,r,i){var a,o=1,s=t.length,l=e.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(t.length<2||e.length<2)return-1;o=2,s/=2,l/=2,n/=2}function c(t,e){return 1===o?t[e]:t.readUInt16BE(e*o)}if(i){var u=-1;for(a=n;as&&(n=s-l),a=n;a>=0;a--){for(var f=!0,h=0;hi&&(r=i):r=i;var 
a,o=e.length;for(r>o/2&&(r=o/2),a=0;a>8,i=n%256,a.push(i),a.push(r);return a}(e,t.length-n),t,n,r)}function O(t,e,n){return 0===e&&n===t.length?c.fromByteArray(t):c.fromByteArray(t.slice(e,n))}function L(t,e,n){n=Math.min(t.length,n);for(var r=[],i=e;i239?4:a>223?3:a>191?2:1;if(i+s<=n){var l=void 0,c=void 0,u=void 0,f=void 0;switch(s){case 1:a<128&&(o=a);break;case 2:128===(192&(l=t[i+1]))&&(f=(31&a)<<6|63&l)>127&&(o=f);break;case 3:l=t[i+1],c=t[i+2],128===(192&l)&&128===(192&c)&&(f=(15&a)<<12|(63&l)<<6|63&c)>2047&&(f<55296||f>57343)&&(o=f);break;case 4:l=t[i+1],c=t[i+2],u=t[i+3],128===(192&l)&&128===(192&c)&&128===(192&u)&&(f=(15&a)<<18|(63&l)<<12|(63&c)<<6|63&u)>65535&&f<1114112&&(o=f)}}null===o?(o=65533,s=1):o>65535&&(o-=65536,r.push(o>>>10&1023|55296),o=56320|1023&o),r.push(o),i+=s}return function(t){var e=t.length;if(e<=I)return String.fromCharCode.apply(String,t);for(var n="",r=0;rr.length?(p.isBuffer(a)||(a=p.from(a)),a.copy(r,i)):Uint8Array.prototype.set.call(r,a,i);else{if(!p.isBuffer(a))throw new TypeError('"list" argument must be an Array of Buffers');a.copy(r,i)}i+=a.length}return r},p.byteLength=w,p.prototype._isBuffer=!0,p.prototype.swap16=function(){var t=this.length;if(t%2!==0)throw new RangeError("Buffer size must be a multiple of 16-bits");for(var e=0;en&&(t+=" ... "),""},f&&(p.prototype[f]=p.prototype.inspect),p.prototype.compare=function(t,e,n,r,i){if(et(t,Uint8Array)&&(t=p.from(t,t.offset,t.byteLength)),!p.isBuffer(t))throw new TypeError('The "target" argument must be one of type Buffer or Uint8Array. Received type '+l(t));if(void 0===e&&(e=0),void 0===n&&(n=t?t.length:0),void 0===r&&(r=0),void 0===i&&(i=this.length),e<0||n>t.length||r<0||i>this.length)throw new RangeError("out of range index");if(r>=i&&e>=n)return 0;if(r>=i)return-1;if(e>=n)return 1;if(this===t)return 0;for(var a=(i>>>=0)-(r>>>=0),o=(n>>>=0)-(e>>>=0),s=Math.min(a,o),c=this.slice(r,i),u=t.slice(e,n),f=0;f>>=0,isFinite(n)?(n>>>=0,void 0===r&&(r="utf8")):(r=n,n=void 0)}var i=this.length-e;if((void 0===n||n>i)&&(n=i),t.length>0&&(n<0||e<0)||e>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var a=!1;;)switch(r){case"hex":return A(this,t,e,n);case"utf8":case"utf-8":return S(this,t,e,n);case"ascii":case"latin1":case"binary":return E(this,t,e,n);case"base64":return C(this,t,e,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return P(this,t,e,n);default:if(a)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),a=!0}},p.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var I=4096;function D(t,e,n){var r="";n=Math.min(t.length,n);for(var i=e;ir)&&(n=r);for(var i="",a=e;an)throw new RangeError("Trying to access beyond buffer length")}function F(t,e,n,r,i,a){if(!p.isBuffer(t))throw new TypeError('"buffer" argument must be a Buffer instance');if(e>i||et.length)throw new RangeError("Index out of range")}function B(t,e,n,r,i){$(e,r,i,t,n,7);var a=Number(e&BigInt(4294967295));t[n++]=a,a>>=8,t[n++]=a,a>>=8,t[n++]=a,a>>=8,t[n++]=a;var o=Number(e>>BigInt(32)&BigInt(4294967295));return t[n++]=o,o>>=8,t[n++]=o,o>>=8,t[n++]=o,o>>=8,t[n++]=o,n}function U(t,e,n,r,i){$(e,r,i,t,n,7);var a=Number(e&BigInt(4294967295));t[n+7]=a,a>>=8,t[n+6]=a,a>>=8,t[n+5]=a,a>>=8,t[n+4]=a;var o=Number(e>>BigInt(32)&BigInt(4294967295));return t[n+3]=o,o>>=8,t[n+2]=o,o>>=8,t[n+1]=o,o>>=8,t[n]=o,n+8}function H(t,e,n,r,i,a){if(n+r>t.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of 
range")}function V(t,e,n,r,i){return e=+e,n>>>=0,i||H(t,0,n,4),u.write(t,e,n,r,23,4),n+4}function q(t,e,n,r,i){return e=+e,n>>>=0,i||H(t,0,n,8),u.write(t,e,n,r,52,8),n+8}p.prototype.slice=function(t,e){var n=this.length;(t=~~t)<0?(t+=n)<0&&(t=0):t>n&&(t=n),(e=void 0===e?n:~~e)<0?(e+=n)<0&&(e=0):e>n&&(e=n),e>>=0,e>>>=0,n||j(t,e,this.length);for(var r=this[t],i=1,a=0;++a>>=0,e>>>=0,n||j(t,e,this.length);for(var r=this[t+--e],i=1;e>0&&(i*=256);)r+=this[t+--e]*i;return r},p.prototype.readUint8=p.prototype.readUInt8=function(t,e){return t>>>=0,e||j(t,1,this.length),this[t]},p.prototype.readUint16LE=p.prototype.readUInt16LE=function(t,e){return t>>>=0,e||j(t,2,this.length),this[t]|this[t+1]<<8},p.prototype.readUint16BE=p.prototype.readUInt16BE=function(t,e){return t>>>=0,e||j(t,2,this.length),this[t]<<8|this[t+1]},p.prototype.readUint32LE=p.prototype.readUInt32LE=function(t,e){return t>>>=0,e||j(t,4,this.length),(this[t]|this[t+1]<<8|this[t+2]<<16)+16777216*this[t+3]},p.prototype.readUint32BE=p.prototype.readUInt32BE=function(t,e){return t>>>=0,e||j(t,4,this.length),16777216*this[t]+(this[t+1]<<16|this[t+2]<<8|this[t+3])},p.prototype.readBigUInt64LE=it((function(t){X(t>>>=0,"offset");var e=this[t],n=this[t+7];void 0!==e&&void 0!==n||K(t,this.length-8);var r=e+this[++t]*Math.pow(2,8)+this[++t]*Math.pow(2,16)+this[++t]*Math.pow(2,24),i=this[++t]+this[++t]*Math.pow(2,8)+this[++t]*Math.pow(2,16)+n*Math.pow(2,24);return BigInt(r)+(BigInt(i)<>>=0,"offset");var e=this[t],n=this[t+7];void 0!==e&&void 0!==n||K(t,this.length-8);var r=e*Math.pow(2,24)+this[++t]*Math.pow(2,16)+this[++t]*Math.pow(2,8)+this[++t],i=this[++t]*Math.pow(2,24)+this[++t]*Math.pow(2,16)+this[++t]*Math.pow(2,8)+n;return(BigInt(r)<>>=0,e>>>=0,n||j(t,e,this.length);for(var r=this[t],i=1,a=0;++a=(i*=128)&&(r-=Math.pow(2,8*e)),r},p.prototype.readIntBE=function(t,e,n){t>>>=0,e>>>=0,n||j(t,e,this.length);for(var r=e,i=1,a=this[t+--r];r>0&&(i*=256);)a+=this[t+--r]*i;return a>=(i*=128)&&(a-=Math.pow(2,8*e)),a},p.prototype.readInt8=function(t,e){return t>>>=0,e||j(t,1,this.length),128&this[t]?-1*(255-this[t]+1):this[t]},p.prototype.readInt16LE=function(t,e){t>>>=0,e||j(t,2,this.length);var n=this[t]|this[t+1]<<8;return 32768&n?4294901760|n:n},p.prototype.readInt16BE=function(t,e){t>>>=0,e||j(t,2,this.length);var n=this[t+1]|this[t]<<8;return 32768&n?4294901760|n:n},p.prototype.readInt32LE=function(t,e){return t>>>=0,e||j(t,4,this.length),this[t]|this[t+1]<<8|this[t+2]<<16|this[t+3]<<24},p.prototype.readInt32BE=function(t,e){return t>>>=0,e||j(t,4,this.length),this[t]<<24|this[t+1]<<16|this[t+2]<<8|this[t+3]},p.prototype.readBigInt64LE=it((function(t){X(t>>>=0,"offset");var e=this[t],n=this[t+7];void 0!==e&&void 0!==n||K(t,this.length-8);var r=this[t+4]+this[t+5]*Math.pow(2,8)+this[t+6]*Math.pow(2,16)+(n<<24);return(BigInt(r)<>>=0,"offset");var e=this[t],n=this[t+7];void 0!==e&&void 0!==n||K(t,this.length-8);var r=(e<<24)+this[++t]*Math.pow(2,16)+this[++t]*Math.pow(2,8)+this[++t];return(BigInt(r)<>>=0,e||j(t,4,this.length),u.read(this,t,!0,23,4)},p.prototype.readFloatBE=function(t,e){return t>>>=0,e||j(t,4,this.length),u.read(this,t,!1,23,4)},p.prototype.readDoubleLE=function(t,e){return t>>>=0,e||j(t,8,this.length),u.read(this,t,!0,52,8)},p.prototype.readDoubleBE=function(t,e){return t>>>=0,e||j(t,8,this.length),u.read(this,t,!1,52,8)},p.prototype.writeUintLE=p.prototype.writeUIntLE=function(t,e,n,r){t=+t,e>>>=0,n>>>=0,r||F(this,t,e,n,Math.pow(2,8*n)-1,0);var i=1,a=0;for(this[e]=255&t;++a>>=0,n>>>=0,r||F(this,t,e,n,Math.pow(2,8*n)-1,0);var 
[minified webpack bundle chunk omitted: machine-generated, non-reviewable vendor code bundled into a single asset, including a Node.js Buffer polyfill, base64-js and ieee754 encoders, the bn.js arbitrary-precision integer library, binary-search-bounds and box-intersect sweep routines, a red-black tree implementation, and colormap lookup tables; the minified text in this span is also corrupted by extraction and cannot be meaningfully reviewed or repaired]
a=e(t,n.key);r.push(n),a>0&&(i=r.length),n=a<=0?n.left:n.right}return r.length=i,new u(this,r)},o.le=function(t){for(var e=this._compare,n=this.root,r=[],i=0;n;){var a=e(t,n.key);r.push(n),a>=0&&(i=r.length),n=a<0?n.left:n.right}return r.length=i,new u(this,r)},o.find=function(t){for(var e=this._compare,n=this.root,r=[];n;){var i=e(t,n.key);if(r.push(n),0===i)return new u(this,r);n=i<=0?n.left:n.right}return new u(this,[])},o.remove=function(t){var e=this.find(t);return e?e.remove():this},o.get=function(t){for(var e=this._compare,n=this.root;n;){var r=e(t,n.key);if(0===r)return n.value;n=r<=0?n.left:n.right}};var f=u.prototype;function h(t,e){t.key=e.key,t.value=e.value,t.left=e.left,t.right=e.right,t._color=e._color,t._count=e._count}function d(t,e){return te?1:0}Object.defineProperty(f,"valid",{get:function(){return this._stack.length>0}}),Object.defineProperty(f,"node",{get:function(){return this._stack.length>0?this._stack[this._stack.length-1]:null},enumerable:!0}),f.clone=function(){return new u(this.tree,this._stack.slice())},f.remove=function(){var t=this._stack;if(0===t.length)return this.tree;var o=new Array(t.length),s=t[t.length-1];o[o.length-1]=new e(s._color,s.key,s.value,s.left,s.right,s._count);for(var l=t.length-2;l>=0;--l)(s=t[l]).left===t[l+1]?o[l]=new e(s._color,s.key,s.value,o[l+1],s.right,s._count):o[l]=new e(s._color,s.key,s.value,s.left,o[l+1],s._count);if((s=o[o.length-1]).left&&s.right){var c=o.length;for(s=s.left;s.right;)o.push(s),s=s.right;var u=o[c-1];for(o.push(new e(s._color,u.key,u.value,s.left,s.right,s._count)),o[c-1].key=s.key,o[c-1].value=s.value,l=o.length-2;l>=c;--l)s=o[l],o[l]=new e(s._color,s.key,s.value,s.left,o[l+1],s._count);o[c-1].left=o[c]}if(0===(s=o[o.length-1])._color){var f=o[o.length-2];for(f.left===s?f.left=null:f.right===s&&(f.right=null),o.pop(),l=0;l=0;--l){if(e=t[l],0===l)return void(e._color=1);if((a=t[l-1]).left===e){if((o=a.right).right&&0===o.right._color)return s=(o=a.right=n(o)).right=n(o.right),a.right=o.left,o.left=a,o.right=s,o._color=a._color,e._color=1,a._color=1,s._color=1,i(a),i(o),l>1&&((c=t[l-2]).left===a?c.left=o:c.right=o),void(t[l-1]=o);if(o.left&&0===o.left._color)return s=(o=a.right=n(o)).left=n(o.left),a.right=s.left,o.left=s.right,s.left=a,s.right=o,s._color=a._color,a._color=1,o._color=1,e._color=1,i(a),i(o),i(s),l>1&&((c=t[l-2]).left===a?c.left=s:c.right=s),void(t[l-1]=s);if(1===o._color){if(0===a._color)return a._color=1,void(a.right=r(0,o));a.right=r(0,o);continue}o=n(o),a.right=o.left,o.left=a,o._color=a._color,a._color=0,i(a),i(o),l>1&&((c=t[l-2]).left===a?c.left=o:c.right=o),t[l-1]=o,t[l]=a,l+11&&((c=t[l-2]).right===a?c.right=o:c.left=o),void(t[l-1]=o);if(o.right&&0===o.right._color)return s=(o=a.left=n(o)).right=n(o.right),a.left=s.right,o.right=s.left,s.right=a,s.left=o,s._color=a._color,a._color=1,o._color=1,e._color=1,i(a),i(o),i(s),l>1&&((c=t[l-2]).right===a?c.right=s:c.left=s),void(t[l-1]=s);if(1===o._color){if(0===a._color)return a._color=1,void(a.left=r(0,o));a.left=r(0,o);continue}var c;o=n(o),a.left=o.right,o.right=a,o._color=a._color,a._color=0,i(a),i(o),l>1&&((c=t[l-2]).right===a?c.right=o:c.left=o),t[l-1]=o,t[l]=a,l+10)return this._stack[this._stack.length-1].key},enumerable:!0}),Object.defineProperty(f,"value",{get:function(){if(this._stack.length>0)return this._stack[this._stack.length-1].value},enumerable:!0}),Object.defineProperty(f,"index",{get:function(){var t=0,e=this._stack;if(0===e.length){var n=this.tree.root;return 
n?n._count:0}e[e.length-1].left&&(t=e[e.length-1].left._count);for(var r=e.length-2;r>=0;--r)e[r+1]===e[r].right&&(++t,e[r].left&&(t+=e[r].left._count));return t},enumerable:!0}),f.next=function(){var t=this._stack;if(0!==t.length){var e=t[t.length-1];if(e.right)for(e=e.right;e;)t.push(e),e=e.left;else for(t.pop();t.length>0&&t[t.length-1].right===e;)e=t[t.length-1],t.pop()}},Object.defineProperty(f,"hasNext",{get:function(){var t=this._stack;if(0===t.length)return!1;if(t[t.length-1].right)return!0;for(var e=t.length-1;e>0;--e)if(t[e-1].left===t[e])return!0;return!1}}),f.update=function(t){var n=this._stack;if(0===n.length)throw new Error("Can't update empty node!");var r=new Array(n.length),i=n[n.length-1];r[r.length-1]=new e(i._color,i.key,t,i.left,i.right,i._count);for(var o=n.length-2;o>=0;--o)(i=n[o]).left===n[o+1]?r[o]=new e(i._color,i.key,i.value,r[o+1],i.right,i._count):r[o]=new e(i._color,i.key,i.value,i.left,r[o+1],i._count);return new a(this.tree._compare,r[0])},f.prev=function(){var t=this._stack;if(0!==t.length){var e=t[t.length-1];if(e.left)for(e=e.left;e;)t.push(e),e=e.right;else for(t.pop();t.length>0&&t[t.length-1].left===e;)e=t[t.length-1],t.pop()}},Object.defineProperty(f,"hasPrev",{get:function(){var t=this._stack;if(0===t.length)return!1;if(t[t.length-1].left)return!0;for(var e=t.length-1;e>0;--e)if(t[e-1].right===t[e])return!0;return!1}})},7453:function(t,e,n){"use strict";t.exports=function(t,e){var n=new u(t);return n.update(e),n};var r=n(9557),i=n(1681),a=n(1011),o=n(2864),s=n(8468),l=new Float32Array([1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]);function c(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t}function u(t){this.gl=t,this.pixelRatio=1,this.bounds=[[-10,-10,-10],[10,10,10]],this.ticks=[[],[],[]],this.autoTicks=!0,this.tickSpacing=[1,1,1],this.tickEnable=[!0,!0,!0],this.tickFont=["sans-serif","sans-serif","sans-serif"],this.tickSize=[12,12,12],this.tickAngle=[0,0,0],this.tickAlign=["auto","auto","auto"],this.tickColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.tickPad=[10,10,10],this.lastCubeProps={cubeEdges:[0,0,0],axis:[0,0,0]},this.labels=["x","y","z"],this.labelEnable=[!0,!0,!0],this.labelFont="sans-serif",this.labelSize=[20,20,20],this.labelAngle=[0,0,0],this.labelAlign=["auto","auto","auto"],this.labelColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.labelPad=[10,10,10],this.lineEnable=[!0,!0,!0],this.lineMirror=[!1,!1,!1],this.lineWidth=[1,1,1],this.lineColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.lineTickEnable=[!0,!0,!0],this.lineTickMirror=[!1,!1,!1],this.lineTickLength=[0,0,0],this.lineTickWidth=[1,1,1],this.lineTickColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.gridEnable=[!0,!0,!0],this.gridWidth=[1,1,1],this.gridColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.zeroEnable=[!0,!0,!0],this.zeroLineColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.zeroLineWidth=[2,2,2],this.backgroundEnable=[!1,!1,!1],this.backgroundColor=[[.8,.8,.8,.5],[.8,.8,.8,.5],[.8,.8,.8,.5]],this._firstInit=!0,this._text=null,this._lines=null,this._background=a(t)}var f=u.prototype;function h(){this.primalOffset=[0,0,0],this.primalMinor=[0,0,0],this.mirrorOffset=[0,0,0],this.mirrorMinor=[0,0,0]}f.update=function(t){function e(e,n,r){if(r in t){var i,a=t[r],o=this[r];(e?Array.isArray(a)&&Array.isArray(a[0]):Array.isArray(a))?this[r]=i=[n(a[0]),n(a[1]),n(a[2])]:this[r]=i=[n(a),n(a),n(a)];for(var s=0;s<3;++s)if(i[s]!==o[s])return!0}return!1}t=t||{};var 
n,a=e.bind(this,!1,Number),o=e.bind(this,!1,Boolean),l=e.bind(this,!1,String),c=e.bind(this,!0,(function(t){if(Array.isArray(t)){if(3===t.length)return[+t[0],+t[1],+t[2],1];if(4===t.length)return[+t[0],+t[1],+t[2],+t[3]]}return[0,0,0,1]})),u=!1,f=!1;if("bounds"in t)for(var h=t.bounds,d=0;d<2;++d)for(var p=0;p<3;++p)h[d][p]!==this.bounds[d][p]&&(f=!0),this.bounds[d][p]=h[d][p];if("ticks"in t)for(n=t.ticks,u=!0,this.autoTicks=!1,d=0;d<3;++d)this.tickSpacing[d]=0;else a("tickSpacing")&&(this.autoTicks=!0,f=!0);if(this._firstInit&&("ticks"in t||"tickSpacing"in t||(this.autoTicks=!0),f=!0,u=!0,this._firstInit=!1),f&&this.autoTicks&&(n=s.create(this.bounds,this.tickSpacing),u=!0),u){for(d=0;d<3;++d)n[d].sort((function(t,e){return t.x-e.x}));s.equal(n,this.ticks)?u=!1:this.ticks=n}o("tickEnable"),l("tickFont")&&(u=!0),a("tickSize"),a("tickAngle"),a("tickPad"),c("tickColor");var g=l("labels");l("labelFont")&&(g=!0),o("labelEnable"),a("labelSize"),a("labelPad"),c("labelColor"),o("lineEnable"),o("lineMirror"),a("lineWidth"),c("lineColor"),o("lineTickEnable"),o("lineTickMirror"),a("lineTickLength"),a("lineTickWidth"),c("lineTickColor"),o("gridEnable"),a("gridWidth"),c("gridColor"),o("zeroEnable"),c("zeroLineColor"),a("zeroLineWidth"),o("backgroundEnable"),c("backgroundColor"),this._text?this._text&&(g||u)&&this._text.update(this.bounds,this.labels,this.labelFont,this.ticks,this.tickFont):this._text=r(this.gl,this.bounds,this.labels,this.labelFont,this.ticks,this.tickFont),this._lines&&u&&(this._lines.dispose(),this._lines=null),this._lines||(this._lines=i(this.gl,this.bounds,this.ticks))};var d=[new h,new h,new h];function p(t,e,n,r,i){for(var a=t.primalOffset,o=t.primalMinor,s=t.mirrorOffset,l=t.mirrorMinor,c=r[e],u=0;u<3;++u)if(e!==u){var f=a,h=s,d=o,p=l;c&1<0?(d[u]=-1,p[u]=0):(d[u]=0,p[u]=1)}}var g=[0,0,0],v={model:l,view:l,projection:l,_ortho:!1};f.isOpaque=function(){return!0},f.isTransparent=function(){return!1},f.drawTransparent=function(t){};var m=[0,0,0],y=[0,0,0],b=[0,0,0];f.draw=function(t){t=t||v;for(var e=this.gl,n=t.model||l,r=t.view||l,i=t.projection||l,a=this.bounds,s=t._ortho||!1,u=o(n,r,i,a,s),f=u.cubeEdges,h=u.axis,x=r[12],w=r[13],_=r[14],k=r[15],T=(s?2:1)*this.pixelRatio*(i[3]*x+i[7]*w+i[11]*_+i[15]*k)/e.drawingBufferHeight,M=0;M<3;++M)this.lastCubeProps.cubeEdges[M]=f[M],this.lastCubeProps.axis[M]=h[M];var A=d;for(M=0;M<3;++M)p(d[M],M,this.bounds,f,h);e=this.gl;var S,E,C,P=g;for(M=0;M<3;++M)this.backgroundEnable[M]?P[M]=h[M]:P[M]=0;for(this._background.draw(n,r,i,a,P,this.backgroundColor),this._lines.bind(n,r,i,this),M=0;M<3;++M){var O=[0,0,0];h[M]>0?O[M]=a[1][M]:O[M]=a[0][M];for(var L=0;L<2;++L){var I=(M+1+L)%3,D=(M+1+(1^L))%3;this.gridEnable[I]&&this._lines.drawGrid(I,D,this.bounds,O,this.gridColor[I],this.gridWidth[I]*this.pixelRatio)}for(L=0;L<2;++L)I=(M+1+L)%3,D=(M+1+(1^L))%3,this.zeroEnable[D]&&Math.min(a[0][D],a[1][D])<=0&&Math.max(a[0][D],a[1][D])>=0&&this._lines.drawZero(I,D,this.bounds,O,this.zeroLineColor[D],this.zeroLineWidth[D]*this.pixelRatio)}for(M=0;M<3;++M){this.lineEnable[M]&&this._lines.drawAxisLine(M,this.bounds,A[M].primalOffset,this.lineColor[M],this.lineWidth[M]*this.pixelRatio),this.lineMirror[M]&&this._lines.drawAxisLine(M,this.bounds,A[M].mirrorOffset,this.lineColor[M],this.lineWidth[M]*this.pixelRatio);var R=c(m,A[M].primalMinor),z=c(y,A[M].mirrorMinor),N=this.lineTickLength;for(L=0;L<3;++L){var 
j=T/n[5*L];R[L]*=N[L]*j,z[L]*=N[L]*j}this.lineTickEnable[M]&&this._lines.drawAxisTicks(M,A[M].primalOffset,R,this.lineTickColor[M],this.lineTickWidth[M]*this.pixelRatio),this.lineTickMirror[M]&&this._lines.drawAxisTicks(M,A[M].mirrorOffset,z,this.lineTickColor[M],this.lineTickWidth[M]*this.pixelRatio)}function F(t){(C=[0,0,0])[t]=1}function B(t,e,n){var r=(t+1)%3,i=(t+2)%3,a=e[r],o=e[i],s=n[r],l=n[i];a>0&&l>0||a>0&&l<0||a<0&&l>0||a<0&&l<0?F(r):(o>0&&s>0||o>0&&s<0||o<0&&s>0||o<0&&s<0)&&F(i)}for(this._lines.unbind(),this._text.bind(n,r,i,this.pixelRatio),M=0;M<3;++M){var U=A[M].primalMinor,H=A[M].mirrorMinor,V=c(b,A[M].primalOffset);for(L=0;L<3;++L)this.lineTickEnable[M]&&(V[L]+=T*U[L]*Math.max(this.lineTickLength[L],0)/n[5*L]);var q=[0,0,0];if(q[M]=1,this.tickEnable[M]){for(-3600===this.tickAngle[M]?(this.tickAngle[M]=0,this.tickAlign[M]="auto"):this.tickAlign[M]=-1,E=1,"auto"===(S=[this.tickAlign[M],.5,E])[0]?S[0]=0:S[0]=parseInt(""+S[0]),C=[0,0,0],B(M,U,H),L=0;L<3;++L)V[L]+=T*U[L]*this.tickPad[L]/n[5*L];this._text.drawTicks(M,this.tickSize[M],this.tickAngle[M],V,this.tickColor[M],q,C,S)}if(this.labelEnable[M]){for(E=0,C=[0,0,0],this.labels[M].length>4&&(F(M),E=1),"auto"===(S=[this.labelAlign[M],.5,E])[0]?S[0]=0:S[0]=parseInt(""+S[0]),L=0;L<3;++L)V[L]+=T*U[L]*this.labelPad[L]/n[5*L];V[M]+=.5*(a[0][M]+a[1][M]),this._text.drawLabel(M,this.labelSize[M],this.labelAngle[M],V,this.labelColor[M],[0,0,0],C,S)}}this._text.unbind()},f.dispose=function(){this._text.dispose(),this._lines.dispose(),this._background.dispose(),this._lines=null,this._text=null,this._background=null,this.gl=null}},1011:function(t,e,n){"use strict";t.exports=function(t){for(var e=[],n=[],s=0,l=0;l<3;++l)for(var c=(l+1)%3,u=(l+2)%3,f=[0,0,0],h=[0,0,0],d=-1;d<=1;d+=2){n.push(s,s+2,s+1,s+1,s+2,s+3),f[l]=d,h[l]=d;for(var p=-1;p<=1;p+=2){f[c]=p;for(var g=-1;g<=1;g+=2)f[u]=g,e.push(f[0],f[1],f[2],h[0],h[1],h[2]),s+=1}var v=c;c=u,u=v}var m=r(t,new Float32Array(e)),y=r(t,new Uint16Array(n),t.ELEMENT_ARRAY_BUFFER),b=i(t,[{buffer:m,type:t.FLOAT,size:3,offset:0,stride:24},{buffer:m,type:t.FLOAT,size:3,offset:12,stride:24}],y),x=a(t);return x.attributes.position.location=0,x.attributes.normal.location=1,new o(t,m,b,x)};var r=n(5827),i=n(2944),a=n(1943).bg;function o(t,e,n,r){this.gl=t,this.buffer=e,this.vao=n,this.shader=r}var s=o.prototype;s.draw=function(t,e,n,r,i,a){for(var o=!1,s=0;s<3;++s)o=o||i[s];if(o){var l=this.gl;l.enable(l.POLYGON_OFFSET_FILL),l.polygonOffset(1,2),this.shader.bind(),this.shader.uniforms={model:t,view:e,projection:n,bounds:r,enable:i,colors:a},this.vao.bind(),this.vao.draw(this.gl.TRIANGLES,36),this.vao.unbind(),l.disable(l.POLYGON_OFFSET_FILL)}},s.dispose=function(){this.vao.dispose(),this.buffer.dispose(),this.shader.dispose()}},2864:function(t,e,n){"use strict";t.exports=function(t,e,n,a,d){i(s,e,t),i(s,n,s);for(var y=0,b=0;b<2;++b){u[2]=a[b][2];for(var x=0;x<2;++x){u[1]=a[x][1];for(var w=0;w<2;++w)u[0]=a[w][0],h(l[y],u,s),y+=1}}var _=-1;for(b=0;b<8;++b){for(var k=l[b][3],T=0;T<3;++T)c[b][T]=l[b][T]/k;d&&(c[b][2]*=-1),k<0&&(_<0||c[b][2]E&&(_|=1<E&&(_|=1<c[b][1])&&(z=b);var N=-1;for(b=0;b<3;++b)(F=z^1<c[j][0]&&(j=F))}var B=g;B[0]=B[1]=B[2]=0,B[r.log2(N^z)]=z&N,B[r.log2(z^j)]=z&j;var U=7^j;U===_||U===R?(U=7^N,B[r.log2(j^U)]=U&j):B[r.log2(N^U)]=U&N;var H=v,V=_;for(M=0;M<3;++M)H[M]=V&1< HALF_PI) && (b <= ONE_AND_HALF_PI)) ?\n b - PI :\n b;\n}\n\nfloat look_horizontal_or_vertical(float a, float ratio) {\n // ratio controls the ratio between being horizontal to (vertical + horizontal)\n // if ratio is set to 0.5 
then it is 50%, 50%.\n // when using a higher ratio e.g. 0.75 the result would\n // likely be more horizontal than vertical.\n\n float b = positive_angle(a);\n\n return\n (b < ( ratio) * HALF_PI) ? 0.0 :\n (b < (2.0 - ratio) * HALF_PI) ? -HALF_PI :\n (b < (2.0 + ratio) * HALF_PI) ? 0.0 :\n (b < (4.0 - ratio) * HALF_PI) ? HALF_PI :\n 0.0;\n}\n\nfloat roundTo(float a, float b) {\n return float(b * floor((a + 0.5 * b) / b));\n}\n\nfloat look_round_n_directions(float a, int n) {\n float b = positive_angle(a);\n float div = TWO_PI / float(n);\n float c = roundTo(b, div);\n return look_upwards(c);\n}\n\nfloat applyAlignOption(float rawAngle, float delta) {\n return\n (option > 2) ? look_round_n_directions(rawAngle + delta, option) : // option 3-n: round to n directions\n (option == 2) ? look_horizontal_or_vertical(rawAngle + delta, hv_ratio) : // horizontal or vertical\n (option == 1) ? rawAngle + delta : // use free angle, and flip to align with one direction of the axis\n (option == 0) ? look_upwards(rawAngle) : // use free angle, and stay upwards\n (option ==-1) ? 0.0 : // useful for backward compatibility, all texts remains horizontal\n rawAngle; // otherwise return back raw input angle\n}\n\nbool isAxisTitle = (axis.x == 0.0) &&\n (axis.y == 0.0) &&\n (axis.z == 0.0);\n\nvoid main() {\n //Compute world offset\n float axisDistance = position.z;\n vec3 dataPosition = axisDistance * axis + offset;\n\n float beta = angle; // i.e. user defined attributes for each tick\n\n float axisAngle;\n float clipAngle;\n float flip;\n\n if (enableAlign) {\n axisAngle = (isAxisTitle) ? HALF_PI :\n computeViewAngle(dataPosition, dataPosition + axis);\n clipAngle = computeViewAngle(dataPosition, dataPosition + alignDir);\n\n axisAngle += (sin(axisAngle) < 0.0) ? PI : 0.0;\n clipAngle += (sin(clipAngle) < 0.0) ? PI : 0.0;\n\n flip = (dot(vec2(cos(axisAngle), sin(axisAngle)),\n vec2(sin(clipAngle),-cos(clipAngle))) > 0.0) ? 
1.0 : 0.0;\n\n beta += applyAlignOption(clipAngle, flip * PI);\n }\n\n //Compute plane offset\n vec2 planeCoord = position.xy * pixelScale;\n\n mat2 planeXform = scale * mat2(\n cos(beta), sin(beta),\n -sin(beta), cos(beta)\n );\n\n vec2 viewOffset = 2.0 * planeXform * planeCoord / resolution;\n\n //Compute clip position\n vec3 clipPosition = project(dataPosition);\n\n //Apply text offset in clip coordinates\n clipPosition += vec3(viewOffset, 0.0);\n\n //Done\n gl_Position = vec4(clipPosition, 1.0);\n}"]),l=r(["precision highp float;\n#define GLSLIFY 1\n\nuniform vec4 color;\nvoid main() {\n gl_FragColor = color;\n}"]);e.f=function(t){return i(t,s,l,null,[{name:"position",type:"vec3"}])};var c=r(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\nattribute vec3 normal;\n\nuniform mat4 model, view, projection;\nuniform vec3 enable;\nuniform vec3 bounds[2];\n\nvarying vec3 colorChannel;\n\nvoid main() {\n\n vec3 signAxis = sign(bounds[1] - bounds[0]);\n\n vec3 realNormal = signAxis * normal;\n\n if(dot(realNormal, enable) > 0.0) {\n vec3 minRange = min(bounds[0], bounds[1]);\n vec3 maxRange = max(bounds[0], bounds[1]);\n vec3 nPosition = mix(minRange, maxRange, 0.5 * (position + 1.0));\n gl_Position = projection * view * model * vec4(nPosition, 1.0);\n } else {\n gl_Position = vec4(0,0,0,0);\n }\n\n colorChannel = abs(realNormal);\n}"]),u=r(["precision highp float;\n#define GLSLIFY 1\n\nuniform vec4 colors[3];\n\nvarying vec3 colorChannel;\n\nvoid main() {\n gl_FragColor = colorChannel.x * colors[0] +\n colorChannel.y * colors[1] +\n colorChannel.z * colors[2];\n}"]);e.bg=function(t){return i(t,c,u,null,[{name:"position",type:"vec3"},{name:"normal",type:"vec3"}])}},9557:function(t,e,n){"use strict";t.exports=function(t,e,n,i,o,l){var c=r(t),f=a(t,[{buffer:c,size:3}]),h=s(t);h.attributes.position.location=0;var d=new u(t,h,c,f);return d.update(e,n,i,o,l),d};var r=n(5827),a=n(2944),o=n(875),s=n(1943).f,l=window||i.global||{},c=l.__TEXT_CACHE||{};function u(t,e,n,r){this.gl=t,this.shader=e,this.buffer=n,this.vao=r,this.tickOffset=this.tickCount=this.labelOffset=this.labelCount=null}l.__TEXT_CACHE={};var f=u.prototype,h=[0,0];f.bind=function(t,e,n,r){this.vao.bind(),this.shader.bind();var i=this.shader.uniforms;i.model=t,i.view=e,i.projection=n,i.pixelScale=r,h[0]=this.gl.drawingBufferWidth,h[1]=this.gl.drawingBufferHeight,this.shader.uniforms.resolution=h},f.unbind=function(){this.vao.unbind()},f.update=function(t,e,n,r,i){var a=[];function s(t,e,n,r,i,s){var l=c[n];l||(l=c[n]={});var u=l[e];u||(u=l[e]=function(t,e){try{return o(t,e)}catch(n){return console.warn('error vectorizing text:"'+t+'" error:',n),{cells:[],positions:[]}}}(e,{triangles:!0,font:n,textAlign:"center",textBaseline:"middle",lineSpacing:i,styletags:s}));for(var f=(r||12)/12,h=u.positions,d=u.cells,p=0,g=d.length;p=0;--m){var y=h[v[m]];a.push(f*y[0],-f*y[1],t)}}for(var l=[0,0,0],u=[0,0,0],f=[0,0,0],h=[0,0,0],d={breaklines:!0,bolds:!0,italics:!0,subscripts:!0,superscripts:!0},p=0;p<3;++p){f[p]=a.length/3|0,s(.5*(t[0][p]+t[1][p]),e[p],n[p],12,1.25,d),h[p]=(a.length/3|0)-f[p],l[p]=a.length/3|0;for(var g=0;g=0&&(i=n.length-r-1);var a=Math.pow(10,i),o=Math.round(t*e*a),s=o+"";if(s.indexOf("e")>=0)return s;var l=o/a,c=o%a;o<0?(l=0|-Math.ceil(l),c=0|-c):(l=0|Math.floor(l),c|=0);var u=""+l;if(o<0&&(u="-"+u),i){for(var f=""+c;f.length=t[0][i];--o)a.push({x:o*e[i],text:n(e[i],o)});r.push(a)}return r},e.equal=function(t,e){for(var n=0;n<3;++n){if(t[n].length!==e[n].length)return!1;for(var r=0;rn)throw new 
Error("gl-buffer: If resizing buffer, must not specify offset");return t.bufferSubData(e,a,i),n}function u(t,e){for(var n=r.malloc(t.length,e),i=t.length,a=0;a=0;--r){if(e[r]!==n)return!1;n*=t[r]}return!0}(t.shape,t.stride))0===t.offset&&t.data.length===t.shape[0]?this.length=c(this.gl,this.type,this.length,this.usage,t.data,e):this.length=c(this.gl,this.type,this.length,this.usage,t.data.subarray(t.offset,t.shape[0]),e);else{var s=r.malloc(t.size,n),l=a(s,t.shape);i.assign(l,t),this.length=c(this.gl,this.type,this.length,this.usage,e<0?s:s.subarray(0,t.size),e),r.free(s)}}else if(Array.isArray(t)){var f;f=this.type===this.gl.ELEMENT_ARRAY_BUFFER?u(t,"uint16"):u(t,"float32"),this.length=c(this.gl,this.type,this.length,this.usage,e<0?f:f.subarray(0,t.length),e),r.free(f)}else if("object"===typeof t&&"number"===typeof t.length)this.length=c(this.gl,this.type,this.length,this.usage,t,e);else{if("number"!==typeof t&&void 0!==t)throw new Error("gl-buffer: Invalid data type");if(e>=0)throw new Error("gl-buffer: Cannot specify offset when resizing buffer");(t|=0)<=0&&(t=1),this.gl.bufferData(this.type,0|t,this.usage),this.length=t}},t.exports=function(t,e,n,r){if(n=n||t.ARRAY_BUFFER,r=r||t.DYNAMIC_DRAW,n!==t.ARRAY_BUFFER&&n!==t.ELEMENT_ARRAY_BUFFER)throw new Error("gl-buffer: Invalid type for webgl buffer, must be either gl.ARRAY_BUFFER or gl.ELEMENT_ARRAY_BUFFER");if(r!==t.DYNAMIC_DRAW&&r!==t.STATIC_DRAW&&r!==t.STREAM_DRAW)throw new Error("gl-buffer: Invalid usage for buffer, must be either gl.DYNAMIC_DRAW, gl.STATIC_DRAW or gl.STREAM_DRAW");var i=t.createBuffer(),a=new s(t,n,i,0,r);return a.update(e),a}},1140:function(t,e,n){"use strict";var r=n(2858);t.exports=function(t,e){var n=t.positions,i=t.vectors,a={positions:[],vertexIntensity:[],vertexIntensityBounds:t.vertexIntensityBounds,vectors:[],cells:[],coneOffset:t.coneOffset,colormap:t.colormap};if(0===t.positions.length)return e&&(e[0]=[0,0,0],e[1]=[0,0,0]),a;for(var o=0,s=1/0,l=-1/0,c=1/0,u=-1/0,f=1/0,h=-1/0,d=null,p=null,g=[],v=1/0,m=!1,y=0;yo&&(o=r.length(x)),y){var w=2*r.distance(d,b)/(r.length(p)+r.length(x));w?(v=Math.min(v,w),m=!1):m=!0}m||(d=b,p=x),g.push(x)}var _=[s,c,f],k=[l,u,h];e&&(e[0]=_,e[1]=k),0===o&&(o=1);var T=1/o;isFinite(v)||(v=1),a.vectorScale=v;var M=t.coneSize||.5;t.absoluteConeSize&&(M=t.absoluteConeSize*T),a.coneScale=M,y=0;for(var A=0;y=1},d.isTransparent=function(){return this.opacity<1},d.pickSlots=1,d.setPickBase=function(t){this.pickId=t},d.update=function(t){t=t||{};var e=this.gl;this.dirty=!0,"lightPosition"in t&&(this.lightPosition=t.lightPosition),"opacity"in t&&(this.opacity=t.opacity),"ambient"in t&&(this.ambientLight=t.ambient),"diffuse"in t&&(this.diffuseLight=t.diffuse),"specular"in t&&(this.specularLight=t.specular),"roughness"in t&&(this.roughness=t.roughness),"fresnel"in t&&(this.fresnel=t.fresnel),void 0!==t.tubeScale&&(this.tubeScale=t.tubeScale),void 0!==t.vectorScale&&(this.vectorScale=t.vectorScale),void 0!==t.coneScale&&(this.coneScale=t.coneScale),void 0!==t.coneOffset&&(this.coneOffset=t.coneOffset),t.colormap&&(this.texture.shape=[256,256],this.texture.minFilter=e.LINEAR_MIPMAP_LINEAR,this.texture.magFilter=e.LINEAR,this.texture.setPixels(function(t){for(var e=u({colormap:t,nshades:256,format:"rgba"}),n=new Uint8Array(1024),r=0;r<256;++r){for(var i=e[r],a=0;a<3;++a)n[4*r+a]=i[a];n[4*r+3]=255*i[3]}return c(n,[256,256,4],[4,0,1])}(t.colormap)),this.texture.generateMipmap());var n=t.cells,r=t.positions,i=t.vectors;if(r&&n&&i){var 
a=[],o=[],s=[],l=[],f=[];this.cells=n,this.positions=r,this.vectors=i;var h=t.meshColor||[1,1,1,1],d=t.vertexIntensity,p=1/0,g=-1/0;if(d)if(t.vertexIntensityBounds)p=+t.vertexIntensityBounds[0],g=+t.vertexIntensityBounds[1];else for(var v=0;v0){var g=this.triShader;g.bind(),g.uniforms=c,this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind()}},d.drawPick=function(t){t=t||{};for(var e=this.gl,n=t.model||f,r=t.view||f,i=t.projection||f,a=[[-1e6,-1e6,-1e6],[1e6,1e6,1e6]],o=0;o<3;++o)a[0][o]=Math.max(a[0][o],this.clipBounds[0][o]),a[1][o]=Math.min(a[1][o],this.clipBounds[1][o]);this._model=[].slice.call(n),this._view=[].slice.call(r),this._projection=[].slice.call(i),this._resolution=[e.drawingBufferWidth,e.drawingBufferHeight];var s={model:n,view:r,projection:i,clipBounds:a,tubeScale:this.tubeScale,vectorScale:this.vectorScale,coneScale:this.coneScale,coneOffset:this.coneOffset,pickId:this.pickId/255},l=this.pickShader;l.bind(),l.uniforms=s,this.triangleCount>0&&(this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind())},d.pick=function(t){if(!t)return null;if(t.id!==this.pickId)return null;var e=t.value[0]+256*t.value[1]+65536*t.value[2],n=this.cells[e],r=this.positions[n[1]].slice(0,3),i={position:r,dataCoordinate:r,index:Math.floor(n[1]/48)};return"cone"===this.traceType?i.index=Math.floor(n[1]/48):"streamtube"===this.traceType&&(i.intensity=this.intensity[n[1]],i.velocity=this.vectors[n[1]].slice(0,3),i.divergence=this.vectors[n[1]][3],i.index=e),i},d.dispose=function(){this.texture.dispose(),this.triShader.dispose(),this.pickShader.dispose(),this.triangleVAO.dispose(),this.trianglePositions.dispose(),this.triangleVectors.dispose(),this.triangleColors.dispose(),this.triangleUVs.dispose(),this.triangleIds.dispose()},t.exports=function(t,e,n){var s=n.shaders;1===arguments.length&&(t=(e=t).gl);var l=function(t,e){var n=r(t,e.meshShader.vertex,e.meshShader.fragment,null,e.meshShader.attributes);return n.attributes.position.location=0,n.attributes.color.location=2,n.attributes.uv.location=3,n.attributes.vector.location=4,n}(t,s),u=function(t,e){var n=r(t,e.pickShader.vertex,e.pickShader.fragment,null,e.pickShader.attributes);return n.attributes.position.location=0,n.attributes.id.location=1,n.attributes.vector.location=4,n}(t,s),f=o(t,c(new Uint8Array([255,255,255,255]),[1,1,4]));f.generateMipmap(),f.minFilter=t.LINEAR_MIPMAP_LINEAR,f.magFilter=t.LINEAR;var d=i(t),p=i(t),g=i(t),v=i(t),m=i(t),y=new h(t,f,l,u,d,p,m,g,v,a(t,[{buffer:d,type:t.FLOAT,size:4},{buffer:m,type:t.UNSIGNED_BYTE,size:4,normalized:!0},{buffer:g,type:t.FLOAT,size:4},{buffer:v,type:t.FLOAT,size:2},{buffer:p,type:t.FLOAT,size:4}]),n.traceType||"cone");return y.update(e),y}},7234:function(t,e,n){var r=n(6832),i=r(["precision highp float;\n\nprecision highp float;\n#define GLSLIFY 1\n\nvec3 getOrthogonalVector(vec3 v) {\n // Return up-vector for only-z vector.\n // Return ax + by + cz = 0, a point that lies on the plane that has v as a normal and that isn't (0,0,0).\n // From the above if-statement we have ||a|| > 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the cone vertex and normal at the given index.\n//\n// The returned vertex is for a cone with its top at origin and height of 1.0,\n// pointing in the direction of the vector 
attribute.\n//\n// Each cone is made up of a top vertex, a center base vertex and base perimeter vertices.\n// These vertices are used to make up the triangles of the cone by the following:\n// segment + 0 top vertex\n// segment + 1 perimeter vertex a+1\n// segment + 2 perimeter vertex a\n// segment + 3 center base vertex\n// segment + 4 perimeter vertex a\n// segment + 5 perimeter vertex a+1\n// Where segment is the number of the radial segment * 6 and a is the angle at that radial segment.\n// To go from index to segment, floor(index / 6)\n// To go from segment to angle, 2*pi * (segment/segmentCount)\n// To go from index to segment index, index - (segment*6)\n//\nvec3 getConePosition(vec3 d, float rawIndex, float coneOffset, out vec3 normal) {\n\n const float segmentCount = 8.0;\n\n float index = rawIndex - floor(rawIndex /\n (segmentCount * 6.0)) *\n (segmentCount * 6.0);\n\n float segment = floor(0.001 + index/6.0);\n float segmentIndex = index - (segment*6.0);\n\n normal = -normalize(d);\n\n if (segmentIndex > 2.99 && segmentIndex < 3.01) {\n return mix(vec3(0.0), -d, coneOffset);\n }\n\n float nextAngle = (\n (segmentIndex > 0.99 && segmentIndex < 1.01) ||\n (segmentIndex > 4.99 && segmentIndex < 5.01)\n ) ? 1.0 : 0.0;\n float angle = 2.0 * 3.14159 * ((segment + nextAngle) / segmentCount);\n\n vec3 v1 = mix(d, vec3(0.0), coneOffset);\n vec3 v2 = v1 - d;\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d)*0.25;\n vec3 y = v * sin(angle) * length(d)*0.25;\n vec3 v3 = v2 + x + y;\n if (segmentIndex < 3.0) {\n vec3 tx = u * sin(angle);\n vec3 ty = v * -cos(angle);\n vec3 tangent = tx + ty;\n normal = normalize(cross(v3 - v1, tangent));\n }\n\n if (segmentIndex == 0.0) {\n return mix(d, vec3(0.0), coneOffset);\n }\n return v3;\n}\n\nattribute vec3 vector;\nattribute vec4 color, position;\nattribute vec2 uv;\n\nuniform float vectorScale, coneScale, coneOffset;\nuniform mat4 model, view, projection, inverseModel;\nuniform vec3 eyePosition, lightPosition;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n // Scale the vector magnitude to stay constant with\n // model & view changes.\n vec3 normal;\n vec3 XYZ = getConePosition(mat3(model) * ((vectorScale * coneScale) * vector), position.w, coneOffset, normal);\n vec4 conePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * conePosition;\n cameraCoordinate.xyz /= cameraCoordinate.w;\n f_lightDirection = lightPosition - cameraCoordinate.xyz;\n f_eyeDirection = eyePosition - cameraCoordinate.xyz;\n f_normal = normalize((vec4(normal, 0.0) * inverseModel).xyz);\n\n // vec4 m_position = model * vec4(conePosition, 1.0);\n vec4 t_position = view * conePosition;\n gl_Position = projection * t_position;\n\n f_color = color;\n f_data = conePosition.xyz;\n f_position = position.xyz;\n f_uv = uv;\n}\n"]),a=r(["#extension GL_OES_standard_derivatives : enable\n\nprecision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat cookTorranceSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 
surfaceNormal,\n float roughness,\n float fresnel) {\n\n float VdotN = max(dot(viewDirection, surfaceNormal), 0.0);\n float LdotN = max(dot(lightDirection, surfaceNormal), 0.0);\n\n //Half angle vector\n vec3 H = normalize(lightDirection + viewDirection);\n\n //Geometric term\n float NdotH = max(dot(surfaceNormal, H), 0.0);\n float VdotH = max(dot(viewDirection, H), 0.000001);\n float LdotH = max(dot(lightDirection, H), 0.000001);\n float G1 = (2.0 * NdotH * VdotN) / VdotH;\n float G2 = (2.0 * NdotH * LdotN) / LdotH;\n float G = min(1.0, min(G1, G2));\n \n //Distribution term\n float D = beckmannDistribution(NdotH, roughness);\n\n //Fresnel term\n float F = pow(1.0 - VdotN, fresnel);\n\n //Multiply terms and done\n return G * F * D / max(3.14159265 * VdotN, 0.000001);\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float roughness, fresnel, kambient, kdiffuse, kspecular, opacity;\nuniform sampler2D texture;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n vec3 N = normalize(f_normal);\n vec3 L = normalize(f_lightDirection);\n vec3 V = normalize(f_eyeDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = min(1.0, max(0.0, cookTorranceSpecular(L, V, N, roughness, fresnel)));\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n vec4 surfaceColor = f_color * texture2D(texture, f_uv);\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = litColor * opacity;\n}\n"]),o=r(["precision highp float;\n\nprecision highp float;\n#define GLSLIFY 1\n\nvec3 getOrthogonalVector(vec3 v) {\n // Return up-vector for only-z vector.\n // Return ax + by + cz = 0, a point that lies on the plane that has v as a normal and that isn't (0,0,0).\n // From the above if-statement we have ||a|| > 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the cone vertex and normal at the given index.\n//\n// The returned vertex is for a cone with its top at origin and height of 1.0,\n// pointing in the direction of the vector attribute.\n//\n// Each cone is made up of a top vertex, a center base vertex and base perimeter vertices.\n// These vertices are used to make up the triangles of the cone by the following:\n// segment + 0 top vertex\n// segment + 1 perimeter vertex a+1\n// segment + 2 perimeter vertex a\n// segment + 3 center base vertex\n// segment + 4 perimeter vertex a\n// segment + 5 perimeter vertex a+1\n// Where segment is the number of the radial segment * 6 and a is the angle at that radial segment.\n// To go from index to segment, floor(index / 6)\n// To go from segment to angle, 2*pi * (segment/segmentCount)\n// To go from index to segment index, index - (segment*6)\n//\nvec3 getConePosition(vec3 d, 
float rawIndex, float coneOffset, out vec3 normal) {\n\n const float segmentCount = 8.0;\n\n float index = rawIndex - floor(rawIndex /\n (segmentCount * 6.0)) *\n (segmentCount * 6.0);\n\n float segment = floor(0.001 + index/6.0);\n float segmentIndex = index - (segment*6.0);\n\n normal = -normalize(d);\n\n if (segmentIndex > 2.99 && segmentIndex < 3.01) {\n return mix(vec3(0.0), -d, coneOffset);\n }\n\n float nextAngle = (\n (segmentIndex > 0.99 && segmentIndex < 1.01) ||\n (segmentIndex > 4.99 && segmentIndex < 5.01)\n ) ? 1.0 : 0.0;\n float angle = 2.0 * 3.14159 * ((segment + nextAngle) / segmentCount);\n\n vec3 v1 = mix(d, vec3(0.0), coneOffset);\n vec3 v2 = v1 - d;\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d)*0.25;\n vec3 y = v * sin(angle) * length(d)*0.25;\n vec3 v3 = v2 + x + y;\n if (segmentIndex < 3.0) {\n vec3 tx = u * sin(angle);\n vec3 ty = v * -cos(angle);\n vec3 tangent = tx + ty;\n normal = normalize(cross(v3 - v1, tangent));\n }\n\n if (segmentIndex == 0.0) {\n return mix(d, vec3(0.0), coneOffset);\n }\n return v3;\n}\n\nattribute vec4 vector;\nattribute vec4 position;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform float vectorScale, coneScale, coneOffset;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n vec3 normal;\n vec3 XYZ = getConePosition(mat3(model) * ((vectorScale * coneScale) * vector.xyz), position.w, coneOffset, normal);\n vec4 conePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n gl_Position = projection * view * conePosition;\n f_id = id;\n f_position = position.xyz;\n}\n"]),s=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n\n gl_FragColor = vec4(pickId, 
f_id.xyz);\n}"]);e.meshShader={vertex:i,fragment:a,attributes:[{name:"position",type:"vec4"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"},{name:"vector",type:"vec3"}]},e.pickShader={vertex:o,fragment:s,attributes:[{name:"position",type:"vec4"},{name:"id",type:"vec4"},{name:"vector",type:"vec3"}]}},1950:function(t){t.exports={0:"NONE",1:"ONE",2:"LINE_LOOP",3:"LINE_STRIP",4:"TRIANGLES",5:"TRIANGLE_STRIP",6:"TRIANGLE_FAN",256:"DEPTH_BUFFER_BIT",512:"NEVER",513:"LESS",514:"EQUAL",515:"LEQUAL",516:"GREATER",517:"NOTEQUAL",518:"GEQUAL",519:"ALWAYS",768:"SRC_COLOR",769:"ONE_MINUS_SRC_COLOR",770:"SRC_ALPHA",771:"ONE_MINUS_SRC_ALPHA",772:"DST_ALPHA",773:"ONE_MINUS_DST_ALPHA",774:"DST_COLOR",775:"ONE_MINUS_DST_COLOR",776:"SRC_ALPHA_SATURATE",1024:"STENCIL_BUFFER_BIT",1028:"FRONT",1029:"BACK",1032:"FRONT_AND_BACK",1280:"INVALID_ENUM",1281:"INVALID_VALUE",1282:"INVALID_OPERATION",1285:"OUT_OF_MEMORY",1286:"INVALID_FRAMEBUFFER_OPERATION",2304:"CW",2305:"CCW",2849:"LINE_WIDTH",2884:"CULL_FACE",2885:"CULL_FACE_MODE",2886:"FRONT_FACE",2928:"DEPTH_RANGE",2929:"DEPTH_TEST",2930:"DEPTH_WRITEMASK",2931:"DEPTH_CLEAR_VALUE",2932:"DEPTH_FUNC",2960:"STENCIL_TEST",2961:"STENCIL_CLEAR_VALUE",2962:"STENCIL_FUNC",2963:"STENCIL_VALUE_MASK",2964:"STENCIL_FAIL",2965:"STENCIL_PASS_DEPTH_FAIL",2966:"STENCIL_PASS_DEPTH_PASS",2967:"STENCIL_REF",2968:"STENCIL_WRITEMASK",2978:"VIEWPORT",3024:"DITHER",3042:"BLEND",3088:"SCISSOR_BOX",3089:"SCISSOR_TEST",3106:"COLOR_CLEAR_VALUE",3107:"COLOR_WRITEMASK",3317:"UNPACK_ALIGNMENT",3333:"PACK_ALIGNMENT",3379:"MAX_TEXTURE_SIZE",3386:"MAX_VIEWPORT_DIMS",3408:"SUBPIXEL_BITS",3410:"RED_BITS",3411:"GREEN_BITS",3412:"BLUE_BITS",3413:"ALPHA_BITS",3414:"DEPTH_BITS",3415:"STENCIL_BITS",3553:"TEXTURE_2D",4352:"DONT_CARE",4353:"FASTEST",4354:"NICEST",5120:"BYTE",5121:"UNSIGNED_BYTE",5122:"SHORT",5123:"UNSIGNED_SHORT",5124:"INT",5125:"UNSIGNED_INT",5126:"FLOAT",5386:"INVERT",5890:"TEXTURE",6401:"STENCIL_INDEX",6402:"DEPTH_COMPONENT",6406:"ALPHA",6407:"RGB",6408:"RGBA",6409:"LUMINANCE",6410:"LUMINANCE_ALPHA",7680:"KEEP",7681:"REPLACE",7682:"INCR",7683:"DECR",7936:"VENDOR",7937:"RENDERER",7938:"VERSION",9728:"NEAREST",9729:"LINEAR",9984:"NEAREST_MIPMAP_NEAREST",9985:"LINEAR_MIPMAP_NEAREST",9986:"NEAREST_MIPMAP_LINEAR",9987:"LINEAR_MIPMAP_LINEAR",10240:"TEXTURE_MAG_FILTER",10241:"TEXTURE_MIN_FILTER",10242:"TEXTURE_WRAP_S",10243:"TEXTURE_WRAP_T",10497:"REPEAT",10752:"POLYGON_OFFSET_UNITS",16384:"COLOR_BUFFER_BIT",32769:"CONSTANT_COLOR",32770:"ONE_MINUS_CONSTANT_COLOR",32771:"CONSTANT_ALPHA",32772:"ONE_MINUS_CONSTANT_ALPHA",32773:"BLEND_COLOR",32774:"FUNC_ADD",32777:"BLEND_EQUATION_RGB",32778:"FUNC_SUBTRACT",32779:"FUNC_REVERSE_SUBTRACT",32819:"UNSIGNED_SHORT_4_4_4_4",32820:"UNSIGNED_SHORT_5_5_5_1",32823:"POLYGON_OFFSET_FILL",32824:"POLYGON_OFFSET_FACTOR",32854:"RGBA4",32855:"RGB5_A1",32873:"TEXTURE_BINDING_2D",32926:"SAMPLE_ALPHA_TO_COVERAGE",32928:"SAMPLE_COVERAGE",32936:"SAMPLE_BUFFERS",32937:"SAMPLES",32938:"SAMPLE_COVERAGE_VALUE",32939:"SAMPLE_COVERAGE_INVERT",32968:"BLEND_DST_RGB",32969:"BLEND_SRC_RGB",32970:"BLEND_DST_ALPHA",32971:"BLEND_SRC_ALPHA",33071:"CLAMP_TO_EDGE",33170:"GENERATE_MIPMAP_HINT",33189:"DEPTH_COMPONENT16",33306:"DEPTH_STENCIL_ATTACHMENT",33635:"UNSIGNED_SHORT_5_6_5",33648:"MIRRORED_REPEAT",33901:"ALIASED_POINT_SIZE_RANGE",33902:"ALIASED_LINE_WIDTH_RANGE",33984:"TEXTURE0",33985:"TEXTURE1",33986:"TEXTURE2",33987:"TEXTURE3",33988:"TEXTURE4",33989:"TEXTURE5",33990:"TEXTURE6",33991:"TEXTURE7",33992:"TEXTURE8",33993:"TEXTURE9",33994:"TEXTURE10",33995:"TEXTURE11",33996:"TEXTURE1
2",33997:"TEXTURE13",33998:"TEXTURE14",33999:"TEXTURE15",34e3:"TEXTURE16",34001:"TEXTURE17",34002:"TEXTURE18",34003:"TEXTURE19",34004:"TEXTURE20",34005:"TEXTURE21",34006:"TEXTURE22",34007:"TEXTURE23",34008:"TEXTURE24",34009:"TEXTURE25",34010:"TEXTURE26",34011:"TEXTURE27",34012:"TEXTURE28",34013:"TEXTURE29",34014:"TEXTURE30",34015:"TEXTURE31",34016:"ACTIVE_TEXTURE",34024:"MAX_RENDERBUFFER_SIZE",34041:"DEPTH_STENCIL",34055:"INCR_WRAP",34056:"DECR_WRAP",34067:"TEXTURE_CUBE_MAP",34068:"TEXTURE_BINDING_CUBE_MAP",34069:"TEXTURE_CUBE_MAP_POSITIVE_X",34070:"TEXTURE_CUBE_MAP_NEGATIVE_X",34071:"TEXTURE_CUBE_MAP_POSITIVE_Y",34072:"TEXTURE_CUBE_MAP_NEGATIVE_Y",34073:"TEXTURE_CUBE_MAP_POSITIVE_Z",34074:"TEXTURE_CUBE_MAP_NEGATIVE_Z",34076:"MAX_CUBE_MAP_TEXTURE_SIZE",34338:"VERTEX_ATTRIB_ARRAY_ENABLED",34339:"VERTEX_ATTRIB_ARRAY_SIZE",34340:"VERTEX_ATTRIB_ARRAY_STRIDE",34341:"VERTEX_ATTRIB_ARRAY_TYPE",34342:"CURRENT_VERTEX_ATTRIB",34373:"VERTEX_ATTRIB_ARRAY_POINTER",34466:"NUM_COMPRESSED_TEXTURE_FORMATS",34467:"COMPRESSED_TEXTURE_FORMATS",34660:"BUFFER_SIZE",34661:"BUFFER_USAGE",34816:"STENCIL_BACK_FUNC",34817:"STENCIL_BACK_FAIL",34818:"STENCIL_BACK_PASS_DEPTH_FAIL",34819:"STENCIL_BACK_PASS_DEPTH_PASS",34877:"BLEND_EQUATION_ALPHA",34921:"MAX_VERTEX_ATTRIBS",34922:"VERTEX_ATTRIB_ARRAY_NORMALIZED",34930:"MAX_TEXTURE_IMAGE_UNITS",34962:"ARRAY_BUFFER",34963:"ELEMENT_ARRAY_BUFFER",34964:"ARRAY_BUFFER_BINDING",34965:"ELEMENT_ARRAY_BUFFER_BINDING",34975:"VERTEX_ATTRIB_ARRAY_BUFFER_BINDING",35040:"STREAM_DRAW",35044:"STATIC_DRAW",35048:"DYNAMIC_DRAW",35632:"FRAGMENT_SHADER",35633:"VERTEX_SHADER",35660:"MAX_VERTEX_TEXTURE_IMAGE_UNITS",35661:"MAX_COMBINED_TEXTURE_IMAGE_UNITS",35663:"SHADER_TYPE",35664:"FLOAT_VEC2",35665:"FLOAT_VEC3",35666:"FLOAT_VEC4",35667:"INT_VEC2",35668:"INT_VEC3",35669:"INT_VEC4",35670:"BOOL",35671:"BOOL_VEC2",35672:"BOOL_VEC3",35673:"BOOL_VEC4",35674:"FLOAT_MAT2",35675:"FLOAT_MAT3",35676:"FLOAT_MAT4",35678:"SAMPLER_2D",35680:"SAMPLER_CUBE",35712:"DELETE_STATUS",35713:"COMPILE_STATUS",35714:"LINK_STATUS",35715:"VALIDATE_STATUS",35716:"INFO_LOG_LENGTH",35717:"ATTACHED_SHADERS",35718:"ACTIVE_UNIFORMS",35719:"ACTIVE_UNIFORM_MAX_LENGTH",35720:"SHADER_SOURCE_LENGTH",35721:"ACTIVE_ATTRIBUTES",35722:"ACTIVE_ATTRIBUTE_MAX_LENGTH",35724:"SHADING_LANGUAGE_VERSION",35725:"CURRENT_PROGRAM",36003:"STENCIL_BACK_REF",36004:"STENCIL_BACK_VALUE_MASK",36005:"STENCIL_BACK_WRITEMASK",36006:"FRAMEBUFFER_BINDING",36007:"RENDERBUFFER_BINDING",36048:"FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE",36049:"FRAMEBUFFER_ATTACHMENT_OBJECT_NAME",36050:"FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL",36051:"FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE",36053:"FRAMEBUFFER_COMPLETE",36054:"FRAMEBUFFER_INCOMPLETE_ATTACHMENT",36055:"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT",36057:"FRAMEBUFFER_INCOMPLETE_DIMENSIONS",36061:"FRAMEBUFFER_UNSUPPORTED",36064:"COLOR_ATTACHMENT0",36096:"DEPTH_ATTACHMENT",36128:"STENCIL_ATTACHMENT",36160:"FRAMEBUFFER",36161:"RENDERBUFFER",36162:"RENDERBUFFER_WIDTH",36163:"RENDERBUFFER_HEIGHT",36164:"RENDERBUFFER_INTERNAL_FORMAT",36168:"STENCIL_INDEX8",36176:"RENDERBUFFER_RED_SIZE",36177:"RENDERBUFFER_GREEN_SIZE",36178:"RENDERBUFFER_BLUE_SIZE",36179:"RENDERBUFFER_ALPHA_SIZE",36180:"RENDERBUFFER_DEPTH_SIZE",36181:"RENDERBUFFER_STENCIL_SIZE",36194:"RGB565",36336:"LOW_FLOAT",36337:"MEDIUM_FLOAT",36338:"HIGH_FLOAT",36339:"LOW_INT",36340:"MEDIUM_INT",36341:"HIGH_INT",36346:"SHADER_COMPILER",36347:"MAX_VERTEX_UNIFORM_VECTORS",36348:"MAX_VARYING_VECTORS",36349:"MAX_FRAGMENT_UNIFORM_VECTORS",37440:"UNPACK_FLIP_Y_WEBGL",37441:"UNPACK
_PREMULTIPLY_ALPHA_WEBGL",37442:"CONTEXT_LOST_WEBGL",37443:"UNPACK_COLORSPACE_CONVERSION_WEBGL",37444:"BROWSER_DEFAULT_WEBGL"}},6603:function(t,e,n){var r=n(1950);t.exports=function(t){return r[t]}},3110:function(t,e,n){"use strict";t.exports=function(t){var e=t.gl,n=r(e),o=i(e,[{buffer:n,type:e.FLOAT,size:3,offset:0,stride:40},{buffer:n,type:e.FLOAT,size:4,offset:12,stride:40},{buffer:n,type:e.FLOAT,size:3,offset:28,stride:40}]),l=a(e);l.attributes.position.location=0,l.attributes.color.location=1,l.attributes.offset.location=2;var c=new s(e,n,o,l);return c.update(t),c};var r=n(5827),i=n(2944),a=n(7667),o=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function s(t,e,n,r){this.gl=t,this.shader=r,this.buffer=e,this.vao=n,this.pixelRatio=1,this.bounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.lineWidth=[1,1,1],this.capSize=[10,10,10],this.lineCount=[0,0,0],this.lineOffset=[0,0,0],this.opacity=1,this.hasAlpha=!1}var l=s.prototype;function c(t,e){for(var n=0;n<3;++n)t[0][n]=Math.min(t[0][n],e[n]),t[1][n]=Math.max(t[1][n],e[n])}l.isOpaque=function(){return!this.hasAlpha},l.isTransparent=function(){return this.hasAlpha},l.drawTransparent=l.draw=function(t){var e=this.gl,n=this.shader.uniforms;this.shader.bind();var r=n.view=t.view||o,i=n.projection=t.projection||o;n.model=t.model||o,n.clipBounds=this.clipBounds,n.opacity=this.opacity;var a=r[12],s=r[13],l=r[14],c=r[15],u=(t._ortho?2:1)*this.pixelRatio*(i[3]*a+i[7]*s+i[11]*l+i[15]*c)/e.drawingBufferHeight;this.vao.bind();for(var f=0;f<3;++f)e.lineWidth(this.lineWidth[f]*this.pixelRatio),n.capSize=this.capSize[f]*u,this.lineCount[f]&&e.drawArrays(e.LINES,this.lineOffset[f],this.lineCount[f]);this.vao.unbind()};var u=function(){for(var t=new Array(3),e=0;e<3;++e){for(var n=[],r=1;r<=2;++r)for(var i=-1;i<=1;i+=2){var a=[0,0,0];a[(r+e)%3]=i,n.push(a)}t[e]=n}return t}();function f(t,e,n,r){for(var i=u[r],a=0;a0&&((d=u.slice())[s]+=p[1][s],i.push(u[0],u[1],u[2],g[0],g[1],g[2],g[3],0,0,0,d[0],d[1],d[2],g[0],g[1],g[2],g[3],0,0,0),c(this.bounds,d),o+=2+f(i,d,g,s)))}this.lineCount[s]=o-this.lineOffset[s]}this.buffer.update(i)}},l.dispose=function(){this.shader.dispose(),this.buffer.dispose(),this.vao.dispose()}},7667:function(t,e,n){"use strict";var r=n(6832),i=n(5158),a=r(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position, offset;\nattribute vec4 color;\nuniform mat4 model, view, projection;\nuniform float capSize;\nvarying vec4 fragColor;\nvarying vec3 fragPosition;\n\nvoid main() {\n vec4 worldPosition = model * vec4(position, 1.0);\n worldPosition = (worldPosition / worldPosition.w) + vec4(capSize * offset, 0.0);\n gl_Position = projection * view * worldPosition;\n fragColor = color;\n fragPosition = position;\n}"]),o=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float opacity;\nvarying vec3 fragPosition;\nvarying vec4 fragColor;\n\nvoid main() {\n if (\n outOfRange(clipBounds[0], clipBounds[1], fragPosition) ||\n fragColor.a * opacity == 0.\n ) discard;\n\n gl_FragColor = opacity * 
fragColor;\n}"]);t.exports=function(t){return i(t,a,o,null,[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"offset",type:"vec3"}])}},4234:function(t,e,n){"use strict";var r=n(8931);t.exports=function(t,e,n,r){i||(i=t.FRAMEBUFFER_UNSUPPORTED,a=t.FRAMEBUFFER_INCOMPLETE_ATTACHMENT,o=t.FRAMEBUFFER_INCOMPLETE_DIMENSIONS,s=t.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT);var c=t.getExtension("WEBGL_draw_buffers");if(!l&&c&&function(t,e){var n=t.getParameter(e.MAX_COLOR_ATTACHMENTS_WEBGL);l=new Array(n+1);for(var r=0;r<=n;++r){for(var i=new Array(n),a=0;au||n<0||n>u)throw new Error("gl-fbo: Parameters are too large for FBO");var f=1;if("color"in(r=r||{})){if((f=Math.max(0|r.color,0))<0)throw new Error("gl-fbo: Must specify a nonnegative number of colors");if(f>1){if(!c)throw new Error("gl-fbo: Multiple draw buffer extension not supported");if(f>t.getParameter(c.MAX_COLOR_ATTACHMENTS_WEBGL))throw new Error("gl-fbo: Context does not support "+f+" draw buffers")}}var h=t.UNSIGNED_BYTE,d=t.getExtension("OES_texture_float");if(r.float&&f>0){if(!d)throw new Error("gl-fbo: Context does not support floating point textures");h=t.FLOAT}else r.preferFloat&&f>0&&d&&(h=t.FLOAT);var g=!0;"depth"in r&&(g=!!r.depth);var v=!1;return"stencil"in r&&(v=!!r.stencil),new p(t,e,n,h,f,g,v,c)};var i,a,o,s,l=null;function c(t){return[t.getParameter(t.FRAMEBUFFER_BINDING),t.getParameter(t.RENDERBUFFER_BINDING),t.getParameter(t.TEXTURE_BINDING_2D)]}function u(t,e){t.bindFramebuffer(t.FRAMEBUFFER,e[0]),t.bindRenderbuffer(t.RENDERBUFFER,e[1]),t.bindTexture(t.TEXTURE_2D,e[2])}function f(t){switch(t){case i:throw new Error("gl-fbo: Framebuffer unsupported");case a:throw new Error("gl-fbo: Framebuffer incomplete attachment");case o:throw new Error("gl-fbo: Framebuffer incomplete dimensions");case s:throw new Error("gl-fbo: Framebuffer incomplete missing attachment");default:throw new Error("gl-fbo: Framebuffer failed for unspecified reason")}}function h(t,e,n,i,a,o){if(!i)return null;var s=r(t,e,n,a,i);return s.magFilter=t.NEAREST,s.minFilter=t.NEAREST,s.mipSamples=1,s.bind(),t.framebufferTexture2D(t.FRAMEBUFFER,o,t.TEXTURE_2D,s.handle,0),s}function d(t,e,n,r,i){var a=t.createRenderbuffer();return t.bindRenderbuffer(t.RENDERBUFFER,a),t.renderbufferStorage(t.RENDERBUFFER,r,e,n),t.framebufferRenderbuffer(t.FRAMEBUFFER,i,t.RENDERBUFFER,a),a}function p(t,e,n,r,i,a,o,s){this.gl=t,this._shape=[0|e,0|n],this._destroyed=!1,this._ext=s,this.color=new Array(i);for(var p=0;p1&&s.drawBuffersWEBGL(l[o]);var y=n.getExtension("WEBGL_depth_texture");y?p?t.depth=h(n,i,a,y.UNSIGNED_INT_24_8_WEBGL,n.DEPTH_STENCIL,n.DEPTH_STENCIL_ATTACHMENT):g&&(t.depth=h(n,i,a,n.UNSIGNED_SHORT,n.DEPTH_COMPONENT,n.DEPTH_ATTACHMENT)):g&&p?t._depth_rb=d(n,i,a,n.DEPTH_STENCIL,n.DEPTH_STENCIL_ATTACHMENT):g?t._depth_rb=d(n,i,a,n.DEPTH_COMPONENT16,n.DEPTH_ATTACHMENT):p&&(t._depth_rb=d(n,i,a,n.STENCIL_INDEX,n.STENCIL_ATTACHMENT));var b=n.checkFramebufferStatus(n.FRAMEBUFFER);if(b!==n.FRAMEBUFFER_COMPLETE){for(t._destroyed=!0,n.bindFramebuffer(n.FRAMEBUFFER,null),n.deleteFramebuffer(t.handle),t.handle=null,t.depth&&(t.depth.dispose(),t.depth=null),t._depth_rb&&(n.deleteRenderbuffer(t._depth_rb),t._depth_rb=null),m=0;mi||n<0||n>i)throw new Error("gl-fbo: Can't resize FBO, invalid dimensions");t._shape[0]=e,t._shape[1]=n;for(var a=c(r),o=0;o>8*d&255;this.pickOffset=n,i.bind();var p=i.uniforms;p.viewTransform=t,p.pickOffset=e,p.shape=this.shape;var g=i.attributes;return 
this.positionBuffer.bind(),g.position.pointer(),this.weightBuffer.bind(),g.weight.pointer(s.UNSIGNED_BYTE,!1),this.idBuffer.bind(),g.pickId.pointer(s.UNSIGNED_BYTE,!1),s.drawArrays(s.TRIANGLES,0,o),n+this.shape[0]*this.shape[1]}}}(),u.pick=function(t,e,n){var r=this.pickOffset,i=this.shape[0]*this.shape[1];if(n=r+i)return null;var a=n-r,o=this.xData,s=this.yData;return{object:this,pointId:a,dataCoord:[o[a%this.shape[0]],s[a/this.shape[0]|0]]}},u.update=function(t){var e=(t=t||{}).shape||[0,0],n=t.x||i(e[0]),o=t.y||i(e[1]),s=t.z||new Float32Array(e[0]*e[1]),l=!1!==t.zsmooth;this.xData=n,this.yData=o;var c,u,h,d,p=t.colorLevels||[0],g=t.colorValues||[0,0,0,1],v=p.length,m=this.bounds;l?(c=m[0]=n[0],u=m[1]=o[0],h=m[2]=n[n.length-1],d=m[3]=o[o.length-1]):(c=m[0]=n[0]+(n[1]-n[0])/2,u=m[1]=o[0]+(o[1]-o[0])/2,h=m[2]=n[n.length-1]+(n[n.length-1]-n[n.length-2])/2,d=m[3]=o[o.length-1]+(o[o.length-1]-o[o.length-2])/2);var y=1/(h-c),b=1/(d-u),x=e[0],w=e[1];this.shape=[x,w];var _=(l?(x-1)*(w-1):x*w)*(f.length>>>1);this.numVertices=_;for(var k=a.mallocUint8(4*_),T=a.mallocFloat32(2*_),M=a.mallocUint8(2*_),A=a.mallocUint32(_),S=0,E=l?x-1:x,C=l?w-1:w,P=0;P max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform sampler2D dashTexture;\nuniform float dashScale;\nuniform float opacity;\n\nvarying vec3 worldPosition;\nvarying float pixelArcLength;\nvarying vec4 fragColor;\n\nvoid main() {\n if (\n outOfRange(clipBounds[0], clipBounds[1], worldPosition) ||\n fragColor.a * opacity == 0.\n ) discard;\n\n float dashWeight = texture2D(dashTexture, vec2(dashScale * pixelArcLength, 0)).r;\n if(dashWeight < 0.5) {\n discard;\n }\n gl_FragColor = fragColor * opacity;\n}\n"]),s=r(["precision highp float;\n#define GLSLIFY 1\n\n#define FLOAT_MAX 1.70141184e38\n#define FLOAT_MIN 1.17549435e-38\n\n// https://github.com/mikolalysenko/glsl-read-float/blob/master/index.glsl\nvec4 packFloat(float v) {\n float av = abs(v);\n\n //Handle special cases\n if(av < FLOAT_MIN) {\n return vec4(0.0, 0.0, 0.0, 0.0);\n } else if(v > FLOAT_MAX) {\n return vec4(127.0, 128.0, 0.0, 0.0) / 255.0;\n } else if(v < -FLOAT_MAX) {\n return vec4(255.0, 128.0, 0.0, 0.0) / 255.0;\n }\n\n vec4 c = vec4(0,0,0,0);\n\n //Compute exponent and mantissa\n float e = floor(log2(av));\n float m = av * pow(2.0, -e) - 1.0;\n\n //Unpack mantissa\n c[1] = floor(128.0 * m);\n m -= c[1] / 128.0;\n c[2] = floor(32768.0 * m);\n m -= c[2] / 32768.0;\n c[3] = floor(8388608.0 * m);\n\n //Unpack exponent\n float ebias = e + 127.0;\n c[0] = floor(ebias / 2.0);\n ebias -= c[0] * 2.0;\n c[1] += floor(ebias) * 128.0;\n\n //Unpack sign bit\n c[0] += 128.0 * step(0.0, -v);\n\n //Scale back to range\n return c / 255.0;\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform float pickId;\nuniform vec3 
clipBounds[2];\n\nvarying vec3 worldPosition;\nvarying float pixelArcLength;\nvarying vec4 fragColor;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], worldPosition)) discard;\n\n gl_FragColor = vec4(pickId/255.0, packFloat(pixelArcLength).xyz);\n}"]),l=[{name:"position",type:"vec3"},{name:"nextPosition",type:"vec3"},{name:"arcLength",type:"float"},{name:"lineWidth",type:"float"},{name:"color",type:"vec4"}];e.createShader=function(t){return i(t,a,o,null,l)},e.createPickShader=function(t){return i(t,a,s,null,l)}},6086:function(t,e,n){"use strict";t.exports=function(t){var e=t.gl||t.scene&&t.scene.gl,n=f(e);n.attributes.position.location=0,n.attributes.nextPosition.location=1,n.attributes.arcLength.location=2,n.attributes.lineWidth.location=3,n.attributes.color.location=4;var o=h(e);o.attributes.position.location=0,o.attributes.nextPosition.location=1,o.attributes.arcLength.location=2,o.attributes.lineWidth.location=3,o.attributes.color.location=4;for(var s=r(e),l=i(e,[{buffer:s,size:3,offset:0,stride:48},{buffer:s,size:3,offset:12,stride:48},{buffer:s,size:1,offset:24,stride:48},{buffer:s,size:1,offset:28,stride:48},{buffer:s,size:4,offset:32,stride:48}]),u=c(new Array(1024),[256,1,4]),d=0;d<1024;++d)u.data[d]=255;var p=a(e,u);p.wrap=e.REPEAT;var g=new m(e,n,o,s,l,p);return g.update(t),g};var r=n(5827),i=n(2944),a=n(8931),o=new Uint8Array(4),s=new Float32Array(o.buffer),l=n(5070),c=n(5050),u=n(248),f=u.createShader,h=u.createPickShader,d=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function p(t,e){for(var n=0,r=0;r<3;++r){var i=t[r]-e[r];n+=i*i}return Math.sqrt(n)}function g(t){for(var e=[[-1e6,-1e6,-1e6],[1e6,1e6,1e6]],n=0;n<3;++n)e[0][n]=Math.max(t[0][n],e[0][n]),e[1][n]=Math.min(t[1][n],e[1][n]);return e}function v(t,e,n,r){this.arcLength=t,this.position=e,this.index=n,this.dataCoordinate=r}function m(t,e,n,r,i,a){this.gl=t,this.shader=e,this.pickShader=n,this.buffer=r,this.vao=i,this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.points=[],this.arcLength=[],this.vertexCount=0,this.bounds=[[0,0,0],[0,0,0]],this.pickId=0,this.lineWidth=1,this.texture=a,this.dashScale=1,this.opacity=1,this.hasAlpha=!1,this.dirty=!0,this.pixelRatio=1}var y=m.prototype;y.isTransparent=function(){return this.hasAlpha},y.isOpaque=function(){return!this.hasAlpha},y.pickSlots=1,y.setPickBase=function(t){this.pickId=t},y.drawTransparent=y.draw=function(t){if(this.vertexCount){var e=this.gl,n=this.shader,r=this.vao;n.bind(),n.uniforms={model:t.model||d,view:t.view||d,projection:t.projection||d,clipBounds:g(this.clipBounds),dashTexture:this.texture.bind(),dashScale:this.dashScale/this.arcLength[this.arcLength.length-1],opacity:this.opacity,screenShape:[e.drawingBufferWidth,e.drawingBufferHeight],pixelRatio:this.pixelRatio},r.bind(),r.draw(e.TRIANGLE_STRIP,this.vertexCount),r.unbind()}},y.drawPick=function(t){if(this.vertexCount){var e=this.gl,n=this.pickShader,r=this.vao;n.bind(),n.uniforms={model:t.model||d,view:t.view||d,projection:t.projection||d,pickId:this.pickId,clipBounds:g(this.clipBounds),screenShape:[e.drawingBufferWidth,e.drawingBufferHeight],pixelRatio:this.pixelRatio},r.bind(),r.draw(e.TRIANGLE_STRIP,this.vertexCount),r.unbind()}},y.update=function(t){var e,n;this.dirty=!0;var r=!!t.connectGaps;"dashScale"in t&&(this.dashScale=t.dashScale),this.hasAlpha=!1,"opacity"in t&&(this.opacity=+t.opacity,this.opacity<1&&(this.hasAlpha=!0));var i=[],a=[],o=[],s=0,u=0,f=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],h=t.position||t.positions;if(h){var 
d=t.color||t.colors||[0,0,0,1],g=t.lineWidth||1,v=!1;t:for(e=1;e0){for(var _=0;_<24;++_)i.push(i[i.length-12]);u+=2,v=!0}continue t}f[0][n]=Math.min(f[0][n],x[n],w[n]),f[1][n]=Math.max(f[1][n],x[n],w[n])}Array.isArray(d[0])?(m=d.length>e-1?d[e-1]:d.length>0?d[d.length-1]:[0,0,0,1],y=d.length>e?d[e]:d.length>0?d[d.length-1]:[0,0,0,1]):m=y=d,3===m.length&&(m=[m[0],m[1],m[2],1]),3===y.length&&(y=[y[0],y[1],y[2],1]),!this.hasAlpha&&m[3]<1&&(this.hasAlpha=!0),b=Array.isArray(g)?g.length>e-1?g[e-1]:g.length>0?g[g.length-1]:[0,0,0,1]:g;var k=s;if(s+=p(x,w),v){for(n=0;n<2;++n)i.push(x[0],x[1],x[2],w[0],w[1],w[2],k,b,m[0],m[1],m[2],m[3]);u+=2,v=!1}i.push(x[0],x[1],x[2],w[0],w[1],w[2],k,b,m[0],m[1],m[2],m[3],x[0],x[1],x[2],w[0],w[1],w[2],k,-b,m[0],m[1],m[2],m[3],w[0],w[1],w[2],x[0],x[1],x[2],s,-b,y[0],y[1],y[2],y[3],w[0],w[1],w[2],x[0],x[1],x[2],s,b,y[0],y[1],y[2],y[3]),u+=4}}if(this.buffer.update(i),a.push(s),o.push(h[h.length-1].slice()),this.bounds=f,this.vertexCount=u,this.points=o,this.arcLength=a,"dashes"in t){var T=t.dashes.slice();for(T.unshift(0),e=1;e1.0001)return null;m+=v[f]}return Math.abs(m-1)>.001?null:[h,s(t,v),v]}},2056:function(t,e,n){var r=n(6832),i=r(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position, normal;\nattribute vec4 color;\nattribute vec2 uv;\n\nuniform mat4 model\n , view\n , projection\n , inverseModel;\nuniform vec3 eyePosition\n , lightPosition;\n\nvarying vec3 f_normal\n , f_lightDirection\n , f_eyeDirection\n , f_data;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvec4 project(vec3 p) {\n return projection * view * model * vec4(p, 1.0);\n}\n\nvoid main() {\n gl_Position = project(position);\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * vec4(position , 1.0);\n cameraCoordinate.xyz /= cameraCoordinate.w;\n f_lightDirection = lightPosition - cameraCoordinate.xyz;\n f_eyeDirection = eyePosition - cameraCoordinate.xyz;\n f_normal = normalize((vec4(normal, 0.0) * inverseModel).xyz);\n\n f_color = color;\n f_data = position;\n f_uv = uv;\n}\n"]),a=r(["#extension GL_OES_standard_derivatives : enable\n\nprecision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat cookTorranceSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness,\n float fresnel) {\n\n float VdotN = max(dot(viewDirection, surfaceNormal), 0.0);\n float LdotN = max(dot(lightDirection, surfaceNormal), 0.0);\n\n //Half angle vector\n vec3 H = normalize(lightDirection + viewDirection);\n\n //Geometric term\n float NdotH = max(dot(surfaceNormal, H), 0.0);\n float VdotH = max(dot(viewDirection, H), 0.000001);\n float LdotH = max(dot(lightDirection, H), 0.000001);\n float G1 = (2.0 * NdotH * VdotN) / VdotH;\n float G2 = (2.0 * NdotH * LdotN) / LdotH;\n float G = min(1.0, min(G1, G2));\n \n //Distribution term\n float D = beckmannDistribution(NdotH, roughness);\n\n //Fresnel term\n float F = pow(1.0 - VdotN, fresnel);\n\n //Multiply terms and done\n return G * F * D / max(3.14159265 * VdotN, 0.000001);\n}\n\n//#pragma glslify: beckmann = require(glsl-specular-beckmann) // used in gl-surface3d\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool 
outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float roughness\n , fresnel\n , kambient\n , kdiffuse\n , kspecular;\nuniform sampler2D texture;\n\nvarying vec3 f_normal\n , f_lightDirection\n , f_eyeDirection\n , f_data;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (f_color.a == 0.0 ||\n outOfRange(clipBounds[0], clipBounds[1], f_data)\n ) discard;\n\n vec3 N = normalize(f_normal);\n vec3 L = normalize(f_lightDirection);\n vec3 V = normalize(f_eyeDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = min(1.0, max(0.0, cookTorranceSpecular(L, V, N, roughness, fresnel)));\n //float specular = max(0.0, beckmann(L, V, N, roughness)); // used in gl-surface3d\n\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n vec4 surfaceColor = vec4(f_color.rgb, 1.0) * texture2D(texture, f_uv);\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = litColor * f_color.a;\n}\n"]),o=r(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 uv;\n\nuniform mat4 model, view, projection;\n\nvarying vec4 f_color;\nvarying vec3 f_data;\nvarying vec2 f_uv;\n\nvoid main() {\n gl_Position = projection * view * model * vec4(position, 1.0);\n f_color = color;\n f_data = position;\n f_uv = uv;\n}"]),s=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform sampler2D texture;\nuniform float opacity;\n\nvarying vec4 f_color;\nvarying vec3 f_data;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_data)) discard;\n\n gl_FragColor = f_color * texture2D(texture, f_uv) * opacity;\n}"]),l=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 uv;\nattribute float pointSize;\n\nuniform mat4 model, view, projection;\nuniform vec3 clipBounds[2];\n\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0.0, 0.0 ,0.0 ,0.0);\n } else {\n gl_Position = projection * view * model * vec4(position, 1.0);\n }\n gl_PointSize = pointSize;\n f_color = color;\n f_uv = uv;\n}"]),c=r(["precision highp float;\n#define GLSLIFY 
1\n\nuniform sampler2D texture;\nuniform float opacity;\n\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n vec2 pointR = gl_PointCoord.xy - vec2(0.5, 0.5);\n if(dot(pointR, pointR) > 0.25) {\n discard;\n }\n gl_FragColor = f_color * texture2D(texture, f_uv) * opacity;\n}"]),u=r(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n gl_Position = projection * view * model * vec4(position, 1.0);\n f_id = id;\n f_position = position;\n}"]),f=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n\n gl_FragColor = vec4(pickId, f_id.xyz);\n}"]),h=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute float pointSize;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform vec3 clipBounds[2];\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0.0, 0.0, 0.0, 0.0);\n } else {\n gl_Position = projection * view * model * vec4(position, 1.0);\n gl_PointSize = pointSize;\n }\n f_id = id;\n f_position = position;\n}"]),d=r(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec3 position;\n\nuniform mat4 model, view, projection;\n\nvoid main() {\n gl_Position = projection * view * model * vec4(position, 1.0);\n}"]),p=r(["precision highp float;\n#define GLSLIFY 1\n\nuniform vec3 contourColor;\n\nvoid main() {\n gl_FragColor = vec4(contourColor, 1.0);\n}\n"]);e.meshShader={vertex:i,fragment:a,attributes:[{name:"position",type:"vec3"},{name:"normal",type:"vec3"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"}]},e.wireShader={vertex:o,fragment:s,attributes:[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"}]},e.pointShader={vertex:l,fragment:c,attributes:[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"},{name:"pointSize",type:"float"}]},e.pickShader={vertex:u,fragment:f,attributes:[{name:"position",type:"vec3"},{name:"id",type:"vec4"}]},e.pointPickShader={vertex:h,fragment:f,attributes:[{name:"position",type:"vec3"},{name:"pointSize",type:"float"},{name:"id",type:"vec4"}]},e.contourShader={vertex:d,fragment:p,attributes:[{name:"position",type:"vec3"}]}},8116:function(t,e,n){"use strict";var 
r=n(5158),i=n(5827),a=n(2944),o=n(8931),s=n(115),l=n(104),c=n(7437),u=n(5050),f=n(9156),h=n(7212),d=n(5306),p=n(2056),g=n(4340),v=p.meshShader,m=p.wireShader,y=p.pointShader,b=p.pickShader,x=p.pointPickShader,w=p.contourShader,_=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function k(t,e,n,r,i,a,o,s,l,c,u,f,h,d,p,g,v,m,y,b,x,w,k,T,M,A,S){this.gl=t,this.pixelRatio=1,this.cells=[],this.positions=[],this.intensity=[],this.texture=e,this.dirty=!0,this.triShader=n,this.lineShader=r,this.pointShader=i,this.pickShader=a,this.pointPickShader=o,this.contourShader=s,this.trianglePositions=l,this.triangleColors=u,this.triangleNormals=h,this.triangleUVs=f,this.triangleIds=c,this.triangleVAO=d,this.triangleCount=0,this.lineWidth=1,this.edgePositions=p,this.edgeColors=v,this.edgeUVs=m,this.edgeIds=g,this.edgeVAO=y,this.edgeCount=0,this.pointPositions=b,this.pointColors=w,this.pointUVs=k,this.pointSizes=T,this.pointIds=x,this.pointVAO=M,this.pointCount=0,this.contourLineWidth=1,this.contourPositions=A,this.contourVAO=S,this.contourCount=0,this.contourColor=[0,0,0],this.contourEnable=!0,this.pickVertex=!0,this.pickId=1,this.bounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.lightPosition=[1e5,1e5,0],this.ambientLight=.8,this.diffuseLight=.8,this.specularLight=2,this.roughness=.5,this.fresnel=1.5,this.opacity=1,this.hasAlpha=!1,this.opacityscale=!1,this._model=_,this._view=_,this._projection=_,this._resolution=[1,1]}var T=k.prototype;function M(t,e){if(!e)return 1;if(!e.length)return 1;for(var n=0;nt&&n>0){var r=(e[n][0]-t)/(e[n][0]-e[n-1][0]);return e[n][1]*(1-r)+r*e[n-1][1]}}return 1}function A(t){var e=r(t,y.vertex,y.fragment);return e.attributes.position.location=0,e.attributes.color.location=2,e.attributes.uv.location=3,e.attributes.pointSize.location=4,e}function S(t){var e=r(t,b.vertex,b.fragment);return e.attributes.position.location=0,e.attributes.id.location=1,e}function E(t){var e=r(t,x.vertex,x.fragment);return e.attributes.position.location=0,e.attributes.id.location=1,e.attributes.pointSize.location=4,e}function C(t){var e=r(t,w.vertex,w.fragment);return e.attributes.position.location=0,e}T.isOpaque=function(){return!this.hasAlpha},T.isTransparent=function(){return this.hasAlpha},T.pickSlots=1,T.setPickBase=function(t){this.pickId=t},T.highlight=function(t){if(t&&this.contourEnable){for(var e=h(this.cells,this.intensity,t.intensity),n=e.cells,r=e.vertexIds,i=e.vertexWeights,a=n.length,o=d.mallocFloat32(6*a),s=0,l=0;l0&&((f=this.triShader).bind(),f.uniforms=s,this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind()),this.edgeCount>0&&this.lineWidth>0&&((f=this.lineShader).bind(),f.uniforms=s,this.edgeVAO.bind(),e.lineWidth(this.lineWidth*this.pixelRatio),e.drawArrays(e.LINES,0,2*this.edgeCount),this.edgeVAO.unbind()),this.pointCount>0&&((f=this.pointShader).bind(),f.uniforms=s,this.pointVAO.bind(),e.drawArrays(e.POINTS,0,this.pointCount),this.pointVAO.unbind()),this.contourEnable&&this.contourCount>0&&this.contourLineWidth>0&&((f=this.contourShader).bind(),f.uniforms=s,this.contourVAO.bind(),e.drawArrays(e.LINES,0,this.contourCount),this.contourVAO.unbind())},T.drawPick=function(t){t=t||{};for(var 
e=this.gl,n=t.model||_,r=t.view||_,i=t.projection||_,a=[[-1e6,-1e6,-1e6],[1e6,1e6,1e6]],o=0;o<3;++o)a[0][o]=Math.max(a[0][o],this.clipBounds[0][o]),a[1][o]=Math.min(a[1][o],this.clipBounds[1][o]);this._model=[].slice.call(n),this._view=[].slice.call(r),this._projection=[].slice.call(i),this._resolution=[e.drawingBufferWidth,e.drawingBufferHeight];var s,l={model:n,view:r,projection:i,clipBounds:a,pickId:this.pickId/255};(s=this.pickShader).bind(),s.uniforms=l,this.triangleCount>0&&(this.triangleVAO.bind(),e.drawArrays(e.TRIANGLES,0,3*this.triangleCount),this.triangleVAO.unbind()),this.edgeCount>0&&(this.edgeVAO.bind(),e.lineWidth(this.lineWidth*this.pixelRatio),e.drawArrays(e.LINES,0,2*this.edgeCount),this.edgeVAO.unbind()),this.pointCount>0&&((s=this.pointPickShader).bind(),s.uniforms=l,this.pointVAO.bind(),e.drawArrays(e.POINTS,0,this.pointCount),this.pointVAO.unbind())},T.pick=function(t){if(!t)return null;if(t.id!==this.pickId)return null;for(var e=t.value[0]+256*t.value[1]+65536*t.value[2],n=this.cells[e],r=this.positions,i=new Array(n.length),a=0;al[T]&&(a.uniforms.dataAxis=t,a.uniforms.screenOffset=e,a.uniforms.color=v[r],a.uniforms.angle=m[r],c.drawArrays(c.TRIANGLES,l[T],l[M]-l[T]))),y[r]&&k&&(e[1^r]-=A*d*b[r],a.uniforms.dataAxis=n,a.uniforms.screenOffset=e,a.uniforms.color=x[r],a.uniforms.angle=w[r],c.drawArrays(c.TRIANGLES,_,k)),e[1^r]=A*u[2+(1^r)]-1,p[r+2]&&(e[1^r]+=A*d*g[r+2],Tl[T]&&(a.uniforms.dataAxis=t,a.uniforms.screenOffset=e,a.uniforms.color=v[r+2],a.uniforms.angle=m[r+2],c.drawArrays(c.TRIANGLES,l[T],l[M]-l[T]))),y[r+2]&&k&&(e[1^r]+=A*d*b[r+2],a.uniforms.dataAxis=n,a.uniforms.screenOffset=e,a.uniforms.color=x[r+2],a.uniforms.angle=w[r+2],c.drawArrays(c.TRIANGLES,_,k))}}(),c.drawTitle=function(){var t=[0,0],e=[0,0];return function(){var n=this.plot,r=this.shader,i=n.gl,a=n.screenBox,o=n.titleCenter,s=n.titleAngle,l=n.titleColor,c=n.pixelRatio;if(this.titleCount){for(var u=0;u<2;++u)e[u]=2*(o[u]*c-a[u])/(a[2+u]-a[u])-1;r.bind(),r.uniforms.dataAxis=t,r.uniforms.screenOffset=e,r.uniforms.angle=s,r.uniforms.color=l,i.drawArrays(i.TRIANGLES,this.titleOffset,this.titleCount)}}}(),c.bind=function(){var t=[0,0],e=[0,0],n=[0,0];return function(){var r=this.plot,i=this.shader,a=r._tickBounds,o=r.dataBox,s=r.screenBox,l=r.viewBox;i.bind();for(var c=0;c<2;++c){var u=a[c],f=a[c+2]-u,h=.5*(o[c+2]+o[c]),d=o[c+2]-o[c],p=l[c],g=l[c+2]-p,v=s[c],m=s[c+2]-v;e[c]=2*f/d*g/m,t[c]=2*(u-h)/d*g/m}n[1]=2*r.pixelRatio/(s[3]-s[1]),n[0]=n[1]*(s[3]-s[1])/(s[2]-s[0]),i.uniforms.dataScale=e,i.uniforms.dataShift=t,i.uniforms.textScale=n,this.vbo.bind(),i.attributes.textCoordinate.pointer()}}(),c.update=function(t){var e,n,r,i,o,s=[],l=t.ticks,c=t.bounds;for(o=0;o<2;++o){var u=[Math.floor(s.length/3)],f=[-1/0],h=l[o];for(e=0;e=0){var g=e[p]-r[p]*(e[p+2]-e[p])/(r[p+2]-r[p]);0===p?o.drawLine(g,e[1],g,e[3],d[p],h[p]):o.drawLine(e[0],g,e[2],g,d[p],h[p])}}for(p=0;p=0;--t)this.objects[t].dispose();for(this.objects.length=0,t=this.overlays.length-1;t>=0;--t)this.overlays[t].dispose();this.overlays.length=0,this.gl=null},c.addObject=function(t){this.objects.indexOf(t)<0&&(this.objects.push(t),this.setDirty())},c.removeObject=function(t){for(var e=this.objects,n=0;nMath.abs(e))c.rotate(a,0,0,-t*n*Math.PI*p.rotateSpeed/window.innerWidth);else if(!p._ortho){var o=-p.zoomSpeed*i*e/window.innerHeight*(a-c.lastT())/20;c.pan(a,0,0,f*(Math.exp(o)-1))}}}),!0)},p.enableMouseListeners(),p};var r=n(8161),i=n(1152),a=n(6145),o=n(6475),s=n(2565),l=n(5233)},8245:function(t,e,n){var r=n(6832),i=n(5158),a=r(["precision mediump 
float;\n#define GLSLIFY 1\nattribute vec2 position;\nvarying vec2 uv;\nvoid main() {\n uv = position;\n gl_Position = vec4(position, 0, 1);\n}"]),o=r(["precision mediump float;\n#define GLSLIFY 1\n\nuniform sampler2D accumBuffer;\nvarying vec2 uv;\n\nvoid main() {\n vec4 accum = texture2D(accumBuffer, 0.5 * (uv + 1.0));\n gl_FragColor = min(vec4(1,1,1,1), accum);\n}"]);t.exports=function(t){return i(t,a,o,null,[{name:"position",type:"vec2"}])}},1059:function(t,e,n){"use strict";var r=n(4296),i=n(7453),a=n(2771),o=n(6496),s=n(2611),l=n(4234),c=n(8126),u=n(6145),f=n(1120),h=n(5268),d=n(8245),p=n(2321)({tablet:!0,featureDetect:!0});function g(){this.mouse=[-1,-1],this.screen=null,this.distance=1/0,this.index=null,this.dataCoordinate=null,this.dataPosition=null,this.object=null,this.data=null}function v(t){var e=Math.round(Math.log(Math.abs(t))/Math.log(10));if(e<0){var n=Math.round(Math.pow(10,-e));return Math.ceil(t*n)/n}return e>0?(n=Math.round(Math.pow(10,e)),Math.ceil(t/n)*n):Math.ceil(t)}function m(t){return"boolean"!==typeof t||t}t.exports={createScene:function(t){(t=t||{}).camera=t.camera||{};var e=t.canvas;e||(e=document.createElement("canvas"),t.container?t.container.appendChild(e):document.body.appendChild(e));var n=t.gl;if(n||(t.glOptions&&(p=!!t.glOptions.preserveDrawingBuffer),n=function(t,e){var n=null;try{(n=t.getContext("webgl",e))||(n=t.getContext("experimental-webgl",e))}catch(r){return null}return n}(e,t.glOptions||{premultipliedAlpha:!0,antialias:!0,preserveDrawingBuffer:p})),!n)throw new Error("webgl not supported");var y=t.bounds||[[-10,-10,-10],[10,10,10]],b=new g,x=l(n,n.drawingBufferWidth,n.drawingBufferHeight,{preferFloat:!p}),w=d(n),_=t.cameraObject&&!0===t.cameraObject._ortho||t.camera.projection&&"orthographic"===t.camera.projection.type||!1,k={eye:t.camera.eye||[2,0,0],center:t.camera.center||[0,0,0],up:t.camera.up||[0,1,0],zoomMin:t.camera.zoomMax||.1,zoomMax:t.camera.zoomMin||100,mode:t.camera.mode||"turntable",_ortho:_},T=t.axes||{},M=i(n,T);M.enable=!T.disable;var A=t.spikes||{},S=o(n,A),E=[],C=[],P=[],O=[],L=!0,I=!0,D=new Array(16),R=new Array(16),z={view:null,projection:D,model:R,_ortho:!1},N=(I=!0,[n.drawingBufferWidth,n.drawingBufferHeight]),j=t.cameraObject||r(e,k),F={gl:n,contextLost:!1,pixelRatio:t.pixelRatio||1,canvas:e,selection:b,camera:j,axes:M,axesPixels:null,spikes:S,bounds:y,objects:E,shape:N,aspect:t.aspectRatio||[1,1,1],pickRadius:t.pickRadius||10,zNear:t.zNear||.01,zFar:t.zFar||1e3,fovy:t.fovy||Math.PI/4,clearColor:t.clearColor||[0,0,0,0],autoResize:m(t.autoResize),autoBounds:m(t.autoBounds),autoScale:!!t.autoScale,autoCenter:m(t.autoCenter),clipToBounds:m(t.clipToBounds),snapToData:!!t.snapToData,onselect:t.onselect||null,onrender:t.onrender||null,onclick:t.onclick||null,cameraParams:z,oncontextloss:null,mouseListener:null,_stopped:!1,getAspectratio:function(){return{x:this.aspect[0],y:this.aspect[1],z:this.aspect[2]}},setAspectratio:function(t){this.aspect[0]=t.x,this.aspect[1]=t.y,this.aspect[2]=t.z,I=!0},setBounds:function(t,e){this.bounds[0][t]=e.min,this.bounds[1][t]=e.max},setClearColor:function(t){this.clearColor=t},clearRGBA:function(){this.gl.clearColor(this.clearColor[0],this.clearColor[1],this.clearColor[2],this.clearColor[3]),this.gl.clear(this.gl.COLOR_BUFFER_BIT|this.gl.DEPTH_BUFFER_BIT)}},B=[n.drawingBufferWidth/F.pixelRatio|0,n.drawingBufferHeight/F.pixelRatio|0];function U(){if(!F._stopped&&F.autoResize){var 
t=e.parentNode,n=1,r=1;t&&t!==document.body?(n=t.clientWidth,r=t.clientHeight):(n=window.innerWidth,r=window.innerHeight);var i=0|Math.ceil(n*F.pixelRatio),a=0|Math.ceil(r*F.pixelRatio);if(i!==e.width||a!==e.height){e.width=i,e.height=a;var o=e.style;o.position=o.position||"absolute",o.left="0px",o.top="0px",o.width=n+"px",o.height=r+"px",L=!0}}}function H(){for(var t=E.length,e=O.length,r=0;r0&&0===P[e-1];)P.pop(),O.pop().dispose()}function V(){if(F.contextLost)return!0;n.isContextLost()&&(F.contextLost=!0,F.mouseListener.enabled=!1,F.selection.object=null,F.oncontextloss&&F.oncontextloss())}F.autoResize&&U(),window.addEventListener("resize",U),F.update=function(t){F._stopped||(t=t||{},L=!0,I=!0)},F.add=function(t){F._stopped||(t.axes=M,E.push(t),C.push(-1),L=!0,I=!0,H())},F.remove=function(t){if(!F._stopped){var e=E.indexOf(t);e<0||(E.splice(e,1),C.pop(),L=!0,I=!0,H())}},F.dispose=function(){if(!F._stopped&&(F._stopped=!0,window.removeEventListener("resize",U),e.removeEventListener("webglcontextlost",V),F.mouseListener.enabled=!1,!F.contextLost)){M.dispose(),S.dispose();for(var t=0;tb.distance)continue;for(var c=0;c 1.0) {\n discard;\n }\n baseColor = mix(borderColor, color, step(radius, centerFraction));\n gl_FragColor = vec4(baseColor.rgb * baseColor.a, baseColor.a);\n }\n}\n"]),e.pickVertex=r(["precision mediump float;\n#define GLSLIFY 1\n\nattribute vec2 position;\nattribute vec4 pickId;\n\nuniform mat3 matrix;\nuniform float pointSize;\nuniform vec4 pickOffset;\n\nvarying vec4 fragId;\n\nvoid main() {\n vec3 hgPosition = matrix * vec3(position, 1);\n gl_Position = vec4(hgPosition.xy, 0, hgPosition.z);\n gl_PointSize = pointSize;\n\n vec4 id = pickId + pickOffset;\n id.y += floor(id.x / 256.0);\n id.x -= floor(id.x / 256.0) * 256.0;\n\n id.z += floor(id.y / 256.0);\n id.y -= floor(id.y / 256.0) * 256.0;\n\n id.w += floor(id.z / 256.0);\n id.z -= floor(id.z / 256.0) * 256.0;\n\n fragId = id;\n}\n"]),e.pickFragment=r(["precision mediump float;\n#define GLSLIFY 1\n\nvarying vec4 fragId;\n\nvoid main() {\n float radius = length(2.0 * gl_PointCoord.xy - 1.0);\n if(radius > 1.0) {\n discard;\n }\n gl_FragColor = fragId / 255.0;\n}\n"])},8271:function(t,e,n){"use strict";var r=n(5158),i=n(5827),a=n(5306),o=n(8023);function s(t,e,n,r,i){this.plot=t,this.offsetBuffer=e,this.pickBuffer=n,this.shader=r,this.pickShader=i,this.sizeMin=.5,this.sizeMinCap=2,this.sizeMax=20,this.areaRatio=1,this.pointCount=0,this.color=[1,0,0,1],this.borderColor=[0,0,0,1],this.blend=!1,this.pickOffset=0,this.points=null}t.exports=function(t,e){var n=t.gl,a=i(n),l=i(n),c=r(n,o.pointVertex,o.pointFragment),u=r(n,o.pickVertex,o.pickFragment),f=new s(t,a,l,c,u);return f.update(e),t.addObject(f),f};var l=s.prototype;l.dispose=function(){this.shader.dispose(),this.pickShader.dispose(),this.offsetBuffer.dispose(),this.pickBuffer.dispose(),this.plot.removeObject(this)},l.update=function(t){var e;function n(e,n){return e in t?t[e]:n}t=t||{},this.sizeMin=n("sizeMin",.5),this.sizeMax=n("sizeMax",20),this.color=n("color",[1,0,0,1]).slice(),this.areaRatio=n("areaRatio",1),this.borderColor=n("borderColor",[0,0,0,1]).slice(),this.blend=n("blend",!1);var r=t.positions.length>>>1,i=t.positions instanceof Float32Array,o=t.idToIndex instanceof Int32Array&&t.idToIndex.length>=r,s=t.positions,l=i?s:a.mallocFloat32(s.length),c=o?t.idToIndex:a.mallocInt32(r);if(i||l.set(s),!o)for(l.set(s),e=0;e>>1;for(n=0;n=e[0]&&a<=e[2]&&o>=e[1]&&o<=e[3]&&r++}return 
r}(this.points,o),u=this.plot.pickPixelRatio*Math.max(Math.min(this.sizeMinCap,this.sizeMin),Math.min(this.sizeMax,this.sizeMax/Math.pow(c,.33333)));t[0]=2/s,t[4]=2/l,t[6]=-2*o[0]/s-1,t[7]=-2*o[1]/l-1,this.offsetBuffer.bind(),i.bind(),i.attributes.position.pointer(),i.uniforms.matrix=t,i.uniforms.color=this.color,i.uniforms.borderColor=this.borderColor,i.uniforms.pointCloud=u<5,i.uniforms.pointSize=u,i.uniforms.centerFraction=Math.min(1,Math.max(0,Math.sqrt(1-this.areaRatio))),r&&(e[0]=255&n,e[1]=n>>8&255,e[2]=n>>16&255,e[3]=n>>24&255,this.pickBuffer.bind(),i.attributes.pickId.pointer(a.UNSIGNED_BYTE),i.uniforms.pickOffset=e,this.pickOffset=n);var f=a.getParameter(a.BLEND),h=a.getParameter(a.DITHER);return f&&!this.blend&&a.disable(a.BLEND),h&&a.disable(a.DITHER),a.drawArrays(a.POINTS,0,this.pointCount),f&&!this.blend&&a.enable(a.BLEND),h&&a.enable(a.DITHER),n+this.pointCount}}(),l.draw=l.unifiedDraw,l.drawPick=l.unifiedDraw,l.pick=function(t,e,n){var r=this.pickOffset,i=this.pointCount;if(n=r+i)return null;var a=n-r,o=this.points;return{object:this,pointId:a,dataCoord:[o[2*a],o[2*a+1]]}}},6093:function(t){t.exports=function(t,e,n,r){var i,a,o,s,l,c=e[0],u=e[1],f=e[2],h=e[3],d=n[0],p=n[1],g=n[2],v=n[3];return(a=c*d+u*p+f*g+h*v)<0&&(a=-a,d=-d,p=-p,g=-g,v=-v),1-a>1e-6?(i=Math.acos(a),o=Math.sin(i),s=Math.sin((1-r)*i)/o,l=Math.sin(r*i)/o):(s=1-r,l=r),t[0]=s*c+l*d,t[1]=s*u+l*p,t[2]=s*f+l*g,t[3]=s*h+l*v,t}},8240:function(t){"use strict";t.exports=function(t){return t||0===t?t.toString():""}},4123:function(t,e,n){"use strict";var r=n(875);t.exports=function(t,e,n){var a=i[e];if(a||(a=i[e]={}),t in a)return a[t];var o={textAlign:"center",textBaseline:"middle",lineHeight:1,font:e,lineSpacing:1.25,styletags:{breaklines:!0,bolds:!0,italics:!0,subscripts:!0,superscripts:!0},triangles:!0},s=r(t,o);o.triangles=!1;var l,c,u=r(t,o);if(n&&1!==n){for(l=0;l max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 glyph;\nattribute vec4 id;\n\nuniform vec4 highlightId;\nuniform float highlightScale;\nuniform mat4 model, view, projection;\nuniform vec3 clipBounds[2];\n\nvarying vec4 interpColor;\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0,0,0,0);\n } else {\n float scale = 1.0;\n if(distance(highlightId, id) < 0.0001) {\n scale = highlightScale;\n }\n\n vec4 worldPosition = model * vec4(position, 1);\n vec4 viewPosition = view * worldPosition;\n viewPosition = viewPosition / viewPosition.w;\n vec4 clipPosition = projection * (viewPosition + scale * vec4(glyph.x, -glyph.y, 0, 0));\n\n gl_Position = clipPosition;\n interpColor = color;\n pickId = id;\n dataCoordinate = position;\n }\n}"]),o=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool 
outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 glyph;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform vec2 screenSize;\nuniform vec3 clipBounds[2];\nuniform float highlightScale, pixelRatio;\nuniform vec4 highlightId;\n\nvarying vec4 interpColor;\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0,0,0,0);\n } else {\n float scale = pixelRatio;\n if(distance(highlightId.bgr, id.bgr) < 0.001) {\n scale *= highlightScale;\n }\n\n vec4 worldPosition = model * vec4(position, 1.0);\n vec4 viewPosition = view * worldPosition;\n vec4 clipPosition = projection * viewPosition;\n clipPosition /= clipPosition.w;\n\n gl_Position = clipPosition + vec4(screenSize * scale * vec2(glyph.x, -glyph.y), 0.0, 0.0);\n interpColor = color;\n pickId = id;\n dataCoordinate = position;\n }\n}"]),s=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nattribute vec3 position;\nattribute vec4 color;\nattribute vec2 glyph;\nattribute vec4 id;\n\nuniform float highlightScale;\nuniform vec4 highlightId;\nuniform vec3 axes[2];\nuniform mat4 model, view, projection;\nuniform vec2 screenSize;\nuniform vec3 clipBounds[2];\nuniform float scale, pixelRatio;\n\nvarying vec4 interpColor;\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], position)) {\n\n gl_Position = vec4(0,0,0,0);\n } else {\n float lscale = pixelRatio * scale;\n if(distance(highlightId, id) < 0.0001) {\n lscale *= highlightScale;\n }\n\n vec4 clipCenter = projection * view * model * vec4(position, 1);\n vec3 dataPosition = position + 0.5*lscale*(axes[0] * glyph.x + axes[1] * glyph.y) * clipCenter.w * screenSize.y;\n vec4 clipPosition = projection * view * model * vec4(dataPosition, 1);\n\n gl_Position = clipPosition;\n interpColor = color;\n pickId = id;\n dataCoordinate = dataPosition;\n }\n}\n"]),l=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 fragClipBounds[2];\nuniform float opacity;\n\nvarying vec4 interpColor;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (\n outOfRange(fragClipBounds[0], fragClipBounds[1], dataCoordinate) ||\n interpColor.a * opacity == 0.\n ) discard;\n gl_FragColor = interpColor * opacity;\n}\n"]),c=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n 
outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 fragClipBounds[2];\nuniform float pickGroup;\n\nvarying vec4 pickId;\nvarying vec3 dataCoordinate;\n\nvoid main() {\n if (outOfRange(fragClipBounds[0], fragClipBounds[1], dataCoordinate)) discard;\n\n gl_FragColor = vec4(pickGroup, pickId.bgr);\n}"]),u=[{name:"position",type:"vec3"},{name:"color",type:"vec4"},{name:"glyph",type:"vec2"},{name:"id",type:"vec4"}],f={vertex:a,fragment:l,attributes:u},h={vertex:o,fragment:l,attributes:u},d={vertex:s,fragment:l,attributes:u},p={vertex:a,fragment:c,attributes:u},g={vertex:o,fragment:c,attributes:u},v={vertex:s,fragment:c,attributes:u};function m(t,e){var n=r(t,e),i=n.attributes;return i.position.location=0,i.color.location=1,i.glyph.location=2,i.id.location=3,n}e.createPerspective=function(t){return m(t,f)},e.createOrtho=function(t){return m(t,h)},e.createProject=function(t){return m(t,d)},e.createPickPerspective=function(t){return m(t,p)},e.createPickOrtho=function(t){return m(t,g)},e.createPickProject=function(t){return m(t,v)}},2182:function(t,e,n){"use strict";var r=n(3596),i=n(5827),a=n(2944),o=n(5306),s=n(104),l=n(9282),c=n(4123),u=n(8240),f=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1];function h(t,e){var n=t[0],r=t[1],i=t[2],a=t[3];return t[0]=e[0]*n+e[4]*r+e[8]*i+e[12]*a,t[1]=e[1]*n+e[5]*r+e[9]*i+e[13]*a,t[2]=e[2]*n+e[6]*r+e[10]*i+e[14]*a,t[3]=e[3]*n+e[7]*r+e[11]*i+e[15]*a,t}function d(t,e,n,r){return h(r,r),h(r,r),h(r,r)}function p(t,e){this.index=t,this.dataCoordinate=this.position=e}function g(t){return!0===t||t>1?1:t}function v(t,e,n,r,i,a,o,s,l,c,u,f){this.gl=t,this.pixelRatio=1,this.shader=e,this.orthoShader=n,this.projectShader=r,this.pointBuffer=i,this.colorBuffer=a,this.glyphBuffer=o,this.idBuffer=s,this.vao=l,this.vertexCount=0,this.lineVertexCount=0,this.opacity=1,this.hasAlpha=!1,this.lineWidth=0,this.projectScale=[2/3,2/3,2/3],this.projectOpacity=[1,1,1],this.projectHasAlpha=!1,this.pickId=0,this.pickPerspectiveShader=c,this.pickOrthoShader=u,this.pickProjectShader=f,this.points=[],this._selectResult=new p(0,[0,0,0]),this.useOrtho=!0,this.bounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.axesProject=[!0,!0,!0],this.axesBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.highlightId=[1,1,1,1],this.highlightScale=2,this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.dirty=!0}t.exports=function(t){var e=t.gl,n=l.createPerspective(e),r=l.createOrtho(e),o=l.createProject(e),s=l.createPickPerspective(e),c=l.createPickOrtho(e),u=l.createPickProject(e),f=i(e),h=i(e),d=i(e),p=i(e),g=a(e,[{buffer:f,size:3,type:e.FLOAT},{buffer:h,size:4,type:e.FLOAT},{buffer:d,size:2,type:e.FLOAT},{buffer:p,size:4,type:e.UNSIGNED_BYTE,normalized:!0}]),m=new v(e,n,r,o,f,h,d,p,g,s,c,u);return m.update(t),m};var m=v.prototype;m.pickSlots=1,m.setPickBase=function(t){this.pickId=t},m.isTransparent=function(){if(this.hasAlpha)return!0;for(var t=0;t<3;++t)if(this.axesProject[t]&&this.projectHasAlpha)return!0;return!1},m.isOpaque=function(){if(!this.hasAlpha)return!0;for(var t=0;t<3;++t)if(this.axesProject[t]&&!this.projectHasAlpha)return!0;return!1};var y=[0,0],b=[0,0,0],x=[0,0,0],w=[0,0,0,1],_=[0,0,0,1],k=f.slice(),T=[0,0,0],M=[[0,0,0],[0,0,0]];function A(t){return t[0]=t[1]=t[2]=0,t}function S(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t[3]=1,t}function E(t,e,n,r){return 
t[0]=e[0],t[1]=e[1],t[2]=e[2],t[n]=r,t}function C(t,e,n,r){var i,a=e.axesProject,o=e.gl,l=t.uniforms,c=n.model||f,u=n.view||f,h=n.projection||f,p=e.axesBounds,g=function(t){for(var e=M,n=0;n<2;++n)for(var r=0;r<3;++r)e[n][r]=Math.max(Math.min(t[n][r],1e8),-1e8);return e}(e.clipBounds);i=e.axes&&e.axes.lastCubeProps?e.axes.lastCubeProps.axis:[1,1,1],y[0]=2/o.drawingBufferWidth,y[1]=2/o.drawingBufferHeight,t.bind(),l.view=u,l.projection=h,l.screenSize=y,l.highlightId=e.highlightId,l.highlightScale=e.highlightScale,l.clipBounds=g,l.pickGroup=e.pickId/255,l.pixelRatio=r;for(var v=0;v<3;++v)if(a[v]){l.scale=e.projectScale[v],l.opacity=e.projectOpacity[v];for(var m=k,C=0;C<16;++C)m[C]=0;for(C=0;C<4;++C)m[5*C]=1;m[5*v]=0,i[v]<0?m[12+v]=p[0][v]:m[12+v]=p[1][v],s(m,c,m),l.model=m;var P=(v+1)%3,O=(v+2)%3,L=A(b),I=A(x);L[P]=1,I[O]=1;var D=d(0,0,0,S(w,L)),R=d(0,0,0,S(_,I));if(Math.abs(D[1])>Math.abs(R[1])){var z=D;D=R,R=z,z=L,L=I,I=z;var N=P;P=O,O=N}D[0]<0&&(L[P]=-1),R[1]>0&&(I[O]=-1);var j=0,F=0;for(C=0;C<4;++C)j+=Math.pow(c[4*P+C],2),F+=Math.pow(c[4*O+C],2);L[P]/=Math.sqrt(j),I[O]/=Math.sqrt(F),l.axes[0]=L,l.axes[1]=I,l.fragClipBounds[0]=E(T,g[0],v,-1e8),l.fragClipBounds[1]=E(T,g[1],v,1e8),e.vao.bind(),e.vao.draw(o.TRIANGLES,e.vertexCount),e.lineWidth>0&&(o.lineWidth(e.lineWidth*r),e.vao.draw(o.LINES,e.lineVertexCount,e.vertexCount)),e.vao.unbind()}}var P=[[-1e8,-1e8,-1e8],[1e8,1e8,1e8]];function O(t,e,n,r,i,a,o){var s=n.gl;if((a===n.projectHasAlpha||o)&&C(e,n,r,i),a===n.hasAlpha||o){t.bind();var l=t.uniforms;l.model=r.model||f,l.view=r.view||f,l.projection=r.projection||f,y[0]=2/s.drawingBufferWidth,y[1]=2/s.drawingBufferHeight,l.screenSize=y,l.highlightId=n.highlightId,l.highlightScale=n.highlightScale,l.fragClipBounds=P,l.clipBounds=n.axes.bounds,l.opacity=n.opacity,l.pickGroup=n.pickId/255,l.pixelRatio=i,n.vao.bind(),n.vao.draw(s.TRIANGLES,n.vertexCount),n.lineWidth>0&&(s.lineWidth(n.lineWidth*i),n.vao.draw(s.LINES,n.lineVertexCount,n.vertexCount)),n.vao.unbind()}}function L(t,e,n,i){var a;a=Array.isArray(t)?e=this.pointCount||e<0)return null;var n=this.points[e],r=this._selectResult;r.index=e;for(var i=0;i<3;++i)r.position[i]=r.dataCoordinate[i]=n[i];return r},m.highlight=function(t){if(t){var e=t.index,n=255&e,r=e>>8&255,i=e>>16&255;this.highlightId=[n/255,r/255,i/255,0]}else this.highlightId=[1,1,1,1]},m.update=function(t){if("perspective"in(t=t||{})&&(this.useOrtho=!t.perspective),"orthographic"in t&&(this.useOrtho=!!t.orthographic),"lineWidth"in t&&(this.lineWidth=t.lineWidth),"project"in t)if(Array.isArray(t.project))this.axesProject=t.project;else{var e=!!t.project;this.axesProject=[e,e,e]}if("projectScale"in t)if(Array.isArray(t.projectScale))this.projectScale=t.projectScale.slice();else{var n=+t.projectScale;this.projectScale=[n,n,n]}if(this.projectHasAlpha=!1,"projectOpacity"in t){Array.isArray(t.projectOpacity)?this.projectOpacity=t.projectOpacity.slice():(n=+t.projectOpacity,this.projectOpacity=[n,n,n]);for(var r=0;r<3;++r)this.projectOpacity[r]=g(this.projectOpacity[r]),this.projectOpacity[r]<1&&(this.projectHasAlpha=!0)}this.hasAlpha=!1,"opacity"in t&&(this.opacity=g(t.opacity),this.opacity<1&&(this.hasAlpha=!0)),this.dirty=!0;var i,a,s=t.position,l=t.font||"normal",c=t.alignment||[0,0];if(2===c.length)i=c[0],a=c[1];else for(i=[],a=[],r=0;r0){var I=0,D=b,R=[0,0,0,1],z=[0,0,0,1],N=Array.isArray(d)&&Array.isArray(d[0]),j=Array.isArray(m)&&Array.isArray(m[0]);t:for(r=0;r0?1-A[0][0]:W<0?1+A[1][0]:1,Y*=Y>0?1-A[0][1]:Y<0?1+A[1][1]:1],X=T.cells||[],K=T.positions||[];for(k=0;k0){var 
m=n*u;o.drawBox(f-m,h-m,d+m,h+m,a),o.drawBox(f-m,p-m,d+m,p+m,a),o.drawBox(f-m,h-m,f+m,p+m,a),o.drawBox(d-m,h-m,d+m,p+m,a)}}}},s.update=function(t){t=t||{},this.innerFill=!!t.innerFill,this.outerFill=!!t.outerFill,this.innerColor=(t.innerColor||[0,0,0,.5]).slice(),this.outerColor=(t.outerColor||[0,0,0,.5]).slice(),this.borderColor=(t.borderColor||[0,0,0,1]).slice(),this.borderWidth=t.borderWidth||0,this.selectBox=(t.selectBox||this.selectBox).slice()},s.dispose=function(){this.boxBuffer.dispose(),this.boxShader.dispose(),this.plot.removeOverlay(this)}},2611:function(t,e,n){"use strict";t.exports=function(t,e){var n=e[0],a=e[1],o=r(t,n,a,{}),s=i.mallocUint8(n*a*4);return new l(t,o,s)};var r=n(4234),i=n(5306),a=n(5050),o=n(2288).nextPow2;function s(t,e,n,r,i){this.coord=[t,e],this.id=n,this.value=r,this.distance=i}function l(t,e,n){this.gl=t,this.fbo=e,this.buffer=n,this._readTimeout=null;var r=this;this._readCallback=function(){r.gl&&(e.bind(),t.readPixels(0,0,e.shape[0],e.shape[1],t.RGBA,t.UNSIGNED_BYTE,r.buffer),r._readTimeout=null)}}var c=l.prototype;Object.defineProperty(c,"shape",{get:function(){return this.gl?this.fbo.shape.slice():[0,0]},set:function(t){if(this.gl){this.fbo.shape=t;var e=this.fbo.shape[0],n=this.fbo.shape[1];if(n*e*4>this.buffer.length){i.free(this.buffer);for(var r=this.buffer=i.mallocUint8(o(n*e*4)),a=0;an)for(t=n;te)for(t=e;t=0){for(var k=0|_.type.charAt(_.type.length-1),T=new Array(k),M=0;M=0;)A+=1;w[y]=A}var S=new Array(n.length);function E(){h.program=o.program(d,h._vref,h._fref,x,w);for(var t=0;t=0){if((p=h.charCodeAt(h.length-1)-48)<2||p>4)throw new r("","Invalid data type for attribute "+f+": "+h);s(t,e,d[0],i,p,a,f)}else{if(!(h.indexOf("mat")>=0))throw new r("","Unknown data type for attribute "+f+": "+h);var p;if((p=h.charCodeAt(h.length-1)-48)<2||p>4)throw new r("","Invalid data type for attribute "+f+": "+h);l(t,e,d,i,p,a,f)}}}return a};var r=n(9068);function i(t,e,n,r,i,a){this._gl=t,this._wrapper=e,this._index=n,this._locations=r,this._dimension=i,this._constFunc=a}var a=i.prototype;a.pointer=function(t,e,n,r){var i=this,a=i._gl,o=i._locations[i._index];a.vertexAttribPointer(o,i._dimension,t||a.FLOAT,!!e,n||0,r||0),a.enableVertexAttribArray(o)},a.set=function(t,e,n,r){return this._constFunc(this._locations[this._index],t,e,n,r)},Object.defineProperty(a,"location",{get:function(){return this._locations[this._index]},set:function(t){return t!==this._locations[this._index]&&(this._locations[this._index]=0|t,this._wrapper.program=null),0|t}});var o=[function(t,e,n){return void 0===n.length?t.vertexAttrib1f(e,n):t.vertexAttrib1fv(e,n)},function(t,e,n,r){return void 0===n.length?t.vertexAttrib2f(e,n,r):t.vertexAttrib2fv(e,n)},function(t,e,n,r,i){return void 0===n.length?t.vertexAttrib3f(e,n,r,i):t.vertexAttrib3fv(e,n)},function(t,e,n,r,i,a){return void 0===n.length?t.vertexAttrib4f(e,n,r,i,a):t.vertexAttrib4fv(e,n)}];function s(t,e,n,r,a,s,l){var c=o[a],u=new i(t,e,n,r,a,c);Object.defineProperty(s,l,{set:function(e){return t.disableVertexAttribArray(r[n]),c(t,r[n],e),e},get:function(){return u},enumerable:!0})}function l(t,e,n,r,i,a,o){for(var l=new Array(i),c=new Array(i),u=0;u4)throw new i("","Invalid uniform dimension type for matrix "+name+": "+y);t["uniformMatrix"+m+"fv"](s[f],!1,h);break}throw new i("","Unknown uniform data type for "+name+": "+y)}if((m=y.charCodeAt(y.length-1)-48)<2||m>4)throw new i("","Invalid data type");switch(y.charAt(0)){case"b":case"i":t["uniform"+m+"iv"](s[f],h);break;case"v":t["uniform"+m+"fv"](s[f],h);break;default:throw new 
i("","Unrecognized data type for vector "+name+": "+y)}}}}}}function c(t,e){if("object"!==typeof e)return[[t,e]];var n=[];for(var r in e){var i=e[r],a=t;parseInt(r)+""===r?a+="["+r+"]":a+="."+r,"object"===typeof i?n.push.apply(n,c(a,i)):n.push([a,i])}return n}function u(t,e,r){if("object"===typeof r){var c=f(r);Object.defineProperty(t,e,{get:a(c),set:l(r),enumerable:!0,configurable:!1})}else s[r]?Object.defineProperty(t,e,{get:(u=r,function(t,e,n){return t.getUniform(e.program,n[u])}),set:l(r),enumerable:!0,configurable:!1}):t[e]=function(t){switch(t){case"bool":return!1;case"int":case"sampler2D":case"samplerCube":case"float":return 0;default:var e=t.indexOf("vec");if(0<=e&&e<=1&&t.length===4+e){if((n=t.charCodeAt(t.length-1)-48)<2||n>4)throw new i("","Invalid data type");return"b"===t.charAt(0)?o(n,!1):o(n,0)}if(0===t.indexOf("mat")&&4===t.length){var n;if((n=t.charCodeAt(t.length-1)-48)<2||n>4)throw new i("","Invalid uniform dimension type for matrix "+name+": "+t);return o(n*n,0)}throw new i("","Unknown uniform data type for "+name+": "+t)}}(n[r].type);var u}function f(t){var e;if(Array.isArray(t)){e=new Array(t.length);for(var n=0;n1){s[0]in a||(a[s[0]]=[]),a=a[s[0]];for(var l=1;l1)for(var l=0;l 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the tube vertex and normal at the given index.\n//\n// The returned vertex is for a tube ring with its center at origin, radius of length(d), pointing in the direction of d.\n//\n// Each tube segment is made up of a ring of vertices.\n// These vertices are used to make up the triangles of the tube by connecting them together in the vertex array.\n// The indexes of tube segments run from 0 to 8.\n//\nvec3 getTubePosition(vec3 d, float index, out vec3 normal) {\n float segmentCount = 8.0;\n\n float angle = 2.0 * 3.14159 * (index / segmentCount);\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d);\n vec3 y = v * sin(angle) * length(d);\n vec3 v3 = x + y;\n\n normal = normalize(v3);\n\n return v3;\n}\n\nattribute vec4 vector;\nattribute vec4 color, position;\nattribute vec2 uv;\n\nuniform float vectorScale, tubeScale;\nuniform mat4 model, view, projection, inverseModel;\nuniform vec3 eyePosition, lightPosition;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n // Scale the vector magnitude to stay constant with\n // model & view changes.\n vec3 normal;\n vec3 XYZ = getTubePosition(mat3(model) * (tubeScale * vector.w * normalize(vector.xyz)), position.w, normal);\n vec4 tubePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * tubePosition;\n cameraCoordinate.xyz /= cameraCoordinate.w;\n f_lightDirection = lightPosition - cameraCoordinate.xyz;\n f_eyeDirection = eyePosition - cameraCoordinate.xyz;\n f_normal = normalize((vec4(normal, 0.0) * inverseModel).xyz);\n\n // vec4 m_position = model * vec4(tubePosition, 1.0);\n vec4 t_position = view * tubePosition;\n gl_Position = projection * t_position;\n\n f_color = color;\n f_data = tubePosition.xyz;\n f_position = position.xyz;\n f_uv = uv;\n}\n"]),a=r(["#extension GL_OES_standard_derivatives : enable\n\nprecision highp float;\n#define GLSLIFY 1\n\nfloat 
beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat cookTorranceSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness,\n float fresnel) {\n\n float VdotN = max(dot(viewDirection, surfaceNormal), 0.0);\n float LdotN = max(dot(lightDirection, surfaceNormal), 0.0);\n\n //Half angle vector\n vec3 H = normalize(lightDirection + viewDirection);\n\n //Geometric term\n float NdotH = max(dot(surfaceNormal, H), 0.0);\n float VdotH = max(dot(viewDirection, H), 0.000001);\n float LdotH = max(dot(lightDirection, H), 0.000001);\n float G1 = (2.0 * NdotH * VdotN) / VdotH;\n float G2 = (2.0 * NdotH * LdotN) / LdotH;\n float G = min(1.0, min(G1, G2));\n \n //Distribution term\n float D = beckmannDistribution(NdotH, roughness);\n\n //Fresnel term\n float F = pow(1.0 - VdotN, fresnel);\n\n //Multiply terms and done\n return G * F * D / max(3.14159265 * VdotN, 0.000001);\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float roughness, fresnel, kambient, kdiffuse, kspecular, opacity;\nuniform sampler2D texture;\n\nvarying vec3 f_normal, f_lightDirection, f_eyeDirection, f_data, f_position;\nvarying vec4 f_color;\nvarying vec2 f_uv;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n vec3 N = normalize(f_normal);\n vec3 L = normalize(f_lightDirection);\n vec3 V = normalize(f_eyeDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = min(1.0, max(0.0, cookTorranceSpecular(L, V, N, roughness, fresnel)));\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n vec4 surfaceColor = f_color * texture2D(texture, f_uv);\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = litColor * opacity;\n}\n"]),o=r(["precision highp float;\n\nprecision highp float;\n#define GLSLIFY 1\n\nvec3 getOrthogonalVector(vec3 v) {\n // Return up-vector for only-z vector.\n // Return ax + by + cz = 0, a point that lies on the plane that has v as a normal and that isn't (0,0,0).\n // From the above if-statement we have ||a|| > 0 U ||b|| > 0.\n // Assign z = 0, x = -b, y = a:\n // a*-b + b*a + c*0 = -ba + ba + 0 = 0\n if (v.x*v.x > v.z*v.z || v.y*v.y > v.z*v.z) {\n return normalize(vec3(-v.y, v.x, 0.0));\n } else {\n return normalize(vec3(0.0, v.z, -v.y));\n }\n}\n\n// Calculate the tube vertex and normal at the given index.\n//\n// The returned vertex is for a tube ring with its center at origin, radius of length(d), pointing in the direction of d.\n//\n// Each tube segment is made up of a ring of vertices.\n// These vertices are used to make up the triangles of the tube by connecting them together in the vertex array.\n// The indexes of tube segments run from 0 to 8.\n//\nvec3 getTubePosition(vec3 d, float index, out vec3 normal) 
{\n float segmentCount = 8.0;\n\n float angle = 2.0 * 3.14159 * (index / segmentCount);\n\n vec3 u = getOrthogonalVector(d);\n vec3 v = normalize(cross(u, d));\n\n vec3 x = u * cos(angle) * length(d);\n vec3 y = v * sin(angle) * length(d);\n vec3 v3 = x + y;\n\n normal = normalize(v3);\n\n return v3;\n}\n\nattribute vec4 vector;\nattribute vec4 position;\nattribute vec4 id;\n\nuniform mat4 model, view, projection;\nuniform float tubeScale;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n vec3 normal;\n vec3 XYZ = getTubePosition(mat3(model) * (tubeScale * vector.w * normalize(vector.xyz)), position.w, normal);\n vec4 tubePosition = model * vec4(position.xyz, 1.0) + vec4(XYZ, 0.0);\n\n gl_Position = projection * view * tubePosition;\n f_id = id;\n f_position = position.xyz;\n}\n"]),s=r(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying vec3 f_position;\nvarying vec4 f_id;\n\nvoid main() {\n if (outOfRange(clipBounds[0], clipBounds[1], f_position)) discard;\n\n gl_FragColor = vec4(pickId, f_id.xyz);\n}"]);e.meshShader={vertex:i,fragment:a,attributes:[{name:"position",type:"vec4"},{name:"color",type:"vec4"},{name:"uv",type:"vec2"},{name:"vector",type:"vec4"}]},e.pickShader={vertex:o,fragment:s,attributes:[{name:"position",type:"vec4"},{name:"id",type:"vec4"},{name:"vector",type:"vec4"}]}},7307:function(t,e,n){"use strict";var r=n(2858),i=n(4020),a=["xyz","xzy","yxz","yzx","zxy","zyx"],o=function(t,e,n,a){for(var o=0,s=0;s0)for(k=0;k<8;k++){var T=(k+1)%8;c.push(h[k],d[k],d[T],d[T],h[T],h[k]),f.push(y,m,m,m,y,y),p.push(g,v,v,v,g,g);var M=c.length;u.push([M-6,M-5,M-4],[M-3,M-2,M-1])}var A=h;h=d,d=A;var S=y;y=m,m=S;var E=g;g=v,v=E}return{positions:c,cells:u,vectors:f,vertexIntensity:p}}(t,n,a,o)})),f=[],h=[],d=[],p=[];for(s=0;se)return n-1}return n},l=function(t,e,n){return tn?n:t},c=function(t){var e=1/0;t.sort((function(t,e){return t-e}));for(var n=t.length,r=1;rf-1||y>h-1||b>d-1)return r.create();var x,w,_,k,T,M,A=a[0][p],S=a[0][m],E=a[1][g],C=a[1][y],P=a[2][v],O=(o-A)/(S-A),L=(c-E)/(C-E),I=(u-P)/(a[2][b]-P);switch(isFinite(O)||(O=.5),isFinite(L)||(L=.5),isFinite(I)||(I=.5),n.reversedX&&(p=f-1-p,m=f-1-m),n.reversedY&&(g=h-1-g,y=h-1-y),n.reversedZ&&(v=d-1-v,b=d-1-b),n.filled){case 5:T=v,M=b,_=g*d,k=y*d,x=p*d*h,w=m*d*h;break;case 4:T=v,M=b,x=p*d,w=m*d,_=g*d*f,k=y*d*f;break;case 3:_=g,k=y,T=v*h,M=b*h,x=p*h*d,w=m*h*d;break;case 2:_=g,k=y,x=p*h,w=m*h,T=v*h*f,M=b*h*f;break;case 1:x=p,w=m,T=v*f,M=b*f,_=g*f*d,k=y*f*d;break;default:x=p,w=m,_=g*f,k=y*f,T=v*f*h,M=b*f*h}var D=i[x+_+T],R=i[x+_+M],z=i[x+k+T],N=i[x+k+M],j=i[w+_+T],F=i[w+_+M],B=i[w+k+T],U=i[w+k+M],H=r.create(),V=r.create(),q=r.create(),G=r.create();r.lerp(H,D,j,O),r.lerp(V,R,F,O),r.lerp(q,z,B,O),r.lerp(G,N,U,O);var W=r.create(),Y=r.create();r.lerp(W,H,q,L),r.lerp(Y,V,G,L);var $=r.create();return r.lerp($,W,Y,I),$}(e,t,d)},g=t.getDivergence||function(t,e){var n=r.create(),i=1e-4;r.add(n,t,[i,0,0]);var a=p(n);r.subtract(a,a,e),r.scale(a,a,1/i),r.add(n,t,[0,i,0]);var 
o=p(n);r.subtract(o,o,e),r.scale(o,o,1/i),r.add(n,t,[0,0,i]);var s=p(n);return r.subtract(s,s,e),r.scale(s,s,1/i),r.add(n,a,o),r.add(n,n,s),n},v=[],m=e[0][0],y=e[0][1],b=e[0][2],x=e[1][0],w=e[1][1],_=e[1][2],k=function(t){var e=t[0],n=t[1],r=t[2];return!(ex||nw||r_)},T=10*r.distance(e[0],e[1])/i,M=T*T,A=1,S=0,E=n.length;E>1&&(A=function(t){for(var e=[],n=[],r=[],i={},a={},o={},s=t.length,l=0;lS&&(S=N),R.push(N),v.push({points:O,velocities:L,divergences:R});for(var j=0;j<100*i&&O.lengthM&&r.scale(F,F,T/Math.sqrt(B)),r.add(F,F,P),I=p(F),r.squaredDistance(D,F)-M>-1e-4*M&&(O.push(F),D=F,L.push(I),z=g(F,I),N=r.length(z),isFinite(N)&&N>S&&(S=N),R.push(N)),P=F}}var U=o(v,t.colormap,S,A);return f?U.tubeScale=f:(0===S&&(S=1),U.tubeScale=.5*u*A/S),U};var u=n(9578),f=n(1140).createMesh;t.exports.createTubeMesh=function(t,e){return f(t,e,{shaders:u,traceType:"streamtube"})}},9054:function(t,e,n){var r=n(5158),i=n(6832),a=i(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec4 uv;\nattribute vec3 f;\nattribute vec3 normal;\n\nuniform vec3 objectOffset;\nuniform mat4 model, view, projection, inverseModel;\nuniform vec3 lightPosition, eyePosition;\nuniform sampler2D colormap;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec2 planeCoordinate;\nvarying vec3 lightDirection, eyeDirection, surfaceNormal;\nvarying vec4 vColor;\n\nvoid main() {\n vec3 localCoordinate = vec3(uv.zw, f.x);\n worldCoordinate = objectOffset + localCoordinate;\n vec4 worldPosition = model * vec4(worldCoordinate, 1.0);\n vec4 clipPosition = projection * view * worldPosition;\n gl_Position = clipPosition;\n kill = f.y;\n value = f.z;\n planeCoordinate = uv.xy;\n\n vColor = texture2D(colormap, vec2(value, value));\n\n //Lighting geometry parameters\n vec4 cameraCoordinate = view * worldPosition;\n cameraCoordinate.xyz /= cameraCoordinate.w;\n lightDirection = lightPosition - cameraCoordinate.xyz;\n eyeDirection = eyePosition - cameraCoordinate.xyz;\n surfaceNormal = normalize((vec4(normal,0) * inverseModel).xyz);\n}\n"]),o=i(["precision highp float;\n#define GLSLIFY 1\n\nfloat beckmannDistribution(float x, float roughness) {\n float NdotH = max(x, 0.0001);\n float cos2Alpha = NdotH * NdotH;\n float tan2Alpha = (cos2Alpha - 1.0) / cos2Alpha;\n float roughness2 = roughness * roughness;\n float denom = 3.141592653589793 * roughness2 * cos2Alpha * cos2Alpha;\n return exp(tan2Alpha / roughness2) / denom;\n}\n\nfloat beckmannSpecular(\n vec3 lightDirection,\n vec3 viewDirection,\n vec3 surfaceNormal,\n float roughness) {\n return beckmannDistribution(dot(surfaceNormal, normalize(lightDirection + viewDirection)), roughness);\n}\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec3 lowerBound, upperBound;\nuniform float contourTint;\nuniform vec4 contourColor;\nuniform sampler2D colormap;\nuniform vec3 clipBounds[2];\nuniform float roughness, fresnel, kambient, kdiffuse, kspecular, opacity;\nuniform float vertexColor;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec3 lightDirection, eyeDirection, surfaceNormal;\nvarying vec4 vColor;\n\nvoid main() {\n if (\n kill > 0.0 ||\n 
vColor.a == 0.0 ||\n outOfRange(clipBounds[0], clipBounds[1], worldCoordinate)\n ) discard;\n\n vec3 N = normalize(surfaceNormal);\n vec3 V = normalize(eyeDirection);\n vec3 L = normalize(lightDirection);\n\n if(gl_FrontFacing) {\n N = -N;\n }\n\n float specular = max(beckmannSpecular(L, V, N, roughness), 0.);\n float diffuse = min(kambient + kdiffuse * max(dot(N, L), 0.0), 1.0);\n\n //decide how to interpolate color \u2014 in vertex or in fragment\n vec4 surfaceColor =\n step(vertexColor, .5) * texture2D(colormap, vec2(value, value)) +\n step(.5, vertexColor) * vColor;\n\n vec4 litColor = surfaceColor.a * vec4(diffuse * surfaceColor.rgb + kspecular * vec3(1,1,1) * specular, 1.0);\n\n gl_FragColor = mix(litColor, contourColor, contourTint) * opacity;\n}\n"]),s=i(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec4 uv;\nattribute float f;\n\nuniform vec3 objectOffset;\nuniform mat3 permutation;\nuniform mat4 model, view, projection;\nuniform float height, zOffset;\nuniform sampler2D colormap;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec2 planeCoordinate;\nvarying vec3 lightDirection, eyeDirection, surfaceNormal;\nvarying vec4 vColor;\n\nvoid main() {\n vec3 dataCoordinate = permutation * vec3(uv.xy, height);\n worldCoordinate = objectOffset + dataCoordinate;\n vec4 worldPosition = model * vec4(worldCoordinate, 1.0);\n\n vec4 clipPosition = projection * view * worldPosition;\n clipPosition.z += zOffset;\n\n gl_Position = clipPosition;\n value = f + objectOffset.z;\n kill = -1.0;\n planeCoordinate = uv.zw;\n\n vColor = texture2D(colormap, vec2(value, value));\n\n //Don't do lighting for contours\n surfaceNormal = vec3(1,0,0);\n eyeDirection = vec3(0,1,0);\n lightDirection = vec3(0,0,1);\n}\n"]),l=i(["precision highp float;\n#define GLSLIFY 1\n\nbool outOfRange(float a, float b, float p) {\n return ((p > max(a, b)) || \n (p < min(a, b)));\n}\n\nbool outOfRange(vec2 a, vec2 b, vec2 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y));\n}\n\nbool outOfRange(vec3 a, vec3 b, vec3 p) {\n return (outOfRange(a.x, b.x, p.x) ||\n outOfRange(a.y, b.y, p.y) ||\n outOfRange(a.z, b.z, p.z));\n}\n\nbool outOfRange(vec4 a, vec4 b, vec4 p) {\n return outOfRange(a.xyz, b.xyz, p.xyz);\n}\n\nuniform vec2 shape;\nuniform vec3 clipBounds[2];\nuniform float pickId;\n\nvarying float value, kill;\nvarying vec3 worldCoordinate;\nvarying vec2 planeCoordinate;\nvarying vec3 surfaceNormal;\n\nvec2 splitFloat(float v) {\n float vh = 255.0 * v;\n float upper = floor(vh);\n float lower = fract(vh);\n return vec2(upper / 255.0, floor(lower * 16.0) / 16.0);\n}\n\nvoid main() {\n if ((kill > 0.0) ||\n (outOfRange(clipBounds[0], clipBounds[1], worldCoordinate))) discard;\n\n vec2 ux = splitFloat(planeCoordinate.x / shape.x);\n vec2 uy = splitFloat(planeCoordinate.y / shape.y);\n gl_FragColor = vec4(pickId, ux.x, uy.x, ux.y + (uy.y/16.0));\n}\n"]);e.createShader=function(t){var e=r(t,a,o,null,[{name:"uv",type:"vec4"},{name:"f",type:"vec3"},{name:"normal",type:"vec3"}]);return e.attributes.uv.location=0,e.attributes.f.location=1,e.attributes.normal.location=2,e},e.createPickShader=function(t){var e=r(t,a,l,null,[{name:"uv",type:"vec4"},{name:"f",type:"vec3"},{name:"normal",type:"vec3"}]);return e.attributes.uv.location=0,e.attributes.f.location=1,e.attributes.normal.location=2,e},e.createContourShader=function(t){var e=r(t,s,o,null,[{name:"uv",type:"vec4"},{name:"f",type:"float"}]);return 
e.attributes.uv.location=0,e.attributes.f.location=1,e},e.createPickContourShader=function(t){var e=r(t,s,l,null,[{name:"uv",type:"vec4"},{name:"f",type:"float"}]);return e.attributes.uv.location=0,e.attributes.f.location=1,e}},3754:function(t,e,n){"use strict";t.exports=function(t){var e=t.gl,n=y(e),r=x(e),s=b(e),l=w(e),c=i(e),u=a(e,[{buffer:c,size:4,stride:_,offset:0},{buffer:c,size:3,stride:_,offset:16},{buffer:c,size:3,stride:_,offset:28}]),f=i(e),h=a(e,[{buffer:f,size:4,stride:20,offset:0},{buffer:f,size:1,stride:20,offset:16}]),d=i(e),p=a(e,[{buffer:d,size:2,type:e.FLOAT}]),g=o(e,1,S,e.RGBA,e.UNSIGNED_BYTE);g.minFilter=e.LINEAR,g.magFilter=e.LINEAR;var v=new E(e,[0,0],[[0,0,0],[0,0,0]],n,r,c,u,g,s,l,f,h,d,p,[0,0,0]),m={levels:[[],[],[]]};for(var k in t)m[k]=t[k];return m.colormap=m.colormap||"jet",v.update(m),v};var r=n(2288),i=n(5827),a=n(2944),o=n(8931),s=n(5306),l=n(9156),c=n(7498),u=n(7382),f=n(5050),h=n(4162),d=n(104),p=n(7437),g=n(5070),v=n(9144),m=n(9054),y=m.createShader,b=m.createContourShader,x=m.createPickShader,w=m.createPickContourShader,_=40,k=[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1],T=[[0,0],[0,1],[1,0],[1,1],[1,0],[0,1]],M=[[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0]];function A(t,e,n,r,i){this.position=t,this.index=e,this.uv=n,this.level=r,this.dataCoordinate=i}!function(){for(var t=0;t<3;++t){var e=M[t],n=(t+2)%3;e[(t+1)%3+0]=1,e[n+3]=1,e[t+6]=1}}();var S=256;function E(t,e,n,r,i,a,o,l,c,u,h,d,p,g,v){this.gl=t,this.shape=e,this.bounds=n,this.objectOffset=v,this.intensityBounds=[],this._shader=r,this._pickShader=i,this._coordinateBuffer=a,this._vao=o,this._colorMap=l,this._contourShader=c,this._contourPickShader=u,this._contourBuffer=h,this._contourVAO=d,this._contourOffsets=[[],[],[]],this._contourCounts=[[],[],[]],this._vertexCount=0,this._pickResult=new A([0,0,0],[0,0],[0,0],[0,0,0],[0,0,0]),this._dynamicBuffer=p,this._dynamicVAO=g,this._dynamicOffsets=[0,0,0],this._dynamicCounts=[0,0,0],this.contourWidth=[1,1,1],this.contourLevels=[[1],[1],[1]],this.contourTint=[0,0,0],this.contourColor=[[.5,.5,.5,1],[.5,.5,.5,1],[.5,.5,.5,1]],this.showContour=!0,this.showSurface=!0,this.enableHighlight=[!0,!0,!0],this.highlightColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.highlightTint=[1,1,1],this.highlightLevel=[-1,-1,-1],this.enableDynamic=[!0,!0,!0],this.dynamicLevel=[NaN,NaN,NaN],this.dynamicColor=[[0,0,0,1],[0,0,0,1],[0,0,0,1]],this.dynamicTint=[1,1,1],this.dynamicWidth=[1,1,1],this.axesBounds=[[1/0,1/0,1/0],[-1/0,-1/0,-1/0]],this.surfaceProject=[!1,!1,!1],this.contourProject=[[!1,!1,!1],[!1,!1,!1],[!1,!1,!1]],this.colorBounds=[!1,!1],this._field=[f(s.mallocFloat(1024),[0,0]),f(s.mallocFloat(1024),[0,0]),f(s.mallocFloat(1024),[0,0])],this.pickId=1,this.clipBounds=[[-1/0,-1/0,-1/0],[1/0,1/0,1/0]],this.snapToData=!1,this.pixelRatio=1,this.opacity=1,this.lightPosition=[10,1e4,0],this.ambientLight=.8,this.diffuseLight=.8,this.specularLight=2,this.roughness=.5,this.fresnel=1.5,this.vertexColor=0,this.dirty=!0}var C=E.prototype;C.genColormap=function(t,e){var n=!1,r=u([l({colormap:t,nshades:S,format:"rgba"}).map((function(t,r){var i=e?function(t,e){if(!e)return 1;if(!e.length)return 1;for(var n=0;nt&&n>0){var r=(e[n][0]-t)/(e[n][0]-e[n-1][0]);return e[n][1]*(1-r)+r*e[n-1][1]}}return 1}(r/255,e):t[3];return i<1&&(n=!0),[t[0],t[1],t[2],255*i]}))]);return c.divseq(r,255),this.hasAlphaScale=n,r},C.isTransparent=function(){return this.opacity<1||this.hasAlphaScale},C.isOpaque=function(){return!this.isTransparent()},C.pickSlots=1,C.setPickBase=function(t){this.pickId=t};var 
P=[0,0,0],O={showSurface:!1,showContour:!1,projections:[k.slice(),k.slice(),k.slice()],clipBounds:[[[0,0,0],[0,0,0]],[[0,0,0],[0,0,0]],[[0,0,0],[0,0,0]]]};function L(t,e){var n,r,i,a=e.axes&&e.axes.lastCubeProps.axis||P,o=e.showSurface,s=e.showContour;for(n=0;n<3;++n)for(o=o||e.surfaceProject[n],r=0;r<3;++r)s=s||e.contourProject[n][r];for(n=0;n<3;++n){var l=O.projections[n];for(r=0;r<16;++r)l[r]=0;for(r=0;r<4;++r)l[5*r]=1;l[5*n]=0,l[12+n]=e.axesBounds[+(a[n]>0)][n],d(l,t.model,l);var c=O.clipBounds[n];for(i=0;i<2;++i)for(r=0;r<3;++r)c[i][r]=t.clipBounds[i][r];c[0][n]=-1e8,c[1][n]=1e8}return O.showSurface=o,O.showContour=s,O}var I={model:k,view:k,projection:k,inverseModel:k.slice(),lowerBound:[0,0,0],upperBound:[0,0,0],colorMap:0,clipBounds:[[0,0,0],[0,0,0]],height:0,contourTint:0,contourColor:[0,0,0,1],permutation:[1,0,0,0,1,0,0,0,1],zOffset:-1e-4,objectOffset:[0,0,0],kambient:1,kdiffuse:1,kspecular:1,lightPosition:[1e3,1e3,1e3],eyePosition:[0,0,0],roughness:1,fresnel:1,opacity:1,vertexColor:0},D=k.slice(),R=[1,0,0,0,1,0,0,0,1];function z(t,e){t=t||{};var n=this.gl;n.disable(n.CULL_FACE),this._colorMap.bind(0);var r=I;r.model=t.model||k,r.view=t.view||k,r.projection=t.projection||k,r.lowerBound=[this.bounds[0][0],this.bounds[0][1],this.colorBounds[0]||this.bounds[0][2]],r.upperBound=[this.bounds[1][0],this.bounds[1][1],this.colorBounds[1]||this.bounds[1][2]],r.objectOffset=this.objectOffset,r.contourColor=this.contourColor[0],r.inverseModel=p(r.inverseModel,r.model);for(var i=0;i<2;++i)for(var a=r.clipBounds[i],o=0;o<3;++o)a[o]=Math.min(Math.max(this.clipBounds[i][o],-1e8),1e8);r.kambient=this.ambientLight,r.kdiffuse=this.diffuseLight,r.kspecular=this.specularLight,r.roughness=this.roughness,r.fresnel=this.fresnel,r.opacity=this.opacity,r.height=0,r.permutation=R,r.vertexColor=this.vertexColor;var s=D;for(d(s,r.view,r.model),d(s,r.projection,s),p(s,s),i=0;i<3;++i)r.eyePosition[i]=s[12+i]/s[15];var l=s[15];for(i=0;i<3;++i)l+=this.lightPosition[i]*s[4*i+3];for(i=0;i<3;++i){var c=s[12+i];for(o=0;o<3;++o)c+=s[4*o+i]*this.lightPosition[o];r.lightPosition[i]=c/l}var u=L(r,this);if(u.showSurface){for(this._shader.bind(),this._shader.uniforms=r,this._vao.bind(),this.showSurface&&this._vertexCount&&this._vao.draw(n.TRIANGLES,this._vertexCount),i=0;i<3;++i)this.surfaceProject[i]&&this.vertexCount&&(this._shader.uniforms.model=u.projections[i],this._shader.uniforms.clipBounds=u.clipBounds[i],this._vao.draw(n.TRIANGLES,this._vertexCount));this._vao.unbind()}if(u.showContour){var f=this._contourShader;r.kambient=1,r.kdiffuse=0,r.kspecular=0,r.opacity=1,f.bind(),f.uniforms=r;var h=this._contourVAO;for(h.bind(),i=0;i<3;++i)for(f.uniforms.permutation=M[i],n.lineWidth(this.contourWidth[i]*this.pixelRatio),o=0;o>4)/16)/255,i=Math.floor(r),a=r-i,o=e[1]*(t.value[1]+(15&t.value[2])/16)/255,s=Math.floor(o),l=o-s;i+=1,s+=1;var c=n.position;c[0]=c[1]=c[2]=0;for(var u=0;u<2;++u)for(var f=u?a:1-a,h=0;h<2;++h)for(var d=i+u,p=s+h,v=f*(h?l:1-l),m=0;m<3;++m)c[m]+=this._field[m].get(d,p)*v;for(var y=this._pickResult.level,b=0;b<3;++b)if(y[b]=g.le(this.contourLevels[b],c[b]),y[b]<0)this.contourLevels[b].length>0&&(y[b]=0);else if(y[b]Math.abs(w-c[b])&&(y[b]+=1)}for(n.index[0]=a<.5?i:i+1,n.index[1]=l<.5?s:s+1,n.uv[0]=r/e[0],n.uv[1]=o/e[1],m=0;m<3;++m)n.dataCoordinate[m]=this._field[m].get(n.index[0],n.index[1]);return n},C.padField=function(t,e){var 
n=e.shape.slice(),r=t.shape.slice();c.assign(t.lo(1,1).hi(n[0],n[1]),e),c.assign(t.lo(1).hi(n[0],1),e.hi(n[0],1)),c.assign(t.lo(1,r[1]-1).hi(n[0],1),e.lo(0,n[1]-1).hi(n[0],1)),c.assign(t.lo(0,1).hi(1,n[1]),e.hi(1)),c.assign(t.lo(r[0]-1,1).hi(1,n[1]),e.lo(n[0]-1)),t.set(0,0,e.get(0,0)),t.set(0,r[1]-1,e.get(0,n[1]-1)),t.set(r[0]-1,0,e.get(n[0]-1,0)),t.set(r[0]-1,r[1]-1,e.get(n[0]-1,n[1]-1))},C.update=function(t){t=t||{},this.objectOffset=t.objectOffset||this.objectOffset,this.dirty=!0,"contourWidth"in t&&(this.contourWidth=j(t.contourWidth,Number)),"showContour"in t&&(this.showContour=j(t.showContour,Boolean)),"showSurface"in t&&(this.showSurface=!!t.showSurface),"contourTint"in t&&(this.contourTint=j(t.contourTint,Boolean)),"contourColor"in t&&(this.contourColor=B(t.contourColor)),"contourProject"in t&&(this.contourProject=j(t.contourProject,(function(t){return j(t,Boolean)}))),"surfaceProject"in t&&(this.surfaceProject=t.surfaceProject),"dynamicColor"in t&&(this.dynamicColor=B(t.dynamicColor)),"dynamicTint"in t&&(this.dynamicTint=j(t.dynamicTint,Number)),"dynamicWidth"in t&&(this.dynamicWidth=j(t.dynamicWidth,Number)),"opacity"in t&&(this.opacity=t.opacity),"opacityscale"in t&&(this.opacityscale=t.opacityscale),"colorBounds"in t&&(this.colorBounds=t.colorBounds),"vertexColor"in t&&(this.vertexColor=t.vertexColor?1:0),"colormap"in t&&this._colorMap.setPixels(this.genColormap(t.colormap,this.opacityscale));var e=t.field||t.coords&&t.coords[2]||null,n=!1;if(e||(e=this._field[2].shape[0]||this._field[2].shape[2]?this._field[2].lo(1,1).hi(this._field[2].shape[0]-2,this._field[2].shape[1]-2):this._field[2].hi(0,0)),"field"in t||"coords"in t){var i=(e.shape[0]+2)*(e.shape[1]+2);i>this._field[2].data.length&&(s.freeFloat(this._field[2].data),this._field[2].data=s.mallocFloat(r.nextPow2(i))),this._field[2]=f(this._field[2].data,[e.shape[0]+2,e.shape[1]+2]),this.padField(this._field[2],e),this.shape=e.shape.slice();for(var a=this.shape,o=0;o<2;++o)this._field[2].size>this._field[o].data.length&&(s.freeFloat(this._field[o].data),this._field[o].data=s.mallocFloat(this._field[2].size)),this._field[o]=f(this._field[o].data,[a[0]+2,a[1]+2]);if(t.coords){var l=t.coords;if(!Array.isArray(l)||3!==l.length)throw new Error("gl-surface: invalid coordinates for x/y");for(o=0;o<2;++o){var c=l[o];for(m=0;m<2;++m)if(c.shape[m]!==a[m])throw new Error("gl-surface: coords have incorrect shape");this.padField(this._field[o],c)}}else if(t.ticks){var u=t.ticks;if(!Array.isArray(u)||2!==u.length)throw new Error("gl-surface: invalid ticks");for(o=0;o<2;++o){var d=u[o];if((Array.isArray(d)||d.length)&&(d=f(d)),d.shape[0]!==a[o])throw new Error("gl-surface: invalid tick length");var p=f(d.data,a);p.stride[o]=d.stride[0],p.stride[1^o]=0,this.padField(this._field[o],p)}}else{for(o=0;o<2;++o){var g=[0,0];g[o]=1,this._field[o]=f(this._field[o].data,[a[0]+2,a[1]+2],g,0)}this._field[0].set(0,0,0);for(var m=0;m0){for(var bt=0;bt<5;++bt)J.pop();U-=1}continue t}J.push(rt[0],rt[1],ot[0],ot[1],rt[2]),U+=1}}nt.push(U)}this._contourOffsets[Q]=et,this._contourCounts[Q]=nt}var xt=s.mallocFloat(J.length);for(o=0;os||o[1]<0||o[1]>s)throw new Error("gl-texture2d: Invalid texture size");var l=p(o,e.stride.slice()),c=0;"float32"===n?c=t.FLOAT:"float64"===n?(c=t.FLOAT,l=!1,n="float32"):"uint8"===n?c=t.UNSIGNED_BYTE:(c=t.UNSIGNED_BYTE,l=!1,n="uint8");var f,d,v=0;if(2===o.length)v=t.LUMINANCE,o=[o[0],o[1],1],e=r(e.data,o,[e.stride[0],e.stride[1],1],e.offset);else{if(3!==o.length)throw new Error("gl-texture2d: Invalid shape for 
texture");if(1===o[2])v=t.ALPHA;else if(2===o[2])v=t.LUMINANCE_ALPHA;else if(3===o[2])v=t.RGB;else{if(4!==o[2])throw new Error("gl-texture2d: Invalid shape for pixel coords");v=t.RGBA}}c!==t.FLOAT||t.getExtension("OES_texture_float")||(c=t.UNSIGNED_BYTE,l=!1);var m=e.size;if(l)f=0===e.offset&&e.data.length===m?e.data:e.data.subarray(e.offset,e.offset+m);else{var y=[o[2],o[2]*o[0],1];d=a.malloc(m,n);var b=r(d,o,y,0);"float32"!==n&&"float64"!==n||c!==t.UNSIGNED_BYTE?i.assign(b,e):u(b,e),f=d.subarray(0,m)}var x=g(t);return t.texImage2D(t.TEXTURE_2D,0,v,o[0],o[1],0,v,c,f),l||a.free(d),new h(t,x,o[0],o[1],v,c)}(t,e)}throw new Error("gl-texture2d: Invalid arguments for texture2d constructor")};var o=null,s=null,l=null;function c(t){return"undefined"!=typeof HTMLCanvasElement&&t instanceof HTMLCanvasElement||"undefined"!=typeof HTMLImageElement&&t instanceof HTMLImageElement||"undefined"!=typeof HTMLVideoElement&&t instanceof HTMLVideoElement||"undefined"!=typeof ImageData&&t instanceof ImageData}var u=function(t,e){i.muls(t,e,255)};function f(t,e,n){var r=t.gl,i=r.getParameter(r.MAX_TEXTURE_SIZE);if(e<0||e>i||n<0||n>i)throw new Error("gl-texture2d: Invalid texture size");return t._shape=[e,n],t.bind(),r.texImage2D(r.TEXTURE_2D,0,t.format,e,n,0,t.format,t.type,null),t._mipLevels=[0],t}function h(t,e,n,r,i,a){this.gl=t,this.handle=e,this.format=i,this.type=a,this._shape=[n,r],this._mipLevels=[0],this._magFilter=t.NEAREST,this._minFilter=t.NEAREST,this._wrapS=t.CLAMP_TO_EDGE,this._wrapT=t.CLAMP_TO_EDGE,this._anisoSamples=1;var o=this,s=[this._wrapS,this._wrapT];Object.defineProperties(s,[{get:function(){return o._wrapS},set:function(t){return o.wrapS=t}},{get:function(){return o._wrapT},set:function(t){return o.wrapT=t}}]),this._wrapVector=s;var l=[this._shape[0],this._shape[1]];Object.defineProperties(l,[{get:function(){return o._shape[0]},set:function(t){return o.width=t}},{get:function(){return o._shape[1]},set:function(t){return o.height=t}}]),this._shapeVector=l}var d=h.prototype;function p(t,e){return 3===t.length?1===e[2]&&e[1]===t[0]*t[2]&&e[0]===t[2]:1===e[0]&&e[1]===t[0]}function g(t){var e=t.createTexture();return t.bindTexture(t.TEXTURE_2D,e),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_MIN_FILTER,t.NEAREST),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_MAG_FILTER,t.NEAREST),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_WRAP_S,t.CLAMP_TO_EDGE),t.texParameteri(t.TEXTURE_2D,t.TEXTURE_WRAP_T,t.CLAMP_TO_EDGE),e}function v(t,e,n,r,i){var a=t.getParameter(t.MAX_TEXTURE_SIZE);if(e<0||e>a||n<0||n>a)throw new Error("gl-texture2d: Invalid texture shape");if(i===t.FLOAT&&!t.getExtension("OES_texture_float"))throw new Error("gl-texture2d: Floating point textures not supported on this platform");var o=g(t);return t.texImage2D(t.TEXTURE_2D,0,r,e,n,0,r,i,null),new h(t,o,e,n,r,i)}Object.defineProperties(d,{minFilter:{get:function(){return this._minFilter},set:function(t){this.bind();var e=this.gl;if(this.type===e.FLOAT&&o.indexOf(t)>=0&&(e.getExtension("OES_texture_float_linear")||(t=e.NEAREST)),s.indexOf(t)<0)throw new Error("gl-texture2d: Unknown filter mode "+t);return e.texParameteri(e.TEXTURE_2D,e.TEXTURE_MIN_FILTER,t),this._minFilter=t}},magFilter:{get:function(){return this._magFilter},set:function(t){this.bind();var e=this.gl;if(this.type===e.FLOAT&&o.indexOf(t)>=0&&(e.getExtension("OES_texture_float_linear")||(t=e.NEAREST)),s.indexOf(t)<0)throw new Error("gl-texture2d: Unknown filter mode "+t);return e.texParameteri(e.TEXTURE_2D,e.TEXTURE_MAG_FILTER,t),this._magFilter=t}},mipSamples:{get:function(){return 
this._anisoSamples},set:function(t){var e=this._anisoSamples;if(this._anisoSamples=0|Math.max(t,1),e!==this._anisoSamples){var n=this.gl.getExtension("EXT_texture_filter_anisotropic");n&&this.gl.texParameterf(this.gl.TEXTURE_2D,n.TEXTURE_MAX_ANISOTROPY_EXT,this._anisoSamples)}return this._anisoSamples}},wrapS:{get:function(){return this._wrapS},set:function(t){if(this.bind(),l.indexOf(t)<0)throw new Error("gl-texture2d: Unknown wrap mode "+t);return this.gl.texParameteri(this.gl.TEXTURE_2D,this.gl.TEXTURE_WRAP_S,t),this._wrapS=t}},wrapT:{get:function(){return this._wrapT},set:function(t){if(this.bind(),l.indexOf(t)<0)throw new Error("gl-texture2d: Unknown wrap mode "+t);return this.gl.texParameteri(this.gl.TEXTURE_2D,this.gl.TEXTURE_WRAP_T,t),this._wrapT=t}},wrap:{get:function(){return this._wrapVector},set:function(t){if(Array.isArray(t)||(t=[t,t]),2!==t.length)throw new Error("gl-texture2d: Must specify wrap mode for rows and columns");for(var e=0;e<2;++e)if(l.indexOf(t[e])<0)throw new Error("gl-texture2d: Unknown wrap mode "+t);this._wrapS=t[0],this._wrapT=t[1];var n=this.gl;return this.bind(),n.texParameteri(n.TEXTURE_2D,n.TEXTURE_WRAP_S,this._wrapS),n.texParameteri(n.TEXTURE_2D,n.TEXTURE_WRAP_T,this._wrapT),t}},shape:{get:function(){return this._shapeVector},set:function(t){if(Array.isArray(t)){if(2!==t.length)throw new Error("gl-texture2d: Invalid texture shape")}else t=[0|t,0|t];return f(this,0|t[0],0|t[1]),[0|t[0],0|t[1]]}},width:{get:function(){return this._shape[0]},set:function(t){return f(this,t|=0,this._shape[1]),t}},height:{get:function(){return this._shape[1]},set:function(t){return t|=0,f(this,this._shape[0],t),t}}}),d.bind=function(t){var e=this.gl;return void 0!==t&&e.activeTexture(e.TEXTURE0+(0|t)),e.bindTexture(e.TEXTURE_2D,this.handle),void 0!==t?0|t:e.getParameter(e.ACTIVE_TEXTURE)-e.TEXTURE0},d.dispose=function(){this.gl.deleteTexture(this.handle)},d.generateMipmap=function(){this.bind(),this.gl.generateMipmap(this.gl.TEXTURE_2D);for(var t=Math.min(this._shape[0],this._shape[1]),e=0;t>0;++e,t>>>=1)this._mipLevels.indexOf(e)<0&&this._mipLevels.push(e)},d.setPixels=function(t,e,n,o){var s=this.gl;this.bind(),Array.isArray(e)?(o=n,n=0|e[1],e=0|e[0]):(e=e||0,n=n||0),o=o||0;var l=c(t)?t:t.raw;if(l)this._mipLevels.indexOf(o)<0?(s.texImage2D(s.TEXTURE_2D,0,this.format,this.format,this.type,l),this._mipLevels.push(o)):s.texSubImage2D(s.TEXTURE_2D,o,e,n,this.format,this.type,l);else{if(!(t.shape&&t.stride&&t.data))throw new Error("gl-texture2d: Unsupported data type");if(t.shape.length<2||e+t.shape[1]>this._shape[1]>>>o||n+t.shape[0]>this._shape[0]>>>o||e<0||n<0)throw new Error("gl-texture2d: Texture dimensions are out of bounds");!function(t,e,n,o,s,l,c,f){var h=f.dtype,d=f.shape.slice();if(d.length<2||d.length>3)throw new Error("gl-texture2d: Invalid ndarray, must be 2d or 3d");var g=0,v=0,m=p(d,f.stride.slice());if("float32"===h?g=t.FLOAT:"float64"===h?(g=t.FLOAT,m=!1,h="float32"):"uint8"===h?g=t.UNSIGNED_BYTE:(g=t.UNSIGNED_BYTE,m=!1,h="uint8"),2===d.length)v=t.LUMINANCE,d=[d[0],d[1],1],f=r(f.data,d,[f.stride[0],f.stride[1],1],f.offset);else{if(3!==d.length)throw new Error("gl-texture2d: Invalid shape for texture");if(1===d[2])v=t.ALPHA;else if(2===d[2])v=t.LUMINANCE_ALPHA;else if(3===d[2])v=t.RGB;else{if(4!==d[2])throw new Error("gl-texture2d: Invalid shape for pixel coords");v=t.RGBA}d[2]}if(v!==t.LUMINANCE&&v!==t.ALPHA||s!==t.LUMINANCE&&s!==t.ALPHA||(v=s),v!==s)throw new Error("gl-texture2d: Incompatible texture format for setPixels");var 
y=f.size,b=c.indexOf(o)<0;if(b&&c.push(o),g===l&&m)0===f.offset&&f.data.length===y?b?t.texImage2D(t.TEXTURE_2D,o,s,d[0],d[1],0,s,l,f.data):t.texSubImage2D(t.TEXTURE_2D,o,e,n,d[0],d[1],s,l,f.data):b?t.texImage2D(t.TEXTURE_2D,o,s,d[0],d[1],0,s,l,f.data.subarray(f.offset,f.offset+y)):t.texSubImage2D(t.TEXTURE_2D,o,e,n,d[0],d[1],s,l,f.data.subarray(f.offset,f.offset+y));else{var x;x=l===t.FLOAT?a.mallocFloat32(y):a.mallocUint8(y);var w=r(x,d,[d[2],d[2]*d[0],1]);g===t.FLOAT&&l===t.UNSIGNED_BYTE?u(w,f):i.assign(w,f),b?t.texImage2D(t.TEXTURE_2D,o,s,d[0],d[1],0,s,l,x.subarray(0,y)):t.texSubImage2D(t.TEXTURE_2D,o,e,n,d[0],d[1],s,l,x.subarray(0,y)),l===t.FLOAT?a.freeFloat32(x):a.freeUint8(x)}}(s,e,n,o,this.format,this.type,this._mipLevels,t)}}},3056:function(t){"use strict";t.exports=function(t,e,n){e?e.bind():t.bindBuffer(t.ELEMENT_ARRAY_BUFFER,null);var r=0|t.getParameter(t.MAX_VERTEX_ATTRIBS);if(n){if(n.length>r)throw new Error("gl-vao: Too many vertex attributes");for(var i=0;i1?0:Math.acos(s)};var r=n(5415),i=n(899),a=n(9305)},8827:function(t){t.exports=function(t,e){return t[0]=Math.ceil(e[0]),t[1]=Math.ceil(e[1]),t[2]=Math.ceil(e[2]),t}},7622:function(t){t.exports=function(t){var e=new Float32Array(3);return e[0]=t[0],e[1]=t[1],e[2]=t[2],e}},8782:function(t){t.exports=function(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t}},8501:function(t){t.exports=function(){var t=new Float32Array(3);return t[0]=0,t[1]=0,t[2]=0,t}},903:function(t){t.exports=function(t,e,n){var r=e[0],i=e[1],a=e[2],o=n[0],s=n[1],l=n[2];return t[0]=i*l-a*s,t[1]=a*o-r*l,t[2]=r*s-i*o,t}},5981:function(t,e,n){t.exports=n(8288)},8288:function(t){t.exports=function(t,e){var n=e[0]-t[0],r=e[1]-t[1],i=e[2]-t[2];return Math.sqrt(n*n+r*r+i*i)}},8629:function(t,e,n){t.exports=n(7979)},7979:function(t){t.exports=function(t,e,n){return t[0]=e[0]/n[0],t[1]=e[1]/n[1],t[2]=e[2]/n[2],t}},9305:function(t){t.exports=function(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}},154:function(t){t.exports=1e-6},4932:function(t,e,n){t.exports=function(t,e){var n=t[0],i=t[1],a=t[2],o=e[0],s=e[1],l=e[2];return Math.abs(n-o)<=r*Math.max(1,Math.abs(n),Math.abs(o))&&Math.abs(i-s)<=r*Math.max(1,Math.abs(i),Math.abs(s))&&Math.abs(a-l)<=r*Math.max(1,Math.abs(a),Math.abs(l))};var r=n(154)},5777:function(t){t.exports=function(t,e){return t[0]===e[0]&&t[1]===e[1]&&t[2]===e[2]}},3306:function(t){t.exports=function(t,e){return t[0]=Math.floor(e[0]),t[1]=Math.floor(e[1]),t[2]=Math.floor(e[2]),t}},7447:function(t,e,n){t.exports=function(t,e,n,i,a,o){var s,l;for(e||(e=3),n||(n=0),l=i?Math.min(i*e+n,t.length):t.length,s=n;s0&&(a=1/Math.sqrt(a),t[0]=e[0]*a,t[1]=e[1]*a,t[2]=e[2]*a),t}},6660:function(t){t.exports=function(t,e){e=e||1;var n=2*Math.random()*Math.PI,r=2*Math.random()-1,i=Math.sqrt(1-r*r)*e;return t[0]=Math.cos(n)*i,t[1]=Math.sin(n)*i,t[2]=r*e,t}},392:function(t){t.exports=function(t,e,n,r){var i=n[1],a=n[2],o=e[1]-i,s=e[2]-a,l=Math.sin(r),c=Math.cos(r);return t[0]=e[0],t[1]=i+o*c-s*l,t[2]=a+o*l+s*c,t}},3222:function(t){t.exports=function(t,e,n,r){var i=n[0],a=n[2],o=e[0]-i,s=e[2]-a,l=Math.sin(r),c=Math.cos(r);return t[0]=i+s*l+o*c,t[1]=e[1],t[2]=a+s*c-o*l,t}},3388:function(t){t.exports=function(t,e,n,r){var i=n[0],a=n[1],o=e[0]-i,s=e[1]-a,l=Math.sin(r),c=Math.cos(r);return t[0]=i+o*c-s*l,t[1]=a+o*l+s*c,t[2]=e[2],t}},1624:function(t){t.exports=function(t,e){return t[0]=Math.round(e[0]),t[1]=Math.round(e[1]),t[2]=Math.round(e[2]),t}},5685:function(t){t.exports=function(t,e,n){return 
t[0]=e[0]*n,t[1]=e[1]*n,t[2]=e[2]*n,t}},6722:function(t){t.exports=function(t,e,n,r){return t[0]=e[0]+n[0]*r,t[1]=e[1]+n[1]*r,t[2]=e[2]+n[2]*r,t}},831:function(t){t.exports=function(t,e,n,r){return t[0]=e,t[1]=n,t[2]=r,t}},5294:function(t,e,n){t.exports=n(6403)},3303:function(t,e,n){t.exports=n(4337)},6403:function(t){t.exports=function(t,e){var n=e[0]-t[0],r=e[1]-t[1],i=e[2]-t[2];return n*n+r*r+i*i}},4337:function(t){t.exports=function(t){var e=t[0],n=t[1],r=t[2];return e*e+n*n+r*r}},8921:function(t,e,n){t.exports=n(911)},911:function(t){t.exports=function(t,e,n){return t[0]=e[0]-n[0],t[1]=e[1]-n[1],t[2]=e[2]-n[2],t}},9908:function(t){t.exports=function(t,e,n){var r=e[0],i=e[1],a=e[2];return t[0]=r*n[0]+i*n[3]+a*n[6],t[1]=r*n[1]+i*n[4]+a*n[7],t[2]=r*n[2]+i*n[5]+a*n[8],t}},3255:function(t){t.exports=function(t,e,n){var r=e[0],i=e[1],a=e[2],o=n[3]*r+n[7]*i+n[11]*a+n[15];return o=o||1,t[0]=(n[0]*r+n[4]*i+n[8]*a+n[12])/o,t[1]=(n[1]*r+n[5]*i+n[9]*a+n[13])/o,t[2]=(n[2]*r+n[6]*i+n[10]*a+n[14])/o,t}},6568:function(t){t.exports=function(t,e,n){var r=e[0],i=e[1],a=e[2],o=n[0],s=n[1],l=n[2],c=n[3],u=c*r+s*a-l*i,f=c*i+l*r-o*a,h=c*a+o*i-s*r,d=-o*r-s*i-l*a;return t[0]=u*c+d*-o+f*-l-h*-s,t[1]=f*c+d*-s+h*-o-u*-l,t[2]=h*c+d*-l+u*-s-f*-o,t}},3433:function(t){t.exports=function(t,e,n){return t[0]=e[0]+n[0],t[1]=e[1]+n[1],t[2]=e[2]+n[2],t[3]=e[3]+n[3],t}},1413:function(t){t.exports=function(t){var e=new Float32Array(4);return e[0]=t[0],e[1]=t[1],e[2]=t[2],e[3]=t[3],e}},3470:function(t){t.exports=function(t,e){return t[0]=e[0],t[1]=e[1],t[2]=e[2],t[3]=e[3],t}},5313:function(t){t.exports=function(){var t=new Float32Array(4);return t[0]=0,t[1]=0,t[2]=0,t[3]=0,t}},5446:function(t){t.exports=function(t,e){var n=e[0]-t[0],r=e[1]-t[1],i=e[2]-t[2],a=e[3]-t[3];return Math.sqrt(n*n+r*r+i*i+a*a)}},205:function(t){t.exports=function(t,e,n){return t[0]=e[0]/n[0],t[1]=e[1]/n[1],t[2]=e[2]/n[2],t[3]=e[3]/n[3],t}},4242:function(t){t.exports=function(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]+t[3]*e[3]}},5680:function(t){t.exports=function(t,e,n,r){var i=new Float32Array(4);return i[0]=t,i[1]=e,i[2]=n,i[3]=r,i}},4020:function(t,e,n){t.exports={create:n(5313),clone:n(1413),fromValues:n(5680),copy:n(3470),set:n(6453),add:n(3433),subtract:n(2705),multiply:n(746),divide:n(205),min:n(2170),max:n(3030),scale:n(5510),scaleAndAdd:n(4224),distance:n(5446),squaredDistance:n(1542),length:n(8177),squaredLength:n(9037),negate:n(6459),inverse:n(8057),normalize:n(381),dot:n(4242),lerp:n(8746),random:n(3770),transformMat4:n(6342),transformQuat:n(5022)}},8057:function(t){t.exports=function(t,e){return t[0]=1/e[0],t[1]=1/e[1],t[2]=1/e[2],t[3]=1/e[3],t}},8177:function(t){t.exports=function(t){var e=t[0],n=t[1],r=t[2],i=t[3];return Math.sqrt(e*e+n*n+r*r+i*i)}},8746:function(t){t.exports=function(t,e,n,r){var i=e[0],a=e[1],o=e[2],s=e[3];return t[0]=i+r*(n[0]-i),t[1]=a+r*(n[1]-a),t[2]=o+r*(n[2]-o),t[3]=s+r*(n[3]-s),t}},3030:function(t){t.exports=function(t,e,n){return t[0]=Math.max(e[0],n[0]),t[1]=Math.max(e[1],n[1]),t[2]=Math.max(e[2],n[2]),t[3]=Math.max(e[3],n[3]),t}},2170:function(t){t.exports=function(t,e,n){return t[0]=Math.min(e[0],n[0]),t[1]=Math.min(e[1],n[1]),t[2]=Math.min(e[2],n[2]),t[3]=Math.min(e[3],n[3]),t}},746:function(t){t.exports=function(t,e,n){return t[0]=e[0]*n[0],t[1]=e[1]*n[1],t[2]=e[2]*n[2],t[3]=e[3]*n[3],t}},6459:function(t){t.exports=function(t,e){return t[0]=-e[0],t[1]=-e[1],t[2]=-e[2],t[3]=-e[3],t}},381:function(t){t.exports=function(t,e){var n=e[0],r=e[1],i=e[2],a=e[3],o=n*n+r*r+i*i+a*a;return 
o>0&&(o=1/Math.sqrt(o),t[0]=n*o,t[1]=r*o,t[2]=i*o,t[3]=a*o),t}},3770:function(t,e,n){var r=n(381),i=n(5510);t.exports=function(t,e){return e=e||1,t[0]=Math.random(),t[1]=Math.random(),t[2]=Math.random(),t[3]=Math.random(),r(t,t),i(t,t,e),t}},5510:function(t){t.exports=function(t,e,n){return t[0]=e[0]*n,t[1]=e[1]*n,t[2]=e[2]*n,t[3]=e[3]*n,t}},4224:function(t){t.exports=function(t,e,n,r){return t[0]=e[0]+n[0]*r,t[1]=e[1]+n[1]*r,t[2]=e[2]+n[2]*r,t[3]=e[3]+n[3]*r,t}},6453:function(t){t.exports=function(t,e,n,r,i){return t[0]=e,t[1]=n,t[2]=r,t[3]=i,t}},1542:function(t){t.exports=function(t,e){var n=e[0]-t[0],r=e[1]-t[1],i=e[2]-t[2],a=e[3]-t[3];return n*n+r*r+i*i+a*a}},9037:function(t){t.exports=function(t){var e=t[0],n=t[1],r=t[2],i=t[3];return e*e+n*n+r*r+i*i}},2705:function(t){t.exports=function(t,e,n){return t[0]=e[0]-n[0],t[1]=e[1]-n[1],t[2]=e[2]-n[2],t[3]=e[3]-n[3],t}},6342:function(t){t.exports=function(t,e,n){var r=e[0],i=e[1],a=e[2],o=e[3];return t[0]=n[0]*r+n[4]*i+n[8]*a+n[12]*o,t[1]=n[1]*r+n[5]*i+n[9]*a+n[13]*o,t[2]=n[2]*r+n[6]*i+n[10]*a+n[14]*o,t[3]=n[3]*r+n[7]*i+n[11]*a+n[15]*o,t}},5022:function(t){t.exports=function(t,e,n){var r=e[0],i=e[1],a=e[2],o=n[0],s=n[1],l=n[2],c=n[3],u=c*r+s*a-l*i,f=c*i+l*r-o*a,h=c*a+o*i-s*r,d=-o*r-s*i-l*a;return t[0]=u*c+d*-o+f*-l-h*-s,t[1]=f*c+d*-s+h*-o-u*-l,t[2]=h*c+d*-l+u*-s-f*-o,t[3]=e[3],t}},9365:function(t,e,n){var r=n(8096),i=n(7896);t.exports=function(t){for(var e=Array.isArray(t)?t:r(t),n=0;n0)continue;n=t.slice(0,1).join("")}return F(n),O+=n.length,(S=S.slice(n.length)).length}}function Y(){return/[^a-fA-F0-9]/.test(e)?(F(S.join("")),A=l,T):(S.push(e),n=e,T+1)}function $(){return"."===e||/[eE]/.test(e)?(S.push(e),A=g,n=e,T+1):"x"===e&&1===S.length&&"0"===S[0]?(A=w,S.push(e),n=e,T+1):/[^\d]/.test(e)?(F(S.join("")),A=l,T):(S.push(e),n=e,T+1)}function X(){return"f"===e&&(S.push(e),n=e,T+=1),/[eE]/.test(e)?(S.push(e),n=e,T+1):("-"!==e&&"+"!==e||!/[eE]/.test(n))&&/[^\d]/.test(e)?(F(S.join("")),A=l,T):(S.push(e),n=e,T+1)}function K(){if(/[^\d\w_]/.test(e)){var t=S.join("");return A=j[t]?y:N[t]?m:v,F(S.join("")),A=l,T}return S.push(e),n=e,T+1}};var r=n(399),i=n(9746),a=n(9525),o=n(9458),s=n(3585),l=999,c=9999,u=0,f=1,h=2,d=3,p=4,g=5,v=6,m=7,y=8,b=9,x=10,w=11,_=["block-comment","line-comment","preprocessor","operator","integer","float","ident","builtin","keyword","whitespace","eof","integer"]},3585:function(t,e,n){var 
r=n(9525);r=r.slice().filter((function(t){return!/^(gl\_|texture)/.test(t)})),t.exports=r.concat(["gl_VertexID","gl_InstanceID","gl_Position","gl_PointSize","gl_FragCoord","gl_FrontFacing","gl_FragDepth","gl_PointCoord","gl_MaxVertexAttribs","gl_MaxVertexUniformVectors","gl_MaxVertexOutputVectors","gl_MaxFragmentInputVectors","gl_MaxVertexTextureImageUnits","gl_MaxCombinedTextureImageUnits","gl_MaxTextureImageUnits","gl_MaxFragmentUniformVectors","gl_MaxDrawBuffers","gl_MinProgramTexelOffset","gl_MaxProgramTexelOffset","gl_DepthRangeParameters","gl_DepthRange","trunc","round","roundEven","isnan","isinf","floatBitsToInt","floatBitsToUint","intBitsToFloat","uintBitsToFloat","packSnorm2x16","unpackSnorm2x16","packUnorm2x16","unpackUnorm2x16","packHalf2x16","unpackHalf2x16","outerProduct","transpose","determinant","inverse","texture","textureSize","textureProj","textureLod","textureOffset","texelFetch","texelFetchOffset","textureProjOffset","textureLodOffset","textureProjLod","textureProjLodOffset","textureGrad","textureGradOffset","textureProjGrad","textureProjGradOffset"])},9525:function(t){t.exports=["abs","acos","all","any","asin","atan","ceil","clamp","cos","cross","dFdx","dFdy","degrees","distance","dot","equal","exp","exp2","faceforward","floor","fract","gl_BackColor","gl_BackLightModelProduct","gl_BackLightProduct","gl_BackMaterial","gl_BackSecondaryColor","gl_ClipPlane","gl_ClipVertex","gl_Color","gl_DepthRange","gl_DepthRangeParameters","gl_EyePlaneQ","gl_EyePlaneR","gl_EyePlaneS","gl_EyePlaneT","gl_Fog","gl_FogCoord","gl_FogFragCoord","gl_FogParameters","gl_FragColor","gl_FragCoord","gl_FragData","gl_FragDepth","gl_FragDepthEXT","gl_FrontColor","gl_FrontFacing","gl_FrontLightModelProduct","gl_FrontLightProduct","gl_FrontMaterial","gl_FrontSecondaryColor","gl_LightModel","gl_LightModelParameters","gl_LightModelProducts","gl_LightProducts","gl_LightSource","gl_LightSourceParameters","gl_MaterialParameters","gl_MaxClipPlanes","gl_MaxCombinedTextureImageUnits","gl_MaxDrawBuffers","gl_MaxFragmentUniformComponents","gl_MaxLights","gl_MaxTextureCoords","gl_MaxTextureImageUnits","gl_MaxTextureUnits","gl_MaxVaryingFloats","gl_MaxVertexAttribs","gl_MaxVertexTextureImageUnits","gl_MaxVertexUniformComponents","gl_ModelViewMatrix","gl_ModelViewMatrixInverse","gl_ModelViewMatrixInverseTranspose","gl_ModelViewMatrixTranspose","gl_ModelViewProjectionMatrix","gl_ModelViewProjectionMatrixInverse","gl_ModelViewProjectionMatrixInverseTranspose","gl_ModelViewProjectionMatrixTranspose","gl_MultiTexCoord0","gl_MultiTexCoord1","gl_MultiTexCoord2","gl_MultiTexCoord3","gl_MultiTexCoord4","gl_MultiTexCoord5","gl_MultiTexCoord6","gl_MultiTexCoord7","gl_Normal","gl_NormalMatrix","gl_NormalScale","gl_ObjectPlaneQ","gl_ObjectPlaneR","gl_ObjectPlaneS","gl_ObjectPlaneT","gl_Point","gl_PointCoord","gl_PointParameters","gl_PointSize","gl_Position","gl_ProjectionMatrix","gl_ProjectionMatrixInverse","gl_ProjectionMatrixInverseTranspose","gl_ProjectionMatrixTranspose","gl_SecondaryColor","gl_TexCoord","gl_TextureEnvColor","gl_TextureMatrix","gl_TextureMatrixInverse","gl_TextureMatrixInverseTranspose","gl_TextureMatrixTranspose","gl_Vertex","greaterThan","greaterThanEqual","inversesqrt","length","lessThan","lessThanEqual","log","log2","matrixCompMult","max","min","mix","mod","normalize","not","notEqual","pow","radians","reflect","refract","sign","sin","smoothstep","sqrt","step","tan","texture2D","texture2DLod","texture2DProj","texture2DProjLod","textureCube","textureCubeLod","texture2DLodEXT","texture2DProjLodEXT","textur
eCubeLodEXT","texture2DGradEXT","texture2DProjGradEXT","textureCubeGradEXT"]},9458:function(t,e,n){var r=n(399);t.exports=r.slice().concat(["layout","centroid","smooth","case","mat2x2","mat2x3","mat2x4","mat3x2","mat3x3","mat3x4","mat4x2","mat4x3","mat4x4","uvec2","uvec3","uvec4","samplerCubeShadow","sampler2DArray","sampler2DArrayShadow","isampler2D","isampler3D","isamplerCube","isampler2DArray","usampler2D","usampler3D","usamplerCube","usampler2DArray","coherent","restrict","readonly","writeonly","resource","atomic_uint","noperspective","patch","sample","subroutine","common","partition","active","filter","image1D","image2D","image3D","imageCube","iimage1D","iimage2D","iimage3D","iimageCube","uimage1D","uimage2D","uimage3D","uimageCube","image1DArray","image2DArray","iimage1DArray","iimage2DArray","uimage1DArray","uimage2DArray","image1DShadow","image2DShadow","image1DArrayShadow","image2DArrayShadow","imageBuffer","iimageBuffer","uimageBuffer","sampler1DArray","sampler1DArrayShadow","isampler1D","isampler1DArray","usampler1D","usampler1DArray","isampler2DRect","usampler2DRect","samplerBuffer","isamplerBuffer","usamplerBuffer","sampler2DMS","isampler2DMS","usampler2DMS","sampler2DMSArray","isampler2DMSArray","usampler2DMSArray"])},399:function(t){t.exports=["precision","highp","mediump","lowp","attribute","const","uniform","varying","break","continue","do","for","while","if","else","in","out","inout","float","int","uint","void","bool","true","false","discard","return","mat2","mat3","mat4","vec2","vec3","vec4","ivec2","ivec3","ivec4","bvec2","bvec3","bvec4","sampler1D","sampler2D","sampler3D","samplerCube","sampler1DShadow","sampler2DShadow","struct","asm","class","union","enum","typedef","template","this","packed","goto","switch","default","inline","noinline","volatile","public","static","extern","external","interface","long","short","double","half","fixed","unsigned","input","output","hvec2","hvec3","hvec4","dvec2","dvec3","dvec4","fvec2","fvec3","fvec4","sampler2DRect","sampler3DRect","sampler2DRectShadow","sizeof","cast","namespace","using"]},9746:function(t){t.exports=["<<=",">>=","++","--","<<",">>","<=",">=","==","!=","&&","||","+=","-=","*=","/=","%=","&=","^^","^=","|=","(",")","[","]",".","!","~","*","/","%","+","-","<",">","&","^","|","?",":","=",",",";","{","}"]},8096:function(t,e,n){var r=n(3193);t.exports=function(t,e){var n=r(e),i=[];return i=(i=i.concat(n(t))).concat(n(null))}},6832:function(t){t.exports=function(t){"string"===typeof t&&(t=[t]);for(var e=[].slice.call(arguments,1),n=[],r=0;r0;)for(var s=(t=o.pop()).adjacent,l=0;l<=n;++l){var c=s[l];if(c.boundary&&!(c.lastVisited<=-r)){for(var u=c.vertices,f=0;f<=n;++f){var h=u[f];i[f]=h<0?e:a[h]}var d=this.orient();if(d>0)return c;c.lastVisited=-r,0===d&&o.push(c)}}return null},u.walk=function(t,e){var n=this.vertices.length-1,r=this.dimension,i=this.vertices,a=this.tuple,o=e?this.interior.length*Math.random()|0:this.interior.length-1,s=this.interior[o];t:for(;!s.boundary;){for(var l=s.vertices,c=s.adjacent,u=0;u<=r;++u)a[u]=i[l[u]];for(s.lastVisited=n,u=0;u<=r;++u){var f=c[u];if(!(f.lastVisited>=n)){var h=a[u];a[u]=t;var d=this.orient();if(a[u]=h,d<0){s=f;continue t}f.boundary?f.lastVisited=-n:f.lastVisited=n}}return}return s},u.addPeaks=function(t,e){var n=this.vertices.length-1,r=this.dimension,i=this.vertices,l=this.tuple,c=this.interior,u=this.simplices,f=[e];e.lastVisited=n,e.vertices[e.vertices.indexOf(-1)]=n,e.boundary=!1,c.push(e);for(var h=[];f.length>0;){var 
d=(e=f.pop()).vertices,p=e.adjacent,g=d.indexOf(n);if(!(g<0))for(var v=0;v<=r;++v)if(v!==g){var m=p[v];if(m.boundary&&!(m.lastVisited>=n)){var y=m.vertices;if(m.lastVisited!==-n){for(var b=0,x=0;x<=r;++x)y[x]<0?(b=x,l[x]=t):l[x]=i[y[x]];if(this.orient()>0){y[b]=n,m.boundary=!1,c.push(m),f.push(m),m.lastVisited=n;continue}m.lastVisited=-n}var w=m.adjacent,_=d.slice(),k=p.slice(),T=new a(_,k,!0);u.push(T);var M=w.indexOf(e);if(!(M<0))for(w[M]=T,k[g]=m,_[v]=-1,k[v]=e,p[v]=T,T.flip(),x=0;x<=r;++x){var A=_[x];if(!(A<0||A===n)){for(var S=new Array(r-1),E=0,C=0;C<=r;++C){var P=_[C];P<0||C===x||(S[E++]=P)}h.push(new o(S,T,x))}}}}}for(h.sort(s),v=0;v+1=0?o[l++]=s[u]:c=1&u;if(c===(1&t)){var f=o[0];o[0]=o[1],o[1]=f}e.push(o)}}return e}},9014:function(t,e,n){"use strict";var r=n(5070);function i(t,e,n,r,i){this.mid=t,this.left=e,this.right=n,this.leftPoints=r,this.rightPoints=i,this.count=(e?e.count:0)+(n?n.count:0)+r.length}t.exports=function(t){return t&&0!==t.length?new m(v(t)):new m(null)};var a=i.prototype;function o(t,e){t.mid=e.mid,t.left=e.left,t.right=e.right,t.leftPoints=e.leftPoints,t.rightPoints=e.rightPoints,t.count=e.count}function s(t,e){var n=v(e);t.mid=n.mid,t.left=n.left,t.right=n.right,t.leftPoints=n.leftPoints,t.rightPoints=n.rightPoints,t.count=n.count}function l(t,e){var n=t.intervals([]);n.push(e),s(t,n)}function c(t,e){var n=t.intervals([]),r=n.indexOf(e);return r<0?0:(n.splice(r,1),s(t,n),1)}function u(t,e,n){for(var r=0;r=0&&t[r][1]>=e;--r){var i=n(t[r]);if(i)return i}}function h(t,e){for(var n=0;n>1],a=[],o=[],s=[];for(n=0;n3*(e+1)?l(this,t):this.left.insert(t):this.left=v([t]);else if(t[0]>this.mid)this.right?4*(this.right.count+1)>3*(e+1)?l(this,t):this.right.insert(t):this.right=v([t]);else{var n=r.ge(this.leftPoints,t,p),i=r.ge(this.rightPoints,t,g);this.leftPoints.splice(n,0,t),this.rightPoints.splice(i,0,t)}},a.remove=function(t){var e=this.count-this.leftPoints;if(t[1]3*(e-1)?c(this,t):2===(s=this.left.remove(t))?(this.left=null,this.count-=1,1):(1===s&&(this.count-=1),s):0;if(t[0]>this.mid)return this.right?4*(this.left?this.left.count:0)>3*(e-1)?c(this,t):2===(s=this.right.remove(t))?(this.right=null,this.count-=1,1):(1===s&&(this.count-=1),s):0;if(1===this.count)return this.leftPoints[0]===t?2:0;if(1===this.leftPoints.length&&this.leftPoints[0]===t){if(this.left&&this.right){for(var n=this,i=this.left;i.right;)n=i,i=i.right;if(n===this)i.right=this.right;else{var a=this.left,s=this.right;n.count-=i.count,n.right=i.left,i.left=a,i.right=s}o(this,i),this.count=(this.left?this.left.count:0)+(this.right?this.right.count:0)+this.leftPoints.length}else this.left?o(this,this.left):o(this,this.right);return 1}for(a=r.ge(this.leftPoints,t,p);athis.mid?this.right&&(n=this.right.queryPoint(t,e))?n:f(this.rightPoints,t,e):h(this.leftPoints,e);var n},a.queryInterval=function(t,e,n){var r;return tthis.mid&&this.right&&(r=this.right.queryInterval(t,e,n))?r:ethis.mid?f(this.rightPoints,t,n):h(this.leftPoints,n)};var y=m.prototype;y.insert=function(t){this.root?this.root.insert(t):this.root=new i(t[0],null,null,[t],[t])},y.remove=function(t){if(this.root){var e=this.root.remove(t);return 2===e&&(this.root=null),0!==e}return!1},y.queryPoint=function(t,e){if(this.root)return this.root.queryPoint(t,e)},y.queryInterval=function(t,e,n){if(t<=e&&this.root)return this.root.queryInterval(t,e,n)},Object.defineProperty(y,"count",{get:function(){return this.root?this.root.count:0}}),Object.defineProperty(y,"intervals",{get:function(){return 
this.root?this.root.intervals([]):[]}})},9560:function(t){"use strict";t.exports=function(t){for(var e=new Array(t),n=0;n13)&&32!==e&&133!==e&&160!==e&&5760!==e&&6158!==e&&(e<8192||e>8205)&&8232!==e&&8233!==e&&8239!==e&&8287!==e&&8288!==e&&12288!==e&&65279!==e)return!1;return!0}},3578:function(t){t.exports=function(t,e,n){return t*(1-n)+e*n}},7191:function(t,e,n){var r=n(4690),i=n(9823),a=n(7332),o=n(7787),s=n(7437),l=n(2142),c={length:n(4693),normalize:n(899),dot:n(9305),cross:n(903)},u=i(),f=i(),h=[0,0,0,0],d=[[0,0,0],[0,0,0],[0,0,0]],p=[0,0,0];function g(t,e,n,r,i){t[0]=e[0]*r+n[0]*i,t[1]=e[1]*r+n[1]*i,t[2]=e[2]*r+n[2]*i}t.exports=function(t,e,n,i,v,m){if(e||(e=[0,0,0]),n||(n=[0,0,0]),i||(i=[0,0,0]),v||(v=[0,0,0,1]),m||(m=[0,0,0,1]),!r(u,t))return!1;if(a(f,u),f[3]=0,f[7]=0,f[11]=0,f[15]=1,Math.abs(o(f)<1e-8))return!1;var y,b,x=u[3],w=u[7],_=u[11],k=u[12],T=u[13],M=u[14],A=u[15];if(0!==x||0!==w||0!==_){if(h[0]=x,h[1]=w,h[2]=_,h[3]=A,!s(f,f))return!1;l(f,f),function(t,e,n){var r=e[0],i=e[1],a=e[2],o=e[3];t[0]=n[0]*r+n[4]*i+n[8]*a+n[12]*o,t[1]=n[1]*r+n[5]*i+n[9]*a+n[13]*o,t[2]=n[2]*r+n[6]*i+n[10]*a+n[14]*o,t[3]=n[3]*r+n[7]*i+n[11]*a+n[15]*o}(v,h,f)}else v[0]=v[1]=v[2]=0,v[3]=1;if(e[0]=k,e[1]=T,e[2]=M,b=u,(y=d)[0][0]=b[0],y[0][1]=b[1],y[0][2]=b[2],y[1][0]=b[4],y[1][1]=b[5],y[1][2]=b[6],y[2][0]=b[8],y[2][1]=b[9],y[2][2]=b[10],n[0]=c.length(d[0]),c.normalize(d[0],d[0]),i[0]=c.dot(d[0],d[1]),g(d[1],d[1],d[0],1,-i[0]),n[1]=c.length(d[1]),c.normalize(d[1],d[1]),i[0]/=n[1],i[1]=c.dot(d[0],d[2]),g(d[2],d[2],d[0],1,-i[1]),i[2]=c.dot(d[1],d[2]),g(d[2],d[2],d[1],1,-i[2]),n[2]=c.length(d[2]),c.normalize(d[2],d[2]),i[1]/=n[2],i[2]/=n[2],c.cross(p,d[1],d[2]),c.dot(d[0],p)<0)for(var S=0;S<3;S++)n[S]*=-1,d[S][0]*=-1,d[S][1]*=-1,d[S][2]*=-1;return m[0]=.5*Math.sqrt(Math.max(1+d[0][0]-d[1][1]-d[2][2],0)),m[1]=.5*Math.sqrt(Math.max(1-d[0][0]+d[1][1]-d[2][2],0)),m[2]=.5*Math.sqrt(Math.max(1-d[0][0]-d[1][1]+d[2][2],0)),m[3]=.5*Math.sqrt(Math.max(1+d[0][0]+d[1][1]+d[2][2],0)),d[2][1]>d[1][2]&&(m[0]=-m[0]),d[0][2]>d[2][0]&&(m[1]=-m[1]),d[1][0]>d[0][1]&&(m[2]=-m[2]),!0}},4690:function(t){t.exports=function(t,e){var n=e[15];if(0===n)return!1;for(var r=1/n,i=0;i<16;i++)t[i]=e[i]*r;return!0}},7649:function(t,e,n){var r=n(1868),i=n(1102),a=n(7191),o=n(7787),s=n(1116),l=f(),c=f(),u=f();function f(){return{translate:h(),scale:h(1),skew:h(),perspective:[0,0,0,1],quaternion:[0,0,0,1]}}function h(t){return[t||0,t||0,t||0]}t.exports=function(t,e,n,f){if(0===o(e)||0===o(n))return!1;var h=a(e,l.translate,l.scale,l.skew,l.perspective,l.quaternion),d=a(n,c.translate,c.scale,c.skew,c.perspective,c.quaternion);return!(!h||!d)&&(r(u.translate,l.translate,c.translate,f),r(u.skew,l.skew,c.skew,f),r(u.scale,l.scale,c.scale,f),r(u.perspective,l.perspective,c.perspective,f),s(u.quaternion,l.quaternion,c.quaternion,f),i(t,u.translate,u.scale,u.skew,u.perspective,u.quaternion),!0)}},1102:function(t,e,n){var r={identity:n(9947),translate:n(998),multiply:n(104),create:n(9823),scale:n(3668),fromRotationTranslation:n(7280)},i=(r.create(),r.create());t.exports=function(t,e,n,a,o,s){return r.identity(t),r.fromRotationTranslation(t,s,e),t[3]=o[0],t[7]=o[1],t[11]=o[2],t[15]=o[3],r.identity(i),0!==a[2]&&(i[9]=a[2],r.multiply(t,t,i)),0!==a[1]&&(i[9]=0,i[8]=a[1],r.multiply(t,t,i)),0!==a[0]&&(i[8]=0,i[4]=a[0],r.multiply(t,t,i)),r.scale(t,t,n),t}},9298:function(t,e,n){"use strict";var r=n(5070),i=n(7649),a=n(7437),o=n(6109),s=n(7115),l=n(5240),c=n(3012),u=n(998),f=(n(3668),n(899)),h=[0,0,0];function 
d(t){this._components=t.slice(),this._time=[0],this.prevMatrix=t.slice(),this.nextMatrix=t.slice(),this.computedMatrix=t.slice(),this.computedInverse=t.slice(),this.computedEye=[0,0,0],this.computedUp=[0,0,0],this.computedCenter=[0,0,0],this.computedRadius=[0],this._limits=[-1/0,1/0]}t.exports=function(t){return new d((t=t||{}).matrix||[1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1])};var p=d.prototype;p.recalcMatrix=function(t){var e=this._time,n=r.le(e,t),o=this.computedMatrix;if(!(n<0)){var s=this._components;if(n===e.length-1)for(var l=16*n,c=0;c<16;++c)o[c]=s[l++];else{var u=e[n+1]-e[n],h=(l=16*n,this.prevMatrix),d=!0;for(c=0;c<16;++c)h[c]=s[l++];var p=this.nextMatrix;for(c=0;c<16;++c)p[c]=s[l++],d=d&&h[c]===p[c];if(u<1e-6||d)for(c=0;c<16;++c)o[c]=h[c];else i(o,h,p,(t-e[n])/u)}var g=this.computedUp;g[0]=o[1],g[1]=o[5],g[2]=o[9],f(g,g);var v=this.computedInverse;a(v,o);var m=this.computedEye,y=v[15];m[0]=v[12]/y,m[1]=v[13]/y,m[2]=v[14]/y;var b=this.computedCenter,x=Math.exp(this.computedRadius[0]);for(c=0;c<3;++c)b[c]=m[c]-o[2+4*c]*x}},p.idle=function(t){if(!(t1&&r(t[o[u-2]],t[o[u-1]],c)<=0;)u-=1,o.pop();for(o.push(l),u=s.length;u>1&&r(t[s[u-2]],t[s[u-1]],c)>=0;)u-=1,s.pop();s.push(l)}n=new Array(s.length+o.length-2);for(var f=0,h=(i=0,o.length);i0;--d)n[f++]=s[d];return n};var r=n(417)[3]},6145:function(t,e,n){"use strict";t.exports=function(t,e){e||(e=t,t=window);var n=0,i=0,a=0,o={shift:!1,alt:!1,control:!1,meta:!1},s=!1;function l(t){var e=!1;return"altKey"in t&&(e=e||t.altKey!==o.alt,o.alt=!!t.altKey),"shiftKey"in t&&(e=e||t.shiftKey!==o.shift,o.shift=!!t.shiftKey),"ctrlKey"in t&&(e=e||t.ctrlKey!==o.control,o.control=!!t.ctrlKey),"metaKey"in t&&(e=e||t.metaKey!==o.meta,o.meta=!!t.metaKey),e}function c(t,s){var c=r.x(s),u=r.y(s);"buttons"in s&&(t=0|s.buttons),(t!==n||c!==i||u!==a||l(s))&&(n=0|t,i=c||0,a=u||0,e&&e(n,i,a,o))}function u(t){c(0,t)}function f(){(n||i||a||o.shift||o.alt||o.meta||o.control)&&(i=a=0,n=0,o.shift=o.alt=o.control=o.meta=!1,e&&e(0,0,0,o))}function h(t){l(t)&&e&&e(n,i,a,o)}function d(t){0===r.buttons(t)?c(0,t):c(n,t)}function p(t){c(n|r.buttons(t),t)}function g(t){c(n&~r.buttons(t),t)}function v(){s||(s=!0,t.addEventListener("mousemove",d),t.addEventListener("mousedown",p),t.addEventListener("mouseup",g),t.addEventListener("mouseleave",u),t.addEventListener("mouseenter",u),t.addEventListener("mouseout",u),t.addEventListener("mouseover",u),t.addEventListener("blur",f),t.addEventListener("keyup",h),t.addEventListener("keydown",h),t.addEventListener("keypress",h),t!==window&&(window.addEventListener("blur",f),window.addEventListener("keyup",h),window.addEventListener("keydown",h),window.addEventListener("keypress",h)))}v();var m={element:t};return Object.defineProperties(m,{enabled:{get:function(){return s},set:function(e){e?v():s&&(s=!1,t.removeEventListener("mousemove",d),t.removeEventListener("mousedown",p),t.removeEventListener("mouseup",g),t.removeEventListener("mouseleave",u),t.removeEventListener("mouseenter",u),t.removeEventListener("mouseout",u),t.removeEventListener("mouseover",u),t.removeEventListener("blur",f),t.removeEventListener("keyup",h),t.removeEventListener("keydown",h),t.removeEventListener("keypress",h),t!==window&&(window.removeEventListener("blur",f),window.removeEventListener("keyup",h),window.removeEventListener("keydown",h),window.removeEventListener("keypress",h)))},enumerable:!0},buttons:{get:function(){return n},enumerable:!0},x:{get:function(){return i},enumerable:!0},y:{get:function(){return a},enumerable:!0},mods:{get:function(){return 
[Generated build artifact: this hunk spans a minified webpack JavaScript bundle. The recognizable contents are third-party visualization dependencies — mouse-event helpers, ndarray, typedarray-pool, bit-twiddle, surface-nets/zero-crossings contour extraction, orbit/turntable camera controllers, sprintf-js, weakmap-shim, the buffer polyfill, is-mobile, and d3-sankey-circular — with no hand-written project code. The minified source is not meaningfully reviewable line by line, and the hunk's file header and diff markers are not present in this span.]
"+e);return n}function A(t,e){return e(t)}var S=25,E=10,C=.3;function P(){var t,e,n=0,a=0,o=1,l=1,c=24,u=_,h=f,A=k,P=T,L=32,D=2,R=null;function N(){var f={nodes:A.apply(null,arguments),links:P.apply(null,arguments)};!function(t){t.nodes.forEach((function(t,e){t.index=e,t.sourceLinks=[],t.targetLinks=[]}));var e=(0,i.UI)(t.nodes,u);t.links.forEach((function(t,n){t.index=n;var r=t.source,i=t.target;"object"!==("undefined"===typeof r?"undefined":p(r))&&(r=t.source=M(e,r)),"object"!==("undefined"===typeof i?"undefined":p(i))&&(i=t.target=M(e,i)),r.sourceLinks.push(t),i.targetLinks.push(t)}))}(f),function(t,e,n){var r=0;if(null===n){for(var i=[],a=0;a0;--p)v(d*=.99,f),_();function g(i){if(e){var u=1/0;h.forEach((function(t){var n=l*e/(t.length+1);u=n0?e+S+E:e,bottom:n=n>0?n+S+E:n,left:a=a>0?a+S+E:a,right:i=i>0?i+S+E:i}}(s),p=function(t,e){var i=(0,r.Fp)(t.nodes,(function(t){return t.column})),s=o-n,u=l-a,f=s+e.right+e.left,h=u+e.top+e.bottom,d=s/f,p=u/h;return n=n*d+e.left,o=0==e.right?o:o*d,a=a*p+e.top,l*=p,t.nodes.forEach((function(t){t.x0=n+t.column*((o-n-c)/i),t.x1=t.x0+c})),p}(s,d);f*=p,s.links.forEach((function(t){t.width=t.value*f})),h.forEach((function(t){var e=t.length;t.forEach((function(t,n){t.depth==h.length-1&&1==e||0==t.depth&&1==e?(t.y0=l/2-t.value*f,t.y1=t.y0+t.value*f):t.partOfCycle?0==I(t,i)?(t.y0=l/2+n,t.y1=t.y0+t.value*f):"top"==t.circularLinkType?(t.y0=a+n,t.y1=t.y0+t.value*f):(t.y0=l-t.value*f-n,t.y1=t.y0+t.value*f):0==d.top||0==d.bottom?(t.y0=(l-a)/e*n,t.y1=t.y0+t.value*f):(t.y0=(l-a)/2-e/2+n,t.y1=t.y0+t.value*f)}))}))}function v(t,e){var n=h.length;h.forEach((function(i){var a=i.length,o=i[0].depth;i.forEach((function(i){var s;if(i.sourceLinks.length||i.targetLinks.length)if(i.partOfCycle&&I(i,e)>0);else if(0==o&&1==a)s=i.y1-i.y0,i.y0=l/2-s/2,i.y1=l/2+s/2;else if(o==n-1&&1==a)s=i.y1-i.y0,i.y0=l/2-s/2,i.y1=l/2+s/2;else{var c=(0,r.J6)(i.sourceLinks,w),u=(0,r.J6)(i.targetLinks,x),f=((c&&u?(c+u)/2:c||u)-b(i))*t;i.y0+=f,i.y1+=f}}))}))}function _(){h.forEach((function(e){var n,r,i,o=a,s=e.length;for(e.sort(m),i=0;i0&&(n.y0+=r,n.y1+=r),o=n.y1+t;if((r=o-t-l)>0)for(o=n.y0-=r,n.y1-=r,i=s-2;i>=0;--i)(r=(n=e[i]).y1+t-o)>0&&(n.y0-=r,n.y1-=r),o=n.y0}))}}(f,L,u),j(f);for(var d=0;d<4;d++)$(f,l,u),X(f,0,u),W(f,a,l,u),$(f,l,u),X(f,0,u);return function(t,e,n){var i=t.nodes,a=t.links,o=!1,s=!1;if(a.forEach((function(t){"top"==t.circularLinkType?o=!0:"bottom"==t.circularLinkType&&(s=!0)})),0==o||0==s){var l=(0,r.VV)(i,(function(t){return t.y0})),c=(n-e)/((0,r.Fp)(i,(function(t){return t.y1}))-l);i.forEach((function(t){var e=(t.y1-t.y0)*c;t.y0=(t.y0-l)*c,t.y1=t.y0+e})),a.forEach((function(t){t.y0=(t.y0-l)*c,t.y1=(t.y1-l)*c,t.width=t.width*c}))}}(f,a,l),z(f,D,l,u),f}function j(t){t.nodes.forEach((function(t){t.sourceLinks.sort(v),t.targetLinks.sort(g)})),t.nodes.forEach((function(t){var e=t.y0,n=e,r=t.y1,i=r;t.sourceLinks.forEach((function(t){t.circular?(t.y0=r-t.width/2,r-=t.width):(t.y0=e+t.width/2,e+=t.width)})),t.targetLinks.forEach((function(t){t.circular?(t.y1=i-t.width/2,i-=t.width):(t.y1=n+t.width/2,n+=t.width)}))}))}return N.nodeId=function(t){return arguments.length?(u="function"===typeof t?t:d(t),N):u},N.nodeAlign=function(t){return arguments.length?(h="function"===typeof t?t:d(t),N):h},N.nodeWidth=function(t){return arguments.length?(c=+t,N):c},N.nodePadding=function(e){return arguments.length?(t=+e,N):t},N.nodes=function(t){return arguments.length?(A="function"===typeof t?t:d(t),N):A},N.links=function(t){return arguments.length?(P="function"===typeof 
t?t:d(t),N):P},N.size=function(t){return arguments.length?(n=a=0,o=+t[0],l=+t[1],N):[o-n,l-a]},N.extent=function(t){return arguments.length?(n=+t[0][0],o=+t[1][0],a=+t[0][1],l=+t[1][1],N):[[n,a],[o,l]]},N.iterations=function(t){return arguments.length?(L=+t,N):L},N.circularLinkGap=function(t){return arguments.length?(D=+t,N):D},N.nodePaddingRatio=function(t){return arguments.length?(e=+t,N):e},N.sortNodes=function(t){return arguments.length?(R=t,N):R},N.update=function(t){return O(t,u),j(t),t.links.forEach((function(t){t.circular&&(t.circularLinkType=t.y0+t.y11||i>1)}function R(t,e,n){return t.sort(N),t.forEach((function(r,i){var a,o,s=0;if(J(r,n)&&D(r))r.circularPathData.verticalBuffer=s+r.width/2;else{for(var l=0;lo.source.column)){var c=t[l].circularPathData.verticalBuffer+t[l].width/2+e;s=c>s?c:s}r.circularPathData.verticalBuffer=s+r.width/2}})),t}function z(t,e,n,i){var o=(0,r.VV)(t.links,(function(t){return t.source.y0}));t.links.forEach((function(t){t.circular&&(t.circularPathData={})})),R(t.links.filter((function(t){return"top"==t.circularLinkType})),e,i),R(t.links.filter((function(t){return"bottom"==t.circularLinkType})),e,i),t.links.forEach((function(r){if(r.circular){if(r.circularPathData.arcRadius=r.width+E,r.circularPathData.leftNodeBuffer=5,r.circularPathData.rightNodeBuffer=5,r.circularPathData.sourceWidth=r.source.x1-r.source.x0,r.circularPathData.sourceX=r.source.x0+r.circularPathData.sourceWidth,r.circularPathData.targetX=r.target.x0,r.circularPathData.sourceY=r.y0,r.circularPathData.targetY=r.y1,J(r,i)&&D(r))r.circularPathData.leftSmallArcRadius=E+r.width/2,r.circularPathData.leftLargeArcRadius=E+r.width/2,r.circularPathData.rightSmallArcRadius=E+r.width/2,r.circularPathData.rightLargeArcRadius=E+r.width/2,"bottom"==r.circularLinkType?(r.circularPathData.verticalFullExtent=r.source.y1+S+r.circularPathData.verticalBuffer,r.circularPathData.verticalLeftInnerExtent=r.circularPathData.verticalFullExtent-r.circularPathData.leftLargeArcRadius,r.circularPathData.verticalRightInnerExtent=r.circularPathData.verticalFullExtent-r.circularPathData.rightLargeArcRadius):(r.circularPathData.verticalFullExtent=r.source.y0-S-r.circularPathData.verticalBuffer,r.circularPathData.verticalLeftInnerExtent=r.circularPathData.verticalFullExtent+r.circularPathData.leftLargeArcRadius,r.circularPathData.verticalRightInnerExtent=r.circularPathData.verticalFullExtent+r.circularPathData.rightLargeArcRadius);else{var s=r.source.column,l=r.circularLinkType,c=t.links.filter((function(t){return t.source.column==s&&t.circularLinkType==l}));"bottom"==r.circularLinkType?c.sort(F):c.sort(j);var u=0;c.forEach((function(t,n){t.circularLinkID==r.circularLinkID&&(r.circularPathData.leftSmallArcRadius=E+r.width/2+u,r.circularPathData.leftLargeArcRadius=E+r.width/2+n*e+u),u+=t.width})),s=r.target.column,c=t.links.filter((function(t){return 
t.target.column==s&&t.circularLinkType==l})),"bottom"==r.circularLinkType?c.sort(U):c.sort(B),u=0,c.forEach((function(t,n){t.circularLinkID==r.circularLinkID&&(r.circularPathData.rightSmallArcRadius=E+r.width/2+u,r.circularPathData.rightLargeArcRadius=E+r.width/2+n*e+u),u+=t.width})),"bottom"==r.circularLinkType?(r.circularPathData.verticalFullExtent=Math.max(n,r.source.y1,r.target.y1)+S+r.circularPathData.verticalBuffer,r.circularPathData.verticalLeftInnerExtent=r.circularPathData.verticalFullExtent-r.circularPathData.leftLargeArcRadius,r.circularPathData.verticalRightInnerExtent=r.circularPathData.verticalFullExtent-r.circularPathData.rightLargeArcRadius):(r.circularPathData.verticalFullExtent=o-S-r.circularPathData.verticalBuffer,r.circularPathData.verticalLeftInnerExtent=r.circularPathData.verticalFullExtent+r.circularPathData.leftLargeArcRadius,r.circularPathData.verticalRightInnerExtent=r.circularPathData.verticalFullExtent+r.circularPathData.rightLargeArcRadius)}r.circularPathData.leftInnerExtent=r.circularPathData.sourceX+r.circularPathData.leftNodeBuffer,r.circularPathData.rightInnerExtent=r.circularPathData.targetX-r.circularPathData.rightNodeBuffer,r.circularPathData.leftFullExtent=r.circularPathData.sourceX+r.circularPathData.leftLargeArcRadius+r.circularPathData.leftNodeBuffer,r.circularPathData.rightFullExtent=r.circularPathData.targetX-r.circularPathData.rightLargeArcRadius-r.circularPathData.rightNodeBuffer}if(r.circular)r.path=function(t){return"top"==t.circularLinkType?"M"+t.circularPathData.sourceX+" "+t.circularPathData.sourceY+" L"+t.circularPathData.leftInnerExtent+" "+t.circularPathData.sourceY+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftSmallArcRadius+" 0 0 0 "+t.circularPathData.leftFullExtent+" "+(t.circularPathData.sourceY-t.circularPathData.leftSmallArcRadius)+" L"+t.circularPathData.leftFullExtent+" "+t.circularPathData.verticalLeftInnerExtent+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftLargeArcRadius+" 0 0 0 "+t.circularPathData.leftInnerExtent+" "+t.circularPathData.verticalFullExtent+" L"+t.circularPathData.rightInnerExtent+" "+t.circularPathData.verticalFullExtent+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightLargeArcRadius+" 0 0 0 "+t.circularPathData.rightFullExtent+" "+t.circularPathData.verticalRightInnerExtent+" L"+t.circularPathData.rightFullExtent+" "+(t.circularPathData.targetY-t.circularPathData.rightSmallArcRadius)+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightSmallArcRadius+" 0 0 0 "+t.circularPathData.rightInnerExtent+" "+t.circularPathData.targetY+" L"+t.circularPathData.targetX+" "+t.circularPathData.targetY:"M"+t.circularPathData.sourceX+" "+t.circularPathData.sourceY+" L"+t.circularPathData.leftInnerExtent+" "+t.circularPathData.sourceY+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftSmallArcRadius+" 0 0 1 "+t.circularPathData.leftFullExtent+" "+(t.circularPathData.sourceY+t.circularPathData.leftSmallArcRadius)+" L"+t.circularPathData.leftFullExtent+" "+t.circularPathData.verticalLeftInnerExtent+" A"+t.circularPathData.leftLargeArcRadius+" "+t.circularPathData.leftLargeArcRadius+" 0 0 1 "+t.circularPathData.leftInnerExtent+" "+t.circularPathData.verticalFullExtent+" L"+t.circularPathData.rightInnerExtent+" "+t.circularPathData.verticalFullExtent+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightLargeArcRadius+" 0 0 1 "+t.circularPathData.rightFullExtent+" 
"+t.circularPathData.verticalRightInnerExtent+" L"+t.circularPathData.rightFullExtent+" "+(t.circularPathData.targetY+t.circularPathData.rightSmallArcRadius)+" A"+t.circularPathData.rightLargeArcRadius+" "+t.circularPathData.rightSmallArcRadius+" 0 0 1 "+t.circularPathData.rightInnerExtent+" "+t.circularPathData.targetY+" L"+t.circularPathData.targetX+" "+t.circularPathData.targetY}(r);else{var f=(0,a.h5)().source((function(t){return[t.source.x0+(t.source.x1-t.source.x0),t.y0]})).target((function(t){return[t.target.x0,t.y1]}));r.path=f(r)}}))}function N(t,e){return H(t)==H(e)?"bottom"==t.circularLinkType?F(t,e):j(t,e):H(e)-H(t)}function j(t,e){return t.y0-e.y0}function F(t,e){return e.y0-t.y0}function B(t,e){return t.y1-e.y1}function U(t,e){return e.y1-t.y1}function H(t){return t.target.column-t.source.column}function V(t){return t.target.x0-t.source.x1}function q(t,e){var n=L(t),r=V(e)/Math.tan(n);return"up"==Z(t)?t.y1+r:t.y1-r}function G(t,e){var n=L(t),r=V(e)/Math.tan(n);return"up"==Z(t)?t.y1-r:t.y1+r}function W(t,e,n,r){t.links.forEach((function(i){if(!i.circular&&i.target.column-i.source.column>1){var a=i.source.column+1,o=i.target.column-1,s=1,l=o-a+1;for(s=1;a<=o;a++,s++)t.nodes.forEach((function(o){if(o.column==a){var c,u=s/(l+1),f=Math.pow(1-u,3),h=3*u*Math.pow(1-u,2),d=3*Math.pow(u,2)*(1-u),p=Math.pow(u,3),g=f*i.y0+h*i.y0+d*i.y1+p*i.y1,v=g-i.width/2,m=g+i.width/2;v>o.y0&&va.y0&&i.y0a.y0&&i.y1a.y1)&&Y(t,c,e,n)}))):(m>o.y0&&mo.y1)&&(c=m-o.y0+10,o=Y(o,c,e,n),t.nodes.forEach((function(t){A(t,r)!=A(o,r)&&t.column==o.column&&t.y0o.y1&&Y(t,c,e,n)})))}}))}}))}function Y(t,e,n,r){return t.y0+e>=n&&t.y1+e<=r&&(t.y0=t.y0+e,t.y1=t.y1+e,t.targetLinks.forEach((function(t){t.y1=t.y1+e})),t.sourceLinks.forEach((function(t){t.y0=t.y0+e}))),t}function $(t,e,n,r){t.nodes.forEach((function(i){r&&i.y+(i.y1-i.y0)>e&&(i.y=i.y-(i.y+(i.y1-i.y0)-e));var a=t.links.filter((function(t){return A(t.source,n)==A(i,n)})),o=a.length;o>1&&a.sort((function(t,e){if(!t.circular&&!e.circular){if(t.target.column==e.target.column)return t.y1-e.y1;if(!K(t,e))return t.y1-e.y1;if(t.target.column>e.target.column){var n=G(e,t);return t.y1-n}if(e.target.column>t.target.column)return G(t,e)-e.y1}return t.circular&&!e.circular?"top"==t.circularLinkType?-1:1:e.circular&&!t.circular?"top"==e.circularLinkType?1:-1:t.circular&&e.circular?t.circularLinkType===e.circularLinkType&&"top"==t.circularLinkType?t.target.column===e.target.column?t.target.y1-e.target.y1:e.target.column-t.target.column:t.circularLinkType===e.circularLinkType&&"bottom"==t.circularLinkType?t.target.column===e.target.column?e.target.y1-t.target.y1:t.target.column-e.target.column:"top"==t.circularLinkType?-1:1:void 0}));var s=i.y0;a.forEach((function(t){t.y0=s+t.width/2,s+=t.width})),a.forEach((function(t,e){if("bottom"==t.circularLinkType){for(var n=e+1,r=0;n1&&r.sort((function(t,e){if(!t.circular&&!e.circular){if(t.source.column==e.source.column)return t.y0-e.y0;if(!K(t,e))return t.y0-e.y0;if(e.source.column0?"up":"down"}function J(t,e){return A(t.source,e)==A(t.target,e)}},30838:function(t,e,n){"use strict";n.r(e),n.d(e,{sankey:function(){return _},sankeyCenter:function(){return c},sankeyJustify:function(){return l},sankeyLeft:function(){return o},sankeyLinkHorizontal:function(){return A},sankeyRight:function(){return s}});var r=n(33064),i=n(15140);function a(t){return t.target.depth}function o(t){return t.depth}function s(t,e){return e-1-t.height}function l(t,e){return t.sourceLinks.length?t.depth:e-1}function c(t){return 
t.targetLinks.length?t.depth:t.sourceLinks.length?(0,r.VV)(t.sourceLinks,a)-1:0}function u(t){return function(){return t}}function f(t,e){return d(t.source,e.source)||t.index-e.index}function h(t,e){return d(t.target,e.target)||t.index-e.index}function d(t,e){return t.y0-e.y0}function p(t){return t.value}function g(t){return(t.y0+t.y1)/2}function v(t){return g(t.source)*t.value}function m(t){return g(t.target)*t.value}function y(t){return t.index}function b(t){return t.nodes}function x(t){return t.links}function w(t,e){var n=t.get(e);if(!n)throw new Error("missing: "+e);return n}function _(){var t=0,e=0,n=1,a=1,o=24,s=8,c=y,_=l,k=b,T=x,M=32,A=2/3;function S(){var l={nodes:k.apply(null,arguments),links:T.apply(null,arguments)};return function(t){t.nodes.forEach((function(t,e){t.index=e,t.sourceLinks=[],t.targetLinks=[]}));var e=(0,i.UI)(t.nodes,c);t.links.forEach((function(t,n){t.index=n;var r=t.source,i=t.target;"object"!==typeof r&&(r=t.source=w(e,r)),"object"!==typeof i&&(i=t.target=w(e,i)),r.sourceLinks.push(t),i.targetLinks.push(t)}))}(l),function(t){t.nodes.forEach((function(t){t.value=Math.max((0,r.Sm)(t.sourceLinks,p),(0,r.Sm)(t.targetLinks,p))}))}(l),function(e){var r,i,a;for(r=e.nodes,i=[],a=0;r.length;++a,r=i,i=[])r.forEach((function(t){t.depth=a,t.sourceLinks.forEach((function(t){i.indexOf(t.target)<0&&i.push(t.target)}))}));for(r=e.nodes,i=[],a=0;r.length;++a,r=i,i=[])r.forEach((function(t){t.height=a,t.targetLinks.forEach((function(t){i.indexOf(t.source)<0&&i.push(t.source)}))}));var s=(n-t-o)/(a-1);e.nodes.forEach((function(e){e.x1=(e.x0=t+Math.max(0,Math.min(a-1,Math.floor(_.call(null,e,a))))*s)+o}))}(l),function(t){var n=(0,i.b1)().key((function(t){return t.x0})).sortKeys(r.j2).entries(t.nodes).map((function(t){return t.values}));c(),h();for(var o=1,l=M;l>0;--l)f(o*=.99),h(),u(o),h();function c(){var i=(0,r.Fp)(n,(function(t){return t.length})),o=A*(a-e)/(i-1);s>o&&(s=o);var l=(0,r.VV)(n,(function(t){return(a-e-(t.length-1)*s)/(0,r.Sm)(t,p)}));n.forEach((function(t){t.forEach((function(t,e){t.y1=(t.y0=e)+t.value*l}))})),t.links.forEach((function(t){t.width=t.value*l}))}function u(t){n.forEach((function(e){e.forEach((function(e){if(e.targetLinks.length){var n=((0,r.Sm)(e.targetLinks,v)/(0,r.Sm)(e.targetLinks,p)-g(e))*t;e.y0+=n,e.y1+=n}}))}))}function f(t){n.slice().reverse().forEach((function(e){e.forEach((function(e){if(e.sourceLinks.length){var n=((0,r.Sm)(e.sourceLinks,m)/(0,r.Sm)(e.sourceLinks,p)-g(e))*t;e.y0+=n,e.y1+=n}}))}))}function h(){n.forEach((function(t){var n,r,i,o=e,l=t.length;for(t.sort(d),i=0;i0&&(n.y0+=r,n.y1+=r),o=n.y1+s;if((r=o-s-a)>0)for(o=n.y0-=r,n.y1-=r,i=l-2;i>=0;--i)(r=(n=t[i]).y1+s-o)>0&&(n.y0-=r,n.y1-=r),o=n.y0}))}}(l),E(l),l}function E(t){t.nodes.forEach((function(t){t.sourceLinks.sort(h),t.targetLinks.sort(f)})),t.nodes.forEach((function(t){var e=t.y0,n=e;t.sourceLinks.forEach((function(t){t.y0=e+t.width/2,e+=t.width})),t.targetLinks.forEach((function(t){t.y1=n+t.width/2,n+=t.width}))}))}return S.update=function(t){return E(t),t},S.nodeId=function(t){return arguments.length?(c="function"===typeof t?t:u(t),S):c},S.nodeAlign=function(t){return arguments.length?(_="function"===typeof t?t:u(t),S):_},S.nodeWidth=function(t){return arguments.length?(o=+t,S):o},S.nodePadding=function(t){return arguments.length?(s=+t,S):s},S.nodes=function(t){return arguments.length?(k="function"===typeof t?t:u(t),S):k},S.links=function(t){return arguments.length?(T="function"===typeof t?t:u(t),S):T},S.size=function(r){return 
arguments.length?(t=e=0,n=+r[0],a=+r[1],S):[n-t,a-e]},S.extent=function(r){return arguments.length?(t=+r[0][0],n=+r[1][0],e=+r[0][1],a=+r[1][1],S):[[t,e],[n,a]]},S.iterations=function(t){return arguments.length?(M=+t,S):M},S}var k=n(45879);function T(t){return[t.source.x1,t.y0]}function M(t){return[t.target.x0,t.y1]}function A(){return(0,k.h5)().source(T).target(M)}},39898:function(t,e,n){var r,i;(function(){var a={version:"3.8.0"},o=[].slice,s=function(t){return o.call(t)},l=self.document;function c(t){return t&&(t.ownerDocument||t.document||t).documentElement}function u(t){return t&&(t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView)}if(l)try{s(l.documentElement.childNodes)[0].nodeType}catch(ma){s=function(t){for(var e=t.length,n=new Array(e);e--;)n[e]=t[e];return n}}if(Date.now||(Date.now=function(){return+new Date}),l)try{l.createElement("DIV").style.setProperty("opacity",0,"")}catch(ya){var f=this.Element.prototype,h=f.setAttribute,d=f.setAttributeNS,p=this.CSSStyleDeclaration.prototype,g=p.setProperty;f.setAttribute=function(t,e){h.call(this,t,e+"")},f.setAttributeNS=function(t,e,n){d.call(this,t,e,n+"")},p.setProperty=function(t,e,n){g.call(this,t,e+"",n)}}function v(t,e){return te?1:t>=e?0:NaN}function m(t){return null===t?NaN:+t}function y(t){return!isNaN(t)}function b(t){return{left:function(e,n,r,i){for(arguments.length<3&&(r=0),arguments.length<4&&(i=e.length);r>>1;t(e[a],n)<0?r=a+1:i=a}return r},right:function(e,n,r,i){for(arguments.length<3&&(r=0),arguments.length<4&&(i=e.length);r>>1;t(e[a],n)>0?i=a:r=a+1}return r}}}a.ascending=v,a.descending=function(t,e){return et?1:e>=t?0:NaN},a.min=function(t,e){var n,r,i=-1,a=t.length;if(1===arguments.length){for(;++i=r){n=r;break}for(;++ir&&(n=r)}else{for(;++i=r){n=r;break}for(;++ir&&(n=r)}return n},a.max=function(t,e){var n,r,i=-1,a=t.length;if(1===arguments.length){for(;++i=r){n=r;break}for(;++in&&(n=r)}else{for(;++i=r){n=r;break}for(;++in&&(n=r)}return n},a.extent=function(t,e){var n,r,i,a=-1,o=t.length;if(1===arguments.length){for(;++a=r){n=i=r;break}for(;++ar&&(n=r),i=r){n=i=r;break}for(;++ar&&(n=r),i1)return o/(l-1)},a.deviation=function(){var t=a.variance.apply(this,arguments);return t?Math.sqrt(t):t};var x=b(v);function w(t){return t.length}a.bisectLeft=x.left,a.bisect=a.bisectRight=x.right,a.bisector=function(t){return b(1===t.length?function(e,n){return v(t(e),n)}:t)},a.shuffle=function(t,e,n){(a=arguments.length)<3&&(n=t.length,a<2&&(e=0));for(var r,i,a=n-e;a;)i=Math.random()*a--|0,r=t[a+e],t[a+e]=t[i+e],t[i+e]=r;return t},a.permute=function(t,e){for(var n=e.length,r=new Array(n);n--;)r[n]=t[e[n]];return r},a.pairs=function(t){for(var e=0,n=t.length-1,r=t[0],i=new Array(n<0?0:n);e=0;)for(e=(r=t[i]).length;--e>=0;)n[--o]=r[e];return n};var _=Math.abs;function k(t,e){for(var n in e)Object.defineProperty(t.prototype,n,{value:e[n],enumerable:!1})}function T(){this._=Object.create(null)}a.range=function(t,e,n){if(arguments.length<3&&(n=1,arguments.length<2&&(e=t,t=0)),(e-t)/n===1/0)throw new Error("infinite range");var r,i=[],a=function(t){for(var e=1;t*e%1;)e*=10;return e}(_(n)),o=-1;if(t*=a,e*=a,(n*=a)<0)for(;(r=t+n*++o)>e;)i.push(r/a);else for(;(r=t+n*++o)=r.length)return e?e.call(n,a):t?a.sort(t):a;for(var l,c,u,f,h=-1,d=a.length,p=r[s++],g=new T;++h=r.length)return t;var n=[],a=i[e++];return t.forEach((function(t,r){n.push({key:t,values:s(r,e)})})),a?n.sort((function(t,e){return a(t.key,e.key)})):n}return n.map=function(t,e){return o(e,t,0)},n.entries=function(t){return 
s(o(a.map,t,0),0)},n.key=function(t){return r.push(t),n},n.sortKeys=function(t){return i[r.length-1]=t,n},n.sortValues=function(e){return t=e,n},n.rollup=function(t){return e=t,n},n},a.set=function(t){var e=new D;if(t)for(var n=0,r=t.length;n=0&&(r=t.slice(n+1),t=t.slice(0,n)),t)return arguments.length<2?this[t].on(r):this[t].on(r,e);if(2===arguments.length){if(null==e)for(t in this)this.hasOwnProperty(t)&&this[t].on(r,null);return this}},a.event=null,a.requote=function(t){return t.replace(G,"\\$&")};var G=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g,W={}.__proto__?function(t,e){t.__proto__=e}:function(t,e){for(var n in e)t[n]=e[n]};function Y(t){return W(t,Z),t}var $=function(t,e){return e.querySelector(t)},X=function(t,e){return e.querySelectorAll(t)},K=function(t,e){var n=t.matches||t[N(t,"matchesSelector")];return K=function(t,e){return n.call(t,e)},K(t,e)};"function"===typeof Sizzle&&($=function(t,e){return Sizzle(t,e)[0]||null},X=Sizzle,K=Sizzle.matchesSelector),a.selection=function(){return a.select(l.documentElement)};var Z=a.selection.prototype=[];function J(t){return"function"===typeof t?t:function(){return $(t,this)}}function Q(t){return"function"===typeof t?t:function(){return X(t,this)}}Z.select=function(t){var e,n,r,i,a=[];t=J(t);for(var o=-1,s=this.length;++o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),et.hasOwnProperty(n)?{space:et[n],local:t}:t}},Z.attr=function(t,e){if(arguments.length<2){if("string"===typeof t){var n=this.node();return(t=a.ns.qualify(t)).local?n.getAttributeNS(t.space,t.local):n.getAttribute(t)}for(e in t)this.each(nt(e,t[e]));return this}return this.each(nt(t,e))},Z.classed=function(t,e){if(arguments.length<2){if("string"===typeof t){var n=this.node(),r=(t=at(t)).length,i=-1;if(e=n.classList){for(;++i=0;)(n=r[i])&&(a&&a!==n.nextSibling&&a.parentNode.insertBefore(n,a),a=n);return this},Z.sort=function(t){t=pt.apply(this,arguments);for(var e=-1,n=this.length;++e0&&(t=t.slice(0,i));var l=bt.get(t);function c(){var e=this[r];e&&(this.removeEventListener(t,e,e.$),delete this[r])}return l&&(t=l,o=wt),i?e?function(){var i=o(e,s(arguments));c.call(this),this.addEventListener(t,this[r]=i,i.$=n),i._=e}:c:e?F:function(){var e,n=new RegExp("^__on([^.]+)"+a.requote(t)+"$");for(var r in this)if(e=r.match(n)){var i=this[r];this.removeEventListener(e[1],i,i.$),delete this[r]}}}a.selection.enter=vt,a.selection.enter.prototype=mt,mt.append=Z.append,mt.empty=Z.empty,mt.node=Z.node,mt.call=Z.call,mt.size=Z.size,mt.select=function(t){for(var e,n,r,i,a,o=[],s=-1,l=this.length;++s=e&&(e=i+1);!(o=s[e])&&++e1?It:t<-1?-It:Math.asin(t)}function Nt(t){return((t=Math.exp(t))+1/t)/2}var jt=Math.SQRT2;a.interpolateZoom=function(t,e){var n,r,i=t[0],a=t[1],o=t[2],s=e[0],l=e[1],c=e[2],u=s-i,f=l-a,h=u*u+f*f;if(h0&&(t=t.transition().duration(g)),t.call(_.event)}function S(){s&&s.domain(o.range().map((function(t){return(t-h.x)/h.k})).map(o.invert)),f&&f.domain(c.range().map((function(t){return(t-h.y)/h.k})).map(c.invert))}function E(t){v++||t({type:"zoomstart"})}function C(t){S(),t({type:"zoom",scale:h.k,translate:[h.x,h.y]})}function P(t){--v||(t({type:"zoomend"}),e=null)}function O(){var t=this,e=w.of(t,arguments),n=0,r=a.select(u(t)).on(y,(function(){n=1,M(a.mouse(t),i),C(e)})).on(b,(function(){r.on(y,null).on(b,null),o(n),P(e)})),i=k(a.mouse(t)),o=Tt(t);Ji.call(t),E(e)}function L(){var t,e=this,n=w.of(e,arguments),r={},o=0,s=".zoom-"+a.event.changedTouches[0].identifier,l="touchmove"+s,c="touchend"+s,u=[],f=a.select(e),d=Tt(e);function p(){var n=a.touches(e);return 
t=h.k,n.forEach((function(t){t.identifier in r&&(r[t.identifier]=k(t))})),n}function g(){var t=a.event.target;a.select(t).on(l,v).on(c,y),u.push(t);for(var n=a.event.changedTouches,s=0,f=n.length;s1){m=d[0];var b=d[1],x=m[0]-b[0],w=m[1]-b[1];o=x*x+w*w}}function v(){var s,l,c,u,f=a.touches(e);Ji.call(e);for(var h=0,d=f.length;h360?t-=360:t<0&&(t+=360),t<60?r+(i-r)*t/60:t<180?i:t<240?r+(i-r)*(240-t)/60:r}(t))}return t=isNaN(t)?0:(t%=360)<0?t+360:t,e=isNaN(e)||e<0?0:e>1?1:e,r=2*(n=n<0?0:n>1?1:n)-(i=n<=.5?n*(1+e):n+e-n*e),new oe(a(t+120),a(t),a(t-120))}function Wt(t,e,n){return this instanceof Wt?(this.h=+t,this.c=+e,void(this.l=+n)):arguments.length<2?t instanceof Wt?new Wt(t.h,t.c,t.l):ne(t instanceof Xt?t.l:(t=de((t=a.rgb(t)).r,t.g,t.b)).l,t.a,t.b):new Wt(t,e,n)}qt.brighter=function(t){return t=Math.pow(.7,arguments.length?t:1),new Vt(this.h,this.s,this.l/t)},qt.darker=function(t){return t=Math.pow(.7,arguments.length?t:1),new Vt(this.h,this.s,t*this.l)},qt.rgb=function(){return Gt(this.h,this.s,this.l)},a.hcl=Wt;var Yt=Wt.prototype=new Ht;function $t(t,e,n){return isNaN(t)&&(t=0),isNaN(e)&&(e=0),new Xt(n,Math.cos(t*=Dt)*e,Math.sin(t)*e)}function Xt(t,e,n){return this instanceof Xt?(this.l=+t,this.a=+e,void(this.b=+n)):arguments.length<2?t instanceof Xt?new Xt(t.l,t.a,t.b):t instanceof Wt?$t(t.h,t.c,t.l):de((t=oe(t)).r,t.g,t.b):new Xt(t,e,n)}Yt.brighter=function(t){return new Wt(this.h,this.c,Math.min(100,this.l+Kt*(arguments.length?t:1)))},Yt.darker=function(t){return new Wt(this.h,this.c,Math.max(0,this.l-Kt*(arguments.length?t:1)))},Yt.rgb=function(){return $t(this.h,this.c,this.l).rgb()},a.lab=Xt;var Kt=18,Zt=.95047,Jt=1,Qt=1.08883,te=Xt.prototype=new Ht;function ee(t,e,n){var r=(t+16)/116,i=r+e/500,a=r-n/200;return new oe(ae(3.2404542*(i=re(i)*Zt)-1.5371385*(r=re(r)*Jt)-.4985314*(a=re(a)*Qt)),ae(-.969266*i+1.8760108*r+.041556*a),ae(.0556434*i-.2040259*r+1.0572252*a))}function ne(t,e,n){return t>0?new Wt(Math.atan2(n,e)*Rt,Math.sqrt(e*e+n*n),t):new Wt(NaN,NaN,t)}function re(t){return t>.206893034?t*t*t:(t-4/29)/7.787037}function ie(t){return t>.008856?Math.pow(t,1/3):7.787037*t+4/29}function ae(t){return Math.round(255*(t<=.00304?12.92*t:1.055*Math.pow(t,1/2.4)-.055))}function oe(t,e,n){return this instanceof oe?(this.r=~~t,this.g=~~e,void(this.b=~~n)):arguments.length<2?t instanceof oe?new oe(t.r,t.g,t.b):fe(""+t,oe,Gt):new oe(t,e,n)}function se(t){return new oe(t>>16,t>>8&255,255&t)}function le(t){return se(t)+""}te.brighter=function(t){return new Xt(Math.min(100,this.l+Kt*(arguments.length?t:1)),this.a,this.b)},te.darker=function(t){return new Xt(Math.max(0,this.l-Kt*(arguments.length?t:1)),this.a,this.b)},te.rgb=function(){return ee(this.l,this.a,this.b)},a.rgb=oe;var ce=oe.prototype=new Ht;function ue(t){return t<16?"0"+Math.max(0,t).toString(16):Math.min(255,t).toString(16)}function fe(t,e,n){var r,i,a,o=0,s=0,l=0;if(r=/([a-z]+)\((.*)\)/.exec(t=t.toLowerCase()))switch(i=r[2].split(","),r[1]){case"hsl":return n(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return e(ge(i[0]),ge(i[1]),ge(i[2]))}return(a=ve.get(t))?e(a.r,a.g,a.b):(null==t||"#"!==t.charAt(0)||isNaN(a=parseInt(t.slice(1),16))||(4===t.length?(o=(3840&a)>>4,o|=o>>4,s=240&a,s|=s>>4,l=15&a,l|=l<<4):7===t.length&&(o=(16711680&a)>>16,s=(65280&a)>>8,l=255&a)),e(o,s,l))}function he(t,e,n){var r,i,a=Math.min(t/=255,e/=255,n/=255),o=Math.max(t,e,n),s=o-a,l=(o+a)/2;return s?(i=l<.5?s/(o+a):s/(2-o-a),r=t==o?(e-n)/s+(e0&&l<1?0:r),new Vt(r,i,l)}function de(t,e,n){var 
r=ie((.4124564*(t=pe(t))+.3575761*(e=pe(e))+.1804375*(n=pe(n)))/Zt),i=ie((.2126729*t+.7151522*e+.072175*n)/Jt);return Xt(116*i-16,500*(r-i),200*(i-ie((.0193339*t+.119192*e+.9503041*n)/Qt)))}function pe(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function ge(t){var e=parseFloat(t);return"%"===t.charAt(t.length-1)?Math.round(2.55*e):e}ce.brighter=function(t){t=Math.pow(.7,arguments.length?t:1);var e=this.r,n=this.g,r=this.b,i=30;return e||n||r?(e&&e=200&&e<300||304===e){try{t=n.call(i,c)}catch(ma){return void o.error.call(i,ma)}o.load.call(i,t)}else o.error.call(i,c)}return self.XDomainRequest&&!("withCredentials"in c)&&/^(http(s)?:)?\/\//.test(t)&&(c=new XDomainRequest),"onload"in c?c.onload=c.onerror=f:c.onreadystatechange=function(){c.readyState>3&&f()},c.onprogress=function(t){var e=a.event;a.event=t;try{o.progress.call(i,c)}finally{a.event=e}},i.header=function(t,e){return t=(t+"").toLowerCase(),arguments.length<2?l[t]:(null==e?delete l[t]:l[t]=e+"",i)},i.mimeType=function(t){return arguments.length?(e=null==t?null:t+"",i):e},i.responseType=function(t){return arguments.length?(u=t,i):u},i.response=function(t){return n=t,i},["get","post"].forEach((function(t){i[t]=function(){return i.send.apply(i,[t].concat(s(arguments)))}})),i.send=function(n,r,a){if(2===arguments.length&&"function"===typeof r&&(a=r,r=null),c.open(n,t,!0),null==e||"accept"in l||(l.accept=e+",*/*"),c.setRequestHeader)for(var s in l)c.setRequestHeader(s,l[s]);return null!=e&&c.overrideMimeType&&c.overrideMimeType(e),null!=u&&(c.responseType=u),null!=a&&i.on("error",a).on("load",(function(t){a(null,t)})),o.beforesend.call(i,c),c.send(null==r?null:r),i},i.abort=function(){return c.abort(),i},a.rebind(i,o,"on"),null==r?i:i.get(function(t){return 1===t.length?function(e,n){t(null==e?n:null)}:t}(r))}ve.forEach((function(t,e){ve.set(t,se(e))})),a.functor=me,a.xhr=ye(R),a.dsv=function(t,e){var n=new RegExp('["'+t+"\n]"),r=t.charCodeAt(0);function i(t,n,r){arguments.length<3&&(r=n,n=null);var i=be(t,e,null==n?a:o(n),r);return i.row=function(t){return arguments.length?i.response(null==(n=t)?a:o(t)):n},i}function a(t){return i.parse(t.responseText)}function o(t){return function(e){return i.parse(e.responseText,t)}}function s(e){return e.map(l).join(t)}function l(t){return n.test(t)?'"'+t.replace(/\"/g,'""')+'"':t}return i.parse=function(t,e){var n;return i.parseRows(t,(function(t,r){if(n)return n(t,r-1);var i=function(e){for(var n={},r=t.length,i=0;i=l)return o;if(i)return i=!1,a;var e=c;if(34===t.charCodeAt(e)){for(var n=e;n++24?(isFinite(e)&&(clearTimeout(ke),ke=setTimeout(Ae,e)),_e=0):(_e=1,Te(Ae))}function Se(){for(var t=Date.now(),e=xe;e;)t>=e.t&&e.c(t-e.t)&&(e.c=null),e=e.n;return t}function Ee(){for(var t,e=xe,n=1/0;e;)e.c?(e.t1&&(e=t[a[o-2]],n=t[a[o-1]],r=t[s],(n[0]-e[0])*(r[1]-e[1])-(n[1]-e[1])*(r[0]-e[0])<=0);)--o;a[o++]=s}return a.slice(0,o)}function Le(t,e){return t[0]-e[0]||t[1]-e[1]}a.timer=function(){Me.apply(this,arguments)},a.timer.flush=function(){Se(),Ee()},a.round=function(t,e){return e?Math.round(t*(e=Math.pow(10,e)))/e:Math.round(t)},a.geom={},a.geom.hull=function(t){var e=Ce,n=Pe;if(arguments.length)return r(t);function r(t){if(t.length<3)return[];var r,i=me(e),a=me(n),o=t.length,s=[],l=[];for(r=0;r=0;--r)d.push(t[s[c[r]][2]]);for(r=+f;rEt)s=s.L;else{if(!((i=a-Ke(s,o))>Et)){r>-Et?(e=s.P,n=s):i>-Et?(e=s,n=s.N):e=n=s;break}if(!s.R){e=s;break}s=s.R}var l=Ge(t);if(Fe.insert(e,l),e||n){if(e===n)return en(e),n=Ge(e.site),Fe.insert(l,n),l.edge=n.edge=an(e.site,l.site),tn(e),void 
tn(n);if(n){en(e),en(n);var c=e.site,u=c.x,f=c.y,h=t.x-u,d=t.y-f,p=n.site,g=p.x-u,v=p.y-f,m=2*(h*v-d*g),y=h*h+d*d,b=g*g+v*v,x={x:(v*y-d*b)/m+u,y:(h*b-g*y)/m+f};sn(n.edge,c,p,x),l.edge=an(c,t,null,x),n.edge=an(t,p,null,x),tn(e),tn(n)}else l.edge=an(e.site,l.site)}}function Xe(t,e){var n=t.site,r=n.x,i=n.y,a=i-e;if(!a)return r;var o=t.P;if(!o)return-1/0;var s=(n=o.site).x,l=n.y,c=l-e;if(!c)return s;var u=s-r,f=1/a-1/c,h=u/c;return f?(-h+Math.sqrt(h*h-2*f*(u*u/(-2*c)-l+c/2+i-a/2)))/f+r:(r+s)/2}function Ke(t,e){var n=t.N;if(n)return Xe(n,e);var r=t.site;return r.y===e?r.x:1/0}function Ze(t){this.site=t,this.edges=[]}function Je(t,e){return e.angle-t.angle}function Qe(){un(this),this.x=this.y=this.arc=this.site=this.cy=null}function tn(t){var e=t.P,n=t.N;if(e&&n){var r=e.site,i=t.site,a=n.site;if(r!==a){var o=i.x,s=i.y,l=r.x-o,c=r.y-s,u=a.x-o,f=2*(l*(v=a.y-s)-c*u);if(!(f>=-Ct)){var h=l*l+c*c,d=u*u+v*v,p=(v*h-c*d)/f,g=(l*d-u*h)/f,v=g+s,m=Ve.pop()||new Qe;m.arc=t,m.site=i,m.x=p+o,m.y=v+Math.sqrt(p*p+g*g),m.cy=v,t.circle=m;for(var y=null,b=Ue._;b;)if(m.y=s)return;if(h>p){if(a){if(a.y>=c)return}else a={x:v,y:l};n={x:v,y:c}}else{if(a){if(a.y1)if(h>p){if(a){if(a.y>=c)return}else a={x:(l-i)/r,y:l};n={x:(c-i)/r,y:c}}else{if(a){if(a.y=s)return}else a={x:o,y:r*o+i};n={x:s,y:r*s+i}}else{if(a){if(a.x0)){if(e/=h,h<0){if(e0){if(e>f)return;e>u&&(u=e)}if(e=i-l,h||!(e<0)){if(e/=h,h<0){if(e>f)return;e>u&&(u=e)}else if(h>0){if(e0)){if(e/=d,d<0){if(e0){if(e>f)return;e>u&&(u=e)}if(e=a-c,d||!(e<0)){if(e/=d,d<0){if(e>f)return;e>u&&(u=e)}else if(d>0){if(e0&&(t.a={x:l+u*h,y:c+u*d}),f<1&&(t.b={x:l+f*h,y:c+f*d}),t}}}}}),l=o.length;l--;)(!nn(e=o[l],t)||!s(e)||_(e.a.x-e.b.x)Et||_(i-n)>Et)&&(s.splice(o,0,new ln(on(a.site,u,_(r-f)Et?{x:f,y:_(e-f)Et?{x:_(n-p)Et?{x:h,y:_(e-h)Et?{x:_(n-d)=n&&c.x<=i&&c.y>=r&&c.y<=o?[[n,o],[i,o],[i,r],[n,r]]:[]).point=t[s]})),e}function s(t){return t.map((function(t,e){return{x:Math.round(r(t,e)/Et)*Et,y:Math.round(i(t,e)/Et)*Et,i:e}}))}return o.links=function(t){return pn(s(t)).edges.filter((function(t){return t.l&&t.r})).map((function(e){return{source:t[e.l.i],target:t[e.r.i]}}))},o.triangles=function(t){var e=[];return pn(s(t)).cells.forEach((function(n,r){for(var i,a,o,s,l=n.site,c=n.edges.sort(Je),u=-1,f=c.length,h=c[f-1].edge,d=h.l===l?h.r:h.l;++ua&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,l.push({i:o,x:_n(n,r)})),a=Mn.lastIndex;return ag&&(g=l.x),l.y>v&&(v=l.y),c.push(l.x),u.push(l.y);else for(f=0;fg&&(g=b),x>v&&(v=x),c.push(b),u.push(x)}var w=g-d,k=v-p;function T(t,e,n,r,i,a,o,s){if(!isNaN(n)&&!isNaN(r))if(t.leaf){var l=t.x,c=t.y;if(null!=l)if(_(l-n)+_(c-r)<.01)M(t,e,n,r,i,a,o,s);else{var u=t.point;t.x=t.y=t.point=null,M(t,u,l,c,i,a,o,s),M(t,e,n,r,i,a,o,s)}else t.x=n,t.y=r,t.point=e}else M(t,e,n,r,i,a,o,s)}function M(t,e,n,r,i,a,o,s){var l=.5*(i+o),c=.5*(a+s),u=n>=l,f=r>=c,h=f<<1|u;t.leaf=!1,u?i=l:o=l,f?a=c:s=c,T(t=t.nodes[h]||(t.nodes[h]={leaf:!0,nodes:[],point:null,x:null,y:null}),e,n,r,i,a,o,s)}w>k?v=p+w:g=d+k;var A={leaf:!0,nodes:[],point:null,x:null,y:null,add:function(t){T(A,t,+m(t,++f),+y(t,f),d,p,g,v)},visit:function(t){bn(t,A,d,p,g,v)},find:function(t){return function(t,e,n,r,i,a,o){var s,l=1/0;return function t(c,u,f,h,d){if(!(u>a||f>o||h=w)<<1|e>=x,k=_+4;_=0&&!(n=a.interpolators[r](t,e)););return n}function Sn(t,e){var n,r=[],i=[],a=t.length,o=e.length,s=Math.min(t.length,e.length);for(n=0;n=1)return 1;var e=t*t,n=e*t;return 4*(t<.5?n:3*(t-e)+n-.75)}function zn(t){return 1-Math.cos(t*It)}function Nn(t){return 
Math.pow(2,10*(t-1))}function jn(t){return 1-Math.sqrt(1-t*t)}function Fn(t){return t<1/2.75?7.5625*t*t:t<2/2.75?7.5625*(t-=1.5/2.75)*t+.75:t<2.5/2.75?7.5625*(t-=2.25/2.75)*t+.9375:7.5625*(t-=2.625/2.75)*t+.984375}function Bn(t,e){return e-=t,function(n){return Math.round(t+e*n)}}function Un(t){var e,n,r,i=[t.a,t.b],a=[t.c,t.d],o=Vn(i),s=Hn(i,a),l=Vn(((e=a)[0]+=(r=-s)*(n=i)[0],e[1]+=r*n[1],e))||0;i[0]*a[1]=0?t.slice(0,n):t,i=n>=0?t.slice(n+1):"in";return r=Cn.get(r)||En,i=Pn.get(i)||R,e=i(r.apply(null,o.call(arguments,1))),function(t){return t<=0?0:t>=1?1:e(t)}},a.interpolateHcl=function(t,e){t=a.hcl(t),e=a.hcl(e);var n=t.h,r=t.c,i=t.l,o=e.h-n,s=e.c-r,l=e.l-i;return isNaN(s)&&(s=0,r=isNaN(r)?e.c:r),isNaN(o)?(o=0,n=isNaN(n)?e.h:n):o>180?o-=360:o<-180&&(o+=360),function(t){return $t(n+o*t,r+s*t,i+l*t)+""}},a.interpolateHsl=function(t,e){t=a.hsl(t),e=a.hsl(e);var n=t.h,r=t.s,i=t.l,o=e.h-n,s=e.s-r,l=e.l-i;return isNaN(s)&&(s=0,r=isNaN(r)?e.s:r),isNaN(o)?(o=0,n=isNaN(n)?e.h:n):o>180?o-=360:o<-180&&(o+=360),function(t){return Gt(n+o*t,r+s*t,i+l*t)+""}},a.interpolateLab=function(t,e){t=a.lab(t),e=a.lab(e);var n=t.l,r=t.a,i=t.b,o=e.l-n,s=e.a-r,l=e.b-i;return function(t){return ee(n+o*t,r+s*t,i+l*t)+""}},a.interpolateRound=Bn,a.transform=function(t){var e=l.createElementNS(a.ns.prefix.svg,"g");return(a.transform=function(t){if(null!=t){e.setAttribute("transform",t);var n=e.transform.baseVal.consolidate()}return new Un(n?n.matrix:qn)})(t)},Un.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var qn={a:1,b:0,c:0,d:1,e:0,f:0};function Gn(t){return t.length?t.pop()+",":""}function Wn(t,e){var n=[],r=[];return t=a.transform(t),e=a.transform(e),function(t,e,n,r){if(t[0]!==e[0]||t[1]!==e[1]){var i=n.push("translate(",null,",",null,")");r.push({i:i-4,x:_n(t[0],e[0])},{i:i-2,x:_n(t[1],e[1])})}else(e[0]||e[1])&&n.push("translate("+e+")")}(t.translate,e.translate,n,r),function(t,e,n,r){t!==e?(t-e>180?e+=360:e-t>180&&(t+=360),r.push({i:n.push(Gn(n)+"rotate(",null,")")-2,x:_n(t,e)})):e&&n.push(Gn(n)+"rotate("+e+")")}(t.rotate,e.rotate,n,r),function(t,e,n,r){t!==e?r.push({i:n.push(Gn(n)+"skewX(",null,")")-2,x:_n(t,e)}):e&&n.push(Gn(n)+"skewX("+e+")")}(t.skew,e.skew,n,r),function(t,e,n,r){if(t[0]!==e[0]||t[1]!==e[1]){var i=n.push(Gn(n)+"scale(",null,",",null,")");r.push({i:i-4,x:_n(t[0],e[0])},{i:i-2,x:_n(t[1],e[1])})}else 1===e[0]&&1===e[1]||n.push(Gn(n)+"scale("+e+")")}(t.scale,e.scale,n,r),t=e=null,function(t){for(var e,i=-1,a=r.length;++i0?n=e:(t.c=null,t.t=NaN,t=null,l.end({type:"end",alpha:n=0})):e>0&&(l.start({type:"start",alpha:n=e}),t=Me(s.tick)),s):n},s.start=function(){var t,e,n,a=m.length,l=y.length,u=c[0],p=c[1];for(t=0;t=0;)n.push(i[r])}function sr(t,e){for(var n=[t],r=[];null!=(t=n.pop());)if(r.push(t),(a=t.children)&&(i=a.length))for(var i,a,o=-1;++o=0;)o.push(u=c[l]),u.parent=a,u.depth=a.depth+1;n&&(a.value=0),a.children=c}else n&&(a.value=+n.call(r,a,a.depth)||0),delete a.children;return sr(i,(function(e){var r,i;t&&(r=e.children)&&r.sort(t),n&&(i=e.parent)&&(i.value+=e.value)})),s}return r.sort=function(e){return arguments.length?(t=e,r):t},r.children=function(t){return arguments.length?(e=t,r):e},r.value=function(t){return arguments.length?(n=t,r):n},r.revalue=function(t){return n&&(or(t,(function(t){t.children&&(t.value=0)})),sr(t,(function(t){var e;t.children||(t.value=+n.call(r,t,t.depth)||0),(e=t.parent)&&(e.value+=t.value)}))),t},r},a.layout.partition=function(){var t=a.layout.hierarchy(),e=[1,1];function 
n(t,e,r,i){var a=t.children;if(t.x=e,t.y=t.depth*i,t.dx=r,t.dy=i,a&&(o=a.length)){var o,s,l,c=-1;for(r=t.value?r/t.value:0;++cs&&(s=r),o.push(r)}for(n=0;ni&&(r=n,i=e);return r}function wr(t){return t.reduce(_r,0)}function _r(t,e){return t+e[1]}function kr(t,e){return Tr(t,Math.ceil(Math.log(e.length)/Math.LN2+1))}function Tr(t,e){for(var n=-1,r=+t[0],i=(t[1]-r)/e,a=[];++n<=e;)a[n]=i*n+r;return a}function Mr(t){return[a.min(t),a.max(t)]}function Ar(t,e){return t.value-e.value}function Sr(t,e){var n=t._pack_next;t._pack_next=e,e._pack_prev=t,e._pack_next=n,n._pack_prev=e}function Er(t,e){t._pack_next=e,e._pack_prev=t}function Cr(t,e){var n=e.x-t.x,r=e.y-t.y,i=t.r+e.r;return.999*i*i>n*n+r*r}function Pr(t){if((e=t.children)&&(l=e.length)){var e,n,r,i,a,o,s,l,c=1/0,u=-1/0,f=1/0,h=-1/0;if(e.forEach(Or),(n=e[0]).x=-n.r,n.y=0,b(n),l>1&&((r=e[1]).x=r.r,r.y=0,b(r),l>2))for(Dr(n,r,i=e[2]),b(i),Sr(n,i),n._pack_prev=i,Sr(i,r),r=n._pack_next,a=3;a0)for(o=-1;++o=f[0]&&l<=f[1]&&((s=c[a.bisect(h,l,1,p)-1]).y+=g,s.push(i[o]));return c}return i.value=function(t){return arguments.length?(e=t,i):e},i.range=function(t){return arguments.length?(n=me(t),i):n},i.bins=function(t){return arguments.length?(r="number"===typeof t?function(e){return Tr(e,t)}:me(t),i):r},i.frequency=function(e){return arguments.length?(t=!!e,i):t},i},a.layout.pack=function(){var t,e=a.layout.hierarchy().sort(Ar),n=0,r=[1,1];function i(i,a){var o=e.call(this,i,a),s=o[0],l=r[0],c=r[1],u=null==t?Math.sqrt:"function"===typeof t?t:function(){return t};if(s.x=s.y=0,sr(s,(function(t){t.r=+u(t.value)})),sr(s,Pr),n){var f=n*(t?1:Math.max(2*s.r/l,2*s.r/c))/2;sr(s,(function(t){t.r+=f})),sr(s,Pr),sr(s,(function(t){t.r-=f}))}return Ir(s,l/2,c/2,t?1:1/Math.max(2*s.r/l,2*s.r/c)),o}return i.size=function(t){return arguments.length?(r=t,i):r},i.radius=function(e){return arguments.length?(t=null==e||"function"===typeof e?e:+e,i):t},i.padding=function(t){return arguments.length?(n=+t,i):n},ar(i,e)},a.layout.tree=function(){var t=a.layout.hierarchy().sort(null).value(null),e=Rr,n=[1,1],r=null;function i(i,a){var c=t.call(this,i,a),u=c[0],f=function(t){for(var e,n={A:null,children:[t]},r=[n];null!=(e=r.pop());)for(var i,a=e.children,o=0,s=a.length;od.x&&(d=t),t.depth>p.depth&&(p=t)}));var g=e(h,d)/2-h.x,v=n[0]/(d.x+e(d,h)/2+g),m=n[1]/(p.depth||1);or(u,(function(t){t.x=(t.x+g)*v,t.y=t.depth*m}))}return c}function o(t){var n=t.children,r=t.parent.children,i=t.i?r[t.i-1]:null;if(n.length){!function(t){for(var e,n=0,r=0,i=t.children,a=i.length;--a>=0;)(e=i[a]).z+=n,e.m+=n,n+=e.s+(r+=e.c)}(t);var a=(n[0].z+n[n.length-1].z)/2;i?(t.z=i.z+e(t._,i._),t.m=t.z-a):t.z=a}else i&&(t.z=i.z+e(t._,i._));t.parent.A=function(t,n,r){if(n){for(var i,a=t,o=t,s=n,l=a.parent.children[0],c=a.m,u=o.m,f=s.m,h=l.m;s=Nr(s),a=zr(a),s&&a;)l=zr(l),(o=Nr(o)).a=t,(i=s.z+f-a.z-c+e(s._,a._))>0&&(jr(Fr(s,t,r),t,i),c+=i,u+=i),f+=s.m,c+=a.m,h+=l.m,u+=o.m;s&&!Nr(o)&&(o.t=s,o.m+=f-u),a&&!zr(l)&&(l.t=a,l.m+=c-h,r=t)}return r}(t,i,t.parent.A||r[0])}function s(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function l(t){t.x*=n[0],t.y=t.depth*n[1]}return i.separation=function(t){return arguments.length?(e=t,i):e},i.size=function(t){return arguments.length?(r=null==(n=t)?l:null,i):r?null:n},i.nodeSize=function(t){return arguments.length?(r=null==(n=t)?null:l,i):r?n:null},ar(i,t)},a.layout.cluster=function(){var t=a.layout.hierarchy().sort(null).value(null),e=Rr,n=[1,1],r=!1;function i(i,o){var s,l=t.call(this,i,o),c=l[0],u=0;sr(c,(function(t){var n=t.children;n&&n.length?(t.x=function(t){return 
t.reduce((function(t,e){return t+e.x}),0)/t.length}(n),t.y=function(t){return 1+a.max(t,(function(t){return t.y}))}(n)):(t.x=s?u+=e(t,s):0,t.y=0,s=t)}));var f=Br(c),h=Ur(c),d=f.x-e(f,h)/2,p=h.x+e(h,f)/2;return sr(c,r?function(t){t.x=(t.x-c.x)*n[0],t.y=(c.y-t.y)*n[1]}:function(t){t.x=(t.x-d)/(p-d)*n[0],t.y=(1-(c.y?t.y/c.y:1))*n[1]}),l}return i.separation=function(t){return arguments.length?(e=t,i):e},i.size=function(t){return arguments.length?(r=null==(n=t),i):r?null:n},i.nodeSize=function(t){return arguments.length?(r=null!=(n=t),i):r?n:null},ar(i,t)},a.layout.treemap=function(){var t,e=a.layout.hierarchy(),n=Math.round,r=[1,1],i=null,o=Hr,s=!1,l="squarify",c=.5*(1+Math.sqrt(5));function u(t,e){for(var n,r,i=-1,a=t.length;++i0;)s.push(n=c[i-1]),s.area+=n.area,"squarify"!==l||(r=d(s,g))<=h?(c.pop(),h=r):(s.area-=s.pop().area,p(s,g,a,!1),g=Math.min(a.dx,a.dy),s.length=s.area=0,h=1/0);s.length&&(p(s,g,a,!0),s.length=s.area=0),e.forEach(f)}}function h(t){var e=t.children;if(e&&e.length){var n,r=o(t),i=e.slice(),a=[];for(u(i,r.dx*r.dy/t.value),a.area=0;n=i.pop();)a.push(n),a.area+=n.area,null!=n.z&&(p(a,n.z?r.dx:r.dy,r,!i.length),a.length=a.area=0);e.forEach(h)}}function d(t,e){for(var n,r=t.area,i=0,a=1/0,o=-1,s=t.length;++oi&&(i=n));return e*=e,(r*=r)?Math.max(e*i*c/r,r/(e*a*c)):1/0}function p(t,e,r,i){var a,o=-1,s=t.length,l=r.x,c=r.y,u=e?n(t.area/e):0;if(e==r.dx){for((i||u>r.dy)&&(u=r.dy);++or.dx)&&(u=r.dx);++o1);return t+e*n*Math.sqrt(-2*Math.log(i)/i)}},logNormal:function(){var t=a.random.normal.apply(a,arguments);return function(){return Math.exp(t())}},bates:function(t){var e=a.random.irwinHall(t);return function(){return e()/t}},irwinHall:function(t){return function(){for(var e=0,n=0;n2?Kr:Wr,l=r?$n:Yn;return i=o(t,e,l,n),a=o(e,t,l,An),s}function s(t){return i(t)}return s.invert=function(t){return a(t)},s.domain=function(e){return arguments.length?(t=e.map(Number),o()):t},s.range=function(t){return arguments.length?(e=t,o()):e},s.rangeRound=function(t){return s.range(t).interpolate(Bn)},s.clamp=function(t){return arguments.length?(r=t,o()):r},s.interpolate=function(t){return arguments.length?(n=t,o()):n},s.ticks=function(e){return ei(t,e)},s.tickFormat=function(e,n){return d3_scale_linearTickFormat(t,e,n)},s.nice=function(e){return Qr(t,e),o()},s.copy=function(){return Zr(t,e,n,r)},o()}function Jr(t,e){return a.rebind(t,e,"range","rangeRound","interpolate","clamp")}function Qr(t,e){return Yr(t,$r(ti(t,e)[2])),Yr(t,$r(ti(t,e)[2])),t}function ti(t,e){null==e&&(e=10);var n=qr(t),r=n[1]-n[0],i=Math.pow(10,Math.floor(Math.log(r/e)/Math.LN10)),a=e/r*i;return a<=.15?i*=10:a<=.35?i*=5:a<=.75&&(i*=2),n[0]=Math.ceil(n[0]/i)*i,n[1]=Math.floor(n[1]/i)*i+.5*i,n[2]=i,n}function ei(t,e){return a.range.apply(a,ti(t,e))}function ni(t,e,n,r){function i(t){return(n?Math.log(t<0?0:t):-Math.log(t>0?0:-t))/Math.log(e)}function a(t){return n?Math.pow(e,t):-Math.pow(e,-t)}function o(e){return t(i(e))}return o.invert=function(e){return a(t.invert(e))},o.domain=function(e){return arguments.length?(n=e[0]>=0,t.domain((r=e.map(Number)).map(i)),o):r},o.base=function(n){return arguments.length?(e=+n,t.domain(r.map(i)),o):e},o.nice=function(){var e=Yr(r.map(i),n?Math:ri);return t.domain(e),r=e.map(a),o},o.ticks=function(){var t=qr(r),o=[],s=t[0],l=t[1],c=Math.floor(i(s)),u=Math.ceil(i(l)),f=e%1?2:e;if(isFinite(u-c)){if(n){for(;c0;h--)o.push(a(c)*h);for(c=0;o[c]l;u--);o=o.slice(c,u)}return o},o.copy=function(){return ni(t.copy(),e,n,r)},Jr(o,t)}a.scale.linear=function(){return 
Zr([0,1],[0,1],An,!1)},a.scale.log=function(){return ni(a.scale.linear().domain([0,1]),10,!0,[1,10])};var ri={floor:function(t){return-Math.ceil(-t)},ceil:function(t){return-Math.floor(-t)}};function ii(t,e,n){var r=ai(e),i=ai(1/e);function a(e){return t(r(e))}return a.invert=function(e){return i(t.invert(e))},a.domain=function(e){return arguments.length?(t.domain((n=e.map(Number)).map(r)),a):n},a.ticks=function(t){return ei(n,t)},a.tickFormat=function(t,e){return d3_scale_linearTickFormat(n,t,e)},a.nice=function(t){return a.domain(Qr(n,t))},a.exponent=function(o){return arguments.length?(r=ai(e=o),i=ai(1/e),t.domain(n.map(r)),a):e},a.copy=function(){return ii(t.copy(),e,n)},Jr(a,t)}function ai(t){return function(e){return e<0?-Math.pow(-e,t):Math.pow(e,t)}}function oi(t,e){var n,r,i;function o(i){return r[((n.get(i)||("range"===e.t?n.set(i,t.push(i)):NaN))-1)%r.length]}function s(e,n){return a.range(t.length).map((function(t){return e+n*t}))}return o.domain=function(r){if(!arguments.length)return t;t=[],n=new T;for(var i,a=-1,s=r.length;++a0?n[r-1]:t[0],rf?0:1;if(c=Lt)return l(c,d)+(s?l(s,1-d):"")+"Z";var p,g,v,m,y,b,x,w,_,k,T,M,A=0,S=0,E=[];if((m=(+o.apply(this,arguments)||0)/2)&&(v=r===vi?Math.sqrt(s*s+c*c):+r.apply(this,arguments),d||(S*=-1),c&&(S=zt(v/c*Math.sin(m))),s&&(A=zt(v/s*Math.sin(m)))),c){y=c*Math.cos(u+S),b=c*Math.sin(u+S),x=c*Math.cos(f-S),w=c*Math.sin(f-S);var C=Math.abs(f-u-2*S)<=Pt?0:1;if(S&&_i(y,b,x,w)===d^C){var P=(u+f)/2;y=c*Math.cos(P),b=c*Math.sin(P),x=w=null}}else y=b=0;if(s){_=s*Math.cos(f-A),k=s*Math.sin(f-A),T=s*Math.cos(u+A),M=s*Math.sin(u+A);var O=Math.abs(u-f+2*A)<=Pt?0:1;if(A&&_i(_,k,T,M)===1-d^O){var L=(u+f)/2;_=s*Math.cos(L),k=s*Math.sin(L),T=M=null}}else _=k=0;if(h>Et&&(p=Math.min(Math.abs(c-s)/2,+n.apply(this,arguments)))>.001){g=s0?0:1}function ki(t,e,n,r,i){var a=t[0]-e[0],o=t[1]-e[1],s=(i?r:-r)/Math.sqrt(a*a+o*o),l=s*o,c=-s*a,u=t[0]+l,f=t[1]+c,h=e[0]+l,d=e[1]+c,p=(u+h)/2,g=(f+d)/2,v=h-u,m=d-f,y=v*v+m*m,b=n-r,x=u*d-h*f,w=(m<0?-1:1)*Math.sqrt(Math.max(0,b*b*y-x*x)),_=(x*m-v*w)/y,k=(-x*v-m*w)/y,T=(x*m+v*w)/y,M=(-x*v+m*w)/y,A=_-p,S=k-g,E=T-p,C=M-g;return A*A+S*S>E*E+C*C&&(_=T,k=M),[[_-l,k-c],[_*n/b,k*n/b]]}function Ti(){return!0}function Mi(t){var e=Ce,n=Pe,r=Ti,i=Si,a=i.key,o=.7;function s(a){var s,l=[],c=[],u=-1,f=a.length,h=me(e),d=me(n);function p(){l.push("M",i(t(c),o))}for(;++u1&&i.push("H",r[0]),i.join("")},"step-before":Ci,"step-after":Pi,basis:Ii,"basis-open":function(t){if(t.length<4)return Si(t);for(var e,n=[],r=-1,i=t.length,a=[0],o=[0];++r<3;)e=t[r],a.push(e[0]),o.push(e[1]);for(n.push(Di(Ni,a)+","+Di(Ni,o)),--r;++r9&&(i=3*e/Math.sqrt(i),o[s]=i*n,o[s+1]=i*r);for(s=-1;++s<=l;)i=(t[Math.min(l,s+1)][0]-t[Math.max(0,s-1)][0])/(6*(1+o[s]*o[s])),a.push([i||0,o[s]*i||0]);return a}(t))}});function Si(t){return t.length>1?t.join("L"):t+"Z"}function Ei(t){return t.join("L")+"Z"}function Ci(t){for(var e=0,n=t.length,r=t[0],i=[r[0],",",r[1]];++e1){s=e[1],a=t[l],l++,r+="C"+(i[0]+o[0])+","+(i[1]+o[1])+","+(a[0]-s[0])+","+(a[1]-s[1])+","+a[0]+","+a[1];for(var c=2;cPt)+",1 "+e}function l(t,e,n,r){return"Q 0,0 "+r}return a.radius=function(t){return arguments.length?(n=me(t),a):n},a.source=function(e){return arguments.length?(t=me(e),a):t},a.target=function(t){return arguments.length?(e=me(t),a):e},a.startAngle=function(t){return arguments.length?(r=me(t),a):r},a.endAngle=function(t){return arguments.length?(i=me(t),a):i},a},a.svg.diagonal=function(){var t=Hi,e=Vi,n=Gi;function r(r,i){var 
a=t.call(this,r,i),o=e.call(this,r,i),s=(a.y+o.y)/2,l=[a,{x:a.x,y:s},{x:o.x,y:s},o];return"M"+(l=l.map(n))[0]+"C"+l[1]+" "+l[2]+" "+l[3]}return r.source=function(e){return arguments.length?(t=me(e),r):t},r.target=function(t){return arguments.length?(e=me(t),r):e},r.projection=function(t){return arguments.length?(n=t,r):n},r},a.svg.diagonal.radial=function(){var t=a.svg.diagonal(),e=Gi,n=t.projection;return t.projection=function(t){return arguments.length?n(function(t){return function(){var e=t.apply(this,arguments),n=e[0],r=e[1]-It;return[n*Math.cos(r),n*Math.sin(r)]}}(e=t)):e},t},a.svg.symbol=function(){var t=Yi,e=Wi;function n(n,r){return(Xi.get(t.call(this,n,r))||$i)(e.call(this,n,r))}return n.type=function(e){return arguments.length?(t=me(e),n):t},n.size=function(t){return arguments.length?(e=me(t),n):e},n};var Xi=a.map({circle:$i,cross:function(t){var e=Math.sqrt(t/5)/2;return"M"+-3*e+","+-e+"H"+-e+"V"+-3*e+"H"+e+"V"+-e+"H"+3*e+"V"+e+"H"+e+"V"+3*e+"H"+-e+"V"+e+"H"+-3*e+"Z"},diamond:function(t){var e=Math.sqrt(t/(2*Zi)),n=e*Zi;return"M0,"+-e+"L"+n+",0 0,"+e+" "+-n+",0Z"},square:function(t){var e=Math.sqrt(t)/2;return"M"+-e+","+-e+"L"+e+","+-e+" "+e+","+e+" "+-e+","+e+"Z"},"triangle-down":function(t){var e=Math.sqrt(t/Ki),n=e*Ki/2;return"M0,"+n+"L"+e+","+-n+" "+-e+","+-n+"Z"},"triangle-up":function(t){var e=Math.sqrt(t/Ki),n=e*Ki/2;return"M0,"+-n+"L"+e+","+n+" "+-e+","+n+"Z"}});a.svg.symbolTypes=Xi.keys();var Ki=Math.sqrt(3),Zi=Math.tan(30*Dt);Z.transition=function(t){for(var e,n,r=ea||++ia,i=sa(t),a=[],o=na||{time:Date.now(),ease:Rn,delay:0,duration:250},s=-1,l=this.length;++s0;)c[--h].call(t,o);if(a>=1)return f.event&&f.event.end.call(t,t.__data__,e),--u.count?delete u[r]:delete t[n],1}f||(a=i.time,o=Me((function(t){var e=f.delay;if(o.t=e+a,e<=t)return h(t-e);o.c=h}),0,a),f=u[r]={tween:new T,time:a,timer:o,delay:i.delay,duration:i.duration,ease:i.ease,index:e},i=null,++u.count)}ra.call=Z.call,ra.empty=Z.empty,ra.node=Z.node,ra.size=Z.size,a.transition=function(t,e){return t&&t.transition?ea?t.transition(e):t:a.selection().transition(t)},a.transition.prototype=ra,ra.select=function(t){var e,n,r,i=this.id,a=this.namespace,o=[];t=J(t);for(var s=-1,l=this.length;++srect,.s>rect").attr("width",o[1]-o[0])}function g(t){t.select(".extent").attr("y",s[0]),t.selectAll(".extent,.e>rect,.w>rect").attr("height",s[1]-s[0])}function v(){var f,v,m=this,y=a.select(a.event.target),b=n.of(m,arguments),x=a.select(m),w=y.datum(),_=!/^(n|s)$/.test(w)&&r,k=!/^(e|w)$/.test(w)&&i,T=y.classed("extent"),M=Tt(m),A=a.mouse(m),S=a.select(u(m)).on("keydown.brush",(function(){32==a.event.keyCode&&(T||(f=null,A[0]-=o[1],A[1]-=s[1],T=2),H())})).on("keyup.brush",(function(){32==a.event.keyCode&&2==T&&(A[0]+=o[1],A[1]+=s[1],T=0,H())}));if(a.event.changedTouches?S.on("touchmove.brush",P).on("touchend.brush",L):S.on("mousemove.brush",P).on("mouseup.brush",L),x.interrupt().selectAll("*").interrupt(),T)A[0]=o[0]-A[0],A[1]=s[0]-A[1];else if(w){var E=+/w$/.test(w),C=+/^n/.test(w);v=[o[1-E]-A[0],s[1-C]-A[1]],A[0]=o[E],A[1]=s[C]}else a.event.altKey&&(f=A.slice());function P(){var t=a.mouse(m),e=!1;v&&(t[0]+=v[0],t[1]+=v[1]),T||(a.event.altKey?(f||(f=[(o[0]+o[1])/2,(s[0]+s[1])/2]),A[0]=o[+(t[0]>>1;e.dtype||(e.dtype="array"),"string"===typeof e.dtype?p=new(f(e.dtype))(v):e.dtype&&(p=e.dtype,Array.isArray(p)&&(p.length=v));for(var m=0;mn||s>1073741824){for(var h=0;hn+i||A>r+i||S=P)&&o!==s){var l=y[a];void 0===s&&(s=l.length);for(var c=o;c=v&&f<=_&&h>=m&&h<=k&&O.push(u)}var 
d=b[a],p=d[4*o+0],g=d[4*o+1],x=d[4*o+2],w=d[4*o+3],T=function(t,e){for(var n=null,r=0;null===n;)if(n=t[4*e+r],++r>t.length)return null;return n}(d,o+1),E=.5*i,L=a+1;e(n,r,E,L,p,g||x||w||T),e(n,r+E,E,L,g,x||w||T),e(n+E,r,E,L,x,w||T),e(n+E,r+E,E,L,w,T)}}(0,0,1,0,0,1),O},p;function E(t,e,n){for(var r=1,i=.5,a=.5,o=.5,s=0;s0){e+=Math.abs(o(t[0]));for(var n=1;n2){for(l=0;l=0))throw new Error("precision must be a positive number");var n=Math.pow(10,e||0);return Math.round(t*n)/n},e.radiansToLength=u,e.lengthToRadians=f,e.lengthToDegrees=function(t,e){return h(f(t,e))},e.bearingToAzimuth=function(t){var e=t%360;return e<0&&(e+=360),e},e.radiansToDegrees=h,e.degreesToRadians=function(t){return t%360*Math.PI/180},e.convertLength=function(t,e,n){if(void 0===e&&(e="kilometers"),void 0===n&&(n="kilometers"),!(t>=0))throw new Error("length must be a positive number");return u(f(t,e),n)},e.convertArea=function(t,n,r){if(void 0===n&&(n="meters"),void 0===r&&(r="kilometers"),!(t>=0))throw new Error("area must be a positive number");var i=e.areaFactors[n];if(!i)throw new Error("invalid original units");var a=e.areaFactors[r];if(!a)throw new Error("invalid final units");return t/i*a},e.isNumber=d,e.isObject=function(t){return!!t&&t.constructor===Object},e.validateBBox=function(t){if(!t)throw new Error("bbox is required");if(!Array.isArray(t))throw new Error("bbox must be an Array");if(4!==t.length&&6!==t.length)throw new Error("bbox must be an Array of 4 or 6 numbers");t.forEach((function(t){if(!d(t))throw new Error("bbox must only contain numbers")}))},e.validateId=function(t){if(!t)throw new Error("id is required");if(-1===["string","number"].indexOf(typeof t))throw new Error("id must be a number or a string")}},60302:function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(23132);function i(t,e,n){if(null!==t)for(var r,a,o,s,l,c,u,f,h=0,d=0,p=t.type,g="FeatureCollection"===p,v="Feature"===p,m=g?t.features.length:1,y=0;yc||d>u||p>f)return l=i,c=n,u=d,f=p,void(o=0);var g=r.lineString([l,i],t.properties);if(!1===e(g,n,a,p,o))return!1;o++,l=i}))&&void 0}}}))}function u(t,e){if(!t)throw new Error("geojson is required");l(t,(function(t,n,i){if(null!==t.geometry){var a=t.geometry.type,o=t.geometry.coordinates;switch(a){case"LineString":if(!1===e(t,n,i,0,0))return!1;break;case"Polygon":for(var s=0;st[0]&&(e[0]=t[0]),e[1]>t[1]&&(e[1]=t[1]),e[2]=0))throw new Error("precision must be a positive number");var n=Math.pow(10,e||0);return Math.round(t*n)/n},e.radiansToLength=u,e.lengthToRadians=f,e.lengthToDegrees=function(t,e){return h(f(t,e))},e.bearingToAzimuth=function(t){var e=t%360;return e<0&&(e+=360),e},e.radiansToDegrees=h,e.degreesToRadians=function(t){return t%360*Math.PI/180},e.convertLength=function(t,e,n){if(void 0===e&&(e="kilometers"),void 0===n&&(n="kilometers"),!(t>=0))throw new Error("length must be a positive number");return u(f(t,e),n)},e.convertArea=function(t,n,r){if(void 0===n&&(n="meters"),void 0===r&&(r="kilometers"),!(t>=0))throw new Error("area must be a positive number");var i=e.areaFactors[n];if(!i)throw new Error("invalid original units");var a=e.areaFactors[r];if(!a)throw new Error("invalid final units");return t/i*a},e.isNumber=d,e.isObject=function(t){return!!t&&t.constructor===Object},e.validateBBox=function(t){if(!t)throw new Error("bbox is required");if(!Array.isArray(t))throw new Error("bbox must be an Array");if(4!==t.length&&6!==t.length)throw new Error("bbox must be an Array of 4 or 6 numbers");t.forEach((function(t){if(!d(t))throw new Error("bbox 
must only contain numbers")}))},e.validateId=function(t){if(!t)throw new Error("id is required");if(-1===["string","number"].indexOf(typeof t))throw new Error("id must be a number or a string")}},27138:function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(94228);function i(t,e,n){if(null!==t)for(var r,a,o,s,l,c,u,f,h=0,d=0,p=t.type,g="FeatureCollection"===p,v="Feature"===p,m=g?t.features.length:1,y=0;yc||d>u||p>f)return l=i,c=n,u=d,f=p,void(o=0);var g=r.lineString([l,i],t.properties);if(!1===e(g,n,a,p,o))return!1;o++,l=i}))&&void 0}}}))}function u(t,e){if(!t)throw new Error("geojson is required");l(t,(function(t,n,i){if(null!==t.geometry){var a=t.geometry.type,o=t.geometry.coordinates;switch(a){case"LineString":if(!1===e(t,n,i,0,0))return!1;break;case"Polygon":for(var s=0;s=0))throw new Error("precision must be a positive number");var n=Math.pow(10,e||0);return Math.round(t*n)/n},e.radiansToLength=u,e.lengthToRadians=f,e.lengthToDegrees=function(t,e){return h(f(t,e))},e.bearingToAzimuth=function(t){var e=t%360;return e<0&&(e+=360),e},e.radiansToDegrees=h,e.degreesToRadians=function(t){return t%360*Math.PI/180},e.convertLength=function(t,e,n){if(void 0===e&&(e="kilometers"),void 0===n&&(n="kilometers"),!(t>=0))throw new Error("length must be a positive number");return u(f(t,e),n)},e.convertArea=function(t,n,r){if(void 0===n&&(n="meters"),void 0===r&&(r="kilometers"),!(t>=0))throw new Error("area must be a positive number");var i=e.areaFactors[n];if(!i)throw new Error("invalid original units");var a=e.areaFactors[r];if(!a)throw new Error("invalid final units");return t/i*a},e.isNumber=d,e.isObject=function(t){return!!t&&t.constructor===Object},e.validateBBox=function(t){if(!t)throw new Error("bbox is required");if(!Array.isArray(t))throw new Error("bbox must be an Array");if(4!==t.length&&6!==t.length)throw new Error("bbox must be an Array of 4 or 6 numbers");t.forEach((function(t){if(!d(t))throw new Error("bbox must only contain numbers")}))},e.validateId=function(t){if(!t)throw new Error("id is required");if(-1===["string","number"].indexOf(typeof t))throw new Error("id must be a number or a string")},e.radians2degrees=function(){throw new Error("method has been renamed to `radiansToDegrees`")},e.degrees2radians=function(){throw new Error("method has been renamed to `degreesToRadians`")},e.distanceToDegrees=function(){throw new Error("method has been renamed to `lengthToDegrees`")},e.distanceToRadians=function(){throw new Error("method has been renamed to `lengthToRadians`")},e.radiansToDistance=function(){throw new Error("method has been renamed to `radiansToLength`")},e.bearingToAngle=function(){throw new Error("method has been renamed to `bearingToAzimuth`")},e.convertDistance=function(){throw new Error("method has been renamed to `convertLength`")}},88553:function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(64182);function i(t,e,n){if(null!==t)for(var r,a,o,s,l,c,u,f,h=0,d=0,p=t.type,g="FeatureCollection"===p,v="Feature"===p,m=g?t.features.length:1,y=0;yc||d>u||p>f)return l=i,c=n,u=d,f=p,void(o=0);var g=r.lineString([l,i],t.properties);if(!1===e(g,n,a,p,o))return!1;o++,l=i}))&&void 0}}}))}function u(t,e){if(!t)throw new Error("geojson is required");l(t,(function(t,n,i){if(null!==t.geometry){var a=t.geometry.type,o=t.geometry.coordinates;switch(a){case"LineString":if(!1===e(t,n,i,0,0))return!1;break;case"Polygon":for(var 
s=0;si&&(i=t[o]),t[o]1?n-1:0),i=1;i1?n-1:0),i=1;i1?n-1:0),i=1;i1?n-1:0),i=1;it.length)&&(n=t.length),t.substring(n-e.length,n)===e}var v="",m="",y="",b="",x={deepStrictEqual:"Expected values to be strictly deep-equal:",strictEqual:"Expected values to be strictly equal:",strictEqualObject:'Expected "actual" to be reference-equal to "expected":',deepEqual:"Expected values to be loosely deep-equal:",equal:"Expected values to be loosely equal:",notDeepStrictEqual:'Expected "actual" not to be strictly deep-equal to:',notStrictEqual:'Expected "actual" to be strictly unequal to:',notStrictEqualObject:'Expected "actual" not to be reference-equal to "expected":',notDeepEqual:'Expected "actual" not to be loosely deep-equal to:',notEqual:'Expected "actual" to be loosely unequal to:',notIdentical:"Values identical but not reference-equal:"};function w(t){var e=Object.keys(t),n=Object.create(Object.getPrototypeOf(t));return e.forEach((function(e){n[e]=t[e]})),Object.defineProperty(n,"message",{value:t.message}),n}function _(t){return d(t,{compact:!1,customInspect:!1,depth:1e3,maxArrayLength:1/0,showHidden:!1,breakLength:1/0,showProxy:!1,sorted:!0,getters:!0})}function k(t,e,n){var i="",a="",o=0,s="",l=!1,c=_(t),u=c.split("\n"),f=_(e).split("\n"),d=0,p="";if("strictEqual"===n&&"object"===h(t)&&"object"===h(e)&&null!==t&&null!==e&&(n="strictEqualObject"),1===u.length&&1===f.length&&u[0]!==f[0]){var w=u[0].length+f[0].length;if(w<=10){if(("object"!==h(t)||null===t)&&("object"!==h(e)||null===e)&&(0!==t||0!==e))return"".concat(x[n],"\n\n")+"".concat(u[0]," !== ").concat(f[0],"\n")}else if("strictEqualObject"!==n&&w<(r.stderr&&r.stderr.isTTY?r.stderr.columns:80)){for(;u[0][d]===f[0][d];)d++;d>2&&(p="\n ".concat(function(t,e){if(e=Math.floor(e),0==t.length||0==e)return"";var n=t.length*e;for(e=Math.floor(Math.log(e)/Math.log(2));e;)t+=t,e--;return t+t.substring(0,n-t.length)}(" ",d),"^"),d=0)}}for(var k=u[u.length-1],T=f[f.length-1];k===T&&(d++<2?s="\n ".concat(k).concat(s):i=k,u.pop(),f.pop(),0!==u.length&&0!==f.length);)k=u[u.length-1],T=f[f.length-1];var M=Math.max(u.length,f.length);if(0===M){var A=c.split("\n");if(A.length>30)for(A[26]="".concat(v,"...").concat(b);A.length>27;)A.pop();return"".concat(x.notIdentical,"\n\n").concat(A.join("\n"),"\n")}d>3&&(s="\n".concat(v,"...").concat(b).concat(s),l=!0),""!==i&&(s="\n ".concat(i).concat(s),i="");var S=0,E=x[n]+"\n".concat(m,"+ actual").concat(b," ").concat(y,"- expected").concat(b),C=" ".concat(v,"...").concat(b," Lines skipped");for(d=0;d1&&d>2&&(P>4?(a+="\n".concat(v,"...").concat(b),l=!0):P>3&&(a+="\n ".concat(f[d-2]),S++),a+="\n ".concat(f[d-1]),S++),o=d,i+="\n".concat(y,"-").concat(b," ").concat(f[d]),S++;else if(f.length1&&d>2&&(P>4?(a+="\n".concat(v,"...").concat(b),l=!0):P>3&&(a+="\n ".concat(u[d-2]),S++),a+="\n ".concat(u[d-1]),S++),o=d,a+="\n".concat(m,"+").concat(b," ").concat(u[d]),S++;else{var O=f[d],L=u[d],I=L!==O&&(!g(L,",")||L.slice(0,-1)!==O);I&&g(O,",")&&O.slice(0,-1)===L&&(I=!1,L+=","),I?(P>1&&d>2&&(P>4?(a+="\n".concat(v,"...").concat(b),l=!0):P>3&&(a+="\n ".concat(u[d-2]),S++),a+="\n ".concat(u[d-1]),S++),o=d,a+="\n".concat(m,"+").concat(b," ").concat(L),i+="\n".concat(y,"-").concat(b," ").concat(O),S+=2):(a+=i,i="",1!==P&&0!==d||(a+="\n ".concat(L),S++))}if(S>20&&d30)for(T[26]="".concat(v,"...").concat(b);T.length>27;)T.pop();n=1===T.length?o(this,f(e).call(this,"".concat(g," ").concat(T[0]))):o(this,f(e).call(this,"".concat(g,"\n\n").concat(T.join("\n"),"\n")))}else{var 
M=_(c),A="",S=x[a];"notDeepEqual"===a||"notEqual"===a?(M="".concat(x[a],"\n\n").concat(M)).length>1024&&(M="".concat(M.slice(0,1021),"...")):(A="".concat(_(u)),M.length>512&&(M="".concat(M.slice(0,509),"...")),A.length>512&&(A="".concat(A.slice(0,509),"...")),"deepEqual"===a||"equal"===a?M="".concat(S,"\n\n").concat(M,"\n\nshould equal\n\n"):A=" ".concat(a," ").concat(A)),n=o(this,f(e).call(this,"".concat(M).concat(A)))}return Error.stackTraceLimit=d,n.generatedMessage=!i,Object.defineProperty(s(n),"name",{value:"AssertionError [ERR_ASSERTION]",enumerable:!1,writable:!0,configurable:!0}),n.code="ERR_ASSERTION",n.actual=c,n.expected=u,n.operator=a,Error.captureStackTrace&&Error.captureStackTrace(s(n),l),n.stack,n.name="AssertionError",o(n)}var n,l,c;return function(t,e){if("function"!==typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&u(t,e)}(e,t),n=e,l=[{key:"toString",value:function(){return"".concat(this.name," [").concat(this.code,"]: ").concat(this.message)}},{key:d.custom,value:function(t,e){return d(this,function(t){for(var e=1;e2?"one of ".concat(e," ").concat(t.slice(0,n-1).join(", "),", or ")+t[n-1]:2===n?"one of ".concat(e," ").concat(t[0]," or ").concat(t[1]):"of ".concat(e," ").concat(t[0])}return"of ".concat(e," ").concat(String(t))}u("ERR_AMBIGUOUS_ARGUMENT",'The "%s" argument is ambiguous. %s',TypeError),u("ERR_INVALID_ARG_TYPE",(function(t,e,i){var a,o,l,c;if(void 0===s&&(s=n(32791)),s("string"===typeof t,"'name' must be a string"),"string"===typeof e&&(o="not ",e.substr(!l||l<0?0:+l,o.length)===o)?(a="must not be",e=e.replace(/^not /,"")):a="must be",function(t,e,n){return(void 0===n||n>t.length)&&(n=t.length),t.substring(n-e.length,n)===e}(t," argument"))c="The ".concat(t," ").concat(a," ").concat(f(e,"type"));else{var u=function(t,e,n){return"number"!==typeof n&&(n=0),!(n+e.length>t.length)&&-1!==t.indexOf(e,n)}(t,".")?"property":"argument";c='The "'.concat(t,'" ').concat(u," ").concat(a," ").concat(f(e,"type"))}return c+=". Received type ".concat(r(i))}),TypeError),u("ERR_INVALID_ARG_VALUE",(function(t,e){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"is invalid";void 0===l&&(l=n(43827));var i=l.inspect(e);return i.length>128&&(i="".concat(i.slice(0,128),"...")),"The argument '".concat(t,"' ").concat(r,". 
Received ").concat(i)}),TypeError,RangeError),u("ERR_INVALID_RETURN_VALUE",(function(t,e,n){var i;return i=n&&n.constructor&&n.constructor.name?"instance of ".concat(n.constructor.name):"type ".concat(r(n)),"Expected ".concat(t,' to be returned from the "').concat(e,'"')+" function but got ".concat(i,".")}),TypeError),u("ERR_MISSING_ARGS",(function(){for(var t=arguments.length,e=new Array(t),r=0;r0,"At least one arg needs to be specified");var i="The ",a=e.length;switch(e=e.map((function(t){return'"'.concat(t,'"')})),a){case 1:i+="".concat(e[0]," argument");break;case 2:i+="".concat(e[0]," and ").concat(e[1]," arguments");break;default:i+=e.slice(0,a-1).join(", "),i+=", and ".concat(e[a-1]," arguments")}return"".concat(i," must be specified")}),TypeError),t.exports.codes=c},74061:function(t,e,n){"use strict";function r(t,e){return function(t){if(Array.isArray(t))return t}(t)||function(t,e){var n=[],r=!0,i=!1,a=void 0;try{for(var o,s=t[Symbol.iterator]();!(r=(o=s.next()).done)&&(n.push(o.value),!e||n.length!==e);r=!0);}catch(l){i=!0,a=l}finally{try{r||null==s.return||s.return()}finally{if(i)throw a}}return n}(t,e)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance")}()}function i(t){return i="function"===typeof Symbol&&"symbol"===typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"===typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},i(t)}var a=void 0!==/a/g.flags,o=function(t){var e=[];return t.forEach((function(t){return e.push(t)})),e},s=function(t){var e=[];return t.forEach((function(t,n){return e.push([n,t])})),e},l=Object.is?Object.is:n(64003),c=Object.getOwnPropertySymbols?Object.getOwnPropertySymbols:function(){return[]},u=Number.isNaN?Number.isNaN:n(15567);function f(t){return t.call.bind(t)}var h=f(Object.prototype.hasOwnProperty),d=f(Object.prototype.propertyIsEnumerable),p=f(Object.prototype.toString),g=n(43827).types,v=g.isAnyArrayBuffer,m=g.isArrayBufferView,y=g.isDate,b=g.isMap,x=g.isRegExp,w=g.isSet,_=g.isNativeError,k=g.isBoxedPrimitive,T=g.isNumberObject,M=g.isStringObject,A=g.isBooleanObject,S=g.isBigIntObject,E=g.isSymbolObject,C=g.isFloat32Array,P=g.isFloat64Array;function O(t){if(0===t.length||t.length>10)return!0;for(var e=0;e57)return!0}return 10===t.length&&t>=Math.pow(2,32)}function L(t){return Object.keys(t).filter(O).concat(c(t).filter(Object.prototype.propertyIsEnumerable.bind(t)))}function I(t,e){if(t===e)return 0;for(var n=t.length,r=e.length,i=0,a=Math.min(n,r);i0?o-4:o;for(n=0;n>16&255,c[u++]=e>>8&255,c[u++]=255&e;return 2===l&&(e=r[t.charCodeAt(n)]<<2|r[t.charCodeAt(n+1)]>>4,c[u++]=255&e),1===l&&(e=r[t.charCodeAt(n)]<<10|r[t.charCodeAt(n+1)]<<4|r[t.charCodeAt(n+2)]>>2,c[u++]=e>>8&255,c[u++]=255&e),c},e.fromByteArray=function(t){for(var e,r=t.length,i=r%3,a=[],o=16383,s=0,c=r-i;sc?c:s+o));return 1===i?(e=t[r-1],a.push(n[e>>2]+n[e<<4&63]+"==")):2===i&&(e=(t[r-2]<<8)+t[r-1],a.push(n[e>>10]+n[e>>4&63]+n[e<<2&63]+"=")),a.join("")};for(var n=[],r=[],i="undefined"!==typeof Uint8Array?Uint8Array:Array,a="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",o=0;o<64;++o)n[o]=a[o],r[a.charCodeAt(o)]=o;function s(t){var e=t.length;if(e%4>0)throw new Error("Invalid string. 
Length must be a multiple of 4");var n=t.indexOf("=");return-1===n&&(n=e),[n,n===e?0:4-n%4]}function l(t,e,r){for(var i,a,o=[],s=e;s>18&63]+n[a>>12&63]+n[a>>6&63]+n[63&a]);return o.join("")}r["-".charCodeAt(0)]=62,r["_".charCodeAt(0)]=63},91358:function(t){"use strict";function e(t,e,n,r,i){for(var a=i+1;r<=i;){var o=r+i>>>1,s=t[o];(void 0!==n?n(s,e):s-e)>=0?(a=o,i=o-1):r=o+1}return a}function n(t,e,n,r,i){for(var a=i+1;r<=i;){var o=r+i>>>1,s=t[o];(void 0!==n?n(s,e):s-e)>0?(a=o,i=o-1):r=o+1}return a}function r(t,e,n,r,i){for(var a=r-1;r<=i;){var o=r+i>>>1,s=t[o];(void 0!==n?n(s,e):s-e)<0?(a=o,r=o+1):i=o-1}return a}function i(t,e,n,r,i){for(var a=r-1;r<=i;){var o=r+i>>>1,s=t[o];(void 0!==n?n(s,e):s-e)<=0?(a=o,r=o+1):i=o-1}return a}function a(t,e,n,r,i){for(;r<=i;){var a=r+i>>>1,o=t[a],s=void 0!==n?n(o,e):o-e;if(0===s)return a;s<=0?r=a+1:i=a-1}return-1}function o(t,e,n,r,i,a){return"function"===typeof n?a(t,e,n,void 0===r?0:0|r,void 0===i?t.length-1:0|i):a(t,e,void 0,void 0===n?0:0|n,void 0===r?t.length-1:0|r)}t.exports={ge:function(t,n,r,i,a){return o(t,n,r,i,a,e)},gt:function(t,e,r,i,a){return o(t,e,r,i,a,n)},lt:function(t,e,n,i,a){return o(t,e,n,i,a,r)},le:function(t,e,n,r,a){return o(t,e,n,r,a,i)},eq:function(t,e,n,r,i){return o(t,e,n,r,i,a)}}},13547:function(t,e){"use strict";function n(t){var e=32;return(t&=-t)&&e--,65535&t&&(e-=16),16711935&t&&(e-=8),252645135&t&&(e-=4),858993459&t&&(e-=2),1431655765&t&&(e-=1),e}e.INT_BITS=32,e.INT_MAX=2147483647,e.INT_MIN=-1<<31,e.sign=function(t){return(t>0)-(t<0)},e.abs=function(t){var e=t>>31;return(t^e)-e},e.min=function(t,e){return e^(t^e)&-(t65535)<<4,e|=n=((t>>>=e)>255)<<3,e|=n=((t>>>=n)>15)<<2,(e|=n=((t>>>=n)>3)<<1)|(t>>>=n)>>1},e.log10=function(t){return t>=1e9?9:t>=1e8?8:t>=1e7?7:t>=1e6?6:t>=1e5?5:t>=1e4?4:t>=1e3?3:t>=100?2:t>=10?1:0},e.popCount=function(t){return 16843009*((t=(858993459&(t-=t>>>1&1431655765))+(t>>>2&858993459))+(t>>>4)&252645135)>>>24},e.countTrailingZeros=n,e.nextPow2=function(t){return t+=0===t,--t,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,1+(t|=t>>>16)},e.prevPow2=function(t){return t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,(t|=t>>>16)-(t>>>1)},e.parity=function(t){return t^=t>>>16,t^=t>>>8,t^=t>>>4,27030>>>(t&=15)&1};var r=new Array(256);!function(t){for(var e=0;e<256;++e){var n=e,r=e,i=7;for(n>>>=1;n;n>>>=1)r<<=1,r|=1&n,--i;t[e]=r<>>8&255]<<16|r[t>>>16&255]<<8|r[t>>>24&255]},e.interleave2=function(t,e){return(t=1431655765&((t=858993459&((t=252645135&((t=16711935&((t&=65535)|t<<8))|t<<4))|t<<2))|t<<1))|(e=1431655765&((e=858993459&((e=252645135&((e=16711935&((e&=65535)|e<<8))|e<<4))|e<<2))|e<<1))<<1},e.deinterleave2=function(t,e){return(t=65535&((t=16711935&((t=252645135&((t=858993459&((t=t>>>e&1431655765)|t>>>1))|t>>>2))|t>>>4))|t>>>16))<<16>>16},e.interleave3=function(t,e,n){return t=1227133513&((t=3272356035&((t=251719695&((t=4278190335&((t&=1023)|t<<16))|t<<8))|t<<4))|t<<2),(t|=(e=1227133513&((e=3272356035&((e=251719695&((e=4278190335&((e&=1023)|e<<16))|e<<8))|e<<4))|e<<2))<<1)|(n=1227133513&((n=3272356035&((n=251719695&((n=4278190335&((n&=1023)|n<<16))|n<<8))|n<<4))|n<<2))<<2},e.deinterleave3=function(t,e){return(t=1023&((t=4278190335&((t=251719695&((t=3272356035&((t=t>>>e&1227133513)|t>>>2))|t>>>4))|t>>>8))|t>>>16))<<22>>22},e.nextCombination=function(t){var e=t|t-1;return e+1|(~e&-~e)-1>>>n(t)+1}},44781:function(t,e,n){"use strict";var r=n(53435);t.exports=function(t,e){e||(e={});var 
n,o,s,l,c,u,f,h,d,p,g,v=null==e.cutoff?.25:e.cutoff,m=null==e.radius?8:e.radius,y=e.channel||0;if(ArrayBuffer.isView(t)||Array.isArray(t)){if(!e.width||!e.height)throw Error("For raw data width and height should be provided by options");n=e.width,o=e.height,l=t,u=e.stride?e.stride:Math.floor(t.length/n/o)}else window.HTMLCanvasElement&&t instanceof window.HTMLCanvasElement?(f=(h=t).getContext("2d"),n=h.width,o=h.height,l=(d=f.getImageData(0,0,n,o)).data,u=4):window.CanvasRenderingContext2D&&t instanceof window.CanvasRenderingContext2D?(f=t,n=(h=t.canvas).width,o=h.height,l=(d=f.getImageData(0,0,n,o)).data,u=4):window.ImageData&&t instanceof window.ImageData&&(d=t,n=t.width,o=t.height,l=d.data,u=4);if(s=Math.max(n,o),window.Uint8ClampedArray&&l instanceof window.Uint8ClampedArray||window.Uint8Array&&l instanceof window.Uint8Array)for(c=l,l=Array(n*o),p=0,g=c.length;p-1?i(n):n}},68222:function(t,e,n){"use strict";var r=n(77575),i=n(68318),a=i("%Function.prototype.apply%"),o=i("%Function.prototype.call%"),s=i("%Reflect.apply%",!0)||r.call(o,a),l=i("%Object.getOwnPropertyDescriptor%",!0),c=i("%Object.defineProperty%",!0),u=i("%Math.max%");if(c)try{c({},"a",{value:1})}catch(h){c=null}t.exports=function(t){var e=s(r,o,arguments);return l&&c&&l(e,"length").configurable&&c(e,"length",{value:1+u(0,t.length-(arguments.length-1))}),e};var f=function(){return s(r,a,arguments)};c?c(t.exports,"apply",{value:f}):t.exports.apply=f},53435:function(t){t.exports=function(t,e,n){return en?n:t:te?e:t}},6475:function(t,e,n){"use strict";var r=n(53435);function i(t,e){null==e&&(e=!0);var n=t[0],i=t[1],a=t[2],o=t[3];return null==o&&(o=e?1:255),e&&(n*=255,i*=255,a*=255,o*=255),16777216*(n=255&r(n,0,255))+((i=255&r(i,0,255))<<16)+((a=255&r(a,0,255))<<8)+(o=255&r(o,0,255))}t.exports=i,t.exports.to=i,t.exports.from=function(t,e){var n=(t=+t)>>>24,r=(16711680&t)>>>16,i=(65280&t)>>>8,a=255&t;return!1===e?[n,r,i,a]:[n/255,r/255,i/255,a/255]}},76857:function(t){"use 
strict";t.exports={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]}},25075:function(t,e,n){"use strict";var 
r=n(36652),i=n(53435),a=n(90660);t.exports=function(t,e){"float"!==e&&e||(e="array"),"uint"===e&&(e="uint8"),"uint_clamped"===e&&(e="uint8_clamped");var n=new(a(e))(4),o="uint8"!==e&&"uint8_clamped"!==e;return t.length&&"string"!==typeof t||((t=r(t))[0]/=255,t[1]/=255,t[2]/=255),function(t){return t instanceof Uint8Array||t instanceof Uint8ClampedArray||!!(Array.isArray(t)&&(t[0]>1||0===t[0])&&(t[1]>1||0===t[1])&&(t[2]>1||0===t[2])&&(!t[3]||t[3]>1))}(t)?(n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=null!=t[3]?t[3]:255,o&&(n[0]/=255,n[1]/=255,n[2]/=255,n[3]/=255),n):(o?(n[0]=t[0],n[1]=t[1],n[2]=t[2],n[3]=null!=t[3]?t[3]:1):(n[0]=i(Math.floor(255*t[0]),0,255),n[1]=i(Math.floor(255*t[1]),0,255),n[2]=i(Math.floor(255*t[2]),0,255),n[3]=null==t[3]?255:i(Math.floor(255*t[3]),0,255)),n)}},90736:function(t,e,n){"use strict";var r=n(76857),i=n(10973),a=n(46775);t.exports=function(t){var e,s,l=[],c=1;if("string"===typeof t)if(r[t])l=r[t].slice(),s="rgb";else if("transparent"===t)c=0,s="rgb",l=[0,0,0];else if(/^#[A-Fa-f0-9]+$/.test(t)){var u=t.slice(1);c=1,(d=u.length)<=4?(l=[parseInt(u[0]+u[0],16),parseInt(u[1]+u[1],16),parseInt(u[2]+u[2],16)],4===d&&(c=parseInt(u[3]+u[3],16)/255)):(l=[parseInt(u[0]+u[1],16),parseInt(u[2]+u[3],16),parseInt(u[4]+u[5],16)],8===d&&(c=parseInt(u[6]+u[7],16)/255)),l[0]||(l[0]=0),l[1]||(l[1]=0),l[2]||(l[2]=0),s="rgb"}else if(e=/^((?:rgb|hs[lvb]|hwb|cmyk?|xy[zy]|gray|lab|lchu?v?|[ly]uv|lms)a?)\s*\(([^\)]*)\)/.exec(t)){var f=e[1],h="rgb"===f;s=u=f.replace(/a$/,"");var d="cmyk"===u?4:"gray"===u?1:3;l=e[2].trim().split(/\s*,\s*/).map((function(t,e){if(/%$/.test(t))return e===d?parseFloat(t)/100:"rgb"===u?255*parseFloat(t)/100:parseFloat(t);if("h"===u[e]){if(/deg$/.test(t))return parseFloat(t);if(void 0!==o[t])return o[t]}return parseFloat(t)})),f===u&&l.push(1),c=h||void 0===l[d]?1:l[d],l=l.slice(0,d)}else t.length>10&&/[0-9](?:\s|\/)/.test(t)&&(l=t.match(/([0-9]+)/g).map((function(t){return parseFloat(t)})),s=t.match(/([a-z])/gi).join("").toLowerCase());else if(isNaN(t))if(i(t)){var p=a(t.r,t.red,t.R,null);null!==p?(s="rgb",l=[p,a(t.g,t.green,t.G),a(t.b,t.blue,t.B)]):(s="hsl",l=[a(t.h,t.hue,t.H),a(t.s,t.saturation,t.S),a(t.l,t.lightness,t.L,t.b,t.brightness)]),c=a(t.a,t.alpha,t.opacity,1),null!=t.opacity&&(c/=100)}else(Array.isArray(t)||n.g.ArrayBuffer&&ArrayBuffer.isView&&ArrayBuffer.isView(t))&&(l=[t[0],t[1],t[2]],s="rgb",c=4===t.length?t[3]:1);else s="rgb",l=[t>>>16,(65280&t)>>>8,255&t];return{space:s,values:l,alpha:c}};var o={red:0,orange:60,yellow:120,green:180,blue:240,purple:300}},36652:function(t,e,n){"use strict";var r=n(90736),i=n(80009),a=n(53435);t.exports=function(t){var e,n=r(t);return n.space?((e=Array(3))[0]=a(n.values[0],0,255),e[1]=a(n.values[1],0,255),e[2]=a(n.values[2],0,255),"h"===n.space[0]&&(e=i.rgb(e)),e.push(a(n.alpha,0,1)),e):[]}},80009:function(t,e,n){"use strict";var r=n(6866);t.exports={name:"hsl",min:[0,0,0],max:[360,100,100],channel:["hue","saturation","lightness"],alias:["HSL"],rgb:function(t){var e,n,r,i,a,o=t[0]/360,s=t[1]/100,l=t[2]/100;if(0===s)return[a=255*l,a,a];e=2*l-(n=l<.5?l*(1+s):l+s-l*s),i=[0,0,0];for(var c=0;c<3;c++)(r=o+1/3*-(c-1))<0?r++:r>1&&r--,a=6*r<1?e+6*(n-e)*r:2*r<1?n:3*r<2?e+(n-e)*(2/3-r)*6:e,i[c]=255*a;return i}},r.hsl=function(t){var e,n,r=t[0]/255,i=t[1]/255,a=t[2]/255,o=Math.min(r,i,a),s=Math.max(r,i,a),l=s-o;return s===o?e=0:r===s?e=(i-a)/l:i===s?e=2+(a-r)/l:a===s&&(e=4+(r-i)/l),(e=Math.min(60*e,360))<0&&(e+=360),n=(o+s)/2,[e,100*(s===o?0:n<=.5?l/(s+o):l/(2-s-o)),100*n]}},6866:function(t){"use 
strict";t.exports={name:"rgb",min:[0,0,0],max:[255,255,255],channel:["red","green","blue"],alias:["RGB"]}},24138:function(t){t.exports={AFG:"afghan",ALA:"\\b\\wland",ALB:"albania",DZA:"algeria",ASM:"^(?=.*americ).*samoa",AND:"andorra",AGO:"angola",AIA:"anguill?a",ATA:"antarctica",ATG:"antigua",ARG:"argentin",ARM:"armenia",ABW:"^(?!.*bonaire).*\\baruba",AUS:"australia",AUT:"^(?!.*hungary).*austria|\\baustri.*\\bemp",AZE:"azerbaijan",BHS:"bahamas",BHR:"bahrain",BGD:"bangladesh|^(?=.*east).*paki?stan",BRB:"barbados",BLR:"belarus|byelo",BEL:"^(?!.*luxem).*belgium",BLZ:"belize|^(?=.*british).*honduras",BEN:"benin|dahome",BMU:"bermuda",BTN:"bhutan",BOL:"bolivia",BES:"^(?=.*bonaire).*eustatius|^(?=.*carib).*netherlands|\\bbes.?islands",BIH:"herzegovina|bosnia",BWA:"botswana|bechuana",BVT:"bouvet",BRA:"brazil",IOT:"british.?indian.?ocean",BRN:"brunei",BGR:"bulgaria",BFA:"burkina|\\bfaso|upper.?volta",BDI:"burundi",CPV:"verde",KHM:"cambodia|kampuchea|khmer",CMR:"cameroon",CAN:"canada",CYM:"cayman",CAF:"\\bcentral.african.republic",TCD:"\\bchad",CHL:"\\bchile",CHN:"^(?!.*\\bmac)(?!.*\\bhong)(?!.*\\btai)(?!.*\\brep).*china|^(?=.*peo)(?=.*rep).*china",CXR:"christmas",CCK:"\\bcocos|keeling",COL:"colombia",COM:"comoro",COG:"^(?!.*\\bdem)(?!.*\\bd[\\.]?r)(?!.*kinshasa)(?!.*zaire)(?!.*belg)(?!.*l.opoldville)(?!.*free).*\\bcongo",COK:"\\bcook",CRI:"costa.?rica",CIV:"ivoire|ivory",HRV:"croatia",CUB:"\\bcuba",CUW:"^(?!.*bonaire).*\\bcura(c|\xe7)ao",CYP:"cyprus",CSK:"czechoslovakia",CZE:"^(?=.*rep).*czech|czechia|bohemia",COD:"\\bdem.*congo|congo.*\\bdem|congo.*\\bd[\\.]?r|\\bd[\\.]?r.*congo|belgian.?congo|congo.?free.?state|kinshasa|zaire|l.opoldville|drc|droc|rdc",DNK:"denmark",DJI:"djibouti",DMA:"dominica(?!n)",DOM:"dominican.rep",ECU:"ecuador",EGY:"egypt",SLV:"el.?salvador",GNQ:"guine.*eq|eq.*guine|^(?=.*span).*guinea",ERI:"eritrea",EST:"estonia",ETH:"ethiopia|abyssinia",FLK:"falkland|malvinas",FRO:"faroe|faeroe",FJI:"fiji",FIN:"finland",FRA:"^(?!.*\\bdep)(?!.*martinique).*france|french.?republic|\\bgaul",GUF:"^(?=.*french).*guiana",PYF:"french.?polynesia|tahiti",ATF:"french.?southern",GAB:"gabon",GMB:"gambia",GEO:"^(?!.*south).*georgia",DDR:"german.?democratic.?republic|democratic.?republic.*germany|east.germany",DEU:"^(?!.*east).*germany|^(?=.*\\bfed.*\\brep).*german",GHA:"ghana|gold.?coast",GIB:"gibraltar",GRC:"greece|hellenic|hellas",GRL:"greenland",GRD:"grenada",GLP:"guadeloupe",GUM:"\\bguam",GTM:"guatemala",GGY:"guernsey",GIN:"^(?!.*eq)(?!.*span)(?!.*bissau)(?!.*portu)(?!.*new).*guinea",GNB:"bissau|^(?=.*portu).*guinea",GUY:"guyana|british.?guiana",HTI:"haiti",HMD:"heard.*mcdonald",VAT:"holy.?see|vatican|papal.?st",HND:"^(?!.*brit).*honduras",HKG:"hong.?kong",HUN:"^(?!.*austr).*hungary",ISL:"iceland",IND:"india(?!.*ocea)",IDN:"indonesia",IRN:"\\biran|persia",IRQ:"\\biraq|mesopotamia",IRL:"(^ireland)|(^republic.*ireland)",IMN:"^(?=.*isle).*\\bman",ISR:"israel",ITA:"italy",JAM:"jamaica",JPN:"japan",JEY:"jersey",JOR:"jordan",KAZ:"kazak",KEN:"kenya|british.?east.?africa|east.?africa.?prot",KIR:"kiribati",PRK:"^(?=.*democrat|people|north|d.*p.*.r).*\\bkorea|dprk|korea.*(d.*p.*r)",KWT:"kuwait",KGZ:"kyrgyz|kirghiz",LAO:"\\blaos?\\b",LVA:"latvia",LBN:"lebanon",LSO:"lesotho|basuto",LBR:"liberia",LBY:"libya",LIE:"liechtenstein",LTU:"lithuania",LUX:"^(?!.*belg).*luxem",MAC:"maca(o|u)",MDG:"madagascar|malagasy",MWI:"malawi|nyasa",MYS:"malaysia",MDV:"maldive",MLI:"\\bmali\\b",MLT:"\\bmalta",MHL:"marshall",MTQ:"martinique",MRT:"mauritania",MUS:"mauritius",MYT:"\\bmayotte",MEX:"\\bmexic",FSM:"fed.*micronesia|micron
esia.*fed",MCO:"monaco",MNG:"mongolia",MNE:"^(?!.*serbia).*montenegro",MSR:"montserrat",MAR:"morocco|\\bmaroc",MOZ:"mozambique",MMR:"myanmar|burma",NAM:"namibia",NRU:"nauru",NPL:"nepal",NLD:"^(?!.*\\bant)(?!.*\\bcarib).*netherlands",ANT:"^(?=.*\\bant).*(nether|dutch)",NCL:"new.?caledonia",NZL:"new.?zealand",NIC:"nicaragua",NER:"\\bniger(?!ia)",NGA:"nigeria",NIU:"niue",NFK:"norfolk",MNP:"mariana",NOR:"norway",OMN:"\\boman|trucial",PAK:"^(?!.*east).*paki?stan",PLW:"palau",PSE:"palestin|\\bgaza|west.?bank",PAN:"panama",PNG:"papua|new.?guinea",PRY:"paraguay",PER:"peru",PHL:"philippines",PCN:"pitcairn",POL:"poland",PRT:"portugal",PRI:"puerto.?rico",QAT:"qatar",KOR:"^(?!.*d.*p.*r)(?!.*democrat)(?!.*people)(?!.*north).*\\bkorea(?!.*d.*p.*r)",MDA:"moldov|b(a|e)ssarabia",REU:"r(e|\xe9)union",ROU:"r(o|u|ou)mania",RUS:"\\brussia|soviet.?union|u\\.?s\\.?s\\.?r|socialist.?republics",RWA:"rwanda",BLM:"barth(e|\xe9)lemy",SHN:"helena",KNA:"kitts|\\bnevis",LCA:"\\blucia",MAF:"^(?=.*collectivity).*martin|^(?=.*france).*martin(?!ique)|^(?=.*french).*martin(?!ique)",SPM:"miquelon",VCT:"vincent",WSM:"^(?!.*amer).*samoa",SMR:"san.?marino",STP:"\\bs(a|\xe3)o.?tom(e|\xe9)",SAU:"\\bsa\\w*.?arabia",SEN:"senegal",SRB:"^(?!.*monte).*serbia",SYC:"seychell",SLE:"sierra",SGP:"singapore",SXM:"^(?!.*martin)(?!.*saba).*maarten",SVK:"^(?!.*cze).*slovak",SVN:"slovenia",SLB:"solomon",SOM:"somali",ZAF:"south.africa|s\\\\..?africa",SGS:"south.?georgia|sandwich",SSD:"\\bs\\w*.?sudan",ESP:"spain",LKA:"sri.?lanka|ceylon",SDN:"^(?!.*\\bs(?!u)).*sudan",SUR:"surinam|dutch.?guiana",SJM:"svalbard",SWZ:"swaziland",SWE:"sweden",CHE:"switz|swiss",SYR:"syria",TWN:"taiwan|taipei|formosa|^(?!.*peo)(?=.*rep).*china",TJK:"tajik",THA:"thailand|\\bsiam",MKD:"macedonia|fyrom",TLS:"^(?=.*leste).*timor|^(?=.*east).*timor",TGO:"togo",TKL:"tokelau",TON:"tonga",TTO:"trinidad|tobago",TUN:"tunisia",TUR:"turkey",TKM:"turkmen",TCA:"turks",TUV:"tuvalu",UGA:"uganda",UKR:"ukrain",ARE:"emirates|^u\\.?a\\.?e\\.?$|united.?arab.?em",GBR:"united.?kingdom|britain|^u\\.?k\\.?$",TZA:"tanzania",USA:"united.?states\\b(?!.*islands)|\\bu\\.?s\\.?a\\.?\\b|^\\s*u\\.?s\\.?\\b(?!.*islands)",UMI:"minor.?outlying.?is",URY:"uruguay",UZB:"uzbek",VUT:"vanuatu|new.?hebrides",VEN:"venezuela",VNM:"^(?!.*republic).*viet.?nam|^(?=.*socialist).*viet.?nam",VGB:"^(?=.*\\bu\\.?\\s?k).*virgin|^(?=.*brit).*virgin|^(?=.*kingdom).*virgin",VIR:"^(?=.*\\bu\\.?\\s?s).*virgin|^(?=.*states).*virgin",WLF:"futuna|wallis",ESH:"western.sahara",YEM:"^(?!.*arab)(?!.*north)(?!.*sana)(?!.*peo)(?!.*dem)(?!.*south)(?!.*aden)(?!.*\\bp\\.?d\\.?r).*yemen",YMD:"^(?=.*peo).*yemen|^(?!.*rep)(?=.*dem).*yemen|^(?=.*south).*yemen|^(?=.*aden).*yemen|^(?=.*\\bp\\.?d\\.?r).*yemen",YUG:"yugoslavia",ZMB:"zambia|northern.?rhodesia",EAZ:"zanzibar",ZWE:"zimbabwe|^(?!.*northern).*rhodesia"}},72791:function(t,e,n){"use strict";t.exports={parse:n(41004),stringify:n(53313)}},63625:function(t,e,n){"use strict";var r=n(40402);t.exports={isSize:function(t){return/^[\d\.]/.test(t)||-1!==t.indexOf("/")||-1!==r.indexOf(t)}}},41004:function(t,e,n){"use strict";var r=n(90448),i=n(38732),a=n(41901),o=n(15659),s=n(96209),l=n(83794),c=n(99011),u=n(63625).isSize;t.exports=h;var f=h.cache={};function h(t){if("string"!==typeof t)throw new Error("Font argument must be a string.");if(f[t])return f[t];if(""===t)throw new Error("Cannot parse an empty string.");if(-1!==a.indexOf(t))return f[t]={system:t};for(var 
e,n={style:"normal",variant:"normal",weight:"normal",stretch:"normal",lineHeight:"normal",size:"1rem",family:["serif"]},h=c(t,/\s+/);e=h.shift();){if(-1!==i.indexOf(e))return["style","variant","weight","stretch"].forEach((function(t){n[t]=e})),f[t]=n;if(-1===s.indexOf(e))if("normal"!==e&&"small-caps"!==e)if(-1===l.indexOf(e)){if(-1===o.indexOf(e)){if(u(e)){var p=c(e,"/");if(n.size=p[0],null!=p[1]?n.lineHeight=d(p[1]):"/"===h[0]&&(h.shift(),n.lineHeight=d(h.shift())),!h.length)throw new Error("Missing required font-family.");return n.family=c(h.join(" "),/\s*,\s*/).map(r),f[t]=n}throw new Error("Unknown or unsupported font token: "+e)}n.weight=e}else n.stretch=e;else n.variant=e;else n.style=e}throw new Error("Missing required font-size.")}function d(t){var e=parseFloat(t);return e.toString()===t?e:t}},53313:function(t,e,n){"use strict";var r=n(71299),i=n(63625).isSize,a=g(n(38732)),o=g(n(41901)),s=g(n(15659)),l=g(n(96209)),c=g(n(83794)),u={normal:1,"small-caps":1},f={serif:1,"sans-serif":1,monospace:1,cursive:1,fantasy:1,"system-ui":1},h="1rem",d="serif";function p(t,e){if(t&&!e[t]&&!a[t])throw Error("Unknown keyword `"+t+"`");return t}function g(t){for(var e={},n=0;ne?1:t>=e?0:NaN}n.d(e,{j2:function(){return r},Fp:function(){return o},J6:function(){return l},TS:function(){return c},VV:function(){return u},w6:function(){return f},Sm:function(){return h}});var i=function(t){var e;return 1===t.length&&(e=t,t=function(t,n){return r(e(t),n)}),{left:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)<0?r=a+1:i=a}return r},right:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)>0?i=a:r=a+1}return r}}}(r),a=(i.right,i.left,Array.prototype);function o(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++ar&&(r=n)}else for(;++a=n)for(r=n;++ar&&(r=n);return r}function s(t){return null===t?NaN:+t}function l(t,e){var n,r=t.length,i=r,a=-1,o=0;if(null==e)for(;++a=0;)for(e=(r=t[i]).length;--e>=0;)n[--o]=r[e];return n}function u(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++an&&(r=n)}else for(;++a=n)for(r=n;++an&&(r=n);return r}function f(t,e,n){t=+t,e=+e,n=(i=arguments.length)<2?(e=t,t=0,1):i<3?1:+n;for(var r=-1,i=0|Math.max(0,Math.ceil((e-t)/n)),a=new Array(i);++r=r.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var c,u,f,h=-1,d=n.length,p=r[i++],g=o(),v=s();++hr.length)return t;var a,o=i[n-1];return null!=e&&n>=r.length?a=t.entries():(a=[],t.each((function(t,e){a.push({key:e,values:s(t,n)})}))),null!=o?a.sort((function(t,e){return o(t.key,e.key)})):a}return n={object:function(t){return a(t,0,l,c)},map:function(t){return a(t,0,u,f)},entries:function(t){return s(a(t,0,u,f),0)},key:function(t){return r.push(t),n},sortKeys:function(t){return i[r.length-1]=t,n},sortValues:function(e){return t=e,n},rollup:function(t){return e=t,n}}}function l(){return{}}function c(t,e,n){t[e]=n}function u(){return o()}function f(t,e,n){t.set(e,n)}function h(){}var d=o.prototype;h.prototype=function(t,e){var n=new h;if(t instanceof h)t.each((function(t){n.add(t)}));else if(t){var r=-1,i=t.length;if(null==e)for(;++r=(a=(g+m)/2))?g=a:m=a,(u=n>=(o=(v+y)/2))?v=o:y=o,i=d,!(d=d[f=u<<1|c]))return i[f]=p,t;if(s=+t._x.call(null,d.data),l=+t._y.call(null,d.data),e===s&&n===l)return p.next=d,i?i[f]=p:t._root=p,t;do{i=i?i[f]=new Array(4):t._root=new Array(4),(c=e>=(a=(g+m)/2))?g=a:m=a,(u=n>=(o=(v+y)/2))?v=o:y=o}while((f=u<<1|c)===(h=(l>=o)<<1|s>=a));return i[h]=d,i[f]=p,t}function s(t,e,n,r,i){this.node=t,this.x0=e,this.y0=n,this.x1=r,this.y1=i}function 
l(t){return t[0]}function c(t){return t[1]}function u(t,e,n){var r=new f(null==e?l:e,null==n?c:n,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function f(t,e,n,r,i,a){this._x=t,this._y=e,this._x0=n,this._y0=r,this._x1=i,this._y1=a,this._root=void 0}function h(t){for(var e={data:t.data},n=e;t=t.next;)n=n.next={data:t.data};return e}n.r(e),n.d(e,{forceCenter:function(){return r},forceCollide:function(){return v},forceLink:function(){return x},forceManyBody:function(){return K},forceRadial:function(){return Z},forceSimulation:function(){return X},forceX:function(){return J},forceY:function(){return Q}});var d=u.prototype=f.prototype;function p(t){return t.x+t.vx}function g(t){return t.y+t.vy}function v(t){var e,n,r=1,o=1;function s(){for(var t,i,s,c,f,h,d,v=e.length,m=0;mc+p||if+p||os.index){var g=c-l.x-l.vx,v=f-l.y-l.vy,m=g*g+v*v;mt.r&&(t.r=t[e].r)}function c(){if(e){var r,i,a=e.length;for(n=new Array(a),r=0;rf&&(f=r),ih&&(h=i));if(c>f||u>h)return this;for(this.cover(c,u).cover(f,h),n=0;nt||t>=i||r>e||e>=a;)switch(s=(ed||(a=c.y0)>p||(o=c.x1)=y)<<1|t>=m)&&(c=g[g.length-1],g[g.length-1]=g[g.length-1-u],g[g.length-1-u]=c)}else{var b=t-+this._x.call(null,v.data),x=e-+this._y.call(null,v.data),w=b*b+x*x;if(w=(s=(p+v)/2))?p=s:v=s,(u=o>=(l=(g+m)/2))?g=l:m=l,e=d,!(d=d[f=u<<1|c]))return this;if(!d.length)break;(e[f+1&3]||e[f+2&3]||e[f+3&3])&&(n=e,h=f)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):e?(i?e[f]=i:delete e[f],(d=e[0]||e[1]||e[2]||e[3])&&d===(e[3]||e[2]||e[1]||e[0])&&!d.length&&(n?n[h]=d:this._root=d),this):(this._root=i,this)},d.removeAll=function(t){for(var e=0,n=t.length;e=0&&(e=t.slice(n+1),t=t.slice(0,n)),t&&!r.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}}))),o=-1,s=a.length;if(!(arguments.length<2)){if(null!=e&&"function"!==typeof e)throw new Error("invalid callback: "+e);for(;++o0)for(var n,r,i=new Array(n),a=0;a=0&&e._call.call(null,t),e=e._next;--C}()}finally{C=0,function(){for(var t,e,n=A,r=1/0;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:A=e);S=t,q(r)}(),D=0}}function V(){var t=z.now(),e=t-I;e>L&&(R-=e,I=t)}function q(t){C||(P&&(P=clearTimeout(P)),t-D>24?(t<1/0&&(P=setTimeout(H,t-z.now()-R)),O&&(O=clearInterval(O))):(O||(I=z.now(),O=setInterval(V,L)),C=1,N(H)))}function G(t){return t.x}function W(t){return t.y}B.prototype=U.prototype={constructor:B,restart:function(t,e,n){if("function"!==typeof t)throw new TypeError("callback is not a function");n=(null==n?j():+n)+(null==e?0:+e),this._next||S===this||(S?S._next=this:A=this,S=this),this._call=t,this._time=n,q()},stop:function(){this._call&&(this._call=null,this._time=1/0,q())}};var Y=10,$=Math.PI*(3-Math.sqrt(5));function X(t){var e,n=1,r=.001,i=1-Math.pow(r,1/300),a=0,o=.6,s=(0,m.UI)(),l=U(u),c=E("tick","end");function u(){f(),c.call("tick",e),n1?(null==n?s.remove(t):s.set(t,d(n)),e):s.get(t)},find:function(e,n,r){var i,a,o,s,l,c=0,u=t.length;for(null==r?r=1/0:r*=r,c=0;c1?(c.on(t,n),e):c.on(t)}}}function K(){var t,e,n,r,o=i(-30),s=1,l=1/0,c=.81;function f(r){var i,a=t.length,o=u(t,G,W).visitAfter(d);for(n=r,i=0;i=l)){(t.data!==e||t.next)&&(0===f&&(p+=(f=a())*f),0===h&&(p+=(h=a())*h),p1?r[0]+r.slice(2):r,+t.slice(n+1)]}n.d(e,{WU:function(){return h},FF:function(){return g}});var i,a=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function o(t){if(!(e=a.exec(t)))throw new Error("invalid format: "+t);var e;return new 
s({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function s(t){this.fill=void 0===t.fill?" ":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function l(t,e){var n=r(t,e);if(!n)return t+"";var i=n[0],a=n[1];return a<0?"0."+new Array(-a).join("0")+i:i.length>a+1?i.slice(0,a+1)+"."+i.slice(a+1):i+new Array(a-i.length+2).join("0")}o.prototype=s.prototype,s.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var c={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return l(100*t,e)},r:l,s:function(t,e){var n=r(t,e);if(!n)return t+"";var a=n[0],o=n[1],s=o-(i=3*Math.max(-8,Math.min(8,Math.floor(o/3))))+1,l=a.length;return s===l?a:s>l?a+new Array(s-l+1).join("0"):s>0?a.slice(0,s)+"."+a.slice(s):"0."+new Array(1-s).join("0")+r(t,Math.max(0,e+s-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}};function u(t){return t}var f,h,d=Array.prototype.map,p=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"];function g(t){var e,n,a=void 0===t.grouping||void 0===t.thousands?u:(e=d.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var i=t.length,a=[],o=0,s=e[0],l=0;i>0&&s>0&&(l+s+1>r&&(s=Math.max(1,r-l)),a.push(t.substring(i-=s,i+s)),!((l+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(n)}),s=void 0===t.currency?"":t.currency[0]+"",l=void 0===t.currency?"":t.currency[1]+"",f=void 0===t.decimal?".":t.decimal+"",h=void 0===t.numerals?u:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(d.call(t.numerals,String)),g=void 0===t.percent?"%":t.percent+"",v=void 0===t.minus?"-":t.minus+"",m=void 0===t.nan?"NaN":t.nan+"";function y(t){var e=(t=o(t)).fill,n=t.align,r=t.sign,u=t.symbol,d=t.zero,y=t.width,b=t.comma,x=t.precision,w=t.trim,_=t.type;"n"===_?(b=!0,_="g"):c[_]||(void 0===x&&(x=12),w=!0,_="g"),(d||"0"===e&&"="===n)&&(d=!0,e="0",n="=");var k="$"===u?s:"#"===u&&/[boxX]/.test(_)?"0"+_.toLowerCase():"",T="$"===u?l:/[%p]/.test(_)?g:"",M=c[_],A=/[defgprs%]/.test(_);function S(t){var o,s,l,c=k,u=T;if("c"===_)u=M(t)+u,t="";else{var g=(t=+t)<0||1/t<0;if(t=isNaN(t)?m:M(Math.abs(t),x),w&&(t=function(t){t:for(var e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),g&&0===+t&&"+"!==r&&(g=!1),c=(g?"("===r?r:v:"-"===r||"("===r?"":r)+c,u=("s"===_?p[8+i/3]:"")+u+(g&&"("===r?")":""),A)for(o=-1,s=t.length;++o(l=t.charCodeAt(o))||l>57){u=(46===l?f+t.slice(o+1):t.slice(o))+u,t=t.slice(0,o);break}}b&&!d&&(t=a(t,1/0));var S=c.length+t.length+u.length,E=S>1)+c+t+u+E.slice(S);break;default:t=E+c+t+u}return h(t)}return x=void 
0===x?6:/[gprs]/.test(_)?Math.max(1,Math.min(21,x)):Math.max(0,Math.min(20,x)),S.toString=function(){return t+""},S}return{format:y,formatPrefix:function(t,e){var n,i=y(((t=o(t)).type="f",t)),a=3*Math.max(-8,Math.min(8,Math.floor((n=e,((n=r(Math.abs(n)))?n[1]:NaN)/3)))),s=Math.pow(10,-a),l=p[8+a/3];return function(t){return i(s*t)+l}}}}f=g({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"}),h=f.format,f.formatPrefix},65704:function(t,e,n){"use strict";n.r(e),n.d(e,{geoAiry:function(){return R},geoAiryRaw:function(){return D},geoAitoff:function(){return N},geoAitoffRaw:function(){return z},geoArmadillo:function(){return F},geoArmadilloRaw:function(){return j},geoAugust:function(){return U},geoAugustRaw:function(){return B},geoBaker:function(){return G},geoBakerRaw:function(){return q},geoBerghaus:function(){return $},geoBerghausRaw:function(){return Y},geoBertin1953:function(){return nt},geoBertin1953Raw:function(){return et},geoBoggs:function(){return ut},geoBoggsRaw:function(){return ct},geoBonne:function(){return gt},geoBonneRaw:function(){return pt},geoBottomley:function(){return mt},geoBottomleyRaw:function(){return vt},geoBromley:function(){return bt},geoBromleyRaw:function(){return yt},geoChamberlin:function(){return Et},geoChamberlinAfrica:function(){return St},geoChamberlinRaw:function(){return Mt},geoCollignon:function(){return Pt},geoCollignonRaw:function(){return Ct},geoCraig:function(){return Lt},geoCraigRaw:function(){return Ot},geoCraster:function(){return Rt},geoCrasterRaw:function(){return Dt},geoCylindricalEqualArea:function(){return Nt},geoCylindricalEqualAreaRaw:function(){return zt},geoCylindricalStereographic:function(){return Ft},geoCylindricalStereographicRaw:function(){return jt},geoEckert1:function(){return Ut},geoEckert1Raw:function(){return Bt},geoEckert2:function(){return Vt},geoEckert2Raw:function(){return Ht},geoEckert3:function(){return Gt},geoEckert3Raw:function(){return qt},geoEckert4:function(){return Yt},geoEckert4Raw:function(){return Wt},geoEckert5:function(){return Xt},geoEckert5Raw:function(){return $t},geoEckert6:function(){return Zt},geoEckert6Raw:function(){return Kt},geoEisenlohr:function(){return te},geoEisenlohrRaw:function(){return Qt},geoFahey:function(){return re},geoFaheyRaw:function(){return ne},geoFoucaut:function(){return ae},geoFoucautRaw:function(){return ie},geoFoucautSinusoidal:function(){return se},geoFoucautSinusoidalRaw:function(){return oe},geoGilbert:function(){return he},geoGingery:function(){return ve},geoGingeryRaw:function(){return de},geoGinzburg4:function(){return be},geoGinzburg4Raw:function(){return ye},geoGinzburg5:function(){return we},geoGinzburg5Raw:function(){return xe},geoGinzburg6:function(){return ke},geoGinzburg6Raw:function(){return _e},geoGinzburg8:function(){return Me},geoGinzburg8Raw:function(){return Te},geoGinzburg9:function(){return Se},geoGinzburg9Raw:function(){return Ae},geoGringorten:function(){return Pe},geoGringortenQuincuncial:function(){return ai},geoGringortenRaw:function(){return Ce},geoGuyou:function(){return De},geoGuyouRaw:function(){return Ie},geoHammer:function(){return J},geoHammerRaw:function(){return K},geoHammerRetroazimuthal:function(){return je},geoHammerRetroazimuthalRaw:function(){return ze},geoHealpix:function(){return Ye},geoHealpixRaw:function(){return Ve},geoHill:function(){return Xe},geoHillRaw:function(){return $e},geoHomolosine:function(){return en},geoHomolosineRaw:function(){return tn},geoHufnagel:function(){return rn},geoHufnagelRaw:function(){return 
nn},geoHyperelliptical:function(){return ln},geoHyperellipticalRaw:function(){return sn},geoInterrupt:function(){return fn},geoInterruptedBoggs:function(){return dn},geoInterruptedHomolosine:function(){return gn},geoInterruptedMollweide:function(){return mn},geoInterruptedMollweideHemispheres:function(){return bn},geoInterruptedQuarticAuthalic:function(){return fr},geoInterruptedSinuMollweide:function(){return wn},geoInterruptedSinusoidal:function(){return kn},geoKavrayskiy7:function(){return Mn},geoKavrayskiy7Raw:function(){return Tn},geoLagrange:function(){return Sn},geoLagrangeRaw:function(){return An},geoLarrivee:function(){return Pn},geoLarriveeRaw:function(){return Cn},geoLaskowski:function(){return Ln},geoLaskowskiRaw:function(){return On},geoLittrow:function(){return Dn},geoLittrowRaw:function(){return In},geoLoximuthal:function(){return zn},geoLoximuthalRaw:function(){return Rn},geoMiller:function(){return jn},geoMillerRaw:function(){return Nn},geoModifiedStereographic:function(){return Kn},geoModifiedStereographicAlaska:function(){return Gn},geoModifiedStereographicGs48:function(){return Wn},geoModifiedStereographicGs50:function(){return Yn},geoModifiedStereographicLee:function(){return Xn},geoModifiedStereographicMiller:function(){return $n},geoModifiedStereographicRaw:function(){return Fn},geoMollweide:function(){return ot},geoMollweideRaw:function(){return at},geoMtFlatPolarParabolic:function(){return tr},geoMtFlatPolarParabolicRaw:function(){return Qn},geoMtFlatPolarQuartic:function(){return nr},geoMtFlatPolarQuarticRaw:function(){return er},geoMtFlatPolarSinusoidal:function(){return ir},geoMtFlatPolarSinusoidalRaw:function(){return rr},geoNaturalEarth:function(){return ar.Z},geoNaturalEarth2:function(){return sr},geoNaturalEarth2Raw:function(){return or},geoNaturalEarthRaw:function(){return ar.K},geoNellHammer:function(){return cr},geoNellHammerRaw:function(){return lr},geoNicolosi:function(){return dr},geoNicolosiRaw:function(){return hr},geoPatterson:function(){return Tr},geoPattersonRaw:function(){return kr},geoPeirceQuincuncial:function(){return oi},geoPierceQuincuncial:function(){return oi},geoPolyconic:function(){return Ar},geoPolyconicRaw:function(){return Mr},geoPolyhedral:function(){return Ir},geoPolyhedralButterfly:function(){return Br},geoPolyhedralCollignon:function(){return Vr},geoPolyhedralWaterman:function(){return qr},geoProject:function(){return Xr},geoQuantize:function(){return si},geoQuincuncial:function(){return ii},geoRectangularPolyconic:function(){return ci},geoRectangularPolyconicRaw:function(){return li},geoRobinson:function(){return hi},geoRobinsonRaw:function(){return fi},geoSatellite:function(){return pi},geoSatelliteRaw:function(){return di},geoSinuMollweide:function(){return Qe},geoSinuMollweideRaw:function(){return Je},geoSinusoidal:function(){return dt},geoSinusoidalRaw:function(){return ht},geoStitch:function(){return Ii},geoTimes:function(){return Ri},geoTimesRaw:function(){return Di},geoTwoPointAzimuthal:function(){return Fi},geoTwoPointAzimuthalRaw:function(){return Ni},geoTwoPointAzimuthalUsa:function(){return ji},geoTwoPointEquidistant:function(){return Hi},geoTwoPointEquidistantRaw:function(){return Bi},geoTwoPointEquidistantUsa:function(){return Ui},geoVanDerGrinten:function(){return qi},geoVanDerGrinten2:function(){return Wi},geoVanDerGrinten2Raw:function(){return Gi},geoVanDerGrinten3:function(){return $i},geoVanDerGrinten3Raw:function(){return Yi},geoVanDerGrinten4:function(){return Ki},geoVanDerGrinten4Raw:function(){return 
Xi},geoVanDerGrintenRaw:function(){return Vi},geoWagner:function(){return Ji},geoWagner4:function(){return ra},geoWagner4Raw:function(){return na},geoWagner6:function(){return aa},geoWagner6Raw:function(){return ia},geoWagner7:function(){return Qi},geoWagnerRaw:function(){return Zi},geoWiechel:function(){return sa},geoWiechelRaw:function(){return oa},geoWinkel3:function(){return ca},geoWinkel3Raw:function(){return la}});var r=n(15002),i=Math.abs,a=Math.atan,o=Math.atan2,s=(Math.ceil,Math.cos),l=Math.exp,c=Math.floor,u=Math.log,f=Math.max,h=Math.min,d=Math.pow,p=Math.round,g=Math.sign||function(t){return t>0?1:t<0?-1:0},v=Math.sin,m=Math.tan,y=1e-6,b=1e-12,x=Math.PI,w=x/2,_=x/4,k=Math.SQRT1_2,T=O(2),M=O(x),A=2*x,S=180/x,E=x/180;function C(t){return t>1?w:t<-1?-w:Math.asin(t)}function P(t){return t>1?0:t<-1?x:Math.acos(t)}function O(t){return t>0?Math.sqrt(t):0}function L(t){return(l(t)-l(-t))/2}function I(t){return(l(t)+l(-t))/2}function D(t){var e=m(t/2),n=2*u(s(t/2))/(e*e);function r(t,e){var r=s(t),i=s(e),a=v(e),o=i*r,l=-((1-o?u((1+o)/2)/(1-o):-.5)+n/(1+o));return[l*i*v(t),l*a]}return r.invert=function(e,r){var a,l=O(e*e+r*r),c=-t/2,f=50;if(!l)return[0,0];do{var h=c/2,d=s(h),p=v(h),g=p/d,m=-u(i(d));c-=a=(2/g*m-n*g-l)/(-m/(p*p)+1-n/(2*d*d))*(d<0?.7:1)}while(i(a)>y&&--f>0);var b=v(c);return[o(e*b,l*s(c)),C(r*b/l)]},r}function R(){var t=w,e=(0,r.r)(D),n=e(t);return n.radius=function(n){return arguments.length?e(t=n*E):t*S},n.scale(179.976).clipAngle(147)}function z(t,e){var n=s(e),r=function(t){return t?t/Math.sin(t):1}(P(n*s(t/=2)));return[2*n*v(t)*r,v(e)*r]}function N(){return(0,r.Z)(z).scale(152.63)}function j(t){var e=v(t),n=s(t),r=t>=0?1:-1,a=m(r*t),l=(1+e-n)/2;function c(t,i){var c=s(i),u=s(t/=2);return[(1+c)*v(t),(r*i>-o(u,a)-.001?0:10*-r)+l+v(i)*n-(1+c)*e*u]}return c.invert=function(t,c){var u=0,f=0,h=50;do{var d=s(u),p=v(u),g=s(f),m=v(f),b=1+g,x=b*p-t,w=l+m*n-b*e*d-c,_=b*d/2,k=-p*m,T=e*b*p/2,M=n*g+e*d*m,A=k*T-M*_,S=(w*k-x*M)/A/2,E=(x*T-w*_)/A;i(E)>2&&(E/=2),u-=S,f-=E}while((i(S)>y||i(E)>y)&&--h>0);return r*f>-o(s(u),a)-.001?[2*u,f]:null},c}function F(){var t=20*E,e=t>=0?1:-1,n=m(e*t),i=(0,r.r)(j),a=i(t),l=a.stream;return a.parallel=function(r){return arguments.length?(n=m((e=(t=r*E)>=0?1:-1)*t),i(t)):t*S},a.stream=function(r){var i=a.rotate(),c=l(r),u=(a.rotate([0,0]),l(r)),f=a.precision();return a.rotate(i),c.sphere=function(){u.polygonStart(),u.lineStart();for(var r=-180*e;e*r<180;r+=90*e)u.point(r,90*e);if(t)for(;e*(r-=3*e*f)>=-180;)u.point(r,e*-o(s(r*E/2),n)*S);u.lineEnd(),u.polygonEnd()},c},a.scale(218.695).center([0,28.0974])}function B(t,e){var n=m(e/2),r=O(1-n*n),i=1+r*s(t/=2),a=v(t)*r/i,o=n/i,l=a*a,c=o*o;return[4/3*a*(3+l-3*c),4/3*o*(3+3*l-c)]}function U(){return(0,r.Z)(B).scale(66.1603)}z.invert=function(t,e){if(!(t*t+4*e*e>x*x+y)){var n=t,r=e,a=25;do{var o,l=v(n),c=v(n/2),u=s(n/2),f=v(r),h=s(r),d=v(2*r),p=f*f,g=h*h,m=c*c,b=1-g*u*u,w=b?P(h*u)*O(o=1/b):o=0,_=2*w*h*c-t,k=w*f-e,T=o*(g*m+w*h*u*p),M=o*(.5*l*d-2*w*f*c),A=.25*o*(d*c-w*f*g*l),S=o*(p*u+w*m*h),E=M*A-S*T;if(!E)break;var C=(k*M-_*S)/E,L=(_*A-k*T)/E;n-=C,r-=L}while((i(C)>y||i(L)>y)&&--a>0);return[n,r]}},B.invert=function(t,e){if(e*=3/8,!(t*=3/8)&&i(e)>1)return null;var n=1+t*t+e*e,r=O((n-O(n*n-4*e*e))/2),a=C(r)/3,l=r?function(t){return u(t+O(t*t-1))}(i(e/r))/3:function(t){return u(t+O(t*t+1))}(i(t))/3,c=s(a),f=I(l),h=f*f-c*c;return[2*g(t)*o(L(l)*c,.25-h),2*g(e)*o(f*v(a),.25+h)]};var H=O(8),V=u(1+T);function q(t,e){var n=i(e);return n<_?[t,u(m(_+e/2))]:[t*s(n)*(2*T-1/v(n)),g(e)*(2*T*(n-_)-u(m(n/2)))]}function 
G(){return(0,r.Z)(q).scale(112.314)}q.invert=function(t,e){if((r=i(e))b&&--c>0);return[t/(s(o)*(H-1/v(o))),g(e)*o]};var W=n(17889);function Y(t){var e=2*x/t;function n(t,n){var r=(0,W.N)(t,n);if(i(t)>w){var a=o(r[1],r[0]),l=O(r[0]*r[0]+r[1]*r[1]),c=e*p((a-w)/e)+w,u=o(v(a-=c),2-s(a));a=c+C(x/l*v(u))-u,r[0]=l*s(a),r[1]=l*v(a)}return r}return n.invert=function(t,n){var r=O(t*t+n*n);if(r>w){var i=o(n,t),l=e*p((i-w)/e)+w,c=i>l?-1:1,u=r*s(l-i),f=1/m(c*P((u-x)/O(x*(x-2*u)+r*r)));i=l+2*a((f+c*O(f*f-3))/3),t=r*s(i),n=r*v(i)}return W.N.invert(t,n)},n}function $(){var t=5,e=(0,r.r)(Y),n=e(t),i=n.stream,a=.01,l=-s(a*E),c=v(a*E);return n.lobes=function(n){return arguments.length?e(t=+n):t},n.stream=function(e){var r=n.rotate(),u=i(e),f=(n.rotate([0,0]),i(e));return n.rotate(r),u.sphere=function(){f.polygonStart(),f.lineStart();for(var e=0,n=360/t,r=2*x/t,i=90-180/t,u=w;e0&&i(r)>y);return s<0?NaN:n}function tt(t,e,n){return void 0===e&&(e=40),void 0===n&&(n=b),function(r,a,o,s){var l,c,u;o=void 0===o?0:+o,s=void 0===s?0:+s;for(var f=0;fl)o-=c/=2,s-=u/=2;else{l=g;var v=(o>0?-1:1)*n,m=(s>0?-1:1)*n,y=t(o+v,s),b=t(o,s+m),x=(y[0]-h[0])/v,w=(y[1]-h[1])/v,_=(b[0]-h[0])/m,k=(b[1]-h[1])/m,T=k*x-w*_,M=(i(T)<.5?.5:1)/T;if(o+=c=(p*_-d*k)*M,s+=u=(d*w-p*x)*M,i(c)0&&(i[1]*=1+a/1.5*i[0]*i[0]),i}return e.invert=tt(e),e}function nt(){return(0,r.Z)(et()).rotate([-16.5,-42]).scale(176.57).center([7.93,.09])}function rt(t,e){var n,r=t*v(e),a=30;do{e-=n=(e+v(e)-r)/(1+s(e))}while(i(n)>y&&--a>0);return e/2}function it(t,e,n){function r(r,i){return[t*r*s(i=rt(n,i)),e*v(i)]}return r.invert=function(r,i){return i=C(i/e),[r/(t*s(i)),C((2*i+v(2*i))/n)]},r}Z.invert=function(t,e){var n=2*C(e/2);return[t*s(n/2)/s(n),n]};var at=it(T/w,T,x);function ot(){return(0,r.Z)(at).scale(169.529)}var st=2.00276,lt=1.11072;function ct(t,e){var n=rt(x,e);return[st*t/(1/s(e)+lt/s(n)),(e+T*v(n))/st]}function ut(){return(0,r.Z)(ct).scale(160.857)}function ft(t){var e=0,n=(0,r.r)(t),i=n(e);return i.parallel=function(t){return arguments.length?n(e=t*E):e*S},i}function ht(t,e){return[t*s(e),e]}function dt(){return(0,r.Z)(ht).scale(152.63)}function pt(t){if(!t)return ht;var e=1/m(t);function n(n,r){var i=e+t-r,a=i?n*s(r)/i:i;return[i*v(a),e-i*s(a)]}return n.invert=function(n,r){var i=O(n*n+(r=e-r)*r),a=e+t-i;return[i/s(a)*o(n,r),a]},n}function gt(){return ft(pt).scale(123.082).center([0,26.1441]).parallel(45)}function vt(t){function e(e,n){var r=w-n,i=r?e*t*v(r)/r:r;return[r*v(i)/t,w-r*s(i)]}return e.invert=function(e,n){var r=e*t,i=w-n,a=O(r*r+i*i),s=o(r,i);return[(a?a/v(a):1)*s/t,w-a]},e}function mt(){var t=.5,e=(0,r.r)(vt),n=e(t);return n.fraction=function(n){return arguments.length?e(t=+n):t},n.scale(158.837)}ct.invert=function(t,e){var n,r,a=st*e,o=e<0?-_:_,l=25;do{r=a-T*v(o),o-=n=(v(2*o)+2*o-x*v(r))/(2*s(2*o)+2+x*s(r)*T*s(o))}while(i(n)>y&&--l>0);return r=a-T*v(o),[t*(1/s(r)+lt/s(o))/st,r]},ht.invert=function(t,e){return[t/s(e),e]};var yt=it(1,4/x,x);function bt(){return(0,r.Z)(yt).scale(152.63)}var xt=n(66624),wt=n(49386);function _t(t,e,n,r,a,l){var c,u=s(l);if(i(t)>1||i(l)>1)c=P(n*a+e*r*u);else{var f=v(t/2),h=v(l/2);c=2*C(O(f*f+e*r*h*h))}return i(c)>y?[c,o(r*v(l),e*a-n*r*u)]:[0,0]}function kt(t,e,n){return P((t*t+e*e-n*n)/(2*t*e))}function Tt(t){return t-2*x*c((t+x)/(2*x))}function Mt(t,e,n){for(var r,i=[[t[0],t[1],v(t[1]),s(t[1])],[e[0],e[1],v(e[1]),s(e[1])],[n[0],n[1],v(n[1]),s(n[1])]],a=i[2],o=0;o<3;++o,a=r)r=i[o],a.v=_t(r[1]-a[1],a[3],a[2],r[3],r[2],r[0]-a[0]),a.point=[0,0];var 
l=kt(i[0].v[0],i[2].v[0],i[1].v[0]),c=kt(i[0].v[0],i[1].v[0],i[2].v[0]),u=x-l;i[2].point[1]=0,i[0].point[0]=-(i[1].point[0]=i[0].v[0]/2);var f=[i[2].point[0]=i[0].point[0]+i[2].v[0]*s(l),2*(i[0].point[1]=i[1].point[1]=i[2].v[0]*v(l))];return function(t,e){var n,r=v(e),a=s(e),o=new Array(3);for(n=0;n<3;++n){var l=i[n];if(o[n]=_t(e-l[1],l[3],l[2],a,r,t-l[0]),!o[n][0])return l.point;o[n][1]=Tt(o[n][1]-l.v[1])}var h=f.slice();for(n=0;n<3;++n){var d=2==n?0:n+1,p=kt(i[n].v[0],o[n][0],o[d][0]);o[n][1]<0&&(p=-p),n?1==n?(p=c-p,h[0]-=o[n][0]*s(p),h[1]-=o[n][0]*v(p)):(p=u-p,h[0]+=o[n][0]*s(p),h[1]+=o[n][0]*v(p)):(h[0]+=o[n][0]*s(p),h[1]-=o[n][0]*v(p))}return h[0]/=3,h[1]/=3,h}}function At(t){return t[0]*=E,t[1]*=E,t}function St(){return Et([0,22],[45,22],[22.5,-22]).scale(380).center([22.5,2])}function Et(t,e,n){var i=(0,xt.Z)({type:"MultiPoint",coordinates:[t,e,n]}),a=[-i[0],-i[1]],o=(0,wt.Z)(a),s=Mt(At(o(t)),At(o(e)),At(o(n)));s.invert=tt(s);var l=(0,r.Z)(s).rotate(a),c=l.center;return delete l.rotate,l.center=function(t){return arguments.length?c(o(t)):o.invert(c())},l.clipAngle(90)}function Ct(t,e){var n=O(1-v(e));return[2/M*t*n,M*(1-n)]}function Pt(){return(0,r.Z)(Ct).scale(95.6464).center([0,30])}function Ot(t){var e=m(t);function n(t,n){return[t,(t?t/v(t):1)*(v(n)*s(t)-e*s(n))]}return n.invert=e?function(t,n){t&&(n*=v(t)/t);var r=s(t);return[t,2*o(O(r*r+e*e-n*n)-r,e-n)]}:function(t,e){return[t,C(t?e*m(t)/t:e)]},n}function Lt(){return ft(Ot).scale(249.828).clipAngle(90)}Ct.invert=function(t,e){var n=(n=e/M-1)*n;return[n>0?t*O(x/n)/2:0,C(1-n)]};var It=O(3);function Dt(t,e){return[It*t*(2*s(2*e/3)-1)/M,It*M*v(e/3)]}function Rt(){return(0,r.Z)(Dt).scale(156.19)}function zt(t){var e=s(t);function n(t,n){return[t*e,v(n)/e]}return n.invert=function(t,n){return[t/e,C(n*e)]},n}function Nt(){return ft(zt).parallel(38.58).scale(195.044)}function jt(t){var e=s(t);function n(t,n){return[t*e,(1+e)*m(n/2)]}return n.invert=function(t,n){return[t/e,2*a(n/(1+e))]},n}function Ft(){return ft(jt).scale(124.75)}function Bt(t,e){var n=O(8/(3*x));return[n*t*(1-i(e)/x),n*e]}function Ut(){return(0,r.Z)(Bt).scale(165.664)}function Ht(t,e){var n=O(4-3*v(i(e)));return[2/O(6*x)*t*n,g(e)*O(2*x/3)*(2-n)]}function Vt(){return(0,r.Z)(Ht).scale(165.664)}function qt(t,e){var n=O(x*(4+x));return[2/n*t*(1+O(1-4*e*e/(x*x))),4/n*e]}function Gt(){return(0,r.Z)(qt).scale(180.739)}function Wt(t,e){var n=(2+w)*v(e);e/=2;for(var r=0,a=1/0;r<10&&i(a)>y;r++){var o=s(e);e-=a=(e+v(e)*(o+2)-n)/(2*o*(1+o))}return[2/O(x*(4+x))*t*(1+s(e)),2*O(x/(4+x))*v(e)]}function Yt(){return(0,r.Z)(Wt).scale(180.739)}function $t(t,e){return[t*(1+s(e))/O(2+x),2*e/O(2+x)]}function Xt(){return(0,r.Z)($t).scale(173.044)}function Kt(t,e){for(var n=(1+w)*v(e),r=0,a=1/0;r<10&&i(a)>y;r++)e-=a=(e+v(e)-n)/(1+s(e));return n=O(2+x),[t*(1+s(e))/n,2*e/n]}function Zt(){return(0,r.Z)(Kt).scale(173.044)}Dt.invert=function(t,e){var n=3*C(e/(It*M));return[M*t/(It*(2*s(2*n/3)-1)),n]},Bt.invert=function(t,e){var n=O(8/(3*x)),r=e/n;return[t/(n*(1-i(r)/x)),r]},Ht.invert=function(t,e){var n=2-i(e)/O(2*x/3);return[t*O(6*x)/(2*n),g(e)*C((4-n*n)/3)]},qt.invert=function(t,e){var n=O(x*(4+x))/2;return[t*n/(1+O(1-e*e*(4+x)/(4*x))),e*n/2]},Wt.invert=function(t,e){var n=e*O((4+x)/x)/2,r=C(n),i=s(r);return[t/(2/O(x*(4+x))*(1+i)),C((r+n*(i+2))/(2+w))]},$t.invert=function(t,e){var n=O(2+x),r=e*n/2;return[n*t/(1+s(r)),r]},Kt.invert=function(t,e){var n=1+w,r=O(n/2);return[2*t*r/(1+s(e*=r)),C((e+v(e))/n)]};var Jt=3+2*T;function Qt(t,e){var 
n=v(t/=2),r=s(t),i=O(s(e)),o=s(e/=2),l=v(e)/(o+T*r*i),c=O(2/(1+l*l)),f=O((T*o+(r+n)*i)/(T*o+(r-n)*i));return[Jt*(c*(f-1/f)-2*u(f)),Jt*(c*l*(f+1/f)-2*a(l))]}function te(){return(0,r.Z)(Qt).scale(62.5271)}Qt.invert=function(t,e){if(!(n=B.invert(t/1.2,1.065*e)))return null;var n,r=n[0],o=n[1],l=20;t/=Jt,e/=Jt;do{var c=r/2,d=o/2,p=v(c),g=s(c),m=v(d),b=s(d),x=s(o),_=O(x),M=m/(b+T*g*_),A=M*M,S=O(2/(1+A)),E=(T*b+(g+p)*_)/(T*b+(g-p)*_),C=O(E),P=C-1/C,L=C+1/C,I=S*P-2*u(C)-t,D=S*M*L-2*a(M)-e,R=m&&k*_*p*A/m,z=(T*g*b+_)/(2*(b+T*g*_)*(b+T*g*_)*_),N=-.5*M*S*S*S,j=N*R,F=N*z,U=(U=2*b+T*_*(g-p))*U*C,H=(T*g*b*_+x)/U,V=-T*p*m/(_*U),q=P*j-2*H/C+S*(H+H/E),G=P*F-2*V/C+S*(V+V/E),W=M*L*j-2*R/(1+A)+S*L*R+S*M*(H-H/E),Y=M*L*F-2*z/(1+A)+S*L*z+S*M*(V-V/E),$=G*W-Y*q;if(!$)break;var X=(D*G-I*Y)/$,K=(I*W-D*q)/$;r-=X,o=f(-w,h(w,o-K))}while((i(X)>y||i(K)>y)&&--l>0);return i(i(o)-w)r){var h=O(f),d=o(u,c),g=n*p(d/n),m=d-g,b=t*s(m),_=(t*v(m)-m*v(b))/(w-b),k=pe(m,_),T=(x-t)/ge(k,b,x);c=h;var M,A=50;do{c-=M=(t+ge(k,b,c)*T-h)/(k(c)*T)}while(i(M)>y&&--A>0);u=m*v(c),cr){var c=O(l),u=o(a,e),f=n*p(u/n),h=u-f;e=c*s(h),a=c*v(h);for(var d=e-w,g=v(e),m=a/g,y=ey||i(d)>y)&&--m>0);return[p,g]},u}var ye=me(2.8284,-1.6988,.75432,-.18071,1.76003,-.38914,.042555);function be(){return(0,r.Z)(ye).scale(149.995)}var xe=me(2.583819,-.835827,.170354,-.038094,1.543313,-.411435,.082742);function we(){return(0,r.Z)(xe).scale(153.93)}var _e=me(5/6*x,-.62636,-.0344,0,1.3493,-.05524,0,.045);function ke(){return(0,r.Z)(_e).scale(130.945)}function Te(t,e){var n=t*t,r=e*e;return[t*(1-.162388*r)*(.87-952426e-9*n*n),e*(1+r/12)]}function Me(){return(0,r.Z)(Te).scale(131.747)}Te.invert=function(t,e){var n,r=t,a=e,o=50;do{var s=a*a;a-=n=(a*(1+s/12)-e)/(1+s/4)}while(i(n)>y&&--o>0);o=50,t/=1-.162388*s;do{var l=(l=r*r)*l;r-=n=(r*(.87-952426e-9*l)-t)/(.87-.00476213*l)}while(i(n)>y&&--o>0);return[r,a]};var Ae=me(2.6516,-.76534,.19123,-.047094,1.36289,-.13965,.031762);function Se(){return(0,r.Z)(Ae).scale(131.087)}function Ee(t){var e=t(w,0)[0]-t(-w,0)[0];function n(n,r){var i=n>0?-.5:.5,a=t(n+i*x,r);return a[0]-=i*e,a}return t.invert&&(n.invert=function(n,r){var i=n>0?-.5:.5,a=t.invert(n+i*e,r),o=a[0]-i*x;return o<-x?o+=2*x:o>x&&(o-=2*x),a[0]=o,a}),n}function Ce(t,e){var n=g(t),r=g(e),a=s(e),l=s(t)*a,c=v(t)*a,u=v(r*e);t=i(o(c,u)),e=C(l),i(t-w)>y&&(t%=w);var f=function(t,e){if(e===w)return[0,0];var n,r,a=v(e),o=a*a,l=o*o,c=1+l,u=1+3*l,f=1-l,h=C(1/O(c)),d=f+o*c*h,p=(1-a)/d,g=O(p),m=p*c,b=O(m),_=g*f;if(0===t)return[0,-(_+o*b)];var k,T=s(e),M=1/T,A=2*a*T,S=(-3*o+h*u)*A,E=(-d*T-(1-a)*S)/(d*d),P=o*c*E+p*u*A,L=-M*A,I=-M*P,D=-2*M*(f*(.5*E/g)-2*o*g*A),R=4*t/x;if(t>.222*x||e.175*x){if(n=(_+o*O(m*(1+l)-_*_))/(1+l),t>x/4)return[n,n];var z=n,N=.5*n;n=.5*(N+z),r=50;do{var j=n*(D+L*O(m-n*n))+I*C(n/b)-R;if(!j)break;j<0?N=n:z=n,n=.5*(N+z)}while(i(z-N)>y&&--r>0)}else{n=y,r=25;do{var F=n*n,B=O(m-F),U=D+L*B,H=n*U+I*C(n/b)-R;n-=k=B?H/(U+(I-L*F)/B):0}while(i(k)>y&&--r>0)}return[n,-_-o*O(m-n*n)]}(t>x/4?w-t:t,e);return t>x/4&&(u=f[0],f[0]=-f[1],f[1]=-u),f[0]*=n,f[1]*=-r,f}function Pe(){return(0,r.Z)(Ee(Ce)).scale(239.75)}function Oe(t,e){var n,r,o,c,u,f;if(e=1-y)return n=(1-e)/4,o=1/(r=I(t)),[(c=((f=l(2*(f=t)))-1)/(f+1))+n*((u=r*L(t))-t)/(r*r),o-n*c*o*(u-t),o+n*c*o*(u+t),2*a(l(t))-w+n*(u-t)/r];var h=[1,0,0,0,0,0,0,0,0],d=[O(e),0,0,0,0,0,0,0,0],p=0;for(r=O(1-e),u=1;i(d[p]/h[p])>y&&p<8;)n=h[p++],d[p]=(n-r)/2,h[p]=(n+r)/2,r=O(n*r),u*=2;o=u*h[p]*t;do{o=(C(c=d[p]*v(r=o)/h[p])+o)/2}while(--p);return[v(o),c=s(o),c/s(o-r),o]}function Le(t,e){if(!e)return t;if(1===e)return u(m(t/2+_));for(var 
n=1,r=O(1-e),o=O(e),s=0;i(o)>y;s++){if(t%x){var l=a(r*m(t)/n);l<0&&(l+=x),t+=l+~~(t/x)*x}else t+=t;o=(n+r)/2,r=O(n*r),o=((n=o)-r)/2}return t/(d(2,s)*n)}function Ie(t,e){var n=(T-1)/(T+1),r=O(1-n*n),c=Le(w,r*r),f=u(m(x/4+i(e)/2)),h=l(-1*f)/O(n),d=function(t,e){var n=t*t,r=e+1,i=1-n-e*e;return[.5*((t>=0?w:-w)-o(i,2*t)),-.25*u(i*i+4*n)+.5*u(r*r+n)]}(h*s(-1*t),h*v(-1*t)),p=function(t,e,n){var r=i(t),o=L(i(e));if(r){var s=1/v(r),l=1/(m(r)*m(r)),c=-(l+n*(o*o*s*s)-1+n),u=(-c+O(c*c-(n-1)*l*4))/2;return[Le(a(1/O(u)),n)*g(t),Le(a(O((u/l-1)/n)),1-n)*g(e)]}return[0,Le(a(o),1-n)*g(e)]}(d[0],d[1],r*r);return[-p[1],(e>=0?1:-1)*(.5*c-p[0])]}function De(){return(0,r.Z)(Ee(Ie)).scale(151.496)}Ce.invert=function(t,e){i(t)>1&&(t=2*g(t)-t),i(e)>1&&(e=2*g(e)-e);var n=g(t),r=g(e),a=-n*t,l=-r*e,c=l/a<1,u=function(t,e){for(var n=0,r=1,a=.5,o=50;;){var l=a*a,c=O(a),u=C(1/O(1+l)),f=1-l+a*(1+l)*u,h=(1-c)/f,d=O(h),p=h*(1+l),g=d*(1-l),v=O(p-t*t),m=e+g+a*v;if(i(r-n)0?n=a:r=a,a=.5*(n+r)}if(!o)return null;var y=C(c),w=s(y),_=1/w,k=2*c*w,T=(-f*w-(-3*a+u*(1+3*l))*k*(1-c))/(f*f),M=.5*T/d,A=(1-l)*M-2*a*d*k;return[x/4*(t*(-2*_*A+-_*k*v)+-_*(a*(1+l)*T+h*(1+3*l)*k)*C(t/O(p))),y]}(c?l:a,c?a:l),f=u[0],h=u[1],d=s(h);return c&&(f=-w-f),[n*(o(v(f)*d,-v(h))+x),r*C(s(f)*d)]},Ie.invert=function(t,e){var n=(T-1)/(T+1),r=O(1-n*n),i=function(t,e,n){var r,i,a;return t?(r=Oe(t,n),e?(a=(i=Oe(e,1-n))[1]*i[1]+n*r[0]*r[0]*i[0]*i[0],[[r[0]*i[2]/a,r[1]*r[2]*i[0]*i[1]/a],[r[1]*i[1]/a,-r[0]*r[2]*i[0]*i[2]/a],[r[2]*i[1]*i[2]/a,-n*r[0]*r[1]*i[0]/a]]):[[r[0],0],[r[1],0],[r[2],0]]):[[0,(i=Oe(e,1-n))[0]/i[1]],[1/i[1],0],[i[2]/i[1],0]]}(.5*Le(w,r*r)-e,-t,r*r),s=function(t,e){var n=e[0]*e[0]+e[1]*e[1];return[(t[0]*e[0]+t[1]*e[1])/n,(t[1]*e[0]-t[0]*e[1])/n]}(i[0],i[1]);return[o(s[1],s[0])/-1,2*a(l(-.5*u(n*s[0]*s[0]+n*s[1]*s[1])))-w]};var Re=n(7613);function ze(t){var e=v(t),n=s(t),r=Ne(t);function a(t,a){var o=r(t,a);t=o[0],a=o[1];var l=v(a),c=s(a),u=s(t),f=P(e*l+n*c*u),h=v(f),d=i(h)>y?f/h:1;return[d*n*v(t),(i(t)>w?d:-d)*(e*c-n*l*u)]}return r.invert=Ne(-t),a.invert=function(t,n){var i=O(t*t+n*n),a=-v(i),l=s(i),c=i*l,u=-n*a,f=i*e,h=O(c*c+u*u-f*f),d=o(c*f+u*h,u*f-c*h),p=(i>w?-1:1)*o(t*a,i*s(d)*l+n*v(d)*a);return r.invert(p,d)},a}function Ne(t){var e=v(t),n=s(t);return function(t,r){var i=s(r),a=s(t)*i,l=v(t)*i,c=v(r);return[o(l,a*n-c*e),C(c*n+a*e)]}}function je(){var t=0,e=(0,r.r)(ze),n=e(t),i=n.rotate,a=n.stream,o=(0,Re.Z)();return n.parallel=function(r){if(!arguments.length)return t*S;var i=n.rotate();return e(t=r*E).rotate(i)},n.rotate=function(e){return arguments.length?(i.call(n,[e[0],e[1]-t*S]),o.center([-e[0],-e[1]]),n):((e=i.call(n))[1]+=t*S,e)},n.stream=function(t){return(t=a(t)).sphere=function(){t.polygonStart();var e,n=o.radius(89.99)().coordinates[0],r=n.length-1,i=-1;for(t.lineStart();++i=0;)t.point((e=n[i])[0],e[1]);t.lineEnd(),t.polygonEnd()},t},n.scale(79.4187).parallel(45).clipAngle(179.999)}var Fe=n(33064),Be=n(72736),Ue=C(1-1/3)*S,He=zt(0);function Ve(t){var e=Ue*E,n=Ct(x,e)[0]-Ct(-x,e)[0],r=He(0,e)[1],a=Ct(0,e)[1],o=M-a,s=A/t,l=4/A,u=r+o*o*4/A;function d(d,p){var g,v=i(p);if(v>e){var m=h(t-1,f(0,c((d+x)/s)));(g=Ct(d+=x*(t-1)/t-m*s,v))[0]=g[0]*A/n-A*(t-1)/(2*t)+m*A/t,g[1]=r+4*(g[1]-a)*o/A,p<0&&(g[1]=-g[1])}else g=He(d,p);return g[0]*=l,g[1]/=u,g}return d.invert=function(e,d){e/=l;var p=i(d*=u);if(p>r){var g=h(t-1,f(0,c((e+x)/s)));e=(e+x*(t-1)/t-g*s)*n/A;var v=Ct.invert(e,.25*(p-r)*A/o+a);return v[0]-=x*(t-1)/t-g*s,d<0&&(v[1]=-v[1]),v}return He.invert(e,d)},d}function qe(t,e){return[t,1&e?90-y:Ue]}function 
Ge(t,e){return[t,1&e?-90+y:-Ue]}function We(t){return[t[0]*(1-y),t[1]]}function Ye(){var t=4,e=(0,r.r)(Ve),n=e(t),i=n.stream;return n.lobes=function(n){return arguments.length?e(t=+n):t},n.stream=function(e){var r=n.rotate(),a=i(e),o=(n.rotate([0,0]),i(e));return n.rotate(r),a.sphere=function(){(0,Be.Z)(function(t){var e=[].concat((0,Fe.w6)(-180,180+t/2,t).map(qe),(0,Fe.w6)(180,-180-t/2,-t).map(Ge));return{type:"Polygon",coordinates:[180===t?e.map(We):e]}}(180/t),o)},a},n.scale(239.75)}function $e(t){var e,n=1+t,r=C(v(1/n)),a=2*O(x/(e=x+4*r*n)),l=.5*a*(n+O(t*(2+t))),c=t*t,u=n*n;function f(f,h){var d,p,g=1-v(h);if(g&&g<2){var m,y=w-h,_=25;do{var k=v(y),T=s(y),M=r+o(k,n-T),A=1+u-2*n*T;y-=m=(y-c*r-n*k+A*M-.5*g*e)/(2*n*k*M)}while(i(m)>b&&--_>0);d=a*O(A),p=f*M/x}else d=a*(t+g),p=f*r/x;return[d*v(p),l-d*s(p)]}return f.invert=function(t,i){var s=t*t+(i-=l)*i,f=(1+u-s/(a*a))/(2*n),h=P(f),d=v(h),p=r+o(d,n-f);return[C(t/O(s))*x/p,C(1-2*(h-c*r-n*d+(1+u-2*n*f)*p)/e)]},f}function Xe(){var t=1,e=(0,r.r)($e),n=e(t);return n.ratio=function(n){return arguments.length?e(t=+n):t},n.scale(167.774).center([0,18.67])}var Ke=.7109889596207567,Ze=.0528035274542;function Je(t,e){return e>-Ke?((t=at(t,e))[1]+=Ze,t):ht(t,e)}function Qe(){return(0,r.Z)(Je).rotate([-20,-55]).scale(164.263).center([0,-5.4036])}function tn(t,e){return i(e)>Ke?((t=at(t,e))[1]-=e>0?Ze:-Ze,t):ht(t,e)}function en(){return(0,r.Z)(tn).scale(152.63)}function nn(t,e,n,r){var i=O(4*x/(2*n+(1+t-e/2)*v(2*n)+(t+e)/2*v(4*n)+e/2*v(6*n))),a=O(r*v(n)*O((1+t*s(2*n)+e*s(4*n))/(1+t+e))),o=n*c(1);function l(n){return O(1+t*s(2*n)+e*s(4*n))}function c(r){var i=r*n;return(2*i+(1+t-e/2)*v(2*i)+(t+e)/2*v(4*i)+e/2*v(6*i))/n}function u(t){return l(t)*v(t)}var f=function(t,e){var r=n*Q(c,o*v(e)/n,e/x);isNaN(r)&&(r=n*g(e));var u=i*l(r);return[u*a*t/x*s(r),u/a*v(r)]};return f.invert=function(t,e){var r=Q(u,e*a/i);return[t*x/(s(r)*i*a*l(r)),C(n*c(r/n)/o)]},0===n&&(i=O(r/x),(f=function(t,e){return[t*i,v(e)/i]}).invert=function(t,e){return[t/i,C(e*i)]}),f}function rn(){var t=1,e=0,n=45*E,i=2,a=(0,r.r)(nn),o=a(t,e,n,i);return o.a=function(r){return arguments.length?a(t=+r,e,n,i):t},o.b=function(r){return arguments.length?a(t,e=+r,n,i):e},o.psiMax=function(r){return arguments.length?a(t,e,n=+r*E,i):n*S},o.ratio=function(r){return arguments.length?a(t,e,n,i=+r):i},o.scale(180.739)}function an(t,e,n,r,i,a,o,s,l,c,u){if(u.nanEncountered)return NaN;var f,h,d,p,g,v,m,y,b,x;if(h=t(e+.25*(f=n-e)),d=t(n-.25*f),isNaN(h))u.nanEncountered=!0;else{if(!isNaN(d))return x=((v=(p=f*(r+4*h+i)/12)+(g=f*(i+4*d+a)/12))-o)/15,c>l?(u.maxDepthCount++,v+x):Math.abs(x)t?n=r:e=r,r=e+n>>1}while(r>e);var i=c[r+1]-c[r];return i&&(i=(t-c[r+1])/i),(r+1+i)/s}var h=2*f(1)/x*o/n,p=function(t,e){var n=f(i(v(e))),a=r(n)*t;return n/=h,[a,e>=0?n:-n]};return p.invert=function(t,e){var n;return i(e*=h)<1&&(n=g(e)*C(a(i(e))*o)),[t/r(i(e)),n]},p}function ln(){var t=0,e=2.5,n=1.183136,i=(0,r.r)(sn),a=i(t,e,n);return a.alpha=function(r){return arguments.length?i(t=+r,e,n):t},a.k=function(r){return arguments.length?i(t,e=+r,n):e},a.gamma=function(r){return arguments.length?i(t,e,n=+r):n},a.scale(152.63)}function cn(t,e){return i(t[0]-e[0])a[o][2][0];++o);var l=t(n-a[o][1][0],r);return l[0]+=t(a[o][1][0],i*r>i*a[o][0][1]?a[o][0][1]:r)[0],l}n?o.invert=n(o):t.invert&&(o.invert=function(n,r){for(var 
i=a[+(r<0)],s=e[+(r<0)],l=0,c=i.length;l=0;--s)n=(e=t[1][s])[0][0],r=e[0][1],i=e[1][1],a=e[2][0],o=e[2][1],l.push(un([[a-y,o-y],[a-y,i+y],[n+y,i+y],[n+y,r-y]],30));return{type:"Polygon",coordinates:[(0,Fe.TS)(l)]}}(n),e=n.map((function(t){return t.map((function(t){return[[t[0][0]*E,t[0][1]*E],[t[1][0]*E,t[1][1]*E],[t[2][0]*E,t[2][1]*E]]}))})),a=e.map((function(e){return e.map((function(e){var n,r=t(e[0][0],e[0][1])[0],i=t(e[2][0],e[2][1])[0],a=t(e[1][0],e[0][1])[1],o=t(e[1][0],e[1][1])[1];return a>o&&(n=a,a=o,o=n),[[r,a],[i,o]]}))})),s):e.map((function(t){return t.map((function(t){return[[t[0][0]*S,t[0][1]*S],[t[1][0]*S,t[1][1]*S],[t[2][0]*S,t[2][1]*S]]}))}))},null!=e&&s.lobes(e),s}Je.invert=function(t,e){return e>-Ke?at.invert(t,e-Ze):ht.invert(t,e)},tn.invert=function(t,e){return i(e)>Ke?at.invert(t,e+(e>0?Ze:-Ze)):ht.invert(t,e)};var hn=[[[[-180,0],[-100,90],[-40,0]],[[-40,0],[30,90],[180,0]]],[[[-180,0],[-160,-90],[-100,0]],[[-100,0],[-60,-90],[-20,0]],[[-20,0],[20,-90],[80,0]],[[80,0],[140,-90],[180,0]]]];function dn(){return fn(ct,hn).scale(160.857)}var pn=[[[[-180,0],[-100,90],[-40,0]],[[-40,0],[30,90],[180,0]]],[[[-180,0],[-160,-90],[-100,0]],[[-100,0],[-60,-90],[-20,0]],[[-20,0],[20,-90],[80,0]],[[80,0],[140,-90],[180,0]]]];function gn(){return fn(tn,pn).scale(152.63)}var vn=[[[[-180,0],[-100,90],[-40,0]],[[-40,0],[30,90],[180,0]]],[[[-180,0],[-160,-90],[-100,0]],[[-100,0],[-60,-90],[-20,0]],[[-20,0],[20,-90],[80,0]],[[80,0],[140,-90],[180,0]]]];function mn(){return fn(at,vn).scale(169.529)}var yn=[[[[-180,0],[-90,90],[0,0]],[[0,0],[90,90],[180,0]]],[[[-180,0],[-90,-90],[0,0]],[[0,0],[90,-90],[180,0]]]];function bn(){return fn(at,yn).scale(169.529).rotate([20,0])}var xn=[[[[-180,35],[-30,90],[0,35]],[[0,35],[30,90],[180,35]]],[[[-180,-10],[-102,-90],[-65,-10]],[[-65,-10],[5,-90],[77,-10]],[[77,-10],[103,-90],[180,-10]]]];function wn(){return fn(Je,xn,tt).rotate([-20,-55]).scale(164.263).center([0,-5.4036])}var _n=[[[[-180,0],[-110,90],[-40,0]],[[-40,0],[0,90],[40,0]],[[40,0],[110,90],[180,0]]],[[[-180,0],[-110,-90],[-40,0]],[[-40,0],[0,-90],[40,0]],[[40,0],[110,-90],[180,0]]]];function kn(){return fn(ht,_n).scale(152.63).rotate([-20,0])}function Tn(t,e){return[3/A*t*O(x*x/3-e*e),e]}function Mn(){return(0,r.Z)(Tn).scale(158.837)}function An(t){function e(e,n){if(i(i(n)-w)2)return null;var a=(e/=2)*e,s=(n/=2)*n,l=2*n/(1+a+s);return l=d((1+l)/(1-l),1/t),[o(2*e,1-a-s)/t,C((l-1)/(l+1))]},e}function Sn(){var t=.5,e=(0,r.r)(An),n=e(t);return n.spacing=function(n){return arguments.length?e(t=+n):t},n.scale(124.75)}Tn.invert=function(t,e){return[A/3*t/O(x*x/3-e*e),e]};var En=x/T;function Cn(t,e){return[t*(1+O(s(e)))/2,e/(s(e/2)*s(t/6))]}function Pn(){return(0,r.Z)(Cn).scale(97.2672)}function On(t,e){var n=t*t,r=e*e;return[t*(.975534+r*(-.0143059*n-.119161+-.0547009*r)),e*(1.00384+n*(.0802894+-.02855*r+199025e-9*n)+r*(.0998909+-.0491032*r))]}function Ln(){return(0,r.Z)(On).scale(139.98)}function In(t,e){return[v(t)/s(e),m(e)*s(t)]}function Dn(){return(0,r.Z)(In).scale(144.049).clipAngle(89.999)}function Rn(t){var e=s(t),n=m(_+t/2);function r(r,a){var o=a-t,s=i(o)=0;)h=(f=t[u])[0]+l*(i=h)-c*d,d=f[1]+l*d+c*i;return[h=l*(i=h)-c*d,d=l*d+c*i]}return n.invert=function(n,r){var l=20,c=n,u=r;do{for(var f,h=e,d=t[h],p=d[0],g=d[1],m=0,b=0;--h>=0;)m=p+c*(f=m)-u*b,b=g+c*b+u*f,p=(d=t[h])[0]+c*(f=p)-u*g,g=d[1]+c*g+u*f;var x,w,_=(m=p+c*(f=m)-u*b)*m+(b=g+c*b+u*f)*b;c-=x=((p=c*(f=p)-u*g-n)*m+(g=c*g+u*f-r)*b)/_,u-=w=(g*m-p*b)/_}while(i(x)+i(w)>y*y&&--l>0);if(l){var 
k=O(c*c+u*u),T=2*a(.5*k),M=v(T);return[o(c*M,k*s(T)),k?C(u*M/k):0]}},n}Cn.invert=function(t,e){var n=i(t),r=i(e),a=y,o=w;ry||i(b)>y)&&--a>0);return a&&[n,r]},In.invert=function(t,e){var n=t*t,r=e*e+1,i=n+r,a=t?k*O((i-O(i*i-4*n))/n):1/O(r);return[C(t*a),g(e)*P(a)]},Nn.invert=function(t,e){return[t,2.5*a(l(.8*e))-.625*x]};var Bn=[[.9972523,0],[.0052513,-.0041175],[.0074606,.0048125],[-.0153783,-.1968253],[.0636871,-.1408027],[.3660976,-.2937382]],Un=[[.98879,0],[0,0],[-.050909,0],[0,0],[.075528,0]],Hn=[[.984299,0],[.0211642,.0037608],[-.1036018,-.0575102],[-.0329095,-.0320119],[.0499471,.1223335],[.026046,.0899805],[7388e-7,-.1435792],[.0075848,-.1334108],[-.0216473,.0776645],[-.0225161,.0853673]],Vn=[[.9245,0],[0,0],[.01943,0]],qn=[[.721316,0],[0,0],[-.00881625,-.00617325]];function Gn(){return Kn(Bn,[152,-64]).scale(1400).center([-160.908,62.4864]).clipAngle(30).angle(7.8)}function Wn(){return Kn(Un,[95,-38]).scale(1e3).clipAngle(55).center([-96.5563,38.8675])}function Yn(){return Kn(Hn,[120,-45]).scale(359.513).clipAngle(55).center([-117.474,53.0628])}function $n(){return Kn(Vn,[-20,-18]).scale(209.091).center([20,16.7214]).clipAngle(82)}function Xn(){return Kn(qn,[165,10]).scale(250).clipAngle(130).center([-165,-10])}function Kn(t,e){var n=(0,r.Z)(Fn(t)).rotate(e).clipAngle(90),i=(0,wt.Z)(e),a=n.center;return delete n.rotate,n.center=function(t){return arguments.length?a(i(t)):i.invert(a())},n}var Zn=O(6),Jn=O(7);function Qn(t,e){var n=C(7*v(e)/(3*Zn));return[Zn*t*(2*s(2*n/3)-1)/Jn,9*v(n/3)/Jn]}function tr(){return(0,r.Z)(Qn).scale(164.859)}function er(t,e){for(var n,r=(1+k)*v(e),a=e,o=0;o<25&&(a-=n=(v(a/2)+v(a)-r)/(.5*s(a/2)+s(a)),!(i(n)b&&--l>0);return[t/(.84719-.13063*(r=s*s)+(o=r*(a=r*r))*o*(.05494*r-.04515-.02326*a+.00331*o)),s]},lr.invert=function(t,e){for(var n=e/2,r=0,a=1/0;r<10&&i(a)>y;++r){var o=s(e/2);e-=a=(e-m(e/2)-n)/(1-.5/(o*o))}return[2*t/(1+s(e)),e]};var ur=[[[[-180,0],[-90,90],[0,0]],[[0,0],[90,90],[180,0]]],[[[-180,0],[-90,-90],[0,0]],[[0,0],[90,-90],[180,0]]]];function fr(){return fn(K(1/0),ur).rotate([20,0]).scale(152.63)}function hr(t,e){var n=v(e),r=s(e),a=g(t);if(0===t||i(e)===w)return[0,e];if(0===e)return[t,0];if(i(t)===w)return[t*r,w*n];var o=x/(2*t)-2*t/x,l=2*e/x,c=(1-l*l)/(n-l),u=o*o,f=c*c,h=1+u/f,d=1+f/u,p=(o*n/c-o/2)/h,m=(f*n/u+c/2)/d,y=m*m-(f*n*n/u+c*n-1)/d;return[w*(p+O(p*p+r*r/h)*a),w*(m+O(y<0?0:y)*g(-e*o)*a)]}function dr(){return(0,r.Z)(hr).scale(127.267)}hr.invert=function(t,e){var n=(t/=w)*t,r=n+(e/=w)*e,i=x*x;return[t?(r-1+O((1-r)*(1-r)+4*n))/(2*t)*w:0,Q((function(t){return r*(x*v(t)-2*t)*x+4*t*t*(e-v(t))+2*x*t-i*e}),0)]};var pr=1.0148,gr=.23185,vr=-.14499,mr=.02406,yr=pr,br=5*gr,xr=7*vr,wr=9*mr,_r=1.790857183;function kr(t,e){var n=e*e;return[t,e*(pr+n*n*(gr+n*(vr+mr*n)))]}function Tr(){return(0,r.Z)(kr).scale(139.319)}function Mr(t,e){if(i(e)_r?e=_r:e<-1.790857183&&(e=-1.790857183);var n,r=e;do{var a=r*r;r-=n=(r*(pr+a*a*(gr+a*(vr+mr*a)))-e)/(yr+a*a*(br+a*(xr+wr*a)))}while(i(n)>y);return[t,r]},Mr.invert=function(t,e){if(i(e)y&&--o>0);return l=m(a),[(i(e)=0;)if(r=e[s],n[0]===r[0]&&n[1]===r[1]){if(a)return[a,n];a=n}}}(e.face,n.face),i=Cr(r.map(n.project),r.map(e.project));e.transform=n.transform?Pr(n.transform,i):i;for(var a=n.edges,o=0,s=a.length;o0?[-e[0],0]:[180-e[0],180])};var e=Fr.map((function(e){return{face:e,project:t(e)}}));return[-1,0,0,1,0,1,4,5].forEach((function(t,n){var r=e[t];r&&(r.children||(r.children=[])).push(e[n])})),Ir(e[0],(function(t,n){return e[t<-x/2?n<0?6:4:t<0?n<0?2:0:tr^d>r&&n<(h-c)*(r-u)/(d-u)+c&&(i=!i)}return 
i}(t[0],n))return t.push(e),!0}))||t.push([e])})),ti=[],t.length?t.length>1?{type:"MultiPolygon",coordinates:t}:{type:"Polygon",coordinates:t[0]}:null}};function ii(t){var e=t(w,0)[0]-t(-w,0)[0];function n(n,r){var a=i(n)0?n-x:n+x,r),s=(o[0]-o[1])*k,l=(o[0]+o[1])*k;if(a)return[s,l];var c=e*k,u=s>0^l>0?-1:1;return[u*s-g(l)*c,u*l-g(s)*c]}return t.invert&&(n.invert=function(n,r){var a=(n+r)*k,o=(r-n)*k,s=i(a)<.5*e&&i(o)<.5*e;if(!s){var l=e*k,c=a>0^o>0?-1:1,u=-c*n+(o>0?1:-1)*l,f=-c*r+(a>0?1:-1)*l;a=(-u-f)*k,o=(u-f)*k}var h=t.invert(a,o);return s||(h[0]+=a>0?x:-x),h}),(0,r.Z)(n).rotate([-90,-90,45]).clipAngle(179.999)}function ai(){return ii(Ce).scale(176.423)}function oi(){return ii(Ie).scale(111.48)}function si(t,e){if(!(0<=(e=+e)&&e<=20))throw new Error("invalid digits");function n(t){var n=t.length,r=2,i=new Array(n);for(i[0]=+t[0].toFixed(e),i[1]=+t[1].toFixed(e);r2||a[0]!=e[0]||a[1]!=e[1])&&(r.push(a),e=a)}return 1===r.length&&t.length>1&&r.push(n(t[t.length-1])),r}function a(t){return t.map(i)}function o(t){if(null==t)return t;var e;switch(t.type){case"GeometryCollection":e={type:"GeometryCollection",geometries:t.geometries.map(o)};break;case"Point":e={type:"Point",coordinates:n(t.coordinates)};break;case"MultiPoint":e={type:t.type,coordinates:r(t.coordinates)};break;case"LineString":e={type:t.type,coordinates:i(t.coordinates)};break;case"MultiLineString":case"Polygon":e={type:t.type,coordinates:a(t.coordinates)};break;case"MultiPolygon":e={type:"MultiPolygon",coordinates:t.coordinates.map(a)};break;default:return t}return null!=t.bbox&&(e.bbox=t.bbox),e}function s(t){var e={type:"Feature",properties:t.properties,geometry:o(t.geometry)};return null!=t.id&&(e.id=t.id),null!=t.bbox&&(e.bbox=t.bbox),e}if(null!=t)switch(t.type){case"Feature":return s(t);case"FeatureCollection":var l={type:"FeatureCollection",features:t.features.map(s)};return null!=t.bbox&&(l.bbox=t.bbox),l;default:return o(t)}return t}function li(t){var e=v(t);function n(n,r){var i=e?m(n*e/2)/e:n/2;if(!r)return[2*i,-t];var o=2*a(i*v(r)),l=1/m(r);return[v(o)*l,r+(1-s(o))*l-t]}return n.invert=function(n,r){if(i(r+=t)y&&--u>0);var p=n*(f=m(c)),g=m(i(r)0?w:-w)*(f+o*(p-l)/2+o*o*(p-2*f+l)/2)]}function hi(){return(0,r.Z)(fi).scale(152.63)}function di(t,e){var n=function(t){function e(e,n){var r=s(n),i=(t-1)/(t-r*s(e));return[i*r*v(e),i*v(n)]}return e.invert=function(e,n){var r=e*e+n*n,i=O(r),a=(t-O(1-r*(t+1)/(t-1)))/((t-1)/i+i/(t-1));return[o(e*a,i*O(1-a*a)),i?C(n*a/i):0]},e}(t);if(!e)return n;var r=s(e),i=v(e);function a(e,a){var o=n(e,a),s=o[1],l=s*i/(t-1)+r;return[o[0]*r/l,s/l]}return a.invert=function(e,a){var o=(t-1)/(t-1-a*i);return n.invert(o*e,o*a*r)},a}function pi(){var t=2,e=0,n=(0,r.r)(di),i=n(t,e);return i.distance=function(r){return arguments.length?n(t=+r,e):t},i.tilt=function(r){return arguments.length?n(t,e=r*E):e*S},i.scale(432.147).clipAngle(P(1/t)*S-1e-6)}ui.forEach((function(t){t[1]*=1.0144})),fi.invert=function(t,e){var n=e/w,r=90*n,a=h(18,i(r/5)),o=f(0,c(a));do{var s=ui[o][1],l=ui[o+1][1],u=ui[h(19,o+2)][1],d=u-s,p=u-2*l+s,g=2*(i(n)-l)/d,v=p/d,m=g*(1-v*g*(1-2*v*g));if(m>=0||1===o){r=(e>=0?5:-5)*(m+a);var y,x=50;do{m=(a=h(18,i(r)/5))-(o=c(a)),s=ui[o][1],l=ui[o+1][1],u=ui[h(19,o+2)][1],r-=(y=(e>=0?w:-w)*(l+m*(u-s)/2+m*m*(u-2*l+s)/2)-e)*S}while(i(y)>b&&--x>0);break}}while(--o>=0);var _=ui[o][0],k=ui[o+1][0],T=ui[h(19,o+2)][0];return[t/(k+m*(T-_)/2+m*m*(T-2*k+_)/2),r*E]};var gi=1e-4,vi=1e4,mi=-180,yi=mi+gi,bi=180,xi=bi-gi,wi=-90,_i=wi+gi,ki=90,Ti=ki-gi;function Mi(t){return t.length>0}function Ai(t){return 
t===wi||t===ki?[0,t]:[mi,(e=t,Math.floor(e*vi)/vi)];var e}function Si(t){var e=t[0],n=t[1],r=!1;return e<=yi?(e=mi,r=!0):e>=xi&&(e=bi,r=!0),n<=_i?(n=wi,r=!0):n>=Ti&&(n=ki,r=!0),r?[e,n]:t}function Ei(t){return t.map(Si)}function Ci(t,e,n){for(var r=0,i=t.length;r=xi||u<=_i||u>=Ti){a[o]=Si(l);for(var f=o+1;fyi&&d_i&&p=s)break;n.push({index:-1,polygon:e,ring:a=a.slice(f-1)}),a[0]=Ai(a[0][1]),o=-1,s=a.length}}}}function Pi(t){var e,n,r,i,a,o,s=t.length,l={},c={};for(e=0;e0?x-l:l)*S],u=(0,r.Z)(t(s)).rotate(c),f=(0,wt.Z)(c),h=u.center;return delete u.rotate,u.center=function(t){return arguments.length?h(f(t)):f.invert(h())},u.clipAngle(90)}function Ni(t){var e=s(t);function n(t,n){var r=(0,Nr.M)(t,n);return r[0]*=e,r}return n.invert=function(t,n){return Nr.M.invert(t/e,n)},n}function ji(){return Fi([-158,21.5],[-77,39]).clipAngle(60).scale(400)}function Fi(t,e){return zi(Ni,t,e)}function Bi(t){if(!(t*=2))return W.N;var e=-t/2,n=-e,r=t*t,i=m(n),a=.5/v(n);function l(i,a){var o=P(s(a)*s(i-e)),l=P(s(a)*s(i-n));return[((o*=o)-(l*=l))/(2*t),(a<0?-1:1)*O(4*r*l-(r-o+l)*(r-o+l))/(2*t)]}return l.invert=function(t,r){var l,c,u=r*r,f=s(O(u+(l=t+e)*l)),h=s(O(u+(l=t+n)*l));return[o(c=f-h,l=(f+h)*i),(r<0?-1:1)*P(O(l*l+c*c)*a)]},l}function Ui(){return Hi([-158,21.5],[-77,39]).clipAngle(130).scale(122.571)}function Hi(t,e){return zi(Bi,t,e)}function Vi(t,e){if(i(e)y&&--l>0);return[g(t)*(O(a*a+4)+a)*x/4,w*s]};var ta=4*x+3*O(3),ea=2*O(2*x*O(3)/ta),na=it(ea*O(3)/x,ea,ta/6);function ra(){return(0,r.Z)(na).scale(176.84)}function ia(t,e){return[t*O(1-3*e*e/(x*x)),e]}function aa(){return(0,r.Z)(ia).scale(152.63)}function oa(t,e){var n=s(e),r=s(t)*n,i=1-r,a=s(t=o(v(t)*n,-v(e))),l=v(t);return[l*(n=O(1-r*r))-a*i,-a*n-l*i]}function sa(){return(0,r.Z)(oa).rotate([0,-90,45]).scale(124.75).clipAngle(179.999)}function la(t,e){var n=z(t,e);return[(n[0]+t/w)/2,(n[1]+e)/2]}function ca(){return(0,r.Z)(la).scale(158.837)}ia.invert=function(t,e){return[t/O(1-3*e*e/(x*x)),e]},oa.invert=function(t,e){var n=(t*t+e*e)/-2,r=O(-n*(2+n)),i=e*n+t*r,a=t*n-e*r,s=O(a*a+i*i);return[o(r*i,s*(1+n)),s?-C(r*a/s):0]},la.invert=function(t,e){var n=t,r=e,a=25;do{var o,l=s(r),c=v(r),u=v(2*r),f=c*c,h=l*l,d=v(n),p=s(n/2),g=v(n/2),m=g*g,b=1-h*p*p,x=b?P(l*p)*O(o=1/b):o=0,_=.5*(2*x*l*g+n/w)-t,k=.5*(x*c+r)-e,T=.5*o*(h*m+x*l*p*f)+.5/w,M=o*(d*u/4-x*c*g),A=.125*o*(u*g-x*c*h*d),S=.5*o*(f*p+x*m*l)+.5,E=M*A-S*T,C=(k*M-_*S)/E,L=(_*A-k*T)/E;n-=C,r-=L}while((i(C)>y||i(L)>y)&&--a>0);return[n,r]}},33940:function(t,e,n){"use strict";function r(){return new i}function i(){this.reset()}n.d(e,{Z:function(){return r}}),i.prototype={constructor:i,reset:function(){this.s=this.t=0},add:function(t){o(a,t,this.t),o(this,a.s,this.s),this.s?this.t+=a.t:this.s=a.t},valueOf:function(){return this.s}};var a=new i;function o(t,e,n){var r=t.s=e+n,i=r-e,a=r-i;t.t=e-a+(n-i)}},97860:function(t,e,n){"use strict";n.d(e,{L9:function(){return h},ZP:function(){return b},gL:function(){return p}});var r,i,a,o,s,l=n(33940),c=n(39695),u=n(73182),f=n(72736),h=(0,l.Z)(),d=(0,l.Z)(),p={point:u.Z,lineStart:u.Z,lineEnd:u.Z,polygonStart:function(){h.reset(),p.lineStart=g,p.lineEnd=v},polygonEnd:function(){var t=+h;d.add(t<0?c.BZ+t:t),this.lineStart=this.lineEnd=this.point=u.Z},sphere:function(){d.add(c.BZ)}};function g(){p.point=m}function v(){y(r,i)}function m(t,e){p.point=y,r=t,i=e,t*=c.uR,e*=c.uR,a=t,o=(0,c.mC)(e=e/2+c.pu),s=(0,c.O$)(e)}function y(t,e){t*=c.uR,e=(e*=c.uR)/2+c.pu;var 
n=t-a,r=n>=0?1:-1,i=r*n,l=(0,c.mC)(e),u=(0,c.O$)(e),f=s*u,d=o*l+f*(0,c.mC)(i),p=f*r*(0,c.O$)(i);h.add((0,c.fv)(p,d)),a=t,o=l,s=u}function b(t){return d.reset(),(0,f.Z)(t,p),2*d}},77338:function(t,e,n){"use strict";n.d(e,{Z:function(){return P}});var r,i,a,o,s,l,c,u,f,h,d=n(33940),p=n(97860),g=n(7620),v=n(39695),m=n(72736),y=(0,d.Z)(),b={point:x,lineStart:_,lineEnd:k,polygonStart:function(){b.point=T,b.lineStart=M,b.lineEnd=A,y.reset(),p.gL.polygonStart()},polygonEnd:function(){p.gL.polygonEnd(),b.point=x,b.lineStart=_,b.lineEnd=k,p.L9<0?(r=-(a=180),i=-(o=90)):y>v.Ho?o=90:y<-v.Ho&&(i=-90),h[0]=r,h[1]=a},sphere:function(){r=-(a=180),i=-(o=90)}};function x(t,e){f.push(h=[r=t,a=t]),eo&&(o=e)}function w(t,e){var n=(0,g.Og)([t*v.uR,e*v.uR]);if(u){var l=(0,g.T5)(u,n),c=[l[1],-l[0],0],d=(0,g.T5)(c,l);(0,g.iJ)(d),d=(0,g.Y1)(d);var p,m=t-s,y=m>0?1:-1,b=d[0]*v.RW*y,x=(0,v.Wn)(m)>180;x^(y*so&&(o=p):x^(y*s<(b=(b+360)%360-180)&&bo&&(o=e)),x?tS(r,a)&&(a=t):S(t,a)>S(r,a)&&(r=t):a>=r?(ta&&(a=t)):t>s?S(r,t)>S(r,a)&&(a=t):S(t,a)>S(r,a)&&(r=t)}else f.push(h=[r=t,a=t]);eo&&(o=e),u=n,s=t}function _(){b.point=w}function k(){h[0]=r,h[1]=a,b.point=x,u=null}function T(t,e){if(u){var n=t-s;y.add((0,v.Wn)(n)>180?n+(n>0?360:-360):n)}else l=t,c=e;p.gL.point(t,e),w(t,e)}function M(){p.gL.lineStart()}function A(){T(l,c),p.gL.lineEnd(),(0,v.Wn)(y)>v.Ho&&(r=-(a=180)),h[0]=r,h[1]=a,u=null}function S(t,e){return(e-=t)<0?e+360:e}function E(t,e){return t[0]-e[0]}function C(t,e){return t[0]<=t[1]?t[0]<=e&&e<=t[1]:eS(s[0],s[1])&&(s[1]=l[1]),S(l[0],s[1])>S(s[0],s[1])&&(s[0]=l[0])):c.push(s=l);for(u=-1/0,e=0,s=c[n=c.length-1];e<=n;s=l,++e)l=c[e],(d=S(s[1],l[0]))>u&&(u=d,r=l[0],a=s[1])}return f=h=null,r===1/0||i===1/0?[[NaN,NaN],[NaN,NaN]]:[[r,i],[a,o]]}},7620:function(t,e,n){"use strict";n.d(e,{Og:function(){return a},T:function(){return c},T5:function(){return s},Y1:function(){return i},iJ:function(){return u},j9:function(){return o},s0:function(){return l}});var r=n(39695);function i(t){return[(0,r.fv)(t[1],t[0]),(0,r.ZR)(t[2])]}function a(t){var e=t[0],n=t[1],i=(0,r.mC)(n);return[i*(0,r.mC)(e),i*(0,r.O$)(e),(0,r.O$)(n)]}function o(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}function s(t,e){return[t[1]*e[2]-t[2]*e[1],t[2]*e[0]-t[0]*e[2],t[0]*e[1]-t[1]*e[0]]}function l(t,e){t[0]+=e[0],t[1]+=e[1],t[2]+=e[2]}function c(t,e){return[t[0]*e,t[1]*e,t[2]*e]}function u(t){var e=(0,r._b)(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=e,t[1]/=e,t[2]/=e}},66624:function(t,e,n){"use strict";n.d(e,{Z:function(){return I}});var r,i,a,o,s,l,c,u,f,h,d,p,g,v,m,y,b=n(39695),x=n(73182),w=n(72736),_={sphere:x.Z,point:k,lineStart:M,lineEnd:E,polygonStart:function(){_.lineStart=C,_.lineEnd=P},polygonEnd:function(){_.lineStart=M,_.lineEnd=E}};function k(t,e){t*=b.uR,e*=b.uR;var n=(0,b.mC)(e);T(n*(0,b.mC)(t),n*(0,b.O$)(t),(0,b.O$)(e))}function T(t,e,n){++r,a+=(t-a)/r,o+=(e-o)/r,s+=(n-s)/r}function M(){_.point=A}function A(t,e){t*=b.uR,e*=b.uR;var n=(0,b.mC)(e);v=n*(0,b.mC)(t),m=n*(0,b.O$)(t),y=(0,b.O$)(e),_.point=S,T(v,m,y)}function S(t,e){t*=b.uR,e*=b.uR;var n=(0,b.mC)(e),r=n*(0,b.mC)(t),a=n*(0,b.O$)(t),o=(0,b.O$)(e),s=(0,b.fv)((0,b._b)((s=m*o-y*a)*s+(s=y*r-v*o)*s+(s=v*a-m*r)*s),v*r+m*a+y*o);i+=s,l+=s*(v+(v=r)),c+=s*(m+(m=a)),u+=s*(y+(y=o)),T(v,m,y)}function E(){_.point=k}function C(){_.point=O}function P(){L(p,g),_.point=k}function O(t,e){p=t,g=e,t*=b.uR,e*=b.uR,_.point=L;var n=(0,b.mC)(e);v=n*(0,b.mC)(t),m=n*(0,b.O$)(t),y=(0,b.O$)(e),T(v,m,y)}function L(t,e){t*=b.uR,e*=b.uR;var 
n=(0,b.mC)(e),r=n*(0,b.mC)(t),a=n*(0,b.O$)(t),o=(0,b.O$)(e),s=m*o-y*a,p=y*r-v*o,g=v*a-m*r,x=(0,b._b)(s*s+p*p+g*g),w=(0,b.ZR)(x),_=x&&-w/x;f+=_*s,h+=_*p,d+=_*g,i+=w,l+=w*(v+(v=r)),c+=w*(m+(m=a)),u+=w*(y+(y=o)),T(v,m,y)}function I(t){r=i=a=o=s=l=c=u=f=h=d=0,(0,w.Z)(t,_);var e=f,n=h,p=d,g=e*e+n*n+p*p;return g0?os)&&(o+=i*a.BZ));for(var h,d=o;i>0?d>s:d0?i.pi:-i.pi,c=(0,i.Wn)(o-n);(0,i.Wn)(c-i.pi)0?i.ou:-i.ou),t.point(a,r),t.lineEnd(),t.lineStart(),t.point(l,r),t.point(o,r),e=0):a!==l&&c>=i.pi&&((0,i.Wn)(n-a)i.Ho?(0,i.z4)(((0,i.O$)(e)*(o=(0,i.mC)(r))*(0,i.O$)(n)-(0,i.O$)(r)*(a=(0,i.mC)(e))*(0,i.O$)(t))/(a*o*s)):(e+r)/2}(n,r,o,s),t.point(a,r),t.lineEnd(),t.lineStart(),t.point(l,r),e=0),t.point(n=o,r=s),a=l},lineEnd:function(){t.lineEnd(),n=r=NaN},clean:function(){return 2-e}}}),(function(t,e,n,r){var a;if(null==t)a=n*i.ou,r.point(-i.pi,a),r.point(0,a),r.point(i.pi,a),r.point(i.pi,0),r.point(i.pi,-a),r.point(0,-a),r.point(-i.pi,-a),r.point(-i.pi,0),r.point(-i.pi,a);else if((0,i.Wn)(t[0]-e[0])>i.Ho){var o=t[0]1&&e.push(e.pop().concat(e.shift()))},result:function(){var n=e;return e=[],t=null,n}}}},1457:function(t,e,n){"use strict";n.d(e,{Z:function(){return l}});var r=n(7620),i=n(7613),a=n(39695),o=n(67108),s=n(97023);function l(t){var e=(0,a.mC)(t),n=6*a.uR,l=e>0,c=(0,a.Wn)(e)>a.Ho;function u(t,n){return(0,a.mC)(t)*(0,a.mC)(n)>e}function f(t,n,i){var o=(0,r.Og)(t),s=(0,r.Og)(n),l=[1,0,0],c=(0,r.T5)(o,s),u=(0,r.j9)(c,c),f=c[0],h=u-f*f;if(!h)return!i&&t;var d=e*u/h,p=-e*f/h,g=(0,r.T5)(l,c),v=(0,r.T)(l,d),m=(0,r.T)(c,p);(0,r.s0)(v,m);var y=g,b=(0,r.j9)(v,y),x=(0,r.j9)(y,y),w=b*b-x*((0,r.j9)(v,v)-1);if(!(w<0)){var _=(0,a._b)(w),k=(0,r.T)(y,(-b-_)/x);if((0,r.s0)(k,v),k=(0,r.Y1)(k),!i)return k;var T,M=t[0],A=n[0],S=t[1],E=n[1];A0^k[1]<((0,a.Wn)(k[0]-M)a.pi^(M<=k[0]&&k[0]<=A)){var O=(0,r.T)(y,(-b+_)/x);return(0,r.s0)(O,v),[k,(0,r.Y1)(O)]}}}function h(e,n){var r=l?t:a.pi-t,i=0;return e<-r?i|=1:e>r&&(i|=2),n<-r?i|=4:n>r&&(i|=8),i}return(0,s.Z)(u,(function(t){var e,n,r,i,s;return{lineStart:function(){i=r=!1,s=1},point:function(d,p){var g,v=[d,p],m=u(d,p),y=l?m?0:h(d,p):m?h(d+(d<0?a.pi:-a.pi),p):0;if(!e&&(i=r=m)&&t.lineStart(),m!==r&&(!(g=f(e,v))||(0,o.Z)(e,g)||(0,o.Z)(v,g))&&(v[2]=1),m!==r)s=0,m?(t.lineStart(),g=f(v,e),t.point(g[0],g[1])):(g=f(e,v),t.point(g[0],g[1],2),t.lineEnd()),e=g;else if(c&&e&&l^m){var b;y&n||!(b=f(v,e,!0))||(s=0,l?(t.lineStart(),t.point(b[0][0],b[0][1]),t.point(b[1][0],b[1][1]),t.lineEnd()):(t.point(b[1][0],b[1][1]),t.lineEnd(),t.lineStart(),t.point(b[0][0],b[0][1],3)))}!m||e&&(0,o.Z)(e,v)||t.point(v[0],v[1]),e=v,r=m,n=y},lineEnd:function(){r&&t.lineEnd(),e=null},clean:function(){return s|(i&&r)<<1}}}),(function(e,r,a,o){(0,i.m)(o,t,n,a,e,r)}),l?[0,-t]:[-a.pi,t-a.pi])}},97023:function(t,e,n){"use strict";n.d(e,{Z:function(){return l}});var r=n(85272),i=n(46225),a=n(39695),o=n(23071),s=n(33064);function l(t,e,n,a){return function(l){var f,h,d,p=e(l),g=(0,r.Z)(),v=e(g),m=!1,y={point:b,lineStart:w,lineEnd:_,polygonStart:function(){y.point=k,y.lineStart=T,y.lineEnd=M,h=[],f=[]},polygonEnd:function(){y.point=b,y.lineStart=w,y.lineEnd=_,h=(0,s.TS)(h);var t=(0,o.Z)(f,a);h.length?(m||(l.polygonStart(),m=!0),(0,i.Z)(h,u,t,n,l)):t&&(m||(l.polygonStart(),m=!0),l.lineStart(),n(null,null,1,l),l.lineEnd()),m&&(l.polygonEnd(),m=!1),h=f=null},sphere:function(){l.polygonStart(),l.lineStart(),n(null,null,1,l),l.lineEnd(),l.polygonEnd()}};function b(e,n){t(e,n)&&l.point(e,n)}function x(t,e){p.point(t,e)}function w(){y.point=x,p.lineStart()}function _(){y.point=b,p.lineEnd()}function 
k(t,e){d.push([t,e]),v.point(t,e)}function T(){v.lineStart(),d=[]}function M(){k(d[0][0],d[0][1]),v.lineEnd();var t,e,n,r,i=v.clean(),a=g.result(),o=a.length;if(d.pop(),f.push(d),d=null,o)if(1&i){if((e=(n=a[0]).length-1)>0){for(m||(l.polygonStart(),m=!0),l.lineStart(),t=0;t1&&2&i&&a.push(a.pop().concat(a.shift())),h.push(a.filter(c))}return y}}function c(t){return t.length>1}function u(t,e){return((t=t.x)[0]<0?t[1]-a.ou-a.Ho:a.ou-t[1])-((e=e.x)[0]<0?e[1]-a.ou-a.Ho:a.ou-e[1])}},87605:function(t,e,n){"use strict";n.d(e,{Z:function(){return c}});var r=n(39695),i=n(85272),a=n(46225),o=n(33064),s=1e9,l=-s;function c(t,e,n,c){function u(r,i){return t<=r&&r<=n&&e<=i&&i<=c}function f(r,i,a,o){var s=0,l=0;if(null==r||(s=h(r,a))!==(l=h(i,a))||p(r,i)<0^a>0)do{o.point(0===s||3===s?t:n,s>1?c:e)}while((s=(s+a+4)%4)!==l);else o.point(i[0],i[1])}function h(i,a){return(0,r.Wn)(i[0]-t)0?0:3:(0,r.Wn)(i[0]-n)0?2:1:(0,r.Wn)(i[1]-e)0?1:0:a>0?3:2}function d(t,e){return p(t.x,e.x)}function p(t,e){var n=h(t,1),r=h(e,1);return n!==r?n-r:0===n?e[1]-t[1]:1===n?t[0]-e[0]:2===n?t[1]-e[1]:e[0]-t[0]}return function(r){var h,p,g,v,m,y,b,x,w,_,k,T=r,M=(0,i.Z)(),A={point:S,lineStart:function(){A.point=E,p&&p.push(g=[]),_=!0,w=!1,b=x=NaN},lineEnd:function(){h&&(E(v,m),y&&w&&M.rejoin(),h.push(M.result())),A.point=S,w&&T.lineEnd()},polygonStart:function(){T=M,h=[],p=[],k=!0},polygonEnd:function(){var e=function(){for(var e=0,n=0,r=p.length;nc&&(f-i)*(c-a)>(h-a)*(t-i)&&++e:h<=c&&(f-i)*(c-a)<(h-a)*(t-i)&&--e;return e}(),n=k&&e,i=(h=(0,o.TS)(h)).length;(n||i)&&(r.polygonStart(),n&&(r.lineStart(),f(null,null,1,r),r.lineEnd()),i&&(0,a.Z)(h,d,e,f,r),r.polygonEnd()),T=r,h=p=g=null}};function S(t,e){u(t,e)&&T.point(t,e)}function E(r,i){var a=u(r,i);if(p&&g.push([r,i]),_)v=r,m=i,y=a,_=!1,a&&(T.lineStart(),T.point(r,i));else if(a&&w)T.point(r,i);else{var o=[b=Math.max(l,Math.min(s,b)),x=Math.max(l,Math.min(s,x))],f=[r=Math.max(l,Math.min(s,r)),i=Math.max(l,Math.min(s,i))];!function(t,e,n,r,i,a){var o,s=t[0],l=t[1],c=0,u=1,f=e[0]-s,h=e[1]-l;if(o=n-s,f||!(o>0)){if(o/=f,f<0){if(o0){if(o>u)return;o>c&&(c=o)}if(o=i-s,f||!(o<0)){if(o/=f,f<0){if(o>u)return;o>c&&(c=o)}else if(f>0){if(o0)){if(o/=h,h<0){if(o0){if(o>u)return;o>c&&(c=o)}if(o=a-l,h||!(o<0)){if(o/=h,h<0){if(o>u)return;o>c&&(c=o)}else if(h>0){if(o0&&(t[0]=s+c*f,t[1]=l+c*h),u<1&&(e[0]=s+u*f,e[1]=l+u*h),!0}}}}}(o,f,t,e,n,c)?a&&(T.lineStart(),T.point(r,i),k=!1):(w||(T.lineStart(),T.point(o[0],o[1])),T.point(f[0],f[1]),a||T.lineEnd(),k=!1)}b=r,x=i,w=a}return A}}},46225:function(t,e,n){"use strict";n.d(e,{Z:function(){return o}});var r=n(67108),i=n(39695);function a(t,e,n,r){this.x=t,this.z=e,this.o=n,this.e=r,this.v=!1,this.n=this.p=null}function o(t,e,n,o,l){var c,u,f=[],h=[];if(t.forEach((function(t){if(!((e=t.length-1)<=0)){var e,n,o=t[0],s=t[e];if((0,r.Z)(o,s)){if(!o[2]&&!s[2]){for(l.lineStart(),c=0;c=0;--c)l.point((p=d[c])[0],p[1]);else o(v.x,v.p.x,-1,l);v=v.p}d=(v=v.o).z,m=!m}while(!v.v);l.lineEnd()}}}function s(t){if(e=t.length){for(var e,n,r=0,i=t[0];++r0&&(i=S(t[a],t[a-1]))>0&&n<=i&&r<=i&&(n+r-i)*(1-Math.pow((n-r)/i,2))v.Ho})).map(l)).concat((0,N.w6)((0,v.mD)(a/d)*d,i,d).filter((function(t){return(0,v.Wn)(t%g)>v.Ho})).map(c))}return y.lines=function(){return b().map((function(t){return{type:"LineString",coordinates:t}}))},y.outline=function(){return{type:"Polygon",coordinates:[u(r).concat(f(o).slice(1),u(n).reverse().slice(1),f(s).reverse().slice(1))]}},y.extent=function(t){return arguments.length?y.extentMajor(t).extentMinor(t):y.extentMinor()},y.extentMajor=function(t){return 
arguments.length?(r=+t[0][0],n=+t[1][0],s=+t[0][1],o=+t[1][1],r>n&&(t=r,r=n,n=t),s>o&&(t=s,s=o,o=t),y.precision(m)):[[r,s],[n,o]]},y.extentMinor=function(n){return arguments.length?(e=+n[0][0],t=+n[1][0],a=+n[0][1],i=+n[1][1],e>t&&(n=e,e=t,t=n),a>i&&(n=a,a=i,i=n),y.precision(m)):[[e,a],[t,i]]},y.step=function(t){return arguments.length?y.stepMajor(t).stepMinor(t):y.stepMinor()},y.stepMajor=function(t){return arguments.length?(p=+t[0],g=+t[1],y):[p,g]},y.stepMinor=function(t){return arguments.length?(h=+t[0],d=+t[1],y):[h,d]},y.precision=function(h){return arguments.length?(m=+h,l=j(a,i,90),c=F(e,t,m),u=j(s,o,90),f=F(r,n,m),y):m},y.extentMajor([[-180,-90+v.Ho],[180,90-v.Ho]]).extentMinor([[-180,-80-v.Ho],[180,80+v.Ho]])}function U(){return B()()}var H,V,q,G,W=n(83074),Y=n(8593),$=(0,g.Z)(),X=(0,g.Z)(),K={point:m.Z,lineStart:m.Z,lineEnd:m.Z,polygonStart:function(){K.lineStart=Z,K.lineEnd=tt},polygonEnd:function(){K.lineStart=K.lineEnd=K.point=m.Z,$.add((0,v.Wn)(X)),X.reset()},result:function(){var t=$/2;return $.reset(),t}};function Z(){K.point=J}function J(t,e){K.point=Q,H=q=t,V=G=e}function Q(t,e){X.add(G*t-q*e),q=t,G=e}function tt(){Q(H,V)}var et,nt,rt,it,at=K,ot=n(3559),st=0,lt=0,ct=0,ut=0,ft=0,ht=0,dt=0,pt=0,gt=0,vt={point:mt,lineStart:yt,lineEnd:wt,polygonStart:function(){vt.lineStart=_t,vt.lineEnd=kt},polygonEnd:function(){vt.point=mt,vt.lineStart=yt,vt.lineEnd=wt},result:function(){var t=gt?[dt/gt,pt/gt]:ht?[ut/ht,ft/ht]:ct?[st/ct,lt/ct]:[NaN,NaN];return st=lt=ct=ut=ft=ht=dt=pt=gt=0,t}};function mt(t,e){st+=t,lt+=e,++ct}function yt(){vt.point=bt}function bt(t,e){vt.point=xt,mt(rt=t,it=e)}function xt(t,e){var n=t-rt,r=e-it,i=(0,v._b)(n*n+r*r);ut+=i*(rt+t)/2,ft+=i*(it+e)/2,ht+=i,mt(rt=t,it=e)}function wt(){vt.point=mt}function _t(){vt.point=Tt}function kt(){Mt(et,nt)}function Tt(t,e){vt.point=Mt,mt(et=rt=t,nt=it=e)}function Mt(t,e){var n=t-rt,r=e-it,i=(0,v._b)(n*n+r*r);ut+=i*(rt+t)/2,ft+=i*(it+e)/2,ht+=i,dt+=(i=it*t-rt*e)*(rt+t),pt+=i*(it+e),gt+=3*i,mt(rt=t,it=e)}var At=vt;function St(t){this._context=t}St.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._context.moveTo(t,e),this._point=1;break;case 1:this._context.lineTo(t,e);break;default:this._context.moveTo(t+this._radius,e),this._context.arc(t,e,this._radius,0,v.BZ)}},result:m.Z};var Et,Ct,Pt,Ot,Lt,It=(0,g.Z)(),Dt={point:m.Z,lineStart:function(){Dt.point=Rt},lineEnd:function(){Et&&zt(Ct,Pt),Dt.point=m.Z},polygonStart:function(){Et=!0},polygonEnd:function(){Et=null},result:function(){var t=+It;return It.reset(),t}};function Rt(t,e){Dt.point=zt,Ct=Ot=t,Pt=Lt=e}function zt(t,e){Ot-=t,Lt-=e,It.add((0,v._b)(Ot*Ot+Lt*Lt)),Ot=t,Lt=e}var Nt=Dt;function jt(){this._string=[]}function Ft(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Bt(t,e){var n,r,i=4.5;function a(t){return t&&("function"===typeof i&&r.pointRadius(+i.apply(this,arguments)),(0,y.Z)(t,n(r))),r.result()}return a.area=function(t){return(0,y.Z)(t,n(at)),at.result()},a.measure=function(t){return(0,y.Z)(t,n(Nt)),Nt.result()},a.bounds=function(t){return(0,y.Z)(t,n(ot.Z)),ot.Z.result()},a.centroid=function(t){return(0,y.Z)(t,n(At)),At.result()},a.projection=function(e){return arguments.length?(n=null==e?(t=null,Y.Z):(t=e).stream,a):t},a.context=function(t){return 
arguments.length?(r=null==t?(e=null,new jt):new St(e=t),"function"!==typeof i&&r.pointRadius(i),a):e},a.pointRadius=function(t){return arguments.length?(i="function"===typeof t?t:(r.pointRadius(+t),+t),a):i},a.projection(t).context(e)}jt.prototype={_radius:4.5,_circle:Ft(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._string.push("M",t,",",e),this._point=1;break;case 1:this._string.push("L",t,",",e);break;default:null==this._circle&&(this._circle=Ft(this._radius)),this._string.push("M",t,",",e,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var Ut=n(15002);function Ht(t){var e=0,n=v.pi/3,r=(0,Ut.r)(t),i=r(e,n);return i.parallels=function(t){return arguments.length?r(e=t[0]*v.uR,n=t[1]*v.uR):[e*v.RW,n*v.RW]},i}function Vt(t,e){var n=(0,v.O$)(t),r=(n+(0,v.O$)(e))/2;if((0,v.Wn)(r)=.12&&i<.234&&r>=-.425&&r<-.214?s:i>=.166&&i<.234&&r>=-.214&&r<-.115?l:o).invert(t)},u.stream=function(n){return t&&e===n?t:t=function(t){var e=t.length;return{point:function(n,r){for(var i=-1;++i0?e<-v.ou+v.Ho&&(e=-v.ou+v.Ho):e>v.ou-v.Ho&&(e=v.ou-v.Ho);var n=i/(0,v.sQ)(te(e),r);return[n*(0,v.O$)(r*t),i-n*(0,v.mC)(r*t)]}return a.invert=function(t,e){var n=i-e,a=(0,v.Xx)(r)*(0,v._b)(t*t+n*n),o=(0,v.fv)(t,(0,v.Wn)(n))*(0,v.Xx)(n);return n*r<0&&(o-=v.pi*(0,v.Xx)(t)*(0,v.Xx)(n)),[o/r,2*(0,v.z4)((0,v.sQ)(i/a,1/r))-v.ou]},a}function ne(){return Ht(ee).scale(109.5).parallels([30,30])}Zt.invert=function(t,e){return[t,2*(0,v.z4)((0,v.Qq)(e))-v.ou]};var re=n(97492);function ie(t,e){var n=(0,v.mC)(t),r=t===e?(0,v.O$)(t):(n-(0,v.mC)(e))/(e-t),i=n/r+t;if((0,v.Wn)(r)2?t[2]+90:90]):[(t=n())[0],t[1],t[2]-90]},n([0,0,90]).scale(159.155)}be.invert=(0,ye.O)((function(t){return 2*(0,v.z4)(t)})),we.invert=function(t,e){return[-e,2*(0,v.z4)((0,v.Qq)(t))-v.ou]}},83074:function(t,e,n){"use strict";n.d(e,{Z:function(){return i}});var r=n(39695);function i(t,e){var n=t[0]*r.uR,i=t[1]*r.uR,a=e[0]*r.uR,o=e[1]*r.uR,s=(0,r.mC)(i),l=(0,r.O$)(i),c=(0,r.mC)(o),u=(0,r.O$)(o),f=s*(0,r.mC)(n),h=s*(0,r.O$)(n),d=c*(0,r.mC)(a),p=c*(0,r.O$)(a),g=2*(0,r.ZR)((0,r._b)((0,r.Jy)(o-i)+s*c*(0,r.Jy)(a-n))),v=(0,r.O$)(g),m=g?function(t){var e=(0,r.O$)(t*=g)/v,n=(0,r.O$)(g-t)/v,i=n*f+e*d,a=n*h+e*p,o=n*l+e*u;return[(0,r.fv)(a,i)*r.RW,(0,r.fv)(o,(0,r._b)(i*i+a*a))*r.RW]}:function(){return[n*r.RW,i*r.RW]};return m.distance=g,m}},39695:function(t,e,n){"use strict";n.d(e,{BZ:function(){return l},Ho:function(){return r},Jy:function(){return M},Kh:function(){return k},O$:function(){return b},OR:function(){return _},Qq:function(){return v},RW:function(){return c},Wn:function(){return f},Xx:function(){return x},ZR:function(){return T},_b:function(){return w},aW:function(){return i},cM:function(){return m},fv:function(){return d},mC:function(){return p},mD:function(){return g},ou:function(){return o},pi:function(){return a},pu:function(){return s},sQ:function(){return y},uR:function(){return u},z4:function(){return h}});var r=1e-6,i=1e-12,a=Math.PI,o=a/2,s=a/4,l=2*a,c=180/a,u=a/180,f=Math.abs,h=Math.atan,d=Math.atan2,p=Math.cos,g=Math.ceil,v=Math.exp,m=(Math.floor,Math.log),y=Math.pow,b=Math.sin,x=Math.sign||function(t){return t>0?1:t<0?-1:0},w=Math.sqrt,_=Math.tan;function k(t){return 
t>1?0:t<-1?a:Math.acos(t)}function T(t){return t>1?o:t<-1?-o:Math.asin(t)}function M(t){return(t=b(t/2))*t}},73182:function(t,e,n){"use strict";function r(){}n.d(e,{Z:function(){return r}})},3559:function(t,e,n){"use strict";var r=n(73182),i=1/0,a=i,o=-i,s=o,l={point:function(t,e){to&&(o=t),es&&(s=e)},lineStart:r.Z,lineEnd:r.Z,polygonStart:r.Z,polygonEnd:r.Z,result:function(){var t=[[i,a],[o,s]];return o=s=-(a=i=1/0),t}};e.Z=l},67108:function(t,e,n){"use strict";n.d(e,{Z:function(){return i}});var r=n(39695);function i(t,e){return(0,r.Wn)(t[0]-e[0])=0?1:-1,C=E*S,P=C>a.pi,O=b*M;if(o.add((0,a.fv)(O*E*(0,a.O$)(C),x*A+O*(0,a.mC)(C))),u+=P?S+E*a.BZ:S,P^m>=n^k>=n){var L=(0,i.T5)((0,i.Og)(v),(0,i.Og)(_));(0,i.iJ)(L);var I=(0,i.T5)(c,L);(0,i.iJ)(I);var D=(P^S>=0?-1:1)*(0,a.ZR)(I[2]);(r>D||r===D&&(L[0]||L[1]))&&(f+=P^S>=0?1:-1)}}return(u<-a.Ho||u4*e&&m--){var _=o+d,k=s+g,T=c+v,M=(0,l._b)(_*_+k*k+T*T),A=(0,l.ZR)(T/=M),S=(0,l.Wn)((0,l.Wn)(T)-1)e||(0,l.Wn)((b*O+x*L)/w-.5)>.3||o*d+s*g+c*v2?t[2]%360*l.uR:0,H()):[C*l.RW,P*l.RW,O*l.RW]},B.angle=function(t){return arguments.length?(L=t%360*l.uR,H()):L*l.RW},B.reflectX=function(t){return arguments.length?(I=t?-1:1,H()):I<0},B.reflectY=function(t){return arguments.length?(D=t?-1:1,H()):D<0},B.precision=function(t){return arguments.length?(b=g(x,F=t*t),V()):(0,l._b)(F)},B.fitExtent=function(t,e){return(0,f.qg)(B,t,e)},B.fitSize=function(t,e){return(0,f.mF)(B,t,e)},B.fitWidth=function(t,e){return(0,f.V6)(B,t,e)},B.fitHeight=function(t,e){return(0,f.rf)(B,t,e)},function(){return e=t.apply(this,arguments),B.invert=e.invert&&U,H()}}},26867:function(t,e,n){"use strict";n.d(e,{K:function(){return a},Z:function(){return o}});var r=n(15002),i=n(39695);function a(t,e){var n=e*e,r=n*n;return[t*(.8707-.131979*n+r*(r*(.003971*n-.001529*r)-.013791)),e*(1.007226+n*(.015085+r*(.028874*n-.044475-.005916*r)))]}function o(){return(0,r.Z)(a).scale(175.295)}a.invert=function(t,e){var n,r=e,a=25;do{var o=r*r,s=o*o;r-=n=(r*(1.007226+o*(.015085+s*(.028874*o-.044475-.005916*s)))-e)/(1.007226+o*(.045255+s*(.259866*o-.311325-.005916*11*s)))}while((0,i.Wn)(n)>i.Ho&&--a>0);return[t/(.8707+(o=r*r)*(o*(o*o*o*(.003971-.001529*o)-.013791)-.131979)),r]}},57962:function(t,e,n){"use strict";n.d(e,{I:function(){return o},Z:function(){return s}});var r=n(39695),i=n(25382),a=n(15002);function o(t,e){return[(0,r.mC)(e)*(0,r.O$)(t),(0,r.O$)(e)]}function s(){return(0,a.Z)(o).scale(249.5).clipAngle(90+r.Ho)}o.invert=(0,i.O)(r.ZR)},49386:function(t,e,n){"use strict";n.d(e,{I:function(){return o},Z:function(){return u}});var r=n(96059),i=n(39695);function a(t,e){return[(0,i.Wn)(t)>i.pi?t+Math.round(-t/i.BZ)*i.BZ:t,e]}function o(t,e,n){return(t%=i.BZ)?e||n?(0,r.Z)(l(t),c(e,n)):l(t):e||n?c(e,n):a}function s(t){return function(e,n){return[(e+=t)>i.pi?e-i.BZ:e<-i.pi?e+i.BZ:e,n]}}function l(t){var e=s(t);return e.invert=s(-t),e}function c(t,e){var n=(0,i.mC)(t),r=(0,i.O$)(t),a=(0,i.mC)(e),o=(0,i.O$)(e);function s(t,e){var s=(0,i.mC)(e),l=(0,i.mC)(t)*s,c=(0,i.O$)(t)*s,u=(0,i.O$)(e),f=u*n+l*r;return[(0,i.fv)(c*a-f*o,l*n-u*r),(0,i.ZR)(f*a+c*o)]}return s.invert=function(t,e){var s=(0,i.mC)(e),l=(0,i.mC)(t)*s,c=(0,i.O$)(t)*s,u=(0,i.O$)(e),f=u*a-c*o;return[(0,i.fv)(c*a+u*o,l*n+f*r),(0,i.ZR)(f*n-l*r)]},s}function u(t){function e(e){return(e=t(e[0]*i.uR,e[1]*i.uR))[0]*=i.RW,e[1]*=i.RW,e}return t=o(t[0]*i.uR,t[1]*i.uR,t.length>2?t[2]*i.uR:0),e.invert=function(e){return(e=t.invert(e[0]*i.uR,e[1]*i.uR))[0]*=i.RW,e[1]*=i.RW,e},e}a.invert=a},72736:function(t,e,n){"use strict";function 
r(t,e){t&&a.hasOwnProperty(t.type)&&a[t.type](t,e)}n.d(e,{Z:function(){return l}});var i={Feature:function(t,e){r(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,i=-1,a=n.length;++i=0;)e+=n[r].value;else e=1;t.value=e}function l(t,e){var n,r,i,a,o,s=new h(t),l=+t.value&&(s.value=t.value),u=[s];for(null==e&&(e=c);n=u.pop();)if(l&&(n.value=+n.data.value),(i=e(n.data))&&(o=i.length))for(n.children=new Array(o),a=o-1;a>=0;--a)u.push(r=n.children[a]=new h(i[a])),r.parent=n,r.depth=n.depth+1;return s.eachBefore(f)}function c(t){return t.children}function u(t){t.data=t.data.data}function f(t){var e=0;do{t.height=e}while((t=t.parent)&&t.height<++e)}function h(t){this.data=t,this.depth=this.height=0,this.parent=null}n.r(e),n.d(e,{cluster:function(){return o},hierarchy:function(){return l},pack:function(){return L},packEnclose:function(){return p},packSiblings:function(){return S},partition:function(){return j},stratify:function(){return q},tree:function(){return Z},treemap:function(){return nt},treemapBinary:function(){return rt},treemapDice:function(){return N},treemapResquarify:function(){return at},treemapSlice:function(){return J},treemapSliceDice:function(){return it},treemapSquarify:function(){return et}}),h.prototype=l.prototype={constructor:h,count:function(){return this.eachAfter(s)},each:function(t){var e,n,r,i,a=this,o=[a];do{for(e=o.reverse(),o=[];a=e.pop();)if(t(a),n=a.children)for(r=0,i=n.length;r=0;--n)i.push(e[n]);return this},sum:function(t){return this.eachAfter((function(e){for(var n=+t(e.data)||0,r=e.children,i=r&&r.length;--i>=0;)n+=r[i].value;e.value=n}))},sort:function(t){return this.eachBefore((function(e){e.children&&e.children.sort(t)}))},path:function(t){for(var e=this,n=function(t,e){if(t===e)return t;var n=t.ancestors(),r=e.ancestors(),i=null;for(t=n.pop(),e=r.pop();t===e;)i=t,t=n.pop(),e=r.pop();return i}(e,t),r=[e];e!==n;)e=e.parent,r.push(e);for(var i=r.length;t!==n;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,e=[t];t=t.parent;)e.push(t);return e},descendants:function(){var t=[];return this.each((function(e){t.push(e)})),t},leaves:function(){var t=[];return this.eachBefore((function(e){e.children||t.push(e)})),t},links:function(){var t=this,e=[];return t.each((function(n){n!==t&&e.push({source:n.parent,target:n})})),e},copy:function(){return l(this).eachBefore(u)}};var d=Array.prototype.slice;function p(t){for(var e,n,r=0,i=(t=function(t){for(var e,n,r=t.length;r;)n=Math.random()*r--|0,e=t[r],t[r]=t[n],t[n]=e;return t}(d.call(t))).length,a=[];r0&&n*n>r*r+i*i}function y(t,e){for(var n=0;n(o*=o)?(r=(c+o-i)/(2*c),a=Math.sqrt(Math.max(0,o/c-r*r)),n.x=t.x-r*s-a*l,n.y=t.y-r*l+a*s):(r=(c+i-o)/(2*c),a=Math.sqrt(Math.max(0,i/c-r*r)),n.x=e.x+r*s-a*l,n.y=e.y+r*l+a*s)):(n.x=e.x+n.r,n.y=e.y)}function k(t,e){var n=t.r+e.r-1e-6,r=e.x-t.x,i=e.y-t.y;return n>0&&n*n>r*r+i*i}function T(t){var e=t._,n=t.next._,r=e.r+n.r,i=(e.x*n.r+n.x*e.r)/r,a=(e.y*n.r+n.y*e.r)/r;return i*i+a*a}function M(t){this._=t,this.next=null,this.previous=null}function A(t){if(!(i=t.length))return 0;var e,n,r,i,a,o,s,l,c,u,f;if((e=t[0]).x=0,e.y=0,!(i>1))return e.r;if(n=t[1],e.x=-n.r,n.x=e.r,n.y=0,!(i>2))return e.r+n.r;_(n,e,r=t[2]),e=new M(e),n=new M(n),r=new M(r),e.next=r.previous=n,n.next=e.previous=r,r.next=n.previous=e;t:for(s=3;s0)throw new Error("cycle");return a}return n.id=function(e){return arguments.length?(t=E(e),n):t},n.parentId=function(t){return arguments.length?(e=E(t),n):e},n}function G(t,e){return t.parent===e.parent?1:2}function W(t){var 
[Minified front-end JavaScript bundle (webpack build artifact) — vendored copies of d3-hierarchy, d3-path, d3-time / d3-time-format, earcut, event-emitter, gl-mat4, a regl-based text renderer, and mapbox-gl point/curve utilities. The minified source is damaged by extraction (comparison operators and other characters stripped) and is not reproduced here.]
e=0,n=0,r=t.length,i=r-1,a=void 0,o=void 0;n@\,;\:\\"\/\[\]\?\=\{\}\x7F]+)(?:\=(?:([^\x00-\x20\(\)<>@\,;\:\\"\/\[\]\?\=\{\}\x7F]+)|(?:\"((?:[^"\\]|\\.)*)\")))?/g,n={};if(t.replace(e,(function(t,e,r,i){var a=r||i;return n[e]=!a||a.toLowerCase(),""})),n["max-age"]){var r=parseInt(n["max-age"],10);isNaN(r)?delete n["max-age"]:n["max-age"]=r}return n}var R=null;function z(t){if(null==R){var e=t.navigator?t.navigator.userAgent:null;R=!!t.safari||!(!e||!(/\b(iPad|iPhone|iPod)\b/.test(e)||e.match("Safari")&&!e.match("Chrome")))}return R}function N(t){try{var e=self[t];return e.setItem("_mapbox_test_",1),e.removeItem("_mapbox_test_"),!0}catch(n){return!1}}function j(t){return self.btoa(encodeURIComponent(t).replace(/%([0-9A-F]{2})/g,(function(t,e){return String.fromCharCode(Number("0x"+e))})))}function F(t){return decodeURIComponent(self.atob(t).split("").map((function(t){return"%"+("00"+t.charCodeAt(0).toString(16)).slice(-2)})).join(""))}var B,U,H,V,q=self.performance&&self.performance.now?self.performance.now.bind(self.performance):Date.now.bind(Date),G=self.requestAnimationFrame||self.mozRequestAnimationFrame||self.webkitRequestAnimationFrame||self.msRequestAnimationFrame,W=self.cancelAnimationFrame||self.mozCancelAnimationFrame||self.webkitCancelAnimationFrame||self.msCancelAnimationFrame,Y={now:q,frame:function(t){var e=G(t);return{cancel:function(){return W(e)}}},getImageData:function(t,e){void 0===e&&(e=0);var n=self.document.createElement("canvas"),r=n.getContext("2d");if(!r)throw new Error("failed to create canvas 2d context");return n.width=t.width,n.height=t.height,r.drawImage(t,0,0,t.width,t.height),r.getImageData(-e,-e,t.width+2*e,t.height+2*e)},resolveURL:function(t){return B||(B=self.document.createElement("a")),B.href=t,B.href},hardwareConcurrency:self.navigator.hardwareConcurrency||4,get devicePixelRatio(){return self.devicePixelRatio},get prefersReducedMotion(){return!!self.matchMedia&&(null==U&&(U=self.matchMedia("(prefers-reduced-motion: reduce)")),U.matches)}},$={API_URL:"https://api.mapbox.com",get EVENTS_URL(){return this.API_URL?0===this.API_URL.indexOf("https://api.mapbox.cn")?"https://events.mapbox.cn/events/v2":0===this.API_URL.indexOf("https://api.mapbox.com")?"https://events.mapbox.com/events/v2":null:null},FEEDBACK_URL:"https://apps.mapbox.com/feedback",REQUIRE_ACCESS_TOKEN:!0,ACCESS_TOKEN:null,MAX_PARALLEL_IMAGE_REQUESTS:16},X={supported:!1,testSupport:J},K=!1,Z=!1;function J(t){!K&&V&&(Z?Q(t):H=t)}function Q(t){var e=t.createTexture();t.bindTexture(t.TEXTURE_2D,e);try{if(t.texImage2D(t.TEXTURE_2D,0,t.RGBA,t.RGBA,t.UNSIGNED_BYTE,V),t.isContextLost())return;X.supported=!0}catch(n){}t.deleteTexture(e),K=!0}self.document&&((V=self.document.createElement("img")).onload=function(){H&&Q(H),H=null,Z=!0},V.onerror=function(){K=!0,H=null},V.src="data:image/webp;base64,UklGRh4AAABXRUJQVlA4TBEAAAAvAQAAAAfQ//73v/+BiOh/AAA=");var tt="01";function et(){for(var t="1",e="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",n="",r=0;r<10;r++)n+=e[Math.floor(62*Math.random())];var i=432e5;return{token:[t,tt,n].join(""),tokenExpiresAt:Date.now()+i}}var nt=function(t,e){this._transformRequestFn=t,this._customAccessToken=e,this._createSkuToken()};function rt(t){return 0===t.indexOf("mapbox:")}nt.prototype._createSkuToken=function(){var t=et();this._skuToken=t.token,this._skuTokenExpiresAt=t.tokenExpiresAt},nt.prototype._isSkuTokenExpired=function(){return Date.now()>this._skuTokenExpiresAt},nt.prototype.transformRequest=function(t,e){return 
this._transformRequestFn&&this._transformRequestFn(t,e)||{url:t}},nt.prototype.normalizeStyleURL=function(t,e){if(!rt(t))return t;var n=ct(t);return n.path="/styles/v1"+n.path,this._makeAPIURL(n,this._customAccessToken||e)},nt.prototype.normalizeGlyphsURL=function(t,e){if(!rt(t))return t;var n=ct(t);return n.path="/fonts/v1"+n.path,this._makeAPIURL(n,this._customAccessToken||e)},nt.prototype.normalizeSourceURL=function(t,e){if(!rt(t))return t;var n=ct(t);return n.path="/v4/"+n.authority+".json",n.params.push("secure"),this._makeAPIURL(n,this._customAccessToken||e)},nt.prototype.normalizeSpriteURL=function(t,e,n,r){var i=ct(t);return rt(t)?(i.path="/styles/v1"+i.path+"/sprite"+e+n,this._makeAPIURL(i,this._customAccessToken||r)):(i.path+=""+e+n,ut(i))},nt.prototype.normalizeTileURL=function(t,e){if(this._isSkuTokenExpired()&&this._createSkuToken(),t&&!rt(t))return t;var n=ct(t),r=/(\.(png|jpg)\d*)(?=$)/,i=/^.+\/v4\//,a=Y.devicePixelRatio>=2||512===e?"@2x":"",o=X.supported?".webp":"$1";n.path=n.path.replace(r,""+a+o),n.path=n.path.replace(i,"/"),n.path="/v4"+n.path;var s=this._customAccessToken||st(n.params)||$.ACCESS_TOKEN;return $.REQUIRE_ACCESS_TOKEN&&s&&this._skuToken&&n.params.push("sku="+this._skuToken),this._makeAPIURL(n,s)},nt.prototype.canonicalizeTileURL=function(t,e){var n="/v4/",r=/\.[\w]+$/,i=ct(t);if(!i.path.match(/(^\/v4\/)/)||!i.path.match(r))return t;var a="mapbox://tiles/";a+=i.path.replace(n,"");var o=i.params;return e&&(o=o.filter((function(t){return!t.match(/^access_token=/)}))),o.length&&(a+="?"+o.join("&")),a},nt.prototype.canonicalizeTileset=function(t,e){for(var n=!!e&&rt(e),r=[],i=0,a=t.tiles||[];i=1&&self.localStorage.setItem(e,JSON.stringify(this.eventData))}catch(r){C("Unable to write to LocalStorage")}},dt.prototype.processRequests=function(t){},dt.prototype.postEvent=function(t,e,r,i){var a=this;if($.EVENTS_URL){var o=ct($.EVENTS_URL);o.params.push("access_token="+(i||$.ACCESS_TOKEN||""));var s={event:this.type,created:new Date(t).toISOString(),sdkIdentifier:"mapbox-gl-js",sdkVersion:n,skuId:tt,userId:this.anonId},l=e?v(s,e):s,c={url:ut(o),headers:{"Content-Type":"text/plain"},body:JSON.stringify([l])};this.pendingRequest=$t(c,(function(t){a.pendingRequest=null,r(t),a.saveEventData(),a.processRequests(i)}))}},dt.prototype.queueRequest=function(t,e){this.queue.push(t),this.processRequests(e)};var pt,gt,vt=function(t){function e(){t.call(this,"map.load"),this.success={},this.skuToken=""}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.postMapLoadEvent=function(t,e,n,r){this.skuToken=n,($.EVENTS_URL&&r||$.ACCESS_TOKEN&&Array.isArray(t)&&t.some((function(t){return rt(t)||at(t)})))&&this.queueRequest({id:e,timestamp:Date.now()},r)},e.prototype.processRequests=function(t){var e=this;if(!this.pendingRequest&&0!==this.queue.length){var n=this.queue.shift(),r=n.id,i=n.timestamp;r&&this.success[r]||(this.anonId||this.fetchEventData(),w(this.anonId)||(this.anonId=x()),this.postEvent(i,{skuToken:this.skuToken},(function(t){t||r&&(e.success[r]=!0)}),t))}},e}(dt),mt=function(t){function e(e){t.call(this,"appUserTurnstile"),this._customAccessToken=e}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.postTurnstileEvent=function(t,e){$.EVENTS_URL&&$.ACCESS_TOKEN&&Array.isArray(t)&&t.some((function(t){return rt(t)||at(t)}))&&this.queueRequest(Date.now(),e)},e.prototype.processRequests=function(t){var 
e=this;if(!this.pendingRequest&&0!==this.queue.length){this.anonId&&this.eventData.lastSuccess&&this.eventData.tokenU||this.fetchEventData();var n=ht($.ACCESS_TOKEN),r=n?n.u:$.ACCESS_TOKEN,i=r!==this.eventData.tokenU;w(this.anonId)||(this.anonId=x(),i=!0);var a=this.queue.shift();if(this.eventData.lastSuccess){var o=new Date(this.eventData.lastSuccess),s=new Date(a),l=(a-this.eventData.lastSuccess)/864e5;i=i||l>=1||l<-1||o.getDate()!==s.getDate()}else i=!0;if(!i)return this.processRequests();this.postEvent(a,{"enabled.telemetry":!1},(function(t){t||(e.eventData.lastSuccess=a,e.eventData.tokenU=r)}),t)}},e}(dt),yt=new mt,bt=yt.postTurnstileEvent.bind(yt),xt=new vt,wt=xt.postMapLoadEvent.bind(xt),_t="mapbox-tiles",kt=500,Tt=50,Mt=42e4;function At(){self.caches&&!pt&&(pt=self.caches.open(_t))}function St(t,e){if(void 0===gt)try{new Response(new ReadableStream),gt=!0}catch(n){gt=!1}gt?e(t.body):t.blob().then(e)}function Et(t,e,n){if(At(),pt){var r={status:e.status,statusText:e.statusText,headers:new self.Headers};e.headers.forEach((function(t,e){return r.headers.set(e,t)}));var i=D(e.headers.get("Cache-Control")||"");i["no-store"]||(i["max-age"]&&r.headers.set("Expires",new Date(n+1e3*i["max-age"]).toUTCString()),new Date(r.headers.get("Expires")).getTime()-nDate.now()&&!n["no-cache"]}var Lt,It=1/0;function Dt(t){++It>Tt&&(t.getActor().send("enforceCacheSizeLimit",kt),It=0)}function Rt(t){At(),pt&&pt.then((function(e){e.keys().then((function(n){for(var r=0;r=200&&n.status<300||0===n.status)&&null!==n.response){var r=n.response;if("json"===t.type)try{r=JSON.parse(n.response)}catch(i){return e(i)}e(null,r,n.getResponseHeader("Cache-Control"),n.getResponseHeader("Expires"))}else e(new Bt(n.statusText,n.status,t.url))},n.send(t.body),{cancel:function(){return n.abort()}}}var Gt=function(t,e){if(!Ht(t.url)){if(self.fetch&&self.Request&&self.AbortController&&self.Request.prototype.hasOwnProperty("signal"))return Vt(t,e);if(I()&&self.worker&&self.worker.actor){var n=!0;return self.worker.actor.send("getResource",t,e,void 0,n)}}return qt(t,e)},Wt=function(t,e){return Gt(v(t,{type:"json"}),e)},Yt=function(t,e){return Gt(v(t,{type:"arrayBuffer"}),e)},$t=function(t,e){return Gt(v(t,{method:"POST"}),e)};function Xt(t){var e=self.document.createElement("a");return e.href=t,e.protocol===self.document.location.protocol&&e.host===self.document.location.host}var Kt,Zt,Jt="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAC0lEQVQYV2NgAAIAAAUAAarVyFEAAAAASUVORK5CYII=";function Qt(t,e,n,r){var i=new self.Image,a=self.URL;i.onload=function(){e(null,i),a.revokeObjectURL(i.src)},i.onerror=function(){return e(new Error("Could not load image. Please make sure to use a supported image type such as PNG or JPEG. Note that SVGs are not supported."))};var o=new self.Blob([new Uint8Array(t)],{type:"image/png"});i.cacheControl=n,i.expires=r,i.src=t.byteLength?a.createObjectURL(o):Jt}function te(t,e){var n=new self.Blob([new Uint8Array(t)],{type:"image/png"});self.createImageBitmap(n).then((function(t){e(null,t)})).catch((function(t){e(new Error("Could not load image because of "+t.message+". Please make sure to use a supported image type such as PNG or JPEG. 
Note that SVGs are not supported."))}))}Kt=[],Zt=0;var ee=function t(e,n){if(X.supported&&(e.headers||(e.headers={}),e.headers.accept="image/webp,*/*"),Zt>=$.MAX_PARALLEL_IMAGE_REQUESTS){var r={requestParameters:e,callback:n,cancelled:!1,cancel:function(){this.cancelled=!0}};return Kt.push(r),r}Zt++;var i=!1,a=function(){if(!i)for(i=!0,Zt--;Kt.length&&Zt<$.MAX_PARALLEL_IMAGE_REQUESTS;){var e=Kt.shift(),n=e.requestParameters,r=e.callback;e.cancelled||(e.cancel=t(n,r).cancel)}},o=Yt(e,(function(t,e,r,i){a(),t?n(t):e&&(jt()?te(e,n):Qt(e,n,r,i))}));return{cancel:function(){o.cancel(),a()}}},ne=function(t,e){var n=self.document.createElement("video");n.muted=!0,n.onloadstart=function(){e(null,n)};for(var r=0;r0||this._oneTimeListeners&&this._oneTimeListeners[t]&&this._oneTimeListeners[t].length>0||this._eventedParent&&this._eventedParent.listens(t)},se.prototype.setEventedParent=function(t,e){return this._eventedParent=t,this._eventedParentData=e,this};var le={$version:8,$root:{version:{required:!0,type:"enum",values:[8]},name:{type:"string"},metadata:{type:"*"},center:{type:"array",value:"number"},zoom:{type:"number"},bearing:{type:"number",default:0,period:360,units:"degrees"},pitch:{type:"number",default:0,units:"degrees"},light:{type:"light"},sources:{required:!0,type:"sources"},sprite:{type:"string"},glyphs:{type:"string"},transition:{type:"transition"},layers:{required:!0,type:"array",value:"layer"}},sources:{"*":{type:"source"}},source:["source_vector","source_raster","source_raster_dem","source_geojson","source_video","source_image"],source_vector:{type:{required:!0,type:"enum",values:{vector:{}}},url:{type:"string"},tiles:{type:"array",value:"string"},bounds:{type:"array",value:"number",length:4,default:[-180,-85.051129,180,85.051129]},scheme:{type:"enum",values:{xyz:{},tms:{}},default:"xyz"},minzoom:{type:"number",default:0},maxzoom:{type:"number",default:22},attribution:{type:"string"},promoteId:{type:"promoteId"},"*":{type:"*"}},source_raster:{type:{required:!0,type:"enum",values:{raster:{}}},url:{type:"string"},tiles:{type:"array",value:"string"},bounds:{type:"array",value:"number",length:4,default:[-180,-85.051129,180,85.051129]},minzoom:{type:"number",default:0},maxzoom:{type:"number",default:22},tileSize:{type:"number",default:512,units:"pixels"},scheme:{type:"enum",values:{xyz:{},tms:{}},default:"xyz"},attribution:{type:"string"},"*":{type:"*"}},source_raster_dem:{type:{required:!0,type:"enum",values:{"raster-dem":{}}},url:{type:"string"},tiles:{type:"array",value:"string"},bounds:{type:"array",value:"number",length:4,default:[-180,-85.051129,180,85.051129]},minzoom:{type:"number",default:0},maxzoom:{type:"number",default:22},tileSize:{type:"number",default:512,units:"pixels"},attribution:{type:"string"},encoding:{type:"enum",values:{terrarium:{},mapbox:{}},default:"mapbox"},"*":{type:"*"}},source_geojson:{type:{required:!0,type:"enum",values:{geojson:{}}},data:{type:"*"},maxzoom:{type:"number",default:18},attribution:{type:"string"},buffer:{type:"number",default:128,maximum:512,minimum:0},tolerance:{type:"number",default:.375},cluster:{type:"boolean",default:!1},clusterRadius:{type:"number",default:50,minimum:0},clusterMaxZoom:{type:"number"},clusterProperties:{type:"*"},lineMetrics:{type:"boolean",default:!1},generateId:{type:"boolean",default:!1},promoteId:{type:"promoteId"}},source_video:{type:{required:!0,type:"enum",values:{video:{}}},urls:{required:!0,type:"array",value:"string"},coordinates:{required:!0,type:"array",length:4,value:{type:"array",length:2,value:"number"}}}
,source_image:{type:{required:!0,type:"enum",values:{image:{}}},url:{required:!0,type:"string"},coordinates:{required:!0,type:"array",length:4,value:{type:"array",length:2,value:"number"}}},layer:{id:{type:"string",required:!0},type:{type:"enum",values:{fill:{},line:{},symbol:{},circle:{},heatmap:{},"fill-extrusion":{},raster:{},hillshade:{},background:{}},required:!0},metadata:{type:"*"},source:{type:"string"},"source-layer":{type:"string"},minzoom:{type:"number",minimum:0,maximum:24},maxzoom:{type:"number",minimum:0,maximum:24},filter:{type:"filter"},layout:{type:"layout"},paint:{type:"paint"}},layout:["layout_fill","layout_line","layout_circle","layout_heatmap","layout_fill-extrusion","layout_symbol","layout_raster","layout_hillshade","layout_background"],layout_background:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_fill:{"fill-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_circle:{"circle-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_heatmap:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},"layout_fill-extrusion":{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_line:{"line-cap":{type:"enum",values:{butt:{},round:{},square:{}},default:"butt",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"line-join":{type:"enum",values:{bevel:{},round:{},miter:{}},default:"miter",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"line-miter-limit":{type:"number",default:2,requires:[{"line-join":"miter"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"line-round-limit":{type:"number",default:1.05,requires:[{"line-join":"round"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"line-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_symbol:{"symbol-placement":{type:"enum",values:{point:{},line:{},"line-center":{}},default:"point",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"symbol-spacing":{type:"number",default:250,minimum:1,units:"pixels",requires:[{"symbol-placement":"line"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"symbol-avoid-edges":{type:"boolean",default:!1,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"symbol-sort-key":{type:"number",expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"symbol-z-order":{type:"enum",values:{auto:{},"viewport-y":{},source:{}},default:"auto",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-allow-overlap":{type:"boolean",default:!1,requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-ignore-placement":{type:"boolean",default:!1,requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"p
roperty-type":"data-constant"},"icon-optional":{type:"boolean",default:!1,requires:["icon-image","text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-rotation-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-size":{type:"number",default:1,minimum:0,units:"factor of the original icon size",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-text-fit":{type:"enum",values:{none:{},width:{},height:{},both:{}},default:"none",requires:["icon-image","text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-text-fit-padding":{type:"array",value:"number",length:4,default:[0,0,0,0],units:"pixels",requires:["icon-image","text-field",{"icon-text-fit":["both","width","height"]}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"icon-image":{type:"resolvedImage",tokens:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-rotate":{type:"number",default:0,period:360,units:"degrees",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-padding":{type:"number",default:2,minimum:0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"icon-keep-upright":{type:"boolean",default:!1,requires:["icon-image",{"icon-rotation-alignment":"map"},{"symbol-placement":["line","line-center"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"icon-offset":{type:"array",value:"number",length:2,default:[0,0],requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-anchor":{type:"enum",values:{center:{},left:{},right:{},top:{},bottom:{},"top-left":{},"top-right":{},"bottom-left":{},"bottom-right":{}},default:"center",requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"icon-pitch-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-pitch-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-rotation-alignment":{type:"enum",values:{map:{},viewport:{},auto:{}},default:"auto",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-field":{type:"formatted",default:"",tokens:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-font":{type:"array",value:"string",default:["Open Sans Regular","Arial Unicode MS 
Regular"],requires:["text-field"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-size":{type:"number",default:16,minimum:0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-max-width":{type:"number",default:10,minimum:0,units:"ems",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-line-height":{type:"number",default:1.2,units:"ems",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-letter-spacing":{type:"number",default:0,units:"ems",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-justify":{type:"enum",values:{auto:{},left:{},center:{},right:{}},default:"center",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-radial-offset":{type:"number",units:"ems",default:0,requires:["text-field"],"property-type":"data-driven",expression:{interpolated:!0,parameters:["zoom","feature"]}},"text-variable-anchor":{type:"array",value:"enum",values:{center:{},left:{},right:{},top:{},bottom:{},"top-left":{},"top-right":{},"bottom-left":{},"bottom-right":{}},requires:["text-field",{"symbol-placement":["point"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-anchor":{type:"enum",values:{center:{},left:{},right:{},top:{},bottom:{},"top-left":{},"top-right":{},"bottom-left":{},"bottom-right":{}},default:"center",requires:["text-field",{"!":"text-variable-anchor"}],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-max-angle":{type:"number",default:45,units:"degrees",requires:["text-field",{"symbol-placement":["line","line-center"]}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-writing-mode":{type:"array",value:"enum",values:{horizontal:{},vertical:{}},requires:["text-field",{"symbol-placement":["point"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-rotate":{type:"number",default:0,period:360,units:"degrees",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-padding":{type:"number",default:2,minimum:0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-keep-upright":{type:"boolean",default:!0,requires:["text-field",{"text-rotation-alignment":"map"},{"symbol-placement":["line","line-center"]}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-transform":{type:"enum",values:{none:{},uppercase:{},lowercase:{}},default:"none",requires:["text-field"],expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-offset":{type:"array",value:"number",units:"ems",length:2,default:[0,0],requires:["text-field",{"!":"text-radial-offset"}],expression:{interpolated:!0,parameters:["zoom","feature"]},"property-type":"data-driven"},"text-allow-overlap":{type:"boolean",default:!1,requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-ignore-placement":{type:"boolean",default:!1,requires:["text-field"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-optiona
l":{type:"boolean",default:!1,requires:["text-field","icon-image"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_raster:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},layout_hillshade:{visibility:{type:"enum",values:{visible:{},none:{}},default:"visible","property-type":"constant"}},filter:{type:"array",value:"*"},filter_operator:{type:"enum",values:{"==":{},"!=":{},">":{},">=":{},"<":{},"<=":{},in:{},"!in":{},all:{},any:{},none:{},has:{},"!has":{},within:{}}},geometry_type:{type:"enum",values:{Point:{},LineString:{},Polygon:{}}},function:{expression:{type:"expression"},stops:{type:"array",value:"function_stop"},base:{type:"number",default:1,minimum:0},property:{type:"string",default:"$zoom"},type:{type:"enum",values:{identity:{},exponential:{},interval:{},categorical:{}},default:"exponential"},colorSpace:{type:"enum",values:{rgb:{},lab:{},hcl:{}},default:"rgb"},default:{type:"*",required:!1}},function_stop:{type:"array",minimum:0,maximum:24,value:["number","color"],length:2},expression:{type:"array",value:"*",minimum:1},expression_name:{type:"enum",values:{let:{group:"Variable binding"},var:{group:"Variable binding"},literal:{group:"Types"},array:{group:"Types"},at:{group:"Lookup"},in:{group:"Lookup"},"index-of":{group:"Lookup"},slice:{group:"Lookup"},case:{group:"Decision"},match:{group:"Decision"},coalesce:{group:"Decision"},step:{group:"Ramps, scales, curves"},interpolate:{group:"Ramps, scales, curves"},"interpolate-hcl":{group:"Ramps, scales, curves"},"interpolate-lab":{group:"Ramps, scales, curves"},ln2:{group:"Math"},pi:{group:"Math"},e:{group:"Math"},typeof:{group:"Types"},string:{group:"Types"},number:{group:"Types"},boolean:{group:"Types"},object:{group:"Types"},collator:{group:"Types"},format:{group:"Types"},image:{group:"Types"},"number-format":{group:"Types"},"to-string":{group:"Types"},"to-number":{group:"Types"},"to-boolean":{group:"Types"},"to-rgba":{group:"Color"},"to-color":{group:"Types"},rgb:{group:"Color"},rgba:{group:"Color"},get:{group:"Lookup"},has:{group:"Lookup"},length:{group:"Lookup"},properties:{group:"Feature data"},"feature-state":{group:"Feature data"},"geometry-type":{group:"Feature data"},id:{group:"Feature data"},zoom:{group:"Zoom"},"heatmap-density":{group:"Heatmap"},"line-progress":{group:"Feature data"},accumulated:{group:"Feature 
data"},"+":{group:"Math"},"*":{group:"Math"},"-":{group:"Math"},"/":{group:"Math"},"%":{group:"Math"},"^":{group:"Math"},sqrt:{group:"Math"},log10:{group:"Math"},ln:{group:"Math"},log2:{group:"Math"},sin:{group:"Math"},cos:{group:"Math"},tan:{group:"Math"},asin:{group:"Math"},acos:{group:"Math"},atan:{group:"Math"},min:{group:"Math"},max:{group:"Math"},round:{group:"Math"},abs:{group:"Math"},ceil:{group:"Math"},floor:{group:"Math"},distance:{group:"Math"},"==":{group:"Decision"},"!=":{group:"Decision"},">":{group:"Decision"},"<":{group:"Decision"},">=":{group:"Decision"},"<=":{group:"Decision"},all:{group:"Decision"},any:{group:"Decision"},"!":{group:"Decision"},within:{group:"Decision"},"is-supported-script":{group:"String"},upcase:{group:"String"},downcase:{group:"String"},concat:{group:"String"},"resolved-locale":{group:"String"}}},light:{anchor:{type:"enum",default:"viewport",values:{map:{},viewport:{}},"property-type":"data-constant",transition:!1,expression:{interpolated:!1,parameters:["zoom"]}},position:{type:"array",default:[1.15,210,30],length:3,value:"number","property-type":"data-constant",transition:!0,expression:{interpolated:!0,parameters:["zoom"]}},color:{type:"color","property-type":"data-constant",default:"#ffffff",expression:{interpolated:!0,parameters:["zoom"]},transition:!0},intensity:{type:"number","property-type":"data-constant",default:.5,minimum:0,maximum:1,expression:{interpolated:!0,parameters:["zoom"]},transition:!0}},paint:["paint_fill","paint_line","paint_circle","paint_heatmap","paint_fill-extrusion","paint_symbol","paint_raster","paint_hillshade","paint_background"],paint_fill:{"fill-antialias":{type:"boolean",default:!0,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"fill-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"fill-pattern"}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-outline-color":{type:"color",transition:!0,requires:[{"!":"fill-pattern"},{"fill-antialias":!0}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"fill-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["fill-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"fill-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"cross-faded-data-driven"}},"paint_fill-extrusion":{"fill-extrusion-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"fill-extrusion-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"fill-extrusion-pattern"}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-extrusion-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"fill-extrusion-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map
",requires:["fill-extrusion-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"fill-extrusion-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"cross-faded-data-driven"},"fill-extrusion-height":{type:"number",default:0,minimum:0,units:"meters",transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-extrusion-base":{type:"number",default:0,minimum:0,units:"meters",transition:!0,requires:["fill-extrusion-height"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"fill-extrusion-vertical-gradient":{type:"boolean",default:!0,transition:!1,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"}},paint_line:{"line-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"line-pattern"}],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"line-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["line-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"line-width":{type:"number",default:1,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-gap-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-offset":{type:"number",default:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-blur":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"line-dasharray":{type:"array",value:"number",minimum:0,transition:!0,units:"line 
widths",requires:[{"!":"line-pattern"}],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"cross-faded"},"line-pattern":{type:"resolvedImage",transition:!0,expression:{interpolated:!1,parameters:["zoom","feature"]},"property-type":"cross-faded-data-driven"},"line-gradient":{type:"color",transition:!1,requires:[{"!":"line-dasharray"},{"!":"line-pattern"},{source:"geojson",has:{lineMetrics:!0}}],expression:{interpolated:!0,parameters:["line-progress"]},"property-type":"color-ramp"}},paint_circle:{"circle-radius":{type:"number",default:5,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-blur":{type:"number",default:0,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"circle-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["circle-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"circle-pitch-scale":{type:"enum",values:{map:{},viewport:{}},default:"map",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"circle-pitch-alignment":{type:"enum",values:{map:{},viewport:{}},default:"viewport",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"circle-stroke-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-stroke-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"circle-stroke-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"}},paint_heatmap:{"heatmap-radius":{type:"number",default:30,minimum:1,transition:!0,units:"pixels",expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"heatmap-weight":{type:"number",default:1,minimum:0,transition:!1,expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"heatmap-intensity":{type:"number",default:1,minimum:0,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"heatmap-color":{type:"color",default:["interpolate",["linear"],["heatmap-density"],0,"rgba(0, 0, 255, 
0)",.1,"royalblue",.3,"cyan",.5,"lime",.7,"yellow",1,"red"],transition:!1,expression:{interpolated:!0,parameters:["heatmap-density"]},"property-type":"color-ramp"},"heatmap-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},paint_symbol:{"icon-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-color":{type:"color",default:"#000000",transition:!0,requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-halo-color":{type:"color",default:"rgba(0, 0, 0, 0)",transition:!0,requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-halo-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-halo-blur":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"icon-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",requires:["icon-image"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"icon-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["icon-image","icon-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"text-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-color":{type:"color",default:"#000000",transition:!0,overridable:!0,requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-halo-color":{type:"color",default:"rgba(0, 0, 0, 
0)",transition:!0,requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-halo-width":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-halo-blur":{type:"number",default:0,minimum:0,transition:!0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom","feature","feature-state"]},"property-type":"data-driven"},"text-translate":{type:"array",value:"number",length:2,default:[0,0],transition:!0,units:"pixels",requires:["text-field"],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"text-translate-anchor":{type:"enum",values:{map:{},viewport:{}},default:"map",requires:["text-field","text-translate"],expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"}},paint_raster:{"raster-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-hue-rotate":{type:"number",default:0,period:360,transition:!0,units:"degrees",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-brightness-min":{type:"number",default:0,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-brightness-max":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-saturation":{type:"number",default:0,minimum:-1,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-contrast":{type:"number",default:0,minimum:-1,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"raster-resampling":{type:"enum",values:{linear:{},nearest:{}},default:"linear",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"raster-fade-duration":{type:"number",default:300,minimum:0,transition:!1,units:"milliseconds",expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},paint_hillshade:{"hillshade-illumination-direction":{type:"number",default:335,minimum:0,maximum:359,transition:!1,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-illumination-anchor":{type:"enum",values:{map:{},viewport:{}},default:"viewport",expression:{interpolated:!1,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-exaggeration":{type:"number",default:.5,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-shadow-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-highlight-color":{type:"color",default:"#FFFFFF",transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"hillshade-accent-color":{type:"color",default:"#000000",transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},paint_background:{"background-color":{type:"color",default:"#000000",transition:!0,requires:[{"!":"background-pattern"}],expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"},"background-pattern":{type:"resolvedImage
",transition:!0,expression:{interpolated:!1,parameters:["zoom"]},"property-type":"cross-faded"},"background-opacity":{type:"number",default:1,minimum:0,maximum:1,transition:!0,expression:{interpolated:!0,parameters:["zoom"]},"property-type":"data-constant"}},transition:{duration:{type:"number",default:300,minimum:0,units:"milliseconds"},delay:{type:"number",default:0,minimum:0,units:"milliseconds"}},"property-type":{"data-driven":{type:"property-type"},"cross-faded":{type:"property-type"},"cross-faded-data-driven":{type:"property-type"},"color-ramp":{type:"property-type"},"data-constant":{type:"property-type"},constant:{type:"property-type"}},promoteId:{"*":{type:"string"}}},ce=function(t,e,n,r){this.message=(t?t+": ":"")+n,r&&(this.identifier=r),null!==e&&void 0!==e&&e.__line__&&(this.line=e.__line__)};function ue(t){var e=t.key,n=t.value;return n?[new ce(e,n,"constants have been deprecated as of v8")]:[]}function fe(t){for(var e=[],n=arguments.length-1;n-- >0;)e[n]=arguments[n+1];for(var r=0,i=e;r":"value"===t.itemType.kind?"array":"array<"+e+">"}return t.kind}var Ce=[ve,me,ye,be,xe,Me,we,Se(_e),Ae];function Pe(t,e){if("error"===e.kind)return null;if("array"===t.kind){if("array"===e.kind&&(0===e.N&&"value"===e.itemType.kind||!Pe(t.itemType,e.itemType))&&("number"!==typeof t.N||t.N===e.N))return null}else{if(t.kind===e.kind)return null;if("value"===t.kind)for(var n=0,r=Ce;n255?255:t}function i(t){return t<0?0:t>1?1:t}function a(t){return"%"===t[t.length-1]?r(parseFloat(t)/100*255):r(parseInt(t))}function o(t){return"%"===t[t.length-1]?i(parseFloat(t)/100):i(parseFloat(t))}function s(t,e,n){return n<0?n+=1:n>1&&(n-=1),6*n<1?t+(e-t)*n*6:2*n<1?e:3*n<2?t+(e-t)*(2/3-n)*6:t}function l(t){var e,i=t.replace(/ /g,"").toLowerCase();if(i in n)return n[i].slice();if("#"===i[0])return 4===i.length?(e=parseInt(i.substr(1),16))>=0&&e<=4095?[(3840&e)>>4|(3840&e)>>8,240&e|(240&e)>>4,15&e|(15&e)<<4,1]:null:7===i.length&&(e=parseInt(i.substr(1),16))>=0&&e<=16777215?[(16711680&e)>>16,(65280&e)>>8,255&e,1]:null;var l=i.indexOf("("),c=i.indexOf(")");if(-1!==l&&c+1===i.length){var u=i.substr(0,l),f=i.substr(l+1,c-(l+1)).split(","),h=1;switch(u){case"rgba":if(4!==f.length)return null;h=o(f.pop());case"rgb":return 3!==f.length?null:[a(f[0]),a(f[1]),a(f[2]),h];case"hsla":if(4!==f.length)return null;h=o(f.pop());case"hsl":if(3!==f.length)return null;var d=(parseFloat(f[0])%360+360)%360/360,p=o(f[1]),g=o(f[2]),v=g<=.5?g*(p+1):g+p-g*p,m=2*g-v;return[r(255*s(m,v,d+1/3)),r(255*s(m,v,d)),r(255*s(m,v,d-1/3)),h];default:return null}}return null}try{e.parseCSSColor=l}catch(c){}})),De=Ie.parseCSSColor,Re=function(t,e,n,r){void 0===r&&(r=1),this.r=t,this.g=e,this.b=n,this.a=r};Re.parse=function(t){if(t){if(t instanceof Re)return t;if("string"===typeof t){var e=De(t);if(e)return new Re(e[0]/255*e[3],e[1]/255*e[3],e[2]/255*e[3],e[3])}}},Re.prototype.toString=function(){var t=this.toArray(),e=t[0],n=t[1],r=t[2],i=t[3];return"rgba("+Math.round(e)+","+Math.round(n)+","+Math.round(r)+","+i+")"},Re.prototype.toArray=function(){var t=this,e=t.r,n=t.g,r=t.b,i=t.a;return 0===i?[0,0,0,0]:[255*e/i,255*n/i,255*r/i,i]},Re.black=new Re(0,0,0,1),Re.white=new Re(1,1,1,1),Re.transparent=new Re(0,0,0,0),Re.red=new Re(1,0,0,1);var ze=function(t,e,n){this.sensitivity=t?e?"variant":"case":e?"accent":"base",this.locale=n,this.collator=new Intl.Collator(this.locale?this.locale:[],{sensitivity:this.sensitivity,usage:"search"})};ze.prototype.compare=function(t,e){return this.collator.compare(t,e)},ze.prototype.resolvedLocale=function(){return new 
Intl.Collator(this.locale?this.locale:[]).resolvedOptions().locale};var Ne=function(t,e,n,r,i){this.text=t,this.image=e,this.scale=n,this.fontStack=r,this.textColor=i},je=function(t){this.sections=t};je.fromString=function(t){return new je([new Ne(t,null,null,null,null)])},je.prototype.isEmpty=function(){return 0===this.sections.length||!this.sections.some((function(t){return 0!==t.text.length||t.image&&0!==t.image.name.length}))},je.factory=function(t){return t instanceof je?t:je.fromString(t)},je.prototype.toString=function(){return 0===this.sections.length?"":this.sections.map((function(t){return t.text})).join("")},je.prototype.serialize=function(){for(var t=["format"],e=0,n=this.sections;e=0&&t<=255&&"number"===typeof e&&e>=0&&e<=255&&"number"===typeof n&&n>=0&&n<=255?"undefined"===typeof r||"number"===typeof r&&r>=0&&r<=1?null:"Invalid rgba value ["+[t,e,n,r].join(", ")+"]: 'a' must be between 0 and 1.":"Invalid rgba value ["+("number"===typeof r?[t,e,n,r]:[t,e,n]).join(", ")+"]: 'r', 'g', and 'b' must be between 0 and 255."}function Ue(t){if(null===t)return!0;if("string"===typeof t)return!0;if("boolean"===typeof t)return!0;if("number"===typeof t)return!0;if(t instanceof Re)return!0;if(t instanceof ze)return!0;if(t instanceof je)return!0;if(t instanceof Fe)return!0;if(Array.isArray(t)){for(var e=0,n=t;e2){var s=t[1];if("string"!==typeof s||!(s in We)||"object"===s)return e.error('The item type argument of "array" must be one of string, number, boolean',1);a=We[s],r++}else a=_e;if(t.length>3){if(null!==t[2]&&("number"!==typeof t[2]||t[2]<0||t[2]!==Math.floor(t[2])))return e.error('The length argument to "array" must be a positive integer literal',2);o=t[2],r++}n=Se(a,o)}else n=We[i];for(var l=[];r1)&&e.push(r)}}return e.concat(this.args.map((function(t){return t.serialize()})))};var $e=function(t){this.type=Me,this.sections=t};$e.parse=function(t,e){if(t.length<2)return e.error("Expected at least one argument.");var n=t[1];if(!Array.isArray(n)&&"object"===typeof n)return e.error("First argument must be an image or text section.");for(var r=[],i=!1,a=1;a<=t.length-1;++a){var o=t[a];if(i&&"object"===typeof o&&!Array.isArray(o)){i=!1;var s=null;if(o["font-scale"]&&!(s=e.parse(o["font-scale"],1,me)))return null;var l=null;if(o["text-font"]&&!(l=e.parse(o["text-font"],1,Se(ye))))return null;var c=null;if(o["text-color"]&&!(c=e.parse(o["text-color"],1,xe)))return null;var u=r[r.length-1];u.scale=s,u.font=l,u.textColor=c}else{var f=e.parse(t[a],1,_e);if(!f)return null;var h=f.type.kind;if("string"!==h&&"value"!==h&&"null"!==h&&"resolvedImage"!==h)return e.error("Formatted text type must be 'string', 'value', 'image' or 'null'.");i=!0,r.push({content:f,scale:null,font:null,textColor:null})}}return new $e(r)},$e.prototype.evaluate=function(t){var e=function(e){var n=e.content.evaluate(t);return He(n)===Ae?new Ne("",n,null,null,null):new Ne(Ve(n),null,e.scale?e.scale.evaluate(t):null,e.font?e.font.evaluate(t).join(","):null,e.textColor?e.textColor.evaluate(t):null)};return new je(this.sections.map(e))},$e.prototype.eachChild=function(t){for(var e=0,n=this.sections;e-1),n},Xe.prototype.eachChild=function(t){t(this.input)},Xe.prototype.outputDefined=function(){return!1},Xe.prototype.serialize=function(){return["image",this.input.serialize()]};var Ke={"to-boolean":be,"to-color":xe,"to-number":me,"to-string":ye},Ze=function(t,e){this.type=t,this.args=e};Ze.parse=function(t,e){if(t.length<2)return e.error("Expected at least one argument.");var 
n=t[0];if(("to-boolean"===n||"to-string"===n)&&2!==t.length)return e.error("Expected one argument.");for(var r=Ke[n],i=[],a=1;a4?"Invalid rbga value "+JSON.stringify(e)+": expected an array containing either three or four numeric values.":Be(e[0],e[1],e[2],e[3])))return new Re(e[0]/255,e[1]/255,e[2]/255,e[3])}throw new Ge(n||"Could not parse color from value '"+("string"===typeof e?e:String(JSON.stringify(e)))+"'")}if("number"===this.type.kind){for(var o=null,s=0,l=this.args;s=e[2])&&!(t[1]<=e[1])&&!(t[3]>=e[3])}function cn(t,e){var n=on(t[0]),r=sn(t[1]),i=Math.pow(2,e.z);return[Math.round(n*i*rn),Math.round(r*i*rn)]}function un(t,e,n){var r=t[0]-e[0],i=t[1]-e[1],a=t[0]-n[0],o=t[1]-n[1];return r*o-a*i===0&&r*a<=0&&i*o<=0}function fn(t,e,n){return e[1]>t[1]!==n[1]>t[1]&&t[0]<(n[0]-e[0])*(t[1]-e[1])/(n[1]-e[1])+e[0]}function hn(t,e){for(var n=!1,r=0,i=e.length;r0&&f<0||u<0&&f>0}function vn(t,e,n,r){var i=[e[0]-t[0],e[1]-t[1]];return 0!==pn([r[0]-n[0],r[1]-n[1]],i)&&!(!gn(t,e,n,r)||!gn(n,r,t,e))}function mn(t,e,n){for(var r=0,i=n;rn[2]){var i=.5*r,a=t[0]-n[0]>i?-r:n[0]-t[0]>i?r:0;0===a&&(a=t[0]-n[2]>i?-r:n[2]-t[0]>i?r:0),t[0]+=a}an(e,t)}function kn(t){t[0]=t[1]=1/0,t[2]=t[3]=-1/0}function Tn(t,e,n,r){for(var i=Math.pow(2,r.z)*rn,a=[r.x*rn,r.y*rn],o=[],s=0,l=t;s=0)return!1;var n=!0;return t.eachChild((function(t){n&&!On(t,e)&&(n=!1)})),n}En.parse=function(t,e){if(2!==t.length)return e.error("'within' expression requires exactly one argument, but found "+(t.length-1)+" instead.");if(Ue(t[1])){var n=t[1];if("FeatureCollection"===n.type)for(var r=0;re))throw new Ge("Input is not a number.");o=s-1}return 0}In.prototype.parse=function(t,e,n,r,i){return void 0===i&&(i={}),e?this.concat(e,n,r)._parse(t,i):this._parse(t,i)},In.prototype._parse=function(t,e){function n(t,e,n){return"assert"===n?new Ye(e,[t]):"coerce"===n?new Ze(e,[t]):t}if(null!==t&&"string"!==typeof t&&"boolean"!==typeof t&&"number"!==typeof t||(t=["literal",t]),Array.isArray(t)){if(0===t.length)return this.error('Expected an array with at least one element. If you wanted a literal array, use ["literal", []].');var r=t[0];if("string"!==typeof r)return this.error("Expression name must be a string, but found "+typeof r+' instead. If you wanted a literal array, use ["literal", [...]].',0),null;var i=this.registry[r];if(i){var a=i.parse(t,this);if(!a)return null;if(this.expectedType){var o=this.expectedType,s=a.type;if("string"!==o.kind&&"number"!==o.kind&&"boolean"!==o.kind&&"object"!==o.kind&&"array"!==o.kind||"value"!==s.kind)if("color"!==o.kind&&"formatted"!==o.kind&&"resolvedImage"!==o.kind||"value"!==s.kind&&"string"!==s.kind){if(this.checkSubtype(o,s))return null}else a=n(a,o,e.typeAnnotation||"coerce");else a=n(a,o,e.typeAnnotation||"assert")}if(!(a instanceof qe)&&"resolvedImage"!==a.type.kind&&Dn(a)){var l=new Qe;try{a=new qe(a.type,a.evaluate(l))}catch(c){return this.error(c.message),null}}return a}return this.error('Unknown expression "'+r+'". If you wanted a literal array, use ["literal", [...]].',0)}return"undefined"===typeof t?this.error("'undefined' value invalid. Use null instead."):"object"===typeof t?this.error('Bare objects invalid. 
Use ["literal", {...}] instead.'):this.error("Expected an array, but found "+typeof t+" instead.")},In.prototype.concat=function(t,e,n){var r="number"===typeof t?this.path.concat(t):this.path,i=n?this.scope.concat(n):this.scope;return new In(this.registry,r,e||null,i,this.errors)},In.prototype.error=function(t){for(var e=[],n=arguments.length-1;n-- >0;)e[n]=arguments[n+1];var r=""+this.key+e.map((function(t){return"["+t+"]"})).join("");this.errors.push(new pe(r,t))},In.prototype.checkSubtype=function(t,e){var n=Pe(t,e);return n&&this.error(n),n};var zn=function(t,e,n){this.type=t,this.input=e,this.labels=[],this.outputs=[];for(var r=0,i=n;r=o)return e.error('Input/output pairs for "step" expressions must be arranged with input values in strictly ascending order.',l);var u=e.parse(s,c,i);if(!u)return null;i=i||u.type,r.push([o,u])}return new zn(i,n,r)},zn.prototype.evaluate=function(t){var e=this.labels,n=this.outputs;if(1===e.length)return n[0].evaluate(t);var r=this.input.evaluate(t);if(r<=e[0])return n[0].evaluate(t);var i=e.length;return r>=e[i-1]?n[i-1].evaluate(t):n[Rn(e,r)].evaluate(t)},zn.prototype.eachChild=function(t){t(this.input);for(var e=0,n=this.outputs;e0&&t.push(this.labels[e]),t.push(this.outputs[e].serialize());return t};var Bn=Object.freeze({__proto__:null,number:Nn,color:jn,array:Fn}),Un=.95047,Hn=1,Vn=1.08883,qn=4/29,Gn=6/29,Wn=3*Gn*Gn,Yn=Gn*Gn*Gn,$n=Math.PI/180,Xn=180/Math.PI;function Kn(t){return t>Yn?Math.pow(t,1/3):t/Wn+qn}function Zn(t){return t>Gn?t*t*t:Wn*(t-qn)}function Jn(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Qn(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function tr(t){var e=Qn(t.r),n=Qn(t.g),r=Qn(t.b),i=Kn((.4124564*e+.3575761*n+.1804375*r)/Un),a=Kn((.2126729*e+.7151522*n+.072175*r)/Hn);return{l:116*a-16,a:500*(i-a),b:200*(a-Kn((.0193339*e+.119192*n+.9503041*r)/Vn)),alpha:t.a}}function er(t){var e=(t.l+16)/116,n=isNaN(t.a)?e:e+t.a/500,r=isNaN(t.b)?e:e-t.b/200;return e=Hn*Zn(e),n=Un*Zn(n),r=Vn*Zn(r),new Re(Jn(3.2404542*n-1.5371385*e-.4985314*r),Jn(-.969266*n+1.8760108*e+.041556*r),Jn(.0556434*n-.2040259*e+1.0572252*r),t.alpha)}function nr(t,e,n){return{l:Nn(t.l,e.l,n),a:Nn(t.a,e.a,n),b:Nn(t.b,e.b,n),alpha:Nn(t.alpha,e.alpha,n)}}function rr(t){var e=tr(t),n=e.l,r=e.a,i=e.b,a=Math.atan2(i,r)*Xn;return{h:a<0?a+360:a,c:Math.sqrt(r*r+i*i),l:n,alpha:t.a}}function ir(t){var e=t.h*$n,n=t.c;return er({l:t.l,a:Math.cos(e)*n,b:Math.sin(e)*n,alpha:t.alpha})}function ar(t,e,n){var r=e-t;return t+n*(r>180||r<-180?r-360*Math.round(r/360):r)}function or(t,e,n){return{h:ar(t.h,e.h,n),c:Nn(t.c,e.c,n),l:Nn(t.l,e.l,n),alpha:Nn(t.alpha,e.alpha,n)}}var sr={forward:tr,reverse:er,interpolate:nr},lr={forward:rr,reverse:ir,interpolate:or},cr=Object.freeze({__proto__:null,lab:sr,hcl:lr}),ur=function(t,e,n,r,i){this.type=t,this.operator=e,this.interpolation=n,this.input=r,this.labels=[],this.outputs=[];for(var a=0,o=i;a1})))return e.error("Cubic bezier interpolation requires four numeric arguments with values between 0 and 1.",1);r={name:"cubic-bezier",controlPoints:s}}if(t.length-1<4)return e.error("Expected at least 4 arguments, but found only "+(t.length-1)+".");if((t.length-1)%2!==0)return e.error("Expected an even number of arguments.");if(!(i=e.parse(i,2,me)))return null;var l=[],c=null;"interpolate-hcl"===n||"interpolate-lab"===n?c=xe:e.expectedType&&"value"!==e.expectedType.kind&&(c=e.expectedType);for(var u=0;u=f)return e.error('Input/output pairs for "interpolate" expressions must be arranged with input values in strictly 
ascending order.',d);var g=e.parse(h,p,c);if(!g)return null;c=c||g.type,l.push([f,g])}return"number"===c.kind||"color"===c.kind||"array"===c.kind&&"number"===c.itemType.kind&&"number"===typeof c.N?new ur(c,n,r,i,l):e.error("Type "+Ee(c)+" is not interpolatable.")},ur.prototype.evaluate=function(t){var e=this.labels,n=this.outputs;if(1===e.length)return n[0].evaluate(t);var r=this.input.evaluate(t);if(r<=e[0])return n[0].evaluate(t);var i=e.length;if(r>=e[i-1])return n[i-1].evaluate(t);var a=Rn(e,r),o=e[a],s=e[a+1],l=ur.interpolationFactor(this.interpolation,r,o,s),c=n[a].evaluate(t),u=n[a+1].evaluate(t);return"interpolate"===this.operator?Bn[this.type.kind.toLowerCase()](c,u,l):"interpolate-hcl"===this.operator?lr.reverse(lr.interpolate(lr.forward(c),lr.forward(u),l)):sr.reverse(sr.interpolate(sr.forward(c),sr.forward(u),l))},ur.prototype.eachChild=function(t){t(this.input);for(var e=0,n=this.outputs;e=n.length)throw new Ge("Array index out of bounds: "+e+" > "+(n.length-1)+".");if(e!==Math.floor(e))throw new Ge("Array index must be an integer, but found "+e+" instead.");return n[e]},pr.prototype.eachChild=function(t){t(this.index),t(this.input)},pr.prototype.outputDefined=function(){return!1},pr.prototype.serialize=function(){return["at",this.index.serialize(),this.input.serialize()]};var gr=function(t,e){this.type=be,this.needle=t,this.haystack=e};gr.parse=function(t,e){if(3!==t.length)return e.error("Expected 2 arguments, but found "+(t.length-1)+" instead.");var n=e.parse(t[1],1,_e),r=e.parse(t[2],2,_e);return n&&r?Oe(n.type,[be,ye,me,ve,_e])?new gr(n,r):e.error("Expected first argument to be of type boolean, string, number or null, but found "+Ee(n.type)+" instead"):null},gr.prototype.evaluate=function(t){var e=this.needle.evaluate(t),n=this.haystack.evaluate(t);if(!n)return!1;if(!Le(e,["boolean","string","number","null"]))throw new Ge("Expected first argument to be of type boolean, string, number or null, but found "+Ee(He(e))+" instead.");if(!Le(n,["string","array"]))throw new Ge("Expected second argument to be of type array or string, but found "+Ee(He(n))+" instead.");return n.indexOf(e)>=0},gr.prototype.eachChild=function(t){t(this.needle),t(this.haystack)},gr.prototype.outputDefined=function(){return!0},gr.prototype.serialize=function(){return["in",this.needle.serialize(),this.haystack.serialize()]};var vr=function(t,e,n){this.type=me,this.needle=t,this.haystack=e,this.fromIndex=n};vr.parse=function(t,e){if(t.length<=2||t.length>=5)return e.error("Expected 3 or 4 arguments, but found "+(t.length-1)+" instead.");var n=e.parse(t[1],1,_e),r=e.parse(t[2],2,_e);if(!n||!r)return null;if(!Oe(n.type,[be,ye,me,ve,_e]))return e.error("Expected first argument to be of type boolean, string, number or null, but found "+Ee(n.type)+" instead");if(4===t.length){var i=e.parse(t[3],3,me);return i?new vr(n,r,i):null}return new vr(n,r)},vr.prototype.evaluate=function(t){var e=this.needle.evaluate(t),n=this.haystack.evaluate(t);if(!Le(e,["boolean","string","number","null"]))throw new Ge("Expected first argument to be of type boolean, string, number or null, but found "+Ee(He(e))+" instead.");if(!Le(n,["string","array"]))throw new Ge("Expected second argument to be of type array or string, but found "+Ee(He(n))+" instead.");if(this.fromIndex){var r=this.fromIndex.evaluate(t);return n.indexOf(e,r)}return 
n.indexOf(e)},vr.prototype.eachChild=function(t){t(this.needle),t(this.haystack),this.fromIndex&&t(this.fromIndex)},vr.prototype.outputDefined=function(){return!1},vr.prototype.serialize=function(){if(null!=this.fromIndex&&void 0!==this.fromIndex){var t=this.fromIndex.serialize();return["index-of",this.needle.serialize(),this.haystack.serialize(),t]}return["index-of",this.needle.serialize(),this.haystack.serialize()]};var mr=function(t,e,n,r,i,a){this.inputType=t,this.type=e,this.input=n,this.cases=r,this.outputs=i,this.otherwise=a};mr.parse=function(t,e){if(t.length<5)return e.error("Expected at least 4 arguments, but found only "+(t.length-1)+".");if(t.length%2!==1)return e.error("Expected an even number of arguments.");var n,r;e.expectedType&&"value"!==e.expectedType.kind&&(r=e.expectedType);for(var i={},a=[],o=2;oNumber.MAX_SAFE_INTEGER)return c.error("Branch labels must be integers no larger than "+Number.MAX_SAFE_INTEGER+".");if("number"===typeof h&&Math.floor(h)!==h)return c.error("Numeric branch labels must be integer values.");if(n){if(c.checkSubtype(n,He(h)))return null}else n=He(h);if("undefined"!==typeof i[String(h)])return c.error("Branch labels must be unique.");i[String(h)]=a.length}var d=e.parse(l,o,r);if(!d)return null;r=r||d.type,a.push(d)}var p=e.parse(t[1],1,_e);if(!p)return null;var g=e.parse(t[t.length-1],t.length-1,r);return g?"value"!==p.type.kind&&e.concat(1).checkSubtype(n,p.type)?null:new mr(n,r,p,i,a,g):null},mr.prototype.evaluate=function(t){var e=this.input.evaluate(t);return(He(e)===this.inputType&&this.outputs[this.cases[e]]||this.otherwise).evaluate(t)},mr.prototype.eachChild=function(t){t(this.input),this.outputs.forEach(t),t(this.otherwise)},mr.prototype.outputDefined=function(){return this.outputs.every((function(t){return t.outputDefined()}))&&this.otherwise.outputDefined()},mr.prototype.serialize=function(){for(var t=this,e=["match",this.input.serialize()],n=[],r={},i=0,a=Object.keys(this.cases).sort();in}function Mr(t,e,n){return e<=n}function Ar(t,e,n){return e>=n}function Sr(t,e,n,r){return 0===r.compare(e,n)}function Er(t,e,n,r){return!Sr(t,e,n,r)}function Cr(t,e,n,r){return r.compare(e,n)<0}function Pr(t,e,n,r){return r.compare(e,n)>0}function Or(t,e,n,r){return r.compare(e,n)<=0}function Lr(t,e,n,r){return r.compare(e,n)>=0}function Ir(t,e,n){var r="=="!==t&&"!="!==t;return function(){function i(t,e,n){this.type=be,this.lhs=t,this.rhs=e,this.collator=n,this.hasUntypedArgument="value"===t.type.kind||"value"===e.type.kind}return i.parse=function(t,e){if(3!==t.length&&4!==t.length)return e.error("Expected two or three arguments.");var n=t[0],a=e.parse(t[1],1,_e);if(!a)return null;if(!xr(n,a.type))return e.concat(1).error('"'+n+"\" comparisons are not supported for type '"+Ee(a.type)+"'.");var o=e.parse(t[2],2,_e);if(!o)return null;if(!xr(n,o.type))return e.concat(2).error('"'+n+"\" comparisons are not supported for type '"+Ee(o.type)+"'.");if(a.type.kind!==o.type.kind&&"value"!==a.type.kind&&"value"!==o.type.kind)return e.error("Cannot compare types '"+Ee(a.type)+"' and '"+Ee(o.type)+"'.");r&&("value"===a.type.kind&&"value"!==o.type.kind?a=new Ye(o.type,[a]):"value"!==a.type.kind&&"value"===o.type.kind&&(o=new Ye(a.type,[o])));var s=null;if(4===t.length){if("string"!==a.type.kind&&"string"!==o.type.kind&&"value"!==a.type.kind&&"value"!==o.type.kind)return e.error("Cannot use collator to compare non-string types.");if(!(s=e.parse(t[3],3,Te)))return null}return new i(a,o,s)},i.prototype.evaluate=function(i){var 
a=this.lhs.evaluate(i),o=this.rhs.evaluate(i);if(r&&this.hasUntypedArgument){var s=He(a),l=He(o);if(s.kind!==l.kind||"string"!==s.kind&&"number"!==s.kind)throw new Ge('Expected arguments for "'+t+'" to be (string, string) or (number, number), but found ('+s.kind+", "+l.kind+") instead.")}if(this.collator&&!r&&this.hasUntypedArgument){var c=He(a),u=He(o);if("string"!==c.kind||"string"!==u.kind)return e(i,a,o)}return this.collator?n(i,a,o,this.collator.evaluate(i)):e(i,a,o)},i.prototype.eachChild=function(t){t(this.lhs),t(this.rhs),this.collator&&t(this.collator)},i.prototype.outputDefined=function(){return!0},i.prototype.serialize=function(){var e=[t];return this.eachChild((function(t){e.push(t.serialize())})),e},i}()}br.parse=function(t,e){if(t.length<=2||t.length>=5)return e.error("Expected 3 or 4 arguments, but found "+(t.length-1)+" instead.");var n=e.parse(t[1],1,_e),r=e.parse(t[2],2,me);if(!n||!r)return null;if(!Oe(n.type,[Se(_e),ye,_e]))return e.error("Expected first argument to be of type array or string, but found "+Ee(n.type)+" instead");if(4===t.length){var i=e.parse(t[3],3,me);return i?new br(n.type,n,r,i):null}return new br(n.type,n,r)},br.prototype.evaluate=function(t){var e=this.input.evaluate(t),n=this.beginIndex.evaluate(t);if(!Le(e,["string","array"]))throw new Ge("Expected first argument to be of type array or string, but found "+Ee(He(e))+" instead.");if(this.endIndex){var r=this.endIndex.evaluate(t);return e.slice(n,r)}return e.slice(n)},br.prototype.eachChild=function(t){t(this.input),t(this.beginIndex),this.endIndex&&t(this.endIndex)},br.prototype.outputDefined=function(){return!1},br.prototype.serialize=function(){if(null!=this.endIndex&&void 0!==this.endIndex){var t=this.endIndex.serialize();return["slice",this.input.serialize(),this.beginIndex.serialize(),t]}return["slice",this.input.serialize(),this.beginIndex.serialize()]};var Dr=Ir("==",wr,Sr),Rr=Ir("!=",_r,Er),zr=Ir("<",kr,Cr),Nr=Ir(">",Tr,Pr),jr=Ir("<=",Mr,Or),Fr=Ir(">=",Ar,Lr),Br=function(t,e,n,r,i){this.type=ye,this.number=t,this.locale=e,this.currency=n,this.minFractionDigits=r,this.maxFractionDigits=i};Br.parse=function(t,e){if(3!==t.length)return e.error("Expected two arguments.");var n=e.parse(t[1],1,me);if(!n)return null;var r=t[2];if("object"!==typeof r||Array.isArray(r))return e.error("NumberFormat options argument must be an object.");var i=null;if(r.locale&&!(i=e.parse(r.locale,1,ye)))return null;var a=null;if(r.currency&&!(a=e.parse(r.currency,1,ye)))return null;var o=null;if(r["min-fraction-digits"]&&!(o=e.parse(r["min-fraction-digits"],1,me)))return null;var s=null;return r["max-fraction-digits"]&&!(s=e.parse(r["max-fraction-digits"],1,me))?null:new Br(n,i,a,o,s)},Br.prototype.evaluate=function(t){return new Intl.NumberFormat(this.locale?this.locale.evaluate(t):[],{style:this.currency?"currency":"decimal",currency:this.currency?this.currency.evaluate(t):void 0,minimumFractionDigits:this.minFractionDigits?this.minFractionDigits.evaluate(t):void 0,maximumFractionDigits:this.maxFractionDigits?this.maxFractionDigits.evaluate(t):void 0}).format(this.number.evaluate(t))},Br.prototype.eachChild=function(t){t(this.number),this.locale&&t(this.locale),this.currency&&t(this.currency),this.minFractionDigits&&t(this.minFractionDigits),this.maxFractionDigits&&t(this.maxFractionDigits)},Br.prototype.outputDefined=function(){return!1},Br.prototype.serialize=function(){var t={};return 
this.locale&&(t.locale=this.locale.serialize()),this.currency&&(t.currency=this.currency.serialize()),this.minFractionDigits&&(t["min-fraction-digits"]=this.minFractionDigits.serialize()),this.maxFractionDigits&&(t["max-fraction-digits"]=this.maxFractionDigits.serialize()),["number-format",this.number.serialize(),t]};var Ur=function(t){this.type=me,this.input=t};Ur.parse=function(t,e){if(2!==t.length)return e.error("Expected 1 argument, but found "+(t.length-1)+" instead.");var n=e.parse(t[1],1);return n?"array"!==n.type.kind&&"string"!==n.type.kind&&"value"!==n.type.kind?e.error("Expected argument of type string or array, but found "+Ee(n.type)+" instead."):new Ur(n):null},Ur.prototype.evaluate=function(t){var e=this.input.evaluate(t);if("string"===typeof e)return e.length;if(Array.isArray(e))return e.length;throw new Ge("Expected value to be of type string or array, but found "+Ee(He(e))+" instead.")},Ur.prototype.eachChild=function(t){t(this.input)},Ur.prototype.outputDefined=function(){return!1},Ur.prototype.serialize=function(){var t=["length"];return this.eachChild((function(e){t.push(e.serialize())})),t};var Hr={"==":Dr,"!=":Rr,">":Nr,"<":zr,">=":Fr,"<=":jr,array:Ye,at:pr,boolean:Ye,case:yr,coalesce:hr,collator:nn,format:$e,image:Xe,in:gr,"index-of":vr,interpolate:ur,"interpolate-hcl":ur,"interpolate-lab":ur,length:Ur,let:dr,literal:qe,match:mr,number:Ye,"number-format":Br,object:Ye,slice:br,step:zn,string:Ye,"to-boolean":Ze,"to-color":Ze,"to-number":Ze,"to-string":Ze,var:Ln,within:En};function Vr(t,e){var n=e[0],r=e[1],i=e[2],a=e[3];n=n.evaluate(t),r=r.evaluate(t),i=i.evaluate(t);var o=a?a.evaluate(t):1,s=Be(n,r,i,o);if(s)throw new Ge(s);return new Re(n/255*o,r/255*o,i/255*o,o)}function qr(t,e){return t in e}function Gr(t,e){var n=e[t];return"undefined"===typeof n?null:n}function Wr(t,e,n,r){for(;n<=r;){var i=n+r>>1;if(e[i]===t)return!0;e[i]>t?r=i-1:n=i+1}return!1}function Yr(t){return{type:t}}function $r(t){return{result:"success",value:t}}function Xr(t){return{result:"error",value:t}}function Kr(t){return"data-driven"===t["property-type"]||"cross-faded-data-driven"===t["property-type"]}function Zr(t){return!!t.expression&&t.expression.parameters.indexOf("zoom")>-1}function Jr(t){return!!t.expression&&t.expression.interpolated}function Qr(t){return t instanceof Number?"number":t instanceof String?"string":t instanceof Boolean?"boolean":Array.isArray(t)?"array":null===t?"null":typeof t}function ti(t){return"object"===typeof t&&null!==t&&!Array.isArray(t)}function ei(t){return t}function ni(t,e){var n,r,i,a="color"===e.type,o=t.stops&&"object"===typeof t.stops[0][0],s=o||void 0!==t.property,l=o||!s,c=t.type||(Jr(e)?"exponential":"interval");if(a&&((t=fe({},t)).stops&&(t.stops=t.stops.map((function(t){return[t[0],Re.parse(t[1])]}))),t.default?t.default=Re.parse(t.default):t.default=Re.parse(e.default)),t.colorSpace&&"rgb"!==t.colorSpace&&!cr[t.colorSpace])throw new Error("Unknown color space: "+t.colorSpace);if("exponential"===c)n=oi;else if("interval"===c)n=ai;else if("categorical"===c){n=ii,r=Object.create(null);for(var u=0,f=t.stops;u=t.stops[r-1][0])return t.stops[r-1][1];var i=Rn(t.stops.map((function(t){return t[0]})),n);return t.stops[i][1]}function oi(t,e,n){var r=void 0!==t.base?t.base:1;if("number"!==Qr(n))return ri(t.default,e.default);var i=t.stops.length;if(1===i)return t.stops[0][1];if(n<=t.stops[0][0])return t.stops[0][1];if(n>=t.stops[i-1][0])return t.stops[i-1][1];var a=Rn(t.stops.map((function(t){return 
t[0]})),n),o=li(n,r,t.stops[a][0],t.stops[a+1][0]),s=t.stops[a][1],l=t.stops[a+1][1],c=Bn[e.type]||ei;if(t.colorSpace&&"rgb"!==t.colorSpace){var u=cr[t.colorSpace];c=function(t,e){return u.reverse(u.interpolate(u.forward(t),u.forward(e),o))}}return"function"===typeof s.evaluate?{evaluate:function(){for(var t=[],e=arguments.length;e--;)t[e]=arguments[e];var n=s.evaluate.apply(void 0,t),r=l.evaluate.apply(void 0,t);if(void 0!==n&&void 0!==r)return c(n,r,o)}}:c(s,l,o)}function si(t,e,n){return"color"===e.type?n=Re.parse(n):"formatted"===e.type?n=je.fromString(n.toString()):"resolvedImage"===e.type?n=Fe.fromString(n.toString()):Qr(n)===e.type||"enum"===e.type&&e.values[n]||(n=void 0),ri(n,t.default,e.default)}function li(t,e,n,r){var i=r-n,a=t-n;return 0===i?0:1===e?a/i:(Math.pow(e,a)-1)/(Math.pow(e,i)-1)}tn.register(Hr,{error:[ke,[ye],function(t,e){var n=e[0];throw new Ge(n.evaluate(t))}],typeof:[ye,[_e],function(t,e){return Ee(He(e[0].evaluate(t)))}],"to-rgba":[Se(me,4),[xe],function(t,e){return e[0].evaluate(t).toArray()}],rgb:[xe,[me,me,me],Vr],rgba:[xe,[me,me,me,me],Vr],has:{type:be,overloads:[[[ye],function(t,e){return qr(e[0].evaluate(t),t.properties())}],[[ye,we],function(t,e){var n=e[0],r=e[1];return qr(n.evaluate(t),r.evaluate(t))}]]},get:{type:_e,overloads:[[[ye],function(t,e){return Gr(e[0].evaluate(t),t.properties())}],[[ye,we],function(t,e){var n=e[0],r=e[1];return Gr(n.evaluate(t),r.evaluate(t))}]]},"feature-state":[_e,[ye],function(t,e){return Gr(e[0].evaluate(t),t.featureState||{})}],properties:[we,[],function(t){return t.properties()}],"geometry-type":[ye,[],function(t){return t.geometryType()}],id:[_e,[],function(t){return t.id()}],zoom:[me,[],function(t){return t.globals.zoom}],"heatmap-density":[me,[],function(t){return t.globals.heatmapDensity||0}],"line-progress":[me,[],function(t){return t.globals.lineProgress||0}],accumulated:[_e,[],function(t){return void 0===t.globals.accumulated?null:t.globals.accumulated}],"+":[me,Yr(me),function(t,e){for(var n=0,r=0,i=e;r":[be,[ye,_e],function(t,e){var n=e[0],r=e[1],i=t.properties()[n.value],a=r.value;return typeof i===typeof a&&i>a}],"filter-id->":[be,[_e],function(t,e){var n=e[0],r=t.id(),i=n.value;return typeof r===typeof i&&r>i}],"filter-<=":[be,[ye,_e],function(t,e){var n=e[0],r=e[1],i=t.properties()[n.value],a=r.value;return typeof i===typeof a&&i<=a}],"filter-id-<=":[be,[_e],function(t,e){var n=e[0],r=t.id(),i=n.value;return typeof r===typeof i&&r<=i}],"filter->=":[be,[ye,_e],function(t,e){var n=e[0],r=e[1],i=t.properties()[n.value],a=r.value;return typeof i===typeof a&&i>=a}],"filter-id->=":[be,[_e],function(t,e){var n=e[0],r=t.id(),i=n.value;return typeof r===typeof i&&r>=i}],"filter-has":[be,[_e],function(t,e){return e[0].value in t.properties()}],"filter-has-id":[be,[],function(t){return null!==t.id()&&void 0!==t.id()}],"filter-type-in":[be,[Se(ye)],function(t,e){return e[0].value.indexOf(t.geometryType())>=0}],"filter-id-in":[be,[Se(_e)],function(t,e){return e[0].value.indexOf(t.id())>=0}],"filter-in-small":[be,[ye,Se(_e)],function(t,e){var n=e[0];return e[1].value.indexOf(t.properties()[n.value])>=0}],"filter-in-large":[be,[ye,Se(_e)],function(t,e){var n=e[0],r=e[1];return Wr(t.properties()[n.value],r.value,0,r.value.length-1)}],all:{type:be,overloads:[[[be,be],function(t,e){var n=e[0],r=e[1];return n.evaluate(t)&&r.evaluate(t)}],[Yr(be),function(t,e){for(var n=0,r=e;n0&&"string"===typeof t[0]&&t[0]in Hr}function fi(t,e){var n=new In(Hr,[],e?yi(e):void 0),r=n.parse(t,void 0,void 0,void 
0,e&&"string"===e.type?{typeAnnotation:"coerce"}:void 0);return r?$r(new ci(r,e)):Xr(n.errors)}ci.prototype.evaluateWithoutErrorHandling=function(t,e,n,r,i,a){return this._evaluator.globals=t,this._evaluator.feature=e,this._evaluator.featureState=n,this._evaluator.canonical=r,this._evaluator.availableImages=i||null,this._evaluator.formattedSection=a,this.expression.evaluate(this._evaluator)},ci.prototype.evaluate=function(t,e,n,r,i,a){this._evaluator.globals=t,this._evaluator.feature=e||null,this._evaluator.featureState=n||null,this._evaluator.canonical=r,this._evaluator.availableImages=i||null,this._evaluator.formattedSection=a||null;try{var o=this.expression.evaluate(this._evaluator);if(null===o||void 0===o||"number"===typeof o&&o!==o)return this._defaultValue;if(this._enumValues&&!(o in this._enumValues))throw new Ge("Expected value to be one of "+Object.keys(this._enumValues).map((function(t){return JSON.stringify(t)})).join(", ")+", but found "+JSON.stringify(o)+" instead.");return o}catch(s){return this._warningHistory[s.message]||(this._warningHistory[s.message]=!0,"undefined"!==typeof console&&console.warn(s.message)),this._defaultValue}};var hi=function(t,e){this.kind=t,this._styleExpression=e,this.isStateDependent="constant"!==t&&!Pn(e.expression)};hi.prototype.evaluateWithoutErrorHandling=function(t,e,n,r,i,a){return this._styleExpression.evaluateWithoutErrorHandling(t,e,n,r,i,a)},hi.prototype.evaluate=function(t,e,n,r,i,a){return this._styleExpression.evaluate(t,e,n,r,i,a)};var di=function(t,e,n,r){this.kind=t,this.zoomStops=n,this._styleExpression=e,this.isStateDependent="camera"!==t&&!Pn(e.expression),this.interpolationType=r};function pi(t,e){if("error"===(t=fi(t,e)).result)return t;var n=t.value.expression,r=Cn(n);if(!r&&!Kr(e))return Xr([new pe("","data expressions not supported")]);var i=On(n,["zoom"]);if(!i&&!Zr(e))return Xr([new pe("","zoom expressions not supported")]);var a=mi(n);if(!a&&!i)return Xr([new pe("",'"zoom" expression may only be used as input to a top-level "step" or "interpolate" expression.')]);if(a instanceof pe)return Xr([a]);if(a instanceof ur&&!Jr(e))return Xr([new pe("",'"interpolate" expressions cannot be used with this property')]);if(!a)return $r(new hi(r?"constant":"source",t.value));var o=a instanceof ur?a.interpolation:void 0;return $r(new di(r?"camera":"composite",t.value,a.labels,o))}di.prototype.evaluateWithoutErrorHandling=function(t,e,n,r,i,a){return this._styleExpression.evaluateWithoutErrorHandling(t,e,n,r,i,a)},di.prototype.evaluate=function(t,e,n,r,i,a){return this._styleExpression.evaluate(t,e,n,r,i,a)},di.prototype.interpolationFactor=function(t,e,n){return this.interpolationType?ur.interpolationFactor(this.interpolationType,t,e,n):0};var gi=function(t,e){this._parameters=t,this._specification=e,fe(this,ni(this._parameters,this._specification))};function vi(t,e){if(ti(t))return new gi(t,e);if(ui(t)){var n=pi(t,e);if("error"===n.result)throw new Error(n.value.map((function(t){return t.key+": "+t.message})).join(", "));return n.value}var r=t;return"string"===typeof t&&"color"===e.type&&(r=Re.parse(t)),{kind:"constant",evaluate:function(){return r}}}function mi(t){var e=null;if(t instanceof dr)e=mi(t.result);else if(t instanceof hr)for(var n=0,r=t.args;nr.maximum?[new ce(e,n,n+" is greater than the maximum value "+r.maximum)]:[]}function ki(t){var e,n,r,i=t.valueSpec,a=he(t.value.type),o={},s="categorical"!==a&&void 
0===t.value.property,l=!s,c="array"===Qr(t.value.stops)&&"array"===Qr(t.value.stops[0])&&"object"===Qr(t.value.stops[0][0]),u=xi({key:t.key,value:t.value,valueSpec:t.styleSpec.function,style:t.style,styleSpec:t.styleSpec,objectElementValidators:{stops:f,default:p}});return"identity"===a&&s&&u.push(new ce(t.key,t.value,'missing required property "property"')),"identity"===a||t.value.stops||u.push(new ce(t.key,t.value,'missing required property "stops"')),"exponential"===a&&t.valueSpec.expression&&!Jr(t.valueSpec)&&u.push(new ce(t.key,t.value,"exponential functions not supported")),t.styleSpec.$version>=8&&(l&&!Kr(t.valueSpec)?u.push(new ce(t.key,t.value,"property functions not supported")):s&&!Zr(t.valueSpec)&&u.push(new ce(t.key,t.value,"zoom functions not supported"))),"categorical"!==a&&!c||void 0!==t.value.property||u.push(new ce(t.key,t.value,'"property" property is required')),u;function f(t){if("identity"===a)return[new ce(t.key,t.value,'identity function may not have a "stops" property')];var e=[],n=t.value;return e=e.concat(wi({key:t.key,value:n,valueSpec:t.valueSpec,style:t.style,styleSpec:t.styleSpec,arrayElementValidator:h})),"array"===Qr(n)&&0===n.length&&e.push(new ce(t.key,n,"array must have at least one stop")),e}function h(t){var e=[],a=t.value,s=t.key;if("array"!==Qr(a))return[new ce(s,a,"array expected, "+Qr(a)+" found")];if(2!==a.length)return[new ce(s,a,"array length 2 expected, length "+a.length+" found")];if(c){if("object"!==Qr(a[0]))return[new ce(s,a,"object expected, "+Qr(a[0])+" found")];if(void 0===a[0].zoom)return[new ce(s,a,"object stop key must have zoom")];if(void 0===a[0].value)return[new ce(s,a,"object stop key must have value")];if(r&&r>he(a[0].zoom))return[new ce(s,a[0].zoom,"stop zoom values must appear in ascending order")];he(a[0].zoom)!==r&&(r=he(a[0].zoom),n=void 0,o={}),e=e.concat(xi({key:s+"[0]",value:a[0],valueSpec:{zoom:{}},style:t.style,styleSpec:t.styleSpec,objectElementValidators:{zoom:_i,value:d}}))}else e=e.concat(d({key:s+"[0]",value:a[0],valueSpec:{},style:t.style,styleSpec:t.styleSpec},a));return ui(de(a[1]))?e.concat([new ce(s+"[1]",a[1],"expressions are not allowed in function stops.")]):e.concat(Qi({key:s+"[1]",value:a[1],valueSpec:i,style:t.style,styleSpec:t.styleSpec}))}function d(t,r){var s=Qr(t.value),l=he(t.value),c=null!==t.value?t.value:r;if(e){if(s!==e)return[new ce(t.key,c,s+" stop domain type must match previous stop domain type "+e)]}else e=s;if("number"!==s&&"string"!==s&&"boolean"!==s)return[new ce(t.key,c,"stop domain value must be a number, string, or boolean")];if("number"!==s&&"categorical"!==a){var u="number expected, "+s+" found";return Kr(i)&&void 0===a&&(u+='\nIf you intended to use a categorical function, specify `"type": "categorical"`.'),[new ce(t.key,c,u)]}return"categorical"!==a||"number"!==s||isFinite(l)&&Math.floor(l)===l?"categorical"!==a&&"number"===s&&void 0!==n&&l=2&&"$id"!==t[1]&&"$type"!==t[1];case"in":return t.length>=3&&("string"!==typeof t[1]||Array.isArray(t[2]));case"!in":case"!has":case"none":return!1;case"==":case"!=":case">":case">=":case"<":case"<=":return 3!==t.length||Array.isArray(t[1])||Array.isArray(t[2]);case"any":case"all":for(var e=0,n=t.slice(1);ee?1:0}function Li(t){if(!Array.isArray(t))return!1;if("within"===t[0])return!0;for(var 
e=1;e"===e||"<="===e||">="===e?Di(t[1],t[2],e):"any"===e?Ri(t.slice(1)):"all"===e?["all"].concat(t.slice(1).map(Ii)):"none"===e?["all"].concat(t.slice(1).map(Ii).map(ji)):"in"===e?zi(t[1],t.slice(2)):"!in"===e?ji(zi(t[1],t.slice(2))):"has"===e?Ni(t[1]):"!has"===e?ji(Ni(t[1])):"within"!==e||t}function Di(t,e,n){switch(t){case"$type":return["filter-type-"+n,e];case"$id":return["filter-id-"+n,e];default:return["filter-"+n,t,e]}}function Ri(t){return["any"].concat(t.map(Ii))}function zi(t,e){if(0===e.length)return!1;switch(t){case"$type":return["filter-type-in",["literal",e]];case"$id":return["filter-id-in",["literal",e]];default:return e.length>200&&!e.some((function(t){return typeof t!==typeof e[0]}))?["filter-in-large",t,["literal",e.sort(Oi)]]:["filter-in-small",t,["literal",e]]}}function Ni(t){switch(t){case"$type":return!0;case"$id":return["filter-has-id"];default:return["filter-has",t]}}function ji(t){return["!",t]}function Fi(t){return Ei(de(t.value))?Ti(fe({},t,{expressionContext:"filter",valueSpec:{value:"boolean"}})):Bi(t)}function Bi(t){var e=t.value,n=t.key;if("array"!==Qr(e))return[new ce(n,e,"array expected, "+Qr(e)+" found")];var r,i=t.styleSpec,a=[];if(e.length<1)return[new ce(n,e,"filter array must have at least 1 element")];switch(a=a.concat(Si({key:n+"[0]",value:e[0],valueSpec:i.filter_operator,style:t.style,styleSpec:t.styleSpec})),he(e[0])){case"<":case"<=":case">":case">=":e.length>=2&&"$type"===he(e[1])&&a.push(new ce(n,e,'"$type" cannot be use with operator "'+e[0]+'"'));case"==":case"!=":3!==e.length&&a.push(new ce(n,e,'filter array for operator "'+e[0]+'" must have 3 elements'));case"in":case"!in":e.length>=2&&"string"!==(r=Qr(e[1]))&&a.push(new ce(n+"[1]",e[1],"string expected, "+r+" found"));for(var o=2;o=u[d+0]&&r>=u[d+1])?(o[h]=!0,a.push(c[h])):o[h]=!1}}},fa.prototype._forEachCell=function(t,e,n,r,i,a,o,s){for(var l=this._convertToCellCoord(t),c=this._convertToCellCoord(e),u=this._convertToCellCoord(n),f=this._convertToCellCoord(r),h=l;h<=u;h++)for(var d=c;d<=f;d++){var p=this.d*d+h;if((!s||s(this._convertFromCellCoord(h),this._convertFromCellCoord(d),this._convertFromCellCoord(h+1),this._convertFromCellCoord(d+1)))&&i.call(this,t,e,n,r,p,a,o,s))return}},fa.prototype._convertFromCellCoord=function(t){return(t-this.padding)/this.scale},fa.prototype._convertToCellCoord=function(t){return Math.max(0,Math.min(this.d-1,Math.floor(t*this.scale)+this.padding))},fa.prototype.toArrayBuffer=function(){if(this.arrayBuffer)return this.arrayBuffer;for(var t=this.cells,e=ua+this.cells.length+1+1,n=0,r=0;r=0)){var f=t[u];c[u]=pa[l].shallow.indexOf(u)>=0?f:ba(f,e)}t instanceof Error&&(c.message=t.message)}if(c.$name)throw new Error("$name property is reserved for worker serialization logic.");return"Object"!==l&&(c.$name=l),c}throw new Error("can't serialize object of type "+typeof t)}function xa(t){if(null===t||void 0===t||"boolean"===typeof t||"number"===typeof t||"string"===typeof t||t instanceof Boolean||t instanceof Number||t instanceof String||t instanceof Date||t instanceof RegExp||ma(t)||ya(t)||ArrayBuffer.isView(t)||t instanceof ha)return t;if(Array.isArray(t))return t.map(xa);if("object"===typeof t){var e=t.$name||"Object",n=pa[e].klass;if(!n)throw new Error("can't deserialize unregistered class "+e);if(n.deserialize)return n.deserialize(t);for(var r=Object.create(n.prototype),i=0,a=Object.keys(t);i=0?s:xa(s)}}return r}throw new Error("can't deserialize object of type "+typeof t)}var wa=function(){this.first=!0};wa.prototype.update=function(t,e){var 
n=Math.floor(t);return this.first?(this.first=!1,this.lastIntegerZoom=n,this.lastIntegerZoomTime=0,this.lastZoom=t,this.lastFloorZoom=n,!0):(this.lastFloorZoom>n?(this.lastIntegerZoom=n+1,this.lastIntegerZoomTime=e):this.lastFloorZoom=128&&t<=255},Arabic:function(t){return t>=1536&&t<=1791},"Arabic Supplement":function(t){return t>=1872&&t<=1919},"Arabic Extended-A":function(t){return t>=2208&&t<=2303},"Hangul Jamo":function(t){return t>=4352&&t<=4607},"Unified Canadian Aboriginal Syllabics":function(t){return t>=5120&&t<=5759},Khmer:function(t){return t>=6016&&t<=6143},"Unified Canadian Aboriginal Syllabics Extended":function(t){return t>=6320&&t<=6399},"General Punctuation":function(t){return t>=8192&&t<=8303},"Letterlike Symbols":function(t){return t>=8448&&t<=8527},"Number Forms":function(t){return t>=8528&&t<=8591},"Miscellaneous Technical":function(t){return t>=8960&&t<=9215},"Control Pictures":function(t){return t>=9216&&t<=9279},"Optical Character Recognition":function(t){return t>=9280&&t<=9311},"Enclosed Alphanumerics":function(t){return t>=9312&&t<=9471},"Geometric Shapes":function(t){return t>=9632&&t<=9727},"Miscellaneous Symbols":function(t){return t>=9728&&t<=9983},"Miscellaneous Symbols and Arrows":function(t){return t>=11008&&t<=11263},"CJK Radicals Supplement":function(t){return t>=11904&&t<=12031},"Kangxi Radicals":function(t){return t>=12032&&t<=12255},"Ideographic Description Characters":function(t){return t>=12272&&t<=12287},"CJK Symbols and Punctuation":function(t){return t>=12288&&t<=12351},Hiragana:function(t){return t>=12352&&t<=12447},Katakana:function(t){return t>=12448&&t<=12543},Bopomofo:function(t){return t>=12544&&t<=12591},"Hangul Compatibility Jamo":function(t){return t>=12592&&t<=12687},Kanbun:function(t){return t>=12688&&t<=12703},"Bopomofo Extended":function(t){return t>=12704&&t<=12735},"CJK Strokes":function(t){return t>=12736&&t<=12783},"Katakana Phonetic Extensions":function(t){return t>=12784&&t<=12799},"Enclosed CJK Letters and Months":function(t){return t>=12800&&t<=13055},"CJK Compatibility":function(t){return t>=13056&&t<=13311},"CJK Unified Ideographs Extension A":function(t){return t>=13312&&t<=19903},"Yijing Hexagram Symbols":function(t){return t>=19904&&t<=19967},"CJK Unified Ideographs":function(t){return t>=19968&&t<=40959},"Yi Syllables":function(t){return t>=40960&&t<=42127},"Yi Radicals":function(t){return t>=42128&&t<=42191},"Hangul Jamo Extended-A":function(t){return t>=43360&&t<=43391},"Hangul Syllables":function(t){return t>=44032&&t<=55215},"Hangul Jamo Extended-B":function(t){return t>=55216&&t<=55295},"Private Use Area":function(t){return t>=57344&&t<=63743},"CJK Compatibility Ideographs":function(t){return t>=63744&&t<=64255},"Arabic Presentation Forms-A":function(t){return t>=64336&&t<=65023},"Vertical Forms":function(t){return t>=65040&&t<=65055},"CJK Compatibility Forms":function(t){return t>=65072&&t<=65103},"Small Form Variants":function(t){return t>=65104&&t<=65135},"Arabic Presentation Forms-B":function(t){return t>=65136&&t<=65279},"Halfwidth and Fullwidth Forms":function(t){return t>=65280&&t<=65519}};function ka(t){for(var e=0,n=t;e=65097&&t<=65103)||!!_a["CJK Compatibility Ideographs"](t)||!!_a["CJK Compatibility"](t)||!!_a["CJK Radicals Supplement"](t)||!!_a["CJK Strokes"](t)||!(!_a["CJK Symbols and Punctuation"](t)||t>=12296&&t<=12305||t>=12308&&t<=12319||12336===t)||!!_a["CJK Unified Ideographs Extension A"](t)||!!_a["CJK Unified Ideographs"](t)||!!_a["Enclosed CJK Letters and Months"](t)||!!_a["Hangul 
Compatibility Jamo"](t)||!!_a["Hangul Jamo Extended-A"](t)||!!_a["Hangul Jamo Extended-B"](t)||!!_a["Hangul Jamo"](t)||!!_a["Hangul Syllables"](t)||!!_a.Hiragana(t)||!!_a["Ideographic Description Characters"](t)||!!_a.Kanbun(t)||!!_a["Kangxi Radicals"](t)||!!_a["Katakana Phonetic Extensions"](t)||!(!_a.Katakana(t)||12540===t)||!(!_a["Halfwidth and Fullwidth Forms"](t)||65288===t||65289===t||65293===t||t>=65306&&t<=65310||65339===t||65341===t||65343===t||t>=65371&&t<=65503||65507===t||t>=65512&&t<=65519)||!(!_a["Small Form Variants"](t)||t>=65112&&t<=65118||t>=65123&&t<=65126)||!!_a["Unified Canadian Aboriginal Syllabics"](t)||!!_a["Unified Canadian Aboriginal Syllabics Extended"](t)||!!_a["Vertical Forms"](t)||!!_a["Yijing Hexagram Symbols"](t)||!!_a["Yi Syllables"](t)||!!_a["Yi Radicals"](t))}function Ea(t){return!(!_a["Latin-1 Supplement"](t)||167!==t&&169!==t&&174!==t&&177!==t&&188!==t&&189!==t&&190!==t&&215!==t&&247!==t)||!(!_a["General Punctuation"](t)||8214!==t&&8224!==t&&8225!==t&&8240!==t&&8241!==t&&8251!==t&&8252!==t&&8258!==t&&8263!==t&&8264!==t&&8265!==t&&8273!==t)||!!_a["Letterlike Symbols"](t)||!!_a["Number Forms"](t)||!(!_a["Miscellaneous Technical"](t)||!(t>=8960&&t<=8967||t>=8972&&t<=8991||t>=8996&&t<=9e3||9003===t||t>=9085&&t<=9114||t>=9150&&t<=9165||9167===t||t>=9169&&t<=9179||t>=9186&&t<=9215))||!(!_a["Control Pictures"](t)||9251===t)||!!_a["Optical Character Recognition"](t)||!!_a["Enclosed Alphanumerics"](t)||!!_a["Geometric Shapes"](t)||!(!_a["Miscellaneous Symbols"](t)||t>=9754&&t<=9759)||!(!_a["Miscellaneous Symbols and Arrows"](t)||!(t>=11026&&t<=11055||t>=11088&&t<=11097||t>=11192&&t<=11243))||!!_a["CJK Symbols and Punctuation"](t)||!!_a.Katakana(t)||!!_a["Private Use Area"](t)||!!_a["CJK Compatibility Forms"](t)||!!_a["Small Form Variants"](t)||!!_a["Halfwidth and Fullwidth Forms"](t)||8734===t||8756===t||8757===t||t>=9984&&t<=10087||t>=10102&&t<=10131||65532===t||65533===t}function Ca(t){return!(Sa(t)||Ea(t))}function Pa(t){return _a.Arabic(t)||_a["Arabic Supplement"](t)||_a["Arabic Extended-A"](t)||_a["Arabic Presentation Forms-A"](t)||_a["Arabic Presentation Forms-B"](t)}function Oa(t){return t>=1424&&t<=2303||_a["Arabic Presentation Forms-A"](t)||_a["Arabic Presentation Forms-B"](t)}function La(t,e){return!(!e&&Oa(t))&&!(t>=2304&&t<=3583||t>=3840&&t<=4255||_a.Khmer(t))}function Ia(t){for(var e=0,n=t;e-1&&(Na=Ra.error),za&&za(t)};function Ba(){Ua.fire(new ae("pluginStateChange",{pluginStatus:Na,pluginURL:ja}))}var Ua=new se,Ha=function(){return Na},Va=function(t){return t({pluginStatus:Na,pluginURL:ja}),Ua.on("pluginStateChange",t),t},qa=function(t,e,n){if(void 0===n&&(n=!1),Na===Ra.deferred||Na===Ra.loading||Na===Ra.loaded)throw new Error("setRTLTextPlugin cannot be called multiple times.");ja=Y.resolveURL(t),Na=Ra.deferred,za=e,Ba(),n||Ga()},Ga=function(){if(Na!==Ra.deferred||!ja)throw new Error("rtl-text-plugin cannot be downloaded unless a pluginURL is specified");Na=Ra.loading,Ba(),ja&&Yt({url:ja},(function(t){t?Fa(t):(Na=Ra.loaded,Ba())}))},Wa={applyArabicShaping:null,processBidirectionalText:null,processStyledBidirectionalText:null,isLoaded:function(){return Na===Ra.loaded||null!=Wa.applyArabicShaping},isLoading:function(){return Na===Ra.loading},setState:function(t){Na=t.pluginStatus,ja=t.pluginURL},isParsed:function(){return null!=Wa.applyArabicShaping&&null!=Wa.processBidirectionalText&&null!=Wa.processStyledBidirectionalText},getPluginURL:function(){return 
ja}},Ya=function(){Wa.isLoading()||Wa.isLoaded()||"deferred"!==Ha()||Ga()},$a=function(t,e){this.zoom=t,e?(this.now=e.now,this.fadeDuration=e.fadeDuration,this.zoomHistory=e.zoomHistory,this.transition=e.transition):(this.now=0,this.fadeDuration=0,this.zoomHistory=new wa,this.transition={})};$a.prototype.isSupportedScript=function(t){return Da(t,Wa.isLoaded())},$a.prototype.crossFadingFactor=function(){return 0===this.fadeDuration?1:Math.min((this.now-this.zoomHistory.lastIntegerZoomTime)/this.fadeDuration,1)},$a.prototype.getCrossfadeParameters=function(){var t=this.zoom,e=t-Math.floor(t),n=this.crossFadingFactor();return t>this.zoomHistory.lastIntegerZoom?{fromScale:2,toScale:1,t:e+(1-e)*n}:{fromScale:.5,toScale:1,t:1-(1-n)*e}};var Xa=function(t,e){this.property=t,this.value=e,this.expression=vi(void 0===e?t.specification.default:e,t.specification)};Xa.prototype.isDataDriven=function(){return"source"===this.expression.kind||"composite"===this.expression.kind},Xa.prototype.possiblyEvaluate=function(t,e,n){return this.property.possiblyEvaluate(this,t,e,n)};var Ka=function(t){this.property=t,this.value=new Xa(t,void 0)};Ka.prototype.transitioned=function(t,e){return new Ja(this.property,this.value,e,v({},t.transition,this.transition),t.now)},Ka.prototype.untransitioned=function(){return new Ja(this.property,this.value,null,{},0)};var Za=function(t){this._properties=t,this._values=Object.create(t.defaultTransitionablePropertyValues)};Za.prototype.getValue=function(t){return A(this._values[t].value.value)},Za.prototype.setValue=function(t,e){this._values.hasOwnProperty(t)||(this._values[t]=new Ka(this._values[t].property)),this._values[t].value=new Xa(this._values[t].property,null===e?void 0:A(e))},Za.prototype.getTransition=function(t){return A(this._values[t].transition)},Za.prototype.setTransition=function(t,e){this._values.hasOwnProperty(t)||(this._values[t]=new Ka(this._values[t].property)),this._values[t].transition=A(e)||void 0},Za.prototype.serialize=function(){for(var t={},e=0,n=Object.keys(this._values);ethis.end)return this.prior=null,i;if(this.value.isDataDriven())return this.prior=null,i;if(rr.zoomHistory.lastIntegerZoom?{from:t,to:e}:{from:n,to:e}},e.prototype.interpolate=function(t){return t},e}(io),oo=function(t){this.specification=t};oo.prototype.possiblyEvaluate=function(t,e,n,r){if(void 0!==t.value){if("constant"===t.expression.kind){var i=t.expression.evaluate(e,null,{},n,r);return this._calculate(i,i,i,e)}return this._calculate(t.expression.evaluate(new $a(Math.floor(e.zoom-1),e)),t.expression.evaluate(new $a(Math.floor(e.zoom),e)),t.expression.evaluate(new $a(Math.floor(e.zoom+1),e)),e)}},oo.prototype._calculate=function(t,e,n,r){return r.zoom>r.zoomHistory.lastIntegerZoom?{from:t,to:e}:{from:n,to:e}},oo.prototype.interpolate=function(t){return t};var so=function(t){this.specification=t};so.prototype.possiblyEvaluate=function(t,e,n,r){return!!t.expression.evaluate(e,null,{},n,r)},so.prototype.interpolate=function(){return!1};var lo=function(t){for(var e in this.properties=t,this.defaultPropertyValues={},this.defaultTransitionablePropertyValues={},this.defaultTransitioningPropertyValues={},this.defaultPossiblyEvaluatedValues={},this.overridableProperties=[],t){var n=t[e];n.specification.overridable&&this.overridableProperties.push(e);var r=this.defaultPropertyValues[e]=new Xa(n,void 0),i=this.defaultTransitionablePropertyValues[e]=new 
Ka(n);this.defaultTransitioningPropertyValues[e]=i.untransitioned(),this.defaultPossiblyEvaluatedValues[e]=r.possiblyEvaluate({})}};ga("DataDrivenProperty",io),ga("DataConstantProperty",ro),ga("CrossFadedDataDrivenProperty",ao),ga("CrossFadedProperty",oo),ga("ColorRampProperty",so);var co="-transition",uo=function(t){function e(e,n){if(t.call(this),this.id=e.id,this.type=e.type,this._featureFilter={filter:function(){return!0},needGeometry:!1},"custom"!==e.type&&(this.metadata=e.metadata,this.minzoom=e.minzoom,this.maxzoom=e.maxzoom,"background"!==e.type&&(this.source=e.source,this.sourceLayer=e["source-layer"],this.filter=e.filter),n.layout&&(this._unevaluatedLayout=new to(n.layout)),n.paint)){for(var r in this._transitionablePaint=new Za(n.paint),e.paint)this.setPaintProperty(r,e.paint[r],{validate:!1});for(var i in e.layout)this.setLayoutProperty(i,e.layout[i],{validate:!1});this._transitioningPaint=this._transitionablePaint.untransitioned(),this.paint=new no(n.paint)}}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.getCrossfadeParameters=function(){return this._crossfadeParameters},e.prototype.getLayoutProperty=function(t){return"visibility"===t?this.visibility:this._unevaluatedLayout.getValue(t)},e.prototype.setLayoutProperty=function(t,e,n){if(void 0===n&&(n={}),null!==e&&void 0!==e){var r="layers."+this.id+".layout."+t;if(this._validate(sa,r,t,e,n))return}"visibility"!==t?this._unevaluatedLayout.setValue(t,e):this.visibility=e},e.prototype.getPaintProperty=function(t){return k(t,co)?this._transitionablePaint.getTransition(t.slice(0,-co.length)):this._transitionablePaint.getValue(t)},e.prototype.setPaintProperty=function(t,e,n){if(void 0===n&&(n={}),null!==e&&void 0!==e){var r="layers."+this.id+".paint."+t;if(this._validate(oa,r,t,e,n))return!1}if(k(t,co))return this._transitionablePaint.setTransition(t.slice(0,-co.length),e||void 0),!1;var i=this._transitionablePaint._values[t],a="cross-faded-data-driven"===i.property.specification["property-type"],o=i.value.isDataDriven(),s=i.value;this._transitionablePaint.setValue(t,e),this._handleSpecialPaintPropertyUpdate(t);var l=this._transitionablePaint._values[t].value;return l.isDataDriven()||o||a||this._handleOverridablePaintPropertyUpdate(t,s,l)},e.prototype._handleSpecialPaintPropertyUpdate=function(t){},e.prototype._handleOverridablePaintPropertyUpdate=function(t,e,n){return!1},e.prototype.isHidden=function(t){return!!(this.minzoom&&t=this.maxzoom)||"none"===this.visibility},e.prototype.updateTransitions=function(t){this._transitioningPaint=this._transitionablePaint.transitioned(t,this._transitioningPaint)},e.prototype.hasTransition=function(){return this._transitioningPaint.hasTransition()},e.prototype.recalculate=function(t,e){t.getCrossfadeParameters&&(this._crossfadeParameters=t.getCrossfadeParameters()),this._unevaluatedLayout&&(this.layout=this._unevaluatedLayout.possiblyEvaluate(t,void 0,e)),this.paint=this._transitioningPaint.possiblyEvaluate(t,void 0,e)},e.prototype.serialize=function(){var t={id:this.id,type:this.type,source:this.source,"source-layer":this.sourceLayer,metadata:this.metadata,minzoom:this.minzoom,maxzoom:this.maxzoom,filter:this.filter,layout:this._unevaluatedLayout&&this._unevaluatedLayout.serialize(),paint:this._transitionablePaint&&this._transitionablePaint.serialize()};return this.visibility&&(t.layout=t.layout||{},t.layout.visibility=this.visibility),M(t,(function(t,e){return void 
0!==t&&!("layout"===e&&!Object.keys(t).length)&&!("paint"===e&&!Object.keys(t).length)}))},e.prototype._validate=function(t,e,n,r,i){return void 0===i&&(i={}),(!i||!1!==i.validate)&&la(this,t.call(ia,{key:e,layerType:this.type,objectKey:n,value:r,styleSpec:le,style:{glyphs:!0,sprite:!0}}))},e.prototype.is3D=function(){return!1},e.prototype.isTileClipped=function(){return!1},e.prototype.hasOffscreenPass=function(){return!1},e.prototype.resize=function(){},e.prototype.isStateDependent=function(){for(var t in this.paint._values){var e=this.paint.get(t);if(e instanceof eo&&Kr(e.property.specification)&&("source"===e.value.kind||"composite"===e.value.kind)&&e.value.isStateDependent)return!0}return!1},e}(se),fo={Int8:Int8Array,Uint8:Uint8Array,Int16:Int16Array,Uint16:Uint16Array,Int32:Int32Array,Uint32:Uint32Array,Float32:Float32Array},ho=function(t,e){this._structArray=t,this._pos1=e*this.size,this._pos2=this._pos1/2,this._pos4=this._pos1/4,this._pos8=this._pos1/8},po=128,go=5,vo=function(){this.isTransferred=!1,this.capacity=-1,this.resize(0)};function mo(t,e){void 0===e&&(e=1);var n=0,r=0;return{members:t.map((function(t){var i=yo(t.type),a=n=bo(n,Math.max(e,i)),o=t.components||1;return r=Math.max(r,i),n+=i*o,{name:t.name,type:t.type,components:o,offset:a}})),size:bo(n,Math.max(r,e)),alignment:e}}function yo(t){return fo[t].BYTES_PER_ELEMENT}function bo(t,e){return Math.ceil(t/e)*e}vo.serialize=function(t,e){return t._trim(),e&&(t.isTransferred=!0,e.push(t.arrayBuffer)),{length:t.length,arrayBuffer:t.arrayBuffer}},vo.deserialize=function(t){var e=Object.create(this.prototype);return e.arrayBuffer=t.arrayBuffer,e.length=t.length,e.capacity=t.arrayBuffer.byteLength/e.bytesPerElement,e._refreshViews(),e},vo.prototype._trim=function(){this.length!==this.capacity&&(this.capacity=this.length,this.arrayBuffer=this.arrayBuffer.slice(0,this.length*this.bytesPerElement),this._refreshViews())},vo.prototype.clear=function(){this.length=0},vo.prototype.resize=function(t){this.reserve(t),this.length=t},vo.prototype.reserve=function(t){if(t>this.capacity){this.capacity=Math.max(t,Math.floor(this.capacity*go),po),this.arrayBuffer=new ArrayBuffer(this.capacity*this.bytesPerElement);var e=this.uint8;this._refreshViews(),e&&this.uint8.set(e)}},vo.prototype._refreshViews=function(){throw new Error("_refreshViews() must be implemented by each concrete StructArray layout")};var xo=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e){var n=this.length;return this.resize(n+1),this.emplace(n,t,e)},e.prototype.emplace=function(t,e,n){var r=2*t;return this.int16[r+0]=e,this.int16[r+1]=n,t},e}(vo);xo.prototype.bytesPerElement=4,ga("StructArrayLayout2i4",xo);var wo=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,n,r){var i=this.length;return this.resize(i+1),this.emplace(i,t,e,n,r)},e.prototype.emplace=function(t,e,n,r,i){var a=4*t;return this.int16[a+0]=e,this.int16[a+1]=n,this.int16[a+2]=r,this.int16[a+3]=i,t},e}(vo);wo.prototype.bytesPerElement=8,ga("StructArrayLayout4i8",wo);var _o=function(t){function 
e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,n,r,i,a){var o=this.length;return this.resize(o+1),this.emplace(o,t,e,n,r,i,a)},e.prototype.emplace=function(t,e,n,r,i,a,o){var s=6*t;return this.int16[s+0]=e,this.int16[s+1]=n,this.int16[s+2]=r,this.int16[s+3]=i,this.int16[s+4]=a,this.int16[s+5]=o,t},e}(vo);_o.prototype.bytesPerElement=12,ga("StructArrayLayout2i4i12",_o);var ko=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,n,r,i,a){var o=this.length;return this.resize(o+1),this.emplace(o,t,e,n,r,i,a)},e.prototype.emplace=function(t,e,n,r,i,a,o){var s=4*t,l=8*t;return this.int16[s+0]=e,this.int16[s+1]=n,this.uint8[l+4]=r,this.uint8[l+5]=i,this.uint8[l+6]=a,this.uint8[l+7]=o,t},e}(vo);ko.prototype.bytesPerElement=8,ga("StructArrayLayout2i4ub8",ko);var To=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,n,r,i,a,o,s,l,c){var u=this.length;return this.resize(u+1),this.emplace(u,t,e,n,r,i,a,o,s,l,c)},e.prototype.emplace=function(t,e,n,r,i,a,o,s,l,c,u){var f=9*t,h=18*t;return this.uint16[f+0]=e,this.uint16[f+1]=n,this.uint16[f+2]=r,this.uint16[f+3]=i,this.uint16[f+4]=a,this.uint16[f+5]=o,this.uint16[f+6]=s,this.uint16[f+7]=l,this.uint8[h+16]=c,this.uint8[h+17]=u,t},e}(vo);To.prototype.bytesPerElement=18,ga("StructArrayLayout8ui2ub18",To);var Mo=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.int16=new Int16Array(this.arrayBuffer),this.uint16=new Uint16Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,n,r,i,a,o,s,l,c,u,f){var h=this.length;return this.resize(h+1),this.emplace(h,t,e,n,r,i,a,o,s,l,c,u,f)},e.prototype.emplace=function(t,e,n,r,i,a,o,s,l,c,u,f,h){var d=12*t;return this.int16[d+0]=e,this.int16[d+1]=n,this.int16[d+2]=r,this.int16[d+3]=i,this.uint16[d+4]=a,this.uint16[d+5]=o,this.uint16[d+6]=s,this.uint16[d+7]=l,this.int16[d+8]=c,this.int16[d+9]=u,this.int16[d+10]=f,this.int16[d+11]=h,t},e}(vo);Mo.prototype.bytesPerElement=24,ga("StructArrayLayout4i4ui4i24",Mo);var Ao=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype._refreshViews=function(){this.uint8=new Uint8Array(this.arrayBuffer),this.float32=new Float32Array(this.arrayBuffer)},e.prototype.emplaceBack=function(t,e,n){var r=this.length;return this.resize(r+1),this.emplace(r,t,e,n)},e.prototype.emplace=function(t,e,n,r){var i=3*t;return this.float32[i+0]=e,this.float32[i+1]=n,this.float32[i+2]=r,t},e}(vo);Ao.prototype.bytesPerElement=12,ga("StructArrayLayout3f12",Ao);var So=function(t){function e(){t.apply(this,arguments)}return 
[Minified third-party JavaScript bundle content (mapbox-gl-js): StructArrayLayout typed-array layouts; CollisionBoxArray, PlacedSymbolArray, SymbolInstanceArray, GlyphOffsetArray, SymbolLineVertexArray, and FeatureIndexArray struct views; murmur2/murmur3 hashing; earcut-style polygon triangulation; the pbf protobuf reader/writer and vector-tile feature decoding; glyph shaping and symbol-placement helpers; and circle, fill, fill-extrusion, line, and symbol bucket plus paint/layout property definitions. Generated, minified bundle body omitted.]
io(le.paint_symbol["icon-halo-blur"]),"icon-translate":new ro(le.paint_symbol["icon-translate"]),"icon-translate-anchor":new ro(le.paint_symbol["icon-translate-anchor"]),"text-opacity":new io(le.paint_symbol["text-opacity"]),"text-color":new io(le.paint_symbol["text-color"],{runtimeType:xe,getOverride:function(t){return t.textColor},hasOverride:function(t){return!!t.textColor}}),"text-halo-color":new io(le.paint_symbol["text-halo-color"]),"text-halo-width":new io(le.paint_symbol["text-halo-width"]),"text-halo-blur":new io(le.paint_symbol["text-halo-blur"]),"text-translate":new ro(le.paint_symbol["text-translate"]),"text-translate-anchor":new ro(le.paint_symbol["text-translate-anchor"])}),layout:Qh},ed=function(t){this.type=t.property.overrides?t.property.overrides.runtimeType:ve,this.defaultValue=t};ed.prototype.evaluate=function(t){if(t.formattedSection){var e=this.defaultValue.property.overrides;if(e&&e.hasOverride(t.formattedSection))return e.getOverride(t.formattedSection)}return t.feature&&t.featureState?this.defaultValue.evaluate(t.feature,t.featureState):this.defaultValue.property.specification.default},ed.prototype.eachChild=function(t){this.defaultValue.isConstant()||t(this.defaultValue.value._styleExpression.expression)},ed.prototype.outputDefined=function(){return!1},ed.prototype.serialize=function(){return null},ga("FormatSectionOverride",ed,{omit:["defaultValue"]});var nd=function(t){function e(e){t.call(this,e,td)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.recalculate=function(e,n){if(t.prototype.recalculate.call(this,e,n),"auto"===this.layout.get("icon-rotation-alignment")&&("point"!==this.layout.get("symbol-placement")?this.layout._values["icon-rotation-alignment"]="map":this.layout._values["icon-rotation-alignment"]="viewport"),"auto"===this.layout.get("text-rotation-alignment")&&("point"!==this.layout.get("symbol-placement")?this.layout._values["text-rotation-alignment"]="map":this.layout._values["text-rotation-alignment"]="viewport"),"auto"===this.layout.get("text-pitch-alignment")&&(this.layout._values["text-pitch-alignment"]=this.layout.get("text-rotation-alignment")),"auto"===this.layout.get("icon-pitch-alignment")&&(this.layout._values["icon-pitch-alignment"]=this.layout.get("icon-rotation-alignment")),"point"===this.layout.get("symbol-placement")){var r=this.layout.get("text-writing-mode");if(r){for(var i=[],a=0,o=r;a",targetMapId:r,sourceMapId:a.mapId})}}},yd.prototype.receive=function(t){var e=t.data,n=e.id;if(n&&(!e.targetMapId||this.mapId===e.targetMapId))if(""===e.type){delete this.tasks[n];var r=this.cancelCallbacks[n];delete this.cancelCallbacks[n],r&&r()}else I()||e.mustQueue?(this.tasks[n]=e,this.taskQueue.push(n),this.invoker.trigger()):this.processTask(n,e)},yd.prototype.process=function(){if(this.taskQueue.length){var t=this.taskQueue.shift(),e=this.tasks[t];delete this.tasks[t],this.taskQueue.length&&this.invoker.trigger(),e&&this.processTask(t,e)}},yd.prototype.processTask=function(t,e){var n=this;if(""===e.type){var r=this.callbacks[t];delete this.callbacks[t],r&&(e.error?r(xa(e.error)):r(null,xa(e.data)))}else{var i=!1,a=z(this.globalScope)?void 0:[],o=e.hasCallback?function(e,r){i=!0,delete n.cancelCallbacks[t],n.target.postMessage({id:t,type:"",sourceMapId:n.mapId,error:e?ba(e):null,data:ba(r,a)},a)}:function(t){i=!0},s=null,l=xa(e.data);if(this.parent[e.type])s=this.parent[e.type](e.sourceMapId,l,o);else if(this.parent.getWorkerSource){var 
c=e.type.split(".");s=this.parent.getWorkerSource(e.sourceMapId,c[0],l.source)[c[1]](l,o)}else o(new Error("Could not find function "+e.type));!i&&s&&s.cancel&&(this.cancelCallbacks[t]=s.cancel)}},yd.prototype.remove=function(){this.invoker.remove(),this.target.removeEventListener("message",this.receive,!1)};var wd=function(t,e){t&&(e?this.setSouthWest(t).setNorthEast(e):4===t.length?this.setSouthWest([t[0],t[1]]).setNorthEast([t[2],t[3]]):this.setSouthWest(t[0]).setNorthEast(t[1]))};wd.prototype.setNorthEast=function(t){return this._ne=t instanceof kd?new kd(t.lng,t.lat):kd.convert(t),this},wd.prototype.setSouthWest=function(t){return this._sw=t instanceof kd?new kd(t.lng,t.lat):kd.convert(t),this},wd.prototype.extend=function(t){var e,n,r=this._sw,i=this._ne;if(t instanceof kd)e=t,n=t;else{if(!(t instanceof wd)){if(Array.isArray(t)){if(4===t.length||t.every(Array.isArray)){var a=t;return this.extend(wd.convert(a))}var o=t;return this.extend(kd.convert(o))}return this}if(e=t._sw,n=t._ne,!e||!n)return this}return r||i?(r.lng=Math.min(e.lng,r.lng),r.lat=Math.min(e.lat,r.lat),i.lng=Math.max(n.lng,i.lng),i.lat=Math.max(n.lat,i.lat)):(this._sw=new kd(e.lng,e.lat),this._ne=new kd(n.lng,n.lat)),this},wd.prototype.getCenter=function(){return new kd((this._sw.lng+this._ne.lng)/2,(this._sw.lat+this._ne.lat)/2)},wd.prototype.getSouthWest=function(){return this._sw},wd.prototype.getNorthEast=function(){return this._ne},wd.prototype.getNorthWest=function(){return new kd(this.getWest(),this.getNorth())},wd.prototype.getSouthEast=function(){return new kd(this.getEast(),this.getSouth())},wd.prototype.getWest=function(){return this._sw.lng},wd.prototype.getSouth=function(){return this._sw.lat},wd.prototype.getEast=function(){return this._ne.lng},wd.prototype.getNorth=function(){return this._ne.lat},wd.prototype.toArray=function(){return[this._sw.toArray(),this._ne.toArray()]},wd.prototype.toString=function(){return"LngLatBounds("+this._sw.toString()+", "+this._ne.toString()+")"},wd.prototype.isEmpty=function(){return!(this._sw&&this._ne)},wd.prototype.contains=function(t){var e=kd.convert(t),n=e.lng,r=e.lat,i=this._sw.lat<=r&&r<=this._ne.lat,a=this._sw.lng<=n&&n<=this._ne.lng;return this._sw.lng>this._ne.lng&&(a=this._sw.lng>=n&&n>=this._ne.lng),i&&a},wd.convert=function(t){return!t||t instanceof wd?t:new wd(t)};var _d=6371008.8,kd=function(t,e){if(isNaN(t)||isNaN(e))throw new Error("Invalid LngLat object: ("+t+", "+e+")");if(this.lng=+t,this.lat=+e,this.lat>90||this.lat<-90)throw new Error("Invalid LngLat latitude value: must be between -90 and 90")};kd.prototype.wrap=function(){return new kd(h(this.lng,-180,180),this.lat)},kd.prototype.toArray=function(){return[this.lng,this.lat]},kd.prototype.toString=function(){return"LngLat("+this.lng+", "+this.lat+")"},kd.prototype.distanceTo=function(t){var e=Math.PI/180,n=this.lat*e,r=t.lat*e,i=Math.sin(n)*Math.sin(r)+Math.cos(n)*Math.cos(r)*Math.cos((t.lng-this.lng)*e);return _d*Math.acos(Math.min(i,1))},kd.prototype.toBounds=function(t){void 0===t&&(t=0);var e=360*t/40075017,n=e/Math.cos(Math.PI/180*this.lat);return new wd(new kd(this.lng-n,this.lat-e),new kd(this.lng+n,this.lat+e))},kd.convert=function(t){if(t instanceof kd)return t;if(Array.isArray(t)&&(2===t.length||3===t.length))return new kd(Number(t[0]),Number(t[1]));if(!Array.isArray(t)&&"object"===typeof t&&null!==t)return new kd(Number("lng"in t?t.lng:t.lon),Number(t.lat));throw new Error("`LngLatLike` argument must be specified as a LngLat instance, an object {lng: , lat: }, an object {lon: , lat: }, 
or an array of [, ]")};var Td=2*Math.PI*_d;function Md(t){return Td*Math.cos(t*Math.PI/180)}function Ad(t){return(180+t)/360}function Sd(t){return(180-180/Math.PI*Math.log(Math.tan(Math.PI/4+t*Math.PI/360)))/360}function Ed(t,e){return t/Md(e)}function Cd(t){return 360*t-180}function Pd(t){var e=180-360*t;return 360/Math.PI*Math.atan(Math.exp(e*Math.PI/180))-90}function Od(t,e){return t*Md(Pd(e))}function Ld(t){return 1/Math.cos(t*Math.PI/180)}var Id=function(t,e,n){void 0===n&&(n=0),this.x=+t,this.y=+e,this.z=+n};Id.fromLngLat=function(t,e){void 0===e&&(e=0);var n=kd.convert(t);return new Id(Ad(n.lng),Sd(n.lat),Ed(e,n.lat))},Id.prototype.toLngLat=function(){return new kd(Cd(this.x),Pd(this.y))},Id.prototype.toAltitude=function(){return Od(this.z,this.y)},Id.prototype.meterInMercatorCoordinateUnits=function(){return 1/Td*Ld(Pd(this.y))};var Dd=function(t,e,n){this.z=t,this.x=e,this.y=n,this.key=Nd(0,t,t,e,n)};Dd.prototype.equals=function(t){return this.z===t.z&&this.x===t.x&&this.y===t.y},Dd.prototype.url=function(t,e){var n=bd(this.x,this.y,this.z),r=jd(this.z,this.x,this.y);return t[(this.x+this.y)%t.length].replace("{prefix}",(this.x%16).toString(16)+(this.y%16).toString(16)).replace("{z}",String(this.z)).replace("{x}",String(this.x)).replace("{y}",String("tms"===e?Math.pow(2,this.z)-this.y-1:this.y)).replace("{quadkey}",r).replace("{bbox-epsg-3857}",n)},Dd.prototype.getTilePoint=function(t){var e=Math.pow(2,this.z);return new a((t.x*e-this.x)*Ls,(t.y*e-this.y)*Ls)},Dd.prototype.toString=function(){return this.z+"/"+this.x+"/"+this.y};var Rd=function(t,e){this.wrap=t,this.canonical=e,this.key=Nd(t,e.z,e.z,e.x,e.y)},zd=function(t,e,n,r,i){this.overscaledZ=t,this.wrap=e,this.canonical=new Dd(n,+r,+i),this.key=Nd(e,t,n,r,i)};function Nd(t,e,n,r,i){(t*=2)<0&&(t=-1*t-1);var a=1<0;a--)i+=(e&(r=1<this.canonical.z?new zd(t,this.wrap,this.canonical.z,this.canonical.x,this.canonical.y):new zd(t,this.wrap,t,this.canonical.x>>e,this.canonical.y>>e)},zd.prototype.calculateScaledKey=function(t,e){var n=this.canonical.z-t;return t>this.canonical.z?Nd(this.wrap*+e,t,this.canonical.z,this.canonical.x,this.canonical.y):Nd(this.wrap*+e,t,t,this.canonical.x>>n,this.canonical.y>>n)},zd.prototype.isChildOf=function(t){if(t.wrap!==this.wrap)return!1;var e=this.canonical.z-t.canonical.z;return 0===t.overscaledZ||t.overscaledZ>e&&t.canonical.y===this.canonical.y>>e},zd.prototype.children=function(t){if(this.overscaledZ>=t)return[new zd(this.overscaledZ+1,this.wrap,this.canonical.z,this.canonical.x,this.canonical.y)];var e=this.canonical.z+1,n=2*this.canonical.x,r=2*this.canonical.y;return[new zd(e,this.wrap,e,n,r),new zd(e,this.wrap,e,n+1,r),new zd(e,this.wrap,e,n,r+1),new zd(e,this.wrap,e,n+1,r+1)]},zd.prototype.isLessThan=function(t){return this.wrapt.wrap)&&(this.overscaledZt.overscaledZ)&&(this.canonical.xt.canonical.x)&&this.canonical.y=this.dim+1||e<-1||e>=this.dim+1)throw new RangeError("out of range source coordinates for DEM data");return(e+1)*this.stride+(t+1)},Fd.prototype._unpackMapbox=function(t,e,n){return(256*t*256+256*e+n)/10-1e4},Fd.prototype._unpackTerrarium=function(t,e,n){return 256*t+e+n/256-32768},Fd.prototype.getPixels=function(){return new ql({width:this.stride,height:this.stride},new Uint8Array(this.data.buffer))},Fd.prototype.backfillBorder=function(t,e,n){if(this.dim!==t.dim)throw new Error("dem dimension mismatch");var r=e*this.dim,i=e*this.dim+this.dim,a=n*this.dim,o=n*this.dim+this.dim;switch(e){case-1:r=i-1;break;case 1:i=r+1}switch(n){case-1:a=o-1;break;case 1:o=a+1}for(var 
s=-e*this.dim,l=-n*this.dim,c=a;c=0&&u[3]>=0&&s.insert(o,u[0],u[1],u[2],u[3])}},Gd.prototype.loadVTLayers=function(){return this.vtLayers||(this.vtLayers=new ru.VectorTile(new Yu(this.rawTileData)).layers,this.sourceLayerCoder=new Ud(this.vtLayers?Object.keys(this.vtLayers).sort():["_geojsonTileLayer"])),this.vtLayers},Gd.prototype.query=function(t,e,n,r){var i=this;this.loadVTLayers();for(var a=t.params||{},o=Ls/t.tileSize/t.scale,s=Pi(a.filter),l=t.queryGeometry,c=t.queryPadding*o,u=Yd(l),f=this.grid.query(u.minX-c,u.minY-c,u.maxX+c,u.maxY+c),h=Yd(t.cameraQueryGeometry),d=0,p=this.grid3D.query(h.minX-c,h.minY-c,h.maxX+c,h.maxY+c,(function(e,n,r,i){return Xs(t.cameraQueryGeometry,e-c,n-c,r+c,i+c)}));dr)i=!1;else if(e)if(this.expirationTime=E.maxzoom||"none"!==E.visibility&&(l(S,this.zoom,r),(g[E.id]=E.createBucket({index:u.bucketLayerIDs.length,layers:S,zoom:this.zoom,pixelRatio:this.pixelRatio,overscaling:this.overscaling,collisionBoxArray:this.collisionBoxArray,sourceLayerIndex:x,sourceID:this.source})).populate(w,v,this.tileID.canonical),u.bucketLayerIDs.push(S.map((function(t){return t.id}))))}}}var C=t.mapObject(v.glyphDependencies,(function(t){return Object.keys(t).map(Number)}));Object.keys(C).length?i.send("getGlyphs",{uid:this.uid,stacks:C},(function(t,e){f||(f=t,h=e,L.call(s))})):h={};var P=Object.keys(v.iconDependencies);P.length?i.send("getImages",{icons:P,source:this.source,tileID:this.tileID,type:"icons"},(function(t,e){f||(f=t,d=e,L.call(s))})):d={};var O=Object.keys(v.patternDependencies);function L(){if(f)return a(f);if(h&&d&&p){var e=new o(h),n=new t.ImageAtlas(d,p);for(var i in g){var s=g[i];s instanceof t.SymbolBucket?(l(s.layers,this.zoom,r),t.performSymbolLayout(s,h,e.positions,d,n.iconPositions,this.showCollisionBoxes,this.tileID.canonical)):s.hasPattern&&(s instanceof t.LineBucket||s instanceof t.FillBucket||s instanceof t.FillExtrusionBucket)&&(l(s.layers,this.zoom,r),s.addFeatures(v,this.tileID.canonical,n.patternPositions))}this.status="done",a(null,{buckets:t.values(g).filter((function(t){return!t.isEmpty()})),featureIndex:u,collisionBoxArray:this.collisionBoxArray,glyphAtlasImage:e.image,imageAtlas:n,glyphMap:this.returnDependencies?h:null,iconMap:this.returnDependencies?d:null,glyphPositions:this.returnDependencies?e.positions:null})}}O.length?i.send("getImages",{icons:O,source:this.source,tileID:this.tileID,type:"patterns"},(function(t,e){f||(f=t,p=e,L.call(s))})):p={},L.call(this)};var u=function(t,e,n,r){this.actor=t,this.layerIndex=e,this.availableImages=n,this.loadVectorData=r||c,this.loading={},this.loaded={}};u.prototype.loadTile=function(e,n){var r=this,i=e.uid;this.loading||(this.loading={});var a=!!(e&&e.request&&e.request.collectResourceTiming)&&new t.RequestPerformance(e.request),o=this.loading[i]=new s(e);o.abort=this.loadVectorData(e,(function(e,s){if(delete r.loading[i],e||!s)return o.status="done",r.loaded[i]=o,n(e);var l=s.rawData,c={};s.expires&&(c.expires=s.expires),s.cacheControl&&(c.cacheControl=s.cacheControl);var u={};if(a){var f=a.finish();f&&(u.resourceTiming=JSON.parse(JSON.stringify(f)))}o.vectorTile=s.vectorTile,o.parse(s.vectorTile,r.layerIndex,r.availableImages,r.actor,(function(e,r){if(e||!r)return n(e);n(null,t.extend({rawTileData:l.slice(0)},r,c,u))})),r.loaded=r.loaded||{},r.loaded[i]=o}))},u.prototype.reloadTile=function(t,e){var n=this,r=this.loaded,i=t.uid,a=this;if(r&&r[i]){var o=r[i];o.showCollisionBoxes=t.showCollisionBoxes;var s=function(t,r){var i=o.reloadCallback;i&&(delete 
o.reloadCallback,o.parse(o.vectorTile,a.layerIndex,n.availableImages,a.actor,i)),e(t,r)};"parsing"===o.status?o.reloadCallback=s:"done"===o.status&&(o.vectorTile?o.parse(o.vectorTile,this.layerIndex,this.availableImages,this.actor,s):s())}},u.prototype.abortTile=function(t,e){var n=this.loading,r=t.uid;n&&n[r]&&n[r].abort&&(n[r].abort(),delete n[r]),e()},u.prototype.removeTile=function(t,e){var n=this.loaded,r=t.uid;n&&n[r]&&delete n[r],e()};var f=t.window.ImageBitmap,h=function(){this.loaded={}};h.prototype.loadTile=function(e,n){var r=e.uid,i=e.encoding,a=e.rawImageData,o=f&&a instanceof f?this.getImageData(a):a,s=new t.DEMData(r,o,i);this.loaded=this.loaded||{},this.loaded[r]=s,n(null,s)},h.prototype.getImageData=function(e){this.offscreenCanvas&&this.offscreenCanvasContext||(this.offscreenCanvas=new OffscreenCanvas(e.width,e.height),this.offscreenCanvasContext=this.offscreenCanvas.getContext("2d")),this.offscreenCanvas.width=e.width,this.offscreenCanvas.height=e.height,this.offscreenCanvasContext.drawImage(e,0,0,e.width,e.height);var n=this.offscreenCanvasContext.getImageData(-1,-1,e.width+2,e.height+2);return this.offscreenCanvasContext.clearRect(0,0,this.offscreenCanvas.width,this.offscreenCanvas.height),new t.RGBAImage({width:n.width,height:n.height},n.data)},h.prototype.removeTile=function(t){var e=this.loaded,n=t.uid;e&&e[n]&&delete e[n]};var d=p;function p(t,e){var n,r=t&&t.type;if("FeatureCollection"===r)for(n=0;n=0!==!!e&&t.reverse()}var m=t.vectorTile.VectorTileFeature.prototype.toGeoJSON,y=function(e){this._feature=e,this.extent=t.EXTENT,this.type=e.type,this.properties=e.tags,"id"in e&&!isNaN(e.id)&&(this.id=parseInt(e.id,10))};y.prototype.loadGeometry=function(){if(1===this._feature.type){for(var e=[],n=0,r=this._feature.geometry;n>31}function z(t,e){for(var n=t.loadGeometry(),r=t.type,i=0,a=0,o=n.length,s=0;s>1;F(t,e,o,r,i,a%2),j(t,e,n,r,o-1,a+1),j(t,e,n,o+1,i,a+1)}}function F(t,e,n,r,i,a){for(;i>r;){if(i-r>600){var o=i-r+1,s=n-r+1,l=Math.log(o),c=.5*Math.exp(2*l/3),u=.5*Math.sqrt(l*c*(o-c)/o)*(s-o/2<0?-1:1);F(t,e,n,Math.max(r,Math.floor(n-s*c/o+u)),Math.min(i,Math.floor(n+(o-s)*c/o+u)),a)}var f=e[2*n+a],h=r,d=i;for(B(t,e,r,n),e[2*i+a]>f&&B(t,e,r,i);hf;)d--}e[2*r+a]===f?B(t,e,r,d):B(t,e,++d,i),d<=n&&(r=d+1),n<=d&&(i=d-1)}}function B(t,e,n,r){U(t,n,r),U(e,2*n,2*r),U(e,2*n+1,2*r+1)}function U(t,e,n){var r=t[e];t[e]=t[n],t[n]=r}function H(t,e,n,r,i,a,o){for(var s,l,c=[0,t.length-1,0],u=[];c.length;){var f=c.pop(),h=c.pop(),d=c.pop();if(h-d<=o)for(var p=d;p<=h;p++)s=e[2*p],l=e[2*p+1],s>=n&&s<=i&&l>=r&&l<=a&&u.push(t[p]);else{var g=Math.floor((d+h)/2);s=e[2*g],l=e[2*g+1],s>=n&&s<=i&&l>=r&&l<=a&&u.push(t[g]);var v=(f+1)%2;(0===f?n<=s:r<=l)&&(c.push(d),c.push(g-1),c.push(v)),(0===f?i>=s:a>=l)&&(c.push(g+1),c.push(h),c.push(v))}}return u}function V(t,e,n,r,i,a){for(var o=[0,t.length-1,0],s=[],l=i*i;o.length;){var c=o.pop(),u=o.pop(),f=o.pop();if(u-f<=a)for(var h=f;h<=u;h++)q(e[2*h],e[2*h+1],n,r)<=l&&s.push(t[h]);else{var d=Math.floor((f+u)/2),p=e[2*d],g=e[2*d+1];q(p,g,n,r)<=l&&s.push(t[d]);var v=(c+1)%2;(0===c?n-i<=p:r-i<=g)&&(o.push(f),o.push(d-1),o.push(v)),(0===c?n+i>=p:r+i>=g)&&(o.push(d+1),o.push(u),o.push(v))}}return s}function q(t,e,n,r){var i=t-n,a=e-r;return i*i+a*a}T.fromVectorTileJs=M,T.fromGeojsonVt=A,T.GeoJSONWrapper=S;var G=function(t){return t[0]},W=function(t){return t[1]},Y=function(t,e,n,r,i){void 0===e&&(e=G),void 0===n&&(n=W),void 0===r&&(r=64),void 0===i&&(i=Float64Array),this.nodeSize=r,this.points=t;for(var 
a=t.length<65536?Uint16Array:Uint32Array,o=this.ids=new a(t.length),s=this.coords=new i(2*t.length),l=0;l=1e4?Math.round(e/1e3)+"k":e>=1e3?Math.round(e/100)/10+"k":e;return it(it({},t.properties),{cluster:!0,cluster_id:t.id,point_count:e,point_count_abbreviated:n})}function tt(t){return t/360+.5}function et(t){var e=Math.sin(t*Math.PI/180),n=.5-.25*Math.log((1+e)/(1-e))/Math.PI;return n<0?0:n>1?1:n}function nt(t){return 360*(t-.5)}function rt(t){var e=(180-360*t)*Math.PI/180;return 360*Math.atan(Math.exp(e))/Math.PI-90}function it(t,e){for(var n in e)t[n]=e[n];return t}function at(t){return t.x}function ot(t){return t.y}function st(t,e,n,r){for(var i,a=r,o=n-e>>1,s=n-e,l=t[e],c=t[e+1],u=t[n],f=t[n+1],h=e+3;ha)i=h,a=d;else if(d===a){var p=Math.abs(h-o);pr&&(i-e>3&&st(t,e,i,r),t[i+2]=a,n-i>3&&st(t,i,n,r))}function lt(t,e,n,r,i,a){var o=i-n,s=a-r;if(0!==o||0!==s){var l=((t-n)*o+(e-r)*s)/(o*o+s*s);l>1?(n=i,r=a):l>0&&(n+=o*l,r+=s*l)}return(o=t-n)*o+(s=e-r)*s}function ct(t,e,n,r){var i={id:"undefined"===typeof t?null:t,type:e,geometry:n,tags:r,minX:1/0,minY:1/0,maxX:-1/0,maxY:-1/0};return ut(i),i}function ut(t){var e=t.geometry,n=t.type;if("Point"===n||"MultiPoint"===n||"LineString"===n)ft(t,e);else if("Polygon"===n||"MultiLineString"===n)for(var r=0;r0&&(o+=r?(i*c-l*a)/2:Math.sqrt(Math.pow(l-i,2)+Math.pow(c-a,2))),i=l,a=c}var u=e.length-3;e[2]=1,st(e,0,u,n),e[u+2]=1,e.size=Math.abs(o),e.start=0,e.end=e.size}function vt(t,e,n,r){for(var i=0;i1?1:n}function bt(t,e,n,r,i,a,o,s){if(r/=e,a>=(n/=e)&&o=r)return null;for(var l=[],c=0;c=n&&p=r)){var g=[];if("Point"===h||"MultiPoint"===h)xt(f,g,n,r,i);else if("LineString"===h)wt(f,g,n,r,i,!1,s.lineMetrics);else if("MultiLineString"===h)kt(f,g,n,r,i,!1);else if("Polygon"===h)kt(f,g,n,r,i,!0);else if("MultiPolygon"===h)for(var v=0;v=n&&o<=r&&(e.push(t[a]),e.push(t[a+1]),e.push(t[a+2]))}}function wt(t,e,n,r,i,a,o){for(var s,l,c=_t(t),u=0===i?Mt:At,f=t.start,h=0;hn&&(l=u(c,d,p,v,m,n),o&&(c.start=f+s*l)):y>r?b=n&&(l=u(c,d,p,v,m,n),x=!0),b>r&&y<=r&&(l=u(c,d,p,v,m,r),x=!0),!a&&x&&(o&&(c.end=f+s*l),e.push(c),c=_t(t)),o&&(f+=s)}var w=t.length-3;d=t[w],p=t[w+1],g=t[w+2],(y=0===i?d:p)>=n&&y<=r&&Tt(c,d,p,g),w=c.length-3,a&&w>=3&&(c[w]!==c[0]||c[w+1]!==c[1])&&Tt(c,c[0],c[1],c[2]),c.length&&e.push(c)}function _t(t){var e=[];return e.size=t.size,e.start=t.start,e.end=t.end,e}function kt(t,e,n,r,i,a){for(var o=0;oo.maxX&&(o.maxX=u),f>o.maxY&&(o.maxY=f)}return o}function It(t,e,n,r){var i=e.geometry,a=e.type,o=[];if("Point"===a||"MultiPoint"===a)for(var s=0;s0&&e.size<(i?o:r))n.numPoints+=e.length/3;else{for(var s=[],l=0;lo)&&(n.numSimplified++,s.push(e[l]),s.push(e[l+1])),n.numPoints++;i&&Rt(s,a),t.push(s)}}function Rt(t,e){for(var n=0,r=0,i=t.length,a=i-2;r0===e)for(r=0,i=t.length;r24)throw new Error("maxZoom should be in the 0-24 range");if(e.promoteId&&e.generateId)throw new Error("promoteId and generateId cannot be used together.");var r=ht(t,e);this.tiles={},this.tileCoords=[],n&&(console.timeEnd("preprocess data"),console.log("index: maxZoom: %d, maxPoints: %d",e.indexMaxZoom,e.indexMaxPoints),console.time("generate tiles"),this.stats={},this.total=0),(r=St(r,e)).length&&this.splitTile(r,0,0,0),n&&(r.length&&console.log("features: %d, points: %d",this.tiles[0].numFeatures,this.tiles[0].numPoints),console.timeEnd("generate tiles"),console.log("tiles generated:",this.total,JSON.stringify(this.stats)))}function jt(t,e,n){return 32*((1<=r;c--){var u=+Date.now();s=this._cluster(s,c),this.trees[c]=new Y(s,at,ot,a,Float32Array),n&&console.log("z%d: %d clusters in 
%dms",c,s.length,+Date.now()-u)}return n&&console.timeEnd("total time"),this},X.prototype.getClusters=function(t,e){var n=((t[0]+180)%360+360)%360-180,r=Math.max(-90,Math.min(90,t[1])),i=180===t[2]?180:((t[2]+180)%360+360)%360-180,a=Math.max(-90,Math.min(90,t[3]));if(t[2]-t[0]>=360)n=-180,i=180;else if(n>i){var o=this.getClusters([n,r,180,a],e),s=this.getClusters([-180,r,i,a],e);return o.concat(s)}for(var l=this.trees[this._limitZoom(e)],c=[],u=0,f=l.range(tt(n),et(a),tt(i),et(r));u1?this._map(c,!0):null,v=(l<<5)+(e+1)+this.points.length,m=0,y=f;m>5},X.prototype._getOriginZoom=function(t){return(t-this.points.length)%32},X.prototype._map=function(t,e){if(t.numPoints)return e?it({},t.properties):t.properties;var n=this.points[t.index].properties,r=this.options.map(n);return e&&r===n?it({},r):r},Nt.prototype.options={maxZoom:14,indexMaxZoom:5,indexMaxPoints:1e5,tolerance:3,extent:4096,buffer:64,lineMetrics:!1,promoteId:null,generateId:!1,debug:0},Nt.prototype.splitTile=function(t,e,n,r,i,a,o){for(var s=[t,e,n,r],l=this.options,c=l.debug;s.length;){r=s.pop(),n=s.pop(),e=s.pop(),t=s.pop();var u=1<1&&console.time("creation"),h=this.tiles[f]=Lt(t,e,n,r,l),this.tileCoords.push({z:e,x:n,y:r}),c)){c>1&&(console.log("tile z%d-%d-%d (features: %d, points: %d, simplified: %d)",e,n,r,h.numFeatures,h.numPoints,h.numSimplified),console.timeEnd("creation"));var d="z"+e;this.stats[d]=(this.stats[d]||0)+1,this.total++}if(h.source=t,i){if(e===l.maxZoom||e===i)continue;var p=1<1&&console.time("clipping");var g,v,m,y,b,x,w=.5*l.buffer/l.extent,_=.5-w,k=.5+w,T=1+w;g=v=m=y=null,b=bt(t,u,n-w,n+k,0,h.minX,h.maxX,l),x=bt(t,u,n+_,n+T,0,h.minX,h.maxX,l),t=null,b&&(g=bt(b,u,r-w,r+k,1,h.minY,h.maxY,l),v=bt(b,u,r+_,r+T,1,h.minY,h.maxY,l),b=null),x&&(m=bt(x,u,r-w,r+k,1,h.minY,h.maxY,l),y=bt(x,u,r+_,r+T,1,h.minY,h.maxY,l),x=null),c>1&&console.timeEnd("clipping"),s.push(g||[],e+1,2*n,2*r),s.push(v||[],e+1,2*n,2*r+1),s.push(m||[],e+1,2*n+1,2*r),s.push(y||[],e+1,2*n+1,2*r+1)}}},Nt.prototype.getTile=function(t,e,n){var r=this.options,i=r.extent,a=r.debug;if(t<0||t>24)return null;var o=1<1&&console.log("drilling down to z%d-%d-%d",t,e,n);for(var l,c=t,u=e,f=n;!l&&c>0;)c--,u=Math.floor(u/2),f=Math.floor(f/2),l=this.tiles[jt(c,u,f)];return l&&l.source?(a>1&&console.log("found parent tile z%d-%d-%d",c,u,f),a>1&&console.time("drilling down"),this.splitTile(l.source,c,u,f,t,e,n),a>1&&console.timeEnd("drilling down"),this.tiles[s]?Pt(this.tiles[s],i):null):null};var Ut=function(e){function n(t,n,r,i){e.call(this,t,n,r,Bt),i&&(this.loadGeoJSON=i)}return e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n,n.prototype.loadData=function(t,e){this._pendingCallback&&this._pendingCallback(null,{abandoned:!0}),this._pendingCallback=e,this._pendingLoadDataParams=t,this._state&&"Idle"!==this._state?this._state="NeedsLoadData":(this._state="Coalescing",this._loadData())},n.prototype._loadData=function(){var e=this;if(this._pendingCallback&&this._pendingLoadDataParams){var n=this._pendingCallback,r=this._pendingLoadDataParams;delete this._pendingCallback,delete this._pendingLoadDataParams;var i=!!(r&&r.request&&r.request.collectResourceTiming)&&new t.RequestPerformance(r.request);this.loadGeoJSON(r,(function(t,a){if(t||!a)return n(t);if("object"!==typeof a)return n(new Error("Input data given to '"+r.source+"' is not a valid GeoJSON object."));d(a,!0);try{e._geoJSONIndex=r.cluster?new X(Ht(r)).load(a.features):zt(a,r.geojsonVtOptions)}catch(t){return n(t)}e.loaded={};var o={};if(i){var 
s=i.finish();s&&(o.resourceTiming={},o.resourceTiming[r.source]=JSON.parse(JSON.stringify(s)))}n(null,o)}))}},n.prototype.coalesce=function(){"Coalescing"===this._state?this._state="Idle":"NeedsLoadData"===this._state&&(this._state="Coalescing",this._loadData())},n.prototype.reloadTile=function(t,n){var r=this.loaded,i=t.uid;return r&&r[i]?e.prototype.reloadTile.call(this,t,n):this.loadTile(t,n)},n.prototype.loadGeoJSON=function(e,n){if(e.request)t.getJSON(e.request,n);else{if("string"!==typeof e.data)return n(new Error("Input data given to '"+e.source+"' is not a valid GeoJSON object."));try{return n(null,JSON.parse(e.data))}catch(r){return n(new Error("Input data given to '"+e.source+"' is not a valid GeoJSON object."))}}},n.prototype.removeSource=function(t,e){this._pendingCallback&&this._pendingCallback(null,{abandoned:!0}),e()},n.prototype.getClusterExpansionZoom=function(t,e){try{e(null,this._geoJSONIndex.getClusterExpansionZoom(t.clusterId))}catch(n){e(n)}},n.prototype.getClusterChildren=function(t,e){try{e(null,this._geoJSONIndex.getChildren(t.clusterId))}catch(n){e(n)}},n.prototype.getClusterLeaves=function(t,e){try{e(null,this._geoJSONIndex.getLeaves(t.clusterId,t.limit,t.offset))}catch(n){e(n)}},n}(u);function Ht(e){var n=e.superclusterOptions,r=e.clusterProperties;if(!r||!n)return n;for(var i={},a={},o={accumulated:null,zoom:0},s={properties:null},l=Object.keys(r),c=0,u=l;c1?"@2x":"",l=t.getJSON(n.transformRequest(n.normalizeSpriteURL(e,s,".json"),t.ResourceType.SpriteJSON),(function(t,e){l=null,o||(o=t,i=e,u())})),c=t.getImage(n.transformRequest(n.normalizeSpriteURL(e,s,".png"),t.ResourceType.SpriteImage),(function(t,e){c=null,o||(o=t,a=e,u())}));function u(){if(o)r(o);else if(i&&a){var e=t.browser.getImageData(a),n={};for(var s in i){var l=i[s],c=l.width,u=l.height,f=l.x,h=l.y,d=l.sdf,p=l.pixelRatio,g=l.stretchX,v=l.stretchY,m=l.content,y=new t.RGBAImage({width:c,height:u});t.RGBAImage.copy(e,y,{x:f,y:h},{x:0,y:0},{width:c,height:u}),n[s]={data:y,pixelRatio:p,sdf:d,stretchX:g,stretchY:v,content:m}}r(null,n)}}return{cancel:function(){l&&(l.cancel(),l=null),c&&(c.cancel(),c=null)}}}function h(t){var e=t.userImage;return!!(e&&e.render&&e.render())&&(t.data.replace(new Uint8Array(e.data.buffer)),!0)}n.suppressClick=function(){t.window.addEventListener("click",u,!0),t.window.setTimeout((function(){t.window.removeEventListener("click",u,!0)}),0)},n.mousePos=function(e,n){var r=e.getBoundingClientRect();return new t.Point(n.clientX-r.left-e.clientLeft,n.clientY-r.top-e.clientTop)},n.touchPos=function(e,n){for(var r=e.getBoundingClientRect(),i=[],a=0;a=0?0:e.button},n.remove=function(t){t.parentNode&&t.parentNode.removeChild(t)};var d=1,p=function(e){function n(){e.call(this),this.images={},this.updatedImages={},this.callbackDispatchedThisFrame={},this.loaded=!1,this.requestors=[],this.patterns={},this.atlasImage=new t.RGBAImage({width:1,height:1}),this.dirty=!0}return e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n,n.prototype.isLoaded=function(){return this.loaded},n.prototype.setLoaded=function(t){if(this.loaded!==t&&(this.loaded=t,t)){for(var e=0,n=this.requestors;e=0?1.2:1))}function x(t,e,n,r,i,a,o){for(var s=0;s65535)e(new Error("glyphs > 65535 not supported"));else if(a.ranges[s])e(null,{stack:n,id:i,glyph:o});else{var l=a.requests[s];l||(l=a.requests[s]=[],_.loadGlyphRange(n,s,r.url,r.requestManager,(function(t,e){if(e){for(var n in e)r._doesCharSupportLocalGlyph(+n)||(a.glyphs[+n]=e[+n]);a.ranges[s]=!0}for(var 
i=0,o=l;i1&&(l=t[++s]);var u=Math.abs(c-l.left),f=Math.abs(c-l.right),h=Math.min(u,f),d=void 0,p=i/n*(r+1);if(l.isDash){var g=r-Math.abs(p);d=Math.sqrt(h*h+g*g)}else d=r-Math.sqrt(h*h+p*p);this.data[o+c]=Math.max(0,Math.min(255,d+128))}},S.prototype.addRegularDash=function(t){for(var e=t.length-1;e>=0;--e){var n=t[e],r=t[e+1];n.zeroLength?t.splice(e,1):r&&r.isDash===n.isDash&&(r.left=n.left,t.splice(e,1))}var i=t[0],a=t[t.length-1];i.isDash===a.isDash&&(i.left=a.left-this.width,a.right=i.right+this.width);for(var o=this.width*this.nextRow,s=0,l=t[s],c=0;c1&&(l=t[++s]);var u=Math.abs(c-l.left),f=Math.abs(c-l.right),h=Math.min(u,f),d=l.isDash?h:-h;this.data[o+c]=Math.max(0,Math.min(255,d+128))}},S.prototype.addDash=function(e,n){var r=n?7:0,i=2*r+1;if(this.nextRow+i>this.height)return t.warnOnce("LineAtlas out of space"),null;for(var a=0,o=0;o=r.minX&&e.x=r.minY&&e.y0&&(l[new t.OverscaledTileID(e.overscaledZ,a,n.z,i,n.y-1).key]={backfilled:!1},l[new t.OverscaledTileID(e.overscaledZ,e.wrap,n.z,n.x,n.y-1).key]={backfilled:!1},l[new t.OverscaledTileID(e.overscaledZ,s,n.z,o,n.y-1).key]={backfilled:!1}),n.y+10&&(r.resourceTiming=e._resourceTiming,e._resourceTiming=[]),e.fire(new t.Event("data",r))}}))},n.prototype.onAdd=function(t){this.map=t,this.load()},n.prototype.setData=function(e){var n=this;return this._data=e,this.fire(new t.Event("dataloading",{dataType:"source"})),this._updateWorkerData((function(e){if(e)n.fire(new t.ErrorEvent(e));else{var r={dataType:"source",sourceDataType:"content"};n._collectResourceTiming&&n._resourceTiming&&n._resourceTiming.length>0&&(r.resourceTiming=n._resourceTiming,n._resourceTiming=[]),n.fire(new t.Event("data",r))}})),this},n.prototype.getClusterExpansionZoom=function(t,e){return this.actor.send("geojson.getClusterExpansionZoom",{clusterId:t,source:this.id},e),this},n.prototype.getClusterChildren=function(t,e){return this.actor.send("geojson.getClusterChildren",{clusterId:t,source:this.id},e),this},n.prototype.getClusterLeaves=function(t,e,n,r){return this.actor.send("geojson.getClusterLeaves",{source:this.id,clusterId:t,limit:e,offset:n},r),this},n.prototype._updateWorkerData=function(e){var n=this;this._loaded=!1;var r=t.extend({},this.workerOptions),i=this._data;"string"===typeof i?(r.request=this.map._requestManager.transformRequest(t.browser.resolveURL(i),t.ResourceType.Source),r.request.collectResourceTiming=this._collectResourceTiming):r.data=JSON.stringify(i),this.actor.send(this.type+".loadData",r,(function(t,i){n._removed||i&&i.abandoned||(n._loaded=!0,i&&i.resourceTiming&&i.resourceTiming[n.id]&&(n._resourceTiming=i.resourceTiming[n.id].slice(0)),n.actor.send(n.type+".coalesce",{source:r.source},null),e(t))}))},n.prototype.loaded=function(){return this._loaded},n.prototype.loadTile=function(e,n){var r=this,i=e.actor?"reloadTile":"loadTile";e.actor=this.actor;var a={type:this.type,uid:e.uid,tileID:e.tileID,zoom:e.tileID.overscaledZ,maxZoom:this.maxzoom,tileSize:this.tileSize,source:this.id,pixelRatio:t.browser.devicePixelRatio,showCollisionBoxes:this.map.showCollisionBoxes,promoteId:this.promoteId};e.request=this.actor.send(i,a,(function(t,a){return delete e.request,e.unloadVectorData(),e.aborted?n(null):t?n(t):(e.loadVectorData(a,r.map.painter,"reloadTile"===i),n(null))}))},n.prototype.abortTile=function(t){t.request&&(t.request.cancel(),delete 
t.request),t.aborted=!0},n.prototype.unloadTile=function(t){t.unloadVectorData(),this.actor.send("removeTile",{uid:t.uid,type:this.type,source:this.id})},n.prototype.onRemove=function(){this._removed=!0,this.actor.send("removeSource",{type:this.type,source:this.id})},n.prototype.serialize=function(){return t.extend({},this._options,{type:this.type,data:this._data})},n.prototype.hasTransition=function(){return!1},n}(t.Evented),R=t.createLayout([{name:"a_pos",type:"Int16",components:2},{name:"a_texture_pos",type:"Int16",components:2}]),z=function(e){function n(t,n,r,i){e.call(this),this.id=t,this.dispatcher=r,this.coordinates=n.coordinates,this.type="image",this.minzoom=0,this.maxzoom=22,this.tileSize=512,this.tiles={},this._loaded=!1,this.setEventedParent(i),this.options=n}return e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n,n.prototype.load=function(e,n){var r=this;this._loaded=!1,this.fire(new t.Event("dataloading",{dataType:"source"})),this.url=this.options.url,t.getImage(this.map._requestManager.transformRequest(this.url,t.ResourceType.Image),(function(i,a){r._loaded=!0,i?r.fire(new t.ErrorEvent(i)):a&&(r.image=a,e&&(r.coordinates=e),n&&n(),r._finishLoading())}))},n.prototype.loaded=function(){return this._loaded},n.prototype.updateImage=function(t){var e=this;return this.image&&t.url?(this.options.url=t.url,this.load(t.coordinates,(function(){e.texture=null})),this):this},n.prototype._finishLoading=function(){this.map&&(this.setCoordinates(this.coordinates),this.fire(new t.Event("data",{dataType:"source",sourceDataType:"metadata"})))},n.prototype.onAdd=function(t){this.map=t,this.load()},n.prototype.setCoordinates=function(e){var n=this;this.coordinates=e;var r=e.map(t.MercatorCoordinate.fromLngLat);this.tileID=N(r),this.minzoom=this.maxzoom=this.tileID.z;var i=r.map((function(t){return n.tileID.getTilePoint(t)._round()}));return this._boundsArray=new t.StructArrayLayout4i8,this._boundsArray.emplaceBack(i[0].x,i[0].y,0,0),this._boundsArray.emplaceBack(i[1].x,i[1].y,t.EXTENT,0),this._boundsArray.emplaceBack(i[3].x,i[3].y,0,t.EXTENT),this._boundsArray.emplaceBack(i[2].x,i[2].y,t.EXTENT,t.EXTENT),this.boundsBuffer&&(this.boundsBuffer.destroy(),delete this.boundsBuffer),this.fire(new t.Event("data",{dataType:"source",sourceDataType:"content"})),this},n.prototype.prepare=function(){if(0!==Object.keys(this.tiles).length&&this.image){var e=this.map.painter.context,n=e.gl;for(var r in this.boundsBuffer||(this.boundsBuffer=e.createVertexBuffer(this._boundsArray,R.members)),this.boundsSegments||(this.boundsSegments=t.SegmentVector.simpleSegment(0,0,4,2)),this.texture||(this.texture=new t.Texture(e,this.image,n.RGBA),this.texture.bind(n.LINEAR,n.CLAMP_TO_EDGE)),this.tiles){var i=this.tiles[r];"loaded"!==i.state&&(i.state="loaded",i.texture=this.texture)}}},n.prototype.loadTile=function(t,e){this.tileID&&this.tileID.equals(t.tileID.canonical)?(this.tiles[String(t.tileID.wrap)]=t,t.buckets={},e(null)):(t.state="errored",e(null))},n.prototype.serialize=function(){return{type:"image",url:this.options.url,coordinates:this.coordinates}},n.prototype.hasTransition=function(){return!1},n}(t.Evented);function N(e){for(var n=1/0,r=1/0,i=-1/0,a=-1/0,o=0,s=e;on.end(0)?this.fire(new t.ErrorEvent(new t.ValidationError("sources."+this.id,null,"Playback for this video can be set only between the "+n.start(0)+" and "+n.end(0)+"-second mark."))):this.video.currentTime=e}},n.prototype.getVideo=function(){return 
this.video},n.prototype.onAdd=function(t){this.map||(this.map=t,this.load(),this.video&&(this.video.play(),this.setCoordinates(this.coordinates)))},n.prototype.prepare=function(){if(!(0===Object.keys(this.tiles).length||this.video.readyState<2)){var e=this.map.painter.context,n=e.gl;for(var r in this.boundsBuffer||(this.boundsBuffer=e.createVertexBuffer(this._boundsArray,R.members)),this.boundsSegments||(this.boundsSegments=t.SegmentVector.simpleSegment(0,0,4,2)),this.texture?this.video.paused||(this.texture.bind(n.LINEAR,n.CLAMP_TO_EDGE),n.texSubImage2D(n.TEXTURE_2D,0,0,0,n.RGBA,n.UNSIGNED_BYTE,this.video)):(this.texture=new t.Texture(e,this.video,n.RGBA),this.texture.bind(n.LINEAR,n.CLAMP_TO_EDGE)),this.tiles){var i=this.tiles[r];"loaded"!==i.state&&(i.state="loaded",i.texture=this.texture)}}},n.prototype.serialize=function(){return{type:"video",urls:this.urls,coordinates:this.coordinates}},n.prototype.hasTransition=function(){return this.video&&!this.video.paused},n}(z),F=function(e){function n(n,r,i,a){e.call(this,n,r,i,a),r.coordinates?Array.isArray(r.coordinates)&&4===r.coordinates.length&&!r.coordinates.some((function(t){return!Array.isArray(t)||2!==t.length||t.some((function(t){return"number"!==typeof t}))}))||this.fire(new t.ErrorEvent(new t.ValidationError("sources."+n,null,'"coordinates" property must be an array of 4 longitude/latitude array pairs'))):this.fire(new t.ErrorEvent(new t.ValidationError("sources."+n,null,'missing required property "coordinates"'))),r.animate&&"boolean"!==typeof r.animate&&this.fire(new t.ErrorEvent(new t.ValidationError("sources."+n,null,'optional "animate" property must be a boolean value'))),r.canvas?"string"===typeof r.canvas||r.canvas instanceof t.window.HTMLCanvasElement||this.fire(new t.ErrorEvent(new t.ValidationError("sources."+n,null,'"canvas" must be either a string representing the ID of the canvas element from which to read, or an HTMLCanvasElement instance'))):this.fire(new t.ErrorEvent(new t.ValidationError("sources."+n,null,'missing required property "canvas"'))),this.options=r,this.animate=void 0===r.animate||r.animate}return e&&(n.__proto__=e),n.prototype=Object.create(e&&e.prototype),n.prototype.constructor=n,n.prototype.load=function(){this._loaded=!0,this.canvas||(this.canvas=this.options.canvas instanceof t.window.HTMLCanvasElement?this.options.canvas:t.window.document.getElementById(this.options.canvas)),this.width=this.canvas.width,this.height=this.canvas.height,this._hasInvalidDimensions()?this.fire(new t.ErrorEvent(new Error("Canvas dimensions cannot be less than or equal to zero."))):(this.play=function(){this._playing=!0,this.map.triggerRepaint()},this.pause=function(){this._playing&&(this.prepare(),this._playing=!1)},this._finishLoading())},n.prototype.getCanvas=function(){return this.canvas},n.prototype.onAdd=function(t){this.map=t,this.load(),this.canvas&&this.animate&&this.play()},n.prototype.onRemove=function(){this.pause()},n.prototype.prepare=function(){var e=!1;if(this.canvas.width!==this.width&&(this.width=this.canvas.width,e=!0),this.canvas.height!==this.height&&(this.height=this.canvas.height,e=!0),!this._hasInvalidDimensions()&&0!==Object.keys(this.tiles).length){var n=this.map.painter.context,r=n.gl;for(var i in this.boundsBuffer||(this.boundsBuffer=n.createVertexBuffer(this._boundsArray,R.members)),this.boundsSegments||(this.boundsSegments=t.SegmentVector.simpleSegment(0,0,4,2)),this.texture?(e||this._playing)&&this.texture.update(this.canvas,{premultiply:!0}):this.texture=new 
t.Texture(n,this.canvas,r.RGBA,{premultiply:!0}),this.tiles){var a=this.tiles[i];"loaded"!==a.state&&(a.state="loaded",a.texture=this.texture)}}},n.prototype.serialize=function(){return{type:"canvas",coordinates:this.coordinates}},n.prototype.hasTransition=function(){return this._playing},n.prototype._hasInvalidDimensions=function(){for(var t=0,e=[this.canvas.width,this.canvas.height];tthis.max){var o=this._getAndRemoveByKey(this.order[0]);o&&this.onRemove(o)}return this},Z.prototype.has=function(t){return t.wrapped().key in this.data},Z.prototype.getAndRemove=function(t){return this.has(t)?this._getAndRemoveByKey(t.wrapped().key):null},Z.prototype._getAndRemoveByKey=function(t){var e=this.data[t].shift();return e.timeout&&clearTimeout(e.timeout),0===this.data[t].length&&delete this.data[t],this.order.splice(this.order.indexOf(t),1),e.value},Z.prototype.getByKey=function(t){var e=this.data[t];return e?e[0].value:null},Z.prototype.get=function(t){return this.has(t)?this.data[t.wrapped().key][0].value:null},Z.prototype.remove=function(t,e){if(!this.has(t))return this;var n=t.wrapped().key,r=void 0===e?0:this.data[n].indexOf(e),i=this.data[n][r];return this.data[n].splice(r,1),i.timeout&&clearTimeout(i.timeout),0===this.data[n].length&&delete this.data[n],this.onRemove(i.value),this.order.splice(this.order.indexOf(n),1),this},Z.prototype.setMaxSize=function(t){for(this.max=t;this.order.length>this.max;){var e=this._getAndRemoveByKey(this.order[0]);e&&this.onRemove(e)}return this},Z.prototype.filter=function(t){var e=[];for(var n in this.data)for(var r=0,i=this.data[n];r1||(Math.abs(n)>1&&(1===Math.abs(n+i)?n+=i:1===Math.abs(n-i)&&(n-=i)),e.dem&&t.dem&&(t.dem.backfillBorder(e.dem,n,r),t.neighboringTiles&&t.neighboringTiles[a]&&(t.neighboringTiles[a].backfilled=!0)))}},n.prototype.getTile=function(t){return this.getTileByID(t.key)},n.prototype.getTileByID=function(t){return this._tiles[t]},n.prototype._retainLoadedChildren=function(t,e,n,r){for(var i in this._tiles){var a=this._tiles[i];if(!(r[i]||!a.hasData()||a.tileID.overscaledZ<=e||a.tileID.overscaledZ>n)){for(var o=a.tileID;a&&a.tileID.overscaledZ>e+1;){var s=a.tileID.scaledTo(a.tileID.overscaledZ-1);(a=this._tiles[s.key])&&a.hasData()&&(o=s)}for(var l=o;l.overscaledZ>e;)if(t[(l=l.scaledTo(l.overscaledZ-1)).key]){r[o.key]=o;break}}}},n.prototype.findLoadedParent=function(t,e){if(t.key in this._loadedParentTiles){var n=this._loadedParentTiles[t.key];return n&&n.tileID.overscaledZ>=e?n:null}for(var r=t.overscaledZ-1;r>=e;r--){var i=t.scaledTo(r),a=this._getLoadedTile(i);if(a)return a}},n.prototype._getLoadedTile=function(t){var e=this._tiles[t.key];return e&&e.hasData()?e:this._cache.getByKey(t.wrapped().key)},n.prototype.updateCacheSize=function(t){var e=(Math.ceil(t.width/this._source.tileSize)+1)*(Math.ceil(t.height/this._source.tileSize)+1),n=5,r=Math.floor(e*n),i="number"===typeof this._maxTileCacheSize?Math.min(this._maxTileCacheSize,r):r;this._cache.setMaxSize(i)},n.prototype.handleWrapJump=function(t){var e=(t-(void 0===this._prevLng?t:this._prevLng))/360,n=Math.round(e);if(this._prevLng=t,n){var r={};for(var i in this._tiles){var a=this._tiles[i];a.tileID=a.tileID.unwrapTo(a.tileID.wrap+n),r[a.tileID.key]=a}for(var o in this._tiles=r,this._timers)clearTimeout(this._timers[o]),delete this._timers[o];for(var s in this._tiles){var l=this._tiles[s];this._setTileReloadTimer(s,l)}}},n.prototype.update=function(e){var r=this;if(this.transform=e,this._sourceLoaded&&!this._paused){var 
i;this.updateCacheSize(e),this.handleWrapJump(this.transform.center.lng),this._coveredTiles={},this.used?this._source.tileID?i=e.getVisibleUnwrappedCoordinates(this._source.tileID).map((function(e){return new t.OverscaledTileID(e.canonical.z,e.wrap,e.canonical.z,e.canonical.x,e.canonical.y)})):(i=e.coveringTiles({tileSize:this._source.tileSize,minzoom:this._source.minzoom,maxzoom:this._source.maxzoom,roundZoom:this._source.roundZoom,reparseOverscaled:this._source.reparseOverscaled}),this._source.hasTile&&(i=i.filter((function(t){return r._source.hasTile(t)})))):i=[];var a=e.coveringZoomLevel(this._source),o=Math.max(a-n.maxOverzooming,this._source.minzoom),s=Math.max(a+n.maxUnderzooming,this._source.minzoom),l=this._updateRetainedTiles(i,a);if(Jt(this._source.type)){for(var c={},u={},f=0,h=Object.keys(l);fthis._source.maxzoom){var v=p.children(this._source.maxzoom)[0],m=this.getTile(v);if(m&&m.hasData()){r[v.key]=v;continue}}else{var y=p.children(this._source.maxzoom);if(r[y[0].key]&&r[y[1].key]&&r[y[2].key]&&r[y[3].key])continue}for(var b=g.wasRequested(),x=p.overscaledZ-1;x>=a;--x){var w=p.scaledTo(x);if(i[w.key])break;if(i[w.key]=!0,!(g=this.getTile(w))&&b&&(g=this._addTile(w)),g&&(r[w.key]=w,b=g.wasRequested(),g.hasData()))break}}}return r},n.prototype._updateLoadedParentTileCache=function(){for(var t in this._loadedParentTiles={},this._tiles){for(var e=[],n=void 0,r=this._tiles[t].tileID;r.overscaledZ>0;){if(r.key in this._loadedParentTiles){n=this._loadedParentTiles[r.key];break}e.push(r.key);var i=r.scaledTo(r.overscaledZ-1);if(n=this._getLoadedTile(i))break;r=i}for(var a=0,o=e;a0||(e.hasData()&&"reloading"!==e.state?this._cache.add(e.tileID,e,e.getExpiryTimeout()):(e.aborted=!0,this._abortTile(e),this._unloadTile(e))))},n.prototype.clearTiles=function(){for(var t in this._shouldReloadOnResume=!1,this._paused=!1,this._tiles)this._removeTile(t);this._cache.reset()},n.prototype.tilesIn=function(e,n,r){var i=this,a=[],o=this.transform;if(!o)return a;for(var s=r?o.getCameraQueryGeometry(e):e,l=e.map((function(t){return o.pointCoordinate(t)})),c=s.map((function(t){return o.pointCoordinate(t)})),u=this.getIds(),f=1/0,h=1/0,d=-1/0,p=-1/0,g=0,v=c;g=0&&m[1].y+v>=0){var y=l.map((function(t){return s.getTilePoint(t)})),b=c.map((function(t){return s.getTilePoint(t)}));a.push({tile:r,tileID:s,queryGeometry:y,cameraQueryGeometry:b,scale:g})}}},b=0;b=t.browser.now())return!0}return!1},n.prototype.setFeatureState=function(t,e,n){t=t||"_geojsonTileLayer",this._state.updateState(t,e,n)},n.prototype.removeFeatureState=function(t,e,n){t=t||"_geojsonTileLayer",this._state.removeFeatureState(t,e,n)},n.prototype.getFeatureState=function(t,e){return t=t||"_geojsonTileLayer",this._state.getState(t,e)},n.prototype.setDependencies=function(t,e,n){var r=this._tiles[t];r&&r.setDependencies(e,n)},n.prototype.reloadTilesForDependencies=function(t,e){for(var n in this._tiles)this._tiles[n].hasDependency(t,e)&&this._reloadTile(n,"reloading");this._cache.filter((function(n){return!n.hasDependency(t,e)}))},n}(t.Evented);function Zt(t,e){var n=Math.abs(2*t.wrap)-+(t.wrap<0),r=Math.abs(2*e.wrap)-+(e.wrap<0);return t.overscaledZ-e.overscaledZ||r-n||e.canonical.y-t.canonical.y||e.canonical.x-t.canonical.x}function Jt(t){return"raster"===t||"image"===t||"video"===t}function Qt(){return new t.window.Worker(us.workerUrl)}Kt.maxOverzooming=10,Kt.maxUnderzooming=3;var 
te="mapboxgl_preloaded_worker_pool",ee=function(){this.active={}};ee.prototype.acquire=function(t){if(!this.workers)for(this.workers=[];this.workers.length0?(i-o)/s:0;return this.points[a].mult(1-l).add(this.points[n].mult(l))};var _e=function(t,e,n){var r=this.boxCells=[],i=this.circleCells=[];this.xCellCount=Math.ceil(t/n),this.yCellCount=Math.ceil(e/n);for(var a=0;a=-e[0]&&n<=e[0]&&r>=-e[1]&&r<=e[1]}function Ee(e,n,r,i,a,o,s,l){var c=i?e.textSizeData:e.iconSizeData,u=t.evaluateSizeForZoom(c,r.transform.zoom),f=[256/r.width*2+1,256/r.height*2+1],h=i?e.text.dynamicLayoutVertexArray:e.icon.dynamicLayoutVertexArray;h.clear();for(var d=e.lineVertexArray,p=i?e.text.placedSymbolArray:e.icon.placedSymbolArray,g=r.transform.width/r.transform.height,v=!1,m=0;mMath.abs(r.x-n.x)*i?{useVertical:!0}:(e===t.WritingMode.vertical?n.yr.x)?{needsFlipping:!0}:null}function Oe(e,n,r,i,a,o,s,l,c,u,f,h,d,p){var g,v=n/24,m=e.lineOffsetX*v,y=e.lineOffsetY*v;if(e.numGlyphs>1){var b=e.glyphStartIndex+e.numGlyphs,x=e.lineStartIndex,w=e.lineStartIndex+e.lineLength,_=Ce(v,l,m,y,r,f,h,e,c,o,d);if(!_)return{notEnoughRoom:!0};var k=Me(_.first.point,s).point,T=Me(_.last.point,s).point;if(i&&!r){var M=Pe(e.writingMode,k,T,p);if(M)return M}g=[_.first];for(var A=e.glyphStartIndex+1;A0?P.point:Le(h,C,S,1,a),L=Pe(e.writingMode,S,O,p);if(L)return L}var I=Ie(v*l.getoffsetX(e.glyphStartIndex),m,y,r,f,h,e.segment,e.lineStartIndex,e.lineStartIndex+e.lineLength,c,o,d);if(!I)return{notEnoughRoom:!0};g=[I]}for(var D=0,R=g;D0?1:-1,g=0;i&&(p*=-1,g=Math.PI),p<0&&(g+=Math.PI);for(var v=p>0?l+s:l+s+1,m=a,y=a,b=0,x=0,w=Math.abs(d),_=[];b+x<=w;){if((v+=p)=c)return null;if(y=m,_.push(m),void 0===(m=h[v])){var k=new t.Point(u.getx(v),u.gety(v)),T=Me(k,f);if(T.signedDistanceFromCamera>0)m=h[v]=T.point;else{var M=v-p;m=Le(0===b?o:new t.Point(u.getx(M),u.gety(M)),k,y,w-b+1,f)}}b+=x,x=y.dist(m)}var A=(w-b)/x,S=m.sub(y),E=S.mult(A)._add(y);E._add(S._unit()._perp()._mult(r*p));var C=g+Math.atan2(m.y-y.y,m.x-y.x);return _.push(E),{point:E,angle:C,path:_}}_e.prototype.keysLength=function(){return this.boxKeys.length+this.circleKeys.length},_e.prototype.insert=function(t,e,n,r,i){this._forEachCell(e,n,r,i,this._insertBoxCell,this.boxUid++),this.boxKeys.push(t),this.bboxes.push(e),this.bboxes.push(n),this.bboxes.push(r),this.bboxes.push(i)},_e.prototype.insertCircle=function(t,e,n,r){this._forEachCell(e-r,n-r,e+r,n+r,this._insertCircleCell,this.circleUid++),this.circleKeys.push(t),this.circles.push(e),this.circles.push(n),this.circles.push(r)},_e.prototype._insertBoxCell=function(t,e,n,r,i,a){this.boxCells[i].push(a)},_e.prototype._insertCircleCell=function(t,e,n,r,i,a){this.circleCells[i].push(a)},_e.prototype._query=function(t,e,n,r,i,a){if(n<0||t>this.width||r<0||e>this.height)return!i&&[];var o=[];if(t<=0&&e<=0&&this.width<=n&&this.height<=r){if(i)return!0;for(var s=0;s0:o},_e.prototype._queryCircle=function(t,e,n,r,i){var a=t-n,o=t+n,s=e-n,l=e+n;if(o<0||a>this.width||l<0||s>this.height)return!r&&[];var c=[],u={hitTest:r,circle:{x:t,y:e,radius:n},seenUids:{box:{},circle:{}}};return this._forEachCell(a,s,o,l,this._queryCellCircle,c,u,i),r?c.length>0:c},_e.prototype.query=function(t,e,n,r,i){return this._query(t,e,n,r,!1,i)},_e.prototype.hitTest=function(t,e,n,r,i){return this._query(t,e,n,r,!0,i)},_e.prototype.hitTestCircle=function(t,e,n,r){return this._queryCircle(t,e,n,!0,r)},_e.prototype._queryCell=function(t,e,n,r,i,a,o,s){var l=o.seenUids,c=this.boxCells[i];if(null!==c)for(var 
u=this.bboxes,f=0,h=c;f=u[p+0]&&r>=u[p+1]&&(!s||s(this.boxKeys[d]))){if(o.hitTest)return a.push(!0),!0;a.push({key:this.boxKeys[d],x1:u[p],y1:u[p+1],x2:u[p+2],y2:u[p+3]})}}}var g=this.circleCells[i];if(null!==g)for(var v=this.circles,m=0,y=g;mo*o+s*s},_e.prototype._circleAndRectCollide=function(t,e,n,r,i,a,o){var s=(a-r)/2,l=Math.abs(t-(r+s));if(l>s+n)return!1;var c=(o-i)/2,u=Math.abs(e-(i+c));if(u>c+n)return!1;if(l<=s||u<=c)return!0;var f=l-s,h=u-c;return f*f+h*h<=n*n};var De=new Float32Array([-1/0,-1/0,0,-1/0,-1/0,0,-1/0,-1/0,0,-1/0,-1/0,0]);function Re(t,e){for(var n=0;n=1;L--)O.push(C.path[L]);for(var I=1;I0){for(var N=O[0].clone(),j=O[0].clone(),F=1;F=A.x&&j.x<=S.x&&N.y>=A.y&&j.y<=S.y?[O]:j.xS.x||j.yS.y?[]:t.clipLine([O],A.x,A.y,S.x,S.y)}for(var B=0,U=z;B=this.screenRightBoundary||rthis.screenBottomBoundary},je.prototype.isInsideGrid=function(t,e,n,r){return n>=0&&t=0&&e0?(this.prevPlacement&&this.prevPlacement.variableOffsets[f.crossTileID]&&this.prevPlacement.placements[f.crossTileID]&&this.prevPlacement.placements[f.crossTileID].text&&(g=this.prevPlacement.variableOffsets[f.crossTileID].anchor),this.variableOffsets[f.crossTileID]={textOffset:v,width:n,height:r,anchor:t,textBoxScale:i,prevAnchor:g},this.markUsedJustification(h,t,f,d),h.allowVerticalPlacement&&(this.markUsedOrientation(h,d,f),this.placedOrientations[f.crossTileID]=d),{shift:m,placedGlyphBoxes:y}):void 0},$e.prototype.placeLayerBucketPart=function(e,n,r){var i=this,a=e.parameters,o=a.bucket,s=a.layout,l=a.posMatrix,c=a.textLabelPlaneMatrix,u=a.labelToScreenMatrix,f=a.textPixelRatio,h=a.holdingForFade,d=a.collisionBoxArray,p=a.partiallyEvaluatedTextSize,g=a.collisionGroup,v=s.get("text-optional"),m=s.get("icon-optional"),y=s.get("text-allow-overlap"),b=s.get("icon-allow-overlap"),x="map"===s.get("text-rotation-alignment"),w="map"===s.get("text-pitch-alignment"),_="none"!==s.get("icon-text-fit"),k="viewport-y"===s.get("symbol-z-order"),T=y&&(b||!o.hasIconData()||m),M=b&&(y||!o.hasTextData()||v);!o.collisionArrays&&d&&o.deserializeCollisionBoxes(d);var A=function(e,a){if(!n[e.crossTileID])if(h)i.placements[e.crossTileID]=new He(!1,!1,!1);else{var d=!1,k=!1,A=!0,S=null,E={box:null,offscreen:null},C={box:null,offscreen:null},P=null,O=null,L=null,I=0,D=0,R=0;a.textFeatureIndex?I=a.textFeatureIndex:e.useRuntimeCollisionCircles&&(I=e.featureIndex),a.verticalTextFeatureIndex&&(D=a.verticalTextFeatureIndex);var z=a.textBox;if(z){var N=function(n){var r=t.WritingMode.horizontal;if(o.allowVerticalPlacement&&!n&&i.prevPlacement){var a=i.prevPlacement.placedOrientations[e.crossTileID];a&&(i.placedOrientations[e.crossTileID]=a,r=a,i.markUsedOrientation(o,r,e))}return r},j=function(n,r){if(o.allowVerticalPlacement&&e.numVerticalGlyphVertices>0&&a.verticalTextBox)for(var i=0,s=o.writingModes;i0&&(F=F.filter((function(t){return t!==B.anchor}))).unshift(B.anchor)}var U=function(t,n,r){for(var a=t.x2-t.x1,s=t.y2-t.y1,c=e.textBoxScale,u=_&&!b?n:null,h={box:[],offscreen:!1},p=y?2*F.length:F.length,v=0;v=F.length,T=i.attemptAnchorPlacement(m,t,a,s,c,x,w,f,l,g,k,e,o,r,u);if(T&&(h=T.placedGlyphBoxes)&&h.box&&h.box.length){d=!0,S=T.shift;break}}return h};j((function(){return U(z,a.iconBox,t.WritingMode.horizontal)}),(function(){var n=a.verticalTextBox,r=E&&E.box&&E.box.length;return o.allowVerticalPlacement&&!r&&e.numVerticalGlyphVertices>0&&n?U(n,a.verticalIconBox,t.WritingMode.vertical):{box:null,offscreen:null}})),E&&(d=E.box,A=E.offscreen);var H=N(E&&E.box);if(!d&&i.prevPlacement){var 
V=i.prevPlacement.variableOffsets[e.crossTileID];V&&(i.variableOffsets[e.crossTileID]=V,i.markUsedJustification(o,V.anchor,e,H))}}else{var q=function(t,n){var r=i.collisionIndex.placeCollisionBox(t,y,f,l,g.predicate);return r&&r.box&&r.box.length&&(i.markUsedOrientation(o,n,e),i.placedOrientations[e.crossTileID]=n),r};j((function(){return q(z,t.WritingMode.horizontal)}),(function(){var n=a.verticalTextBox;return o.allowVerticalPlacement&&e.numVerticalGlyphVertices>0&&n?q(n,t.WritingMode.vertical):{box:null,offscreen:null}})),N(E&&E.box&&E.box.length)}}if(d=(P=E)&&P.box&&P.box.length>0,A=P&&P.offscreen,e.useRuntimeCollisionCircles){var G=o.text.placedSymbolArray.get(e.centerJustifiedTextSymbolIndex),W=t.evaluateSizeForFeature(o.textSizeData,p,G),Y=s.get("text-padding"),$=e.collisionCircleDiameter;O=i.collisionIndex.placeCollisionCircles(y,G,o.lineVertexArray,o.glyphOffsetArray,W,l,c,u,r,w,g.predicate,$,Y),d=y||O.circles.length>0&&!O.collisionDetected,A=A&&O.offscreen}if(a.iconFeatureIndex&&(R=a.iconFeatureIndex),a.iconBox){var X=function(t){var e=_&&S?Ye(t,S.x,S.y,x,w,i.transform.angle):t;return i.collisionIndex.placeCollisionBox(e,b,f,l,g.predicate)};k=C&&C.box&&C.box.length&&a.verticalIconBox?(L=X(a.verticalIconBox)).box.length>0:(L=X(a.iconBox)).box.length>0,A=A&&L.offscreen}var K=v||0===e.numHorizontalGlyphVertices&&0===e.numVerticalGlyphVertices,Z=m||0===e.numIconVertices;if(K||Z?Z?K||(k=k&&d):d=k&&d:k=d=k&&d,d&&P&&P.box&&(C&&C.box&&D?i.collisionIndex.insertCollisionBox(P.box,s.get("text-ignore-placement"),o.bucketInstanceId,D,g.ID):i.collisionIndex.insertCollisionBox(P.box,s.get("text-ignore-placement"),o.bucketInstanceId,I,g.ID)),k&&L&&i.collisionIndex.insertCollisionBox(L.box,s.get("icon-ignore-placement"),o.bucketInstanceId,R,g.ID),O&&(d&&i.collisionIndex.insertCollisionCircles(O.circles,s.get("text-ignore-placement"),o.bucketInstanceId,I,g.ID),r)){var J=o.bucketInstanceId,Q=i.collisionCircleArrays[J];void 0===Q&&(Q=i.collisionCircleArrays[J]=new Ve);for(var tt=0;tt=0;--E){var C=S[E];A(o.symbolInstances.get(C),o.collisionArrays[C])}else for(var P=e.symbolInstanceStart;P=0&&(e.text.placedSymbolArray.get(c).crossTileID=a>=0&&c!==a?0:r.crossTileID)}},$e.prototype.markUsedOrientation=function(e,n,r){for(var i=n===t.WritingMode.horizontal||n===t.WritingMode.horizontalOnly?n:0,a=n===t.WritingMode.vertical?n:0,o=0,s=[r.leftJustifiedTextSymbolIndex,r.centerJustifiedTextSymbolIndex,r.rightJustifiedTextSymbolIndex];o0||l>0,b=a.numIconVertices>0,x=i.placedOrientations[a.crossTileID],w=x===t.WritingMode.vertical,_=x===t.WritingMode.horizontal||x===t.WritingMode.horizontalOnly;if(y){var k=rn(m.text),T=w?an:k;p(e.text,s,T);var M=_?an:k;p(e.text,l,M);var A=m.text.isHidden();[a.rightJustifiedTextSymbolIndex,a.centerJustifiedTextSymbolIndex,a.leftJustifiedTextSymbolIndex].forEach((function(t){t>=0&&(e.text.placedSymbolArray.get(t).hidden=A||w?1:0)})),a.verticalPlacedTextSymbolIndex>=0&&(e.text.placedSymbolArray.get(a.verticalPlacedTextSymbolIndex).hidden=A||_?1:0);var S=i.variableOffsets[a.crossTileID];S&&i.markUsedJustification(e,S.anchor,a,x);var E=i.placedOrientations[a.crossTileID];E&&(i.markUsedJustification(e,"left",a,E),i.markUsedOrientation(e,E,a))}if(b){var C=rn(m.icon),P=!(h&&a.verticalPlacedIconSymbolIndex&&w);if(a.placedIconSymbolIndex>=0){var O=P?C:an;p(e.icon,a.numIconVertices,O),e.icon.placedSymbolArray.get(a.placedIconSymbolIndex).hidden=m.icon.isHidden()}if(a.verticalPlacedIconSymbolIndex>=0){var 
L=P?an:C;p(e.icon,a.numVerticalIconVertices,L),e.icon.placedSymbolArray.get(a.verticalPlacedIconSymbolIndex).hidden=m.icon.isHidden()}}if(e.hasIconCollisionBoxData()||e.hasTextCollisionBoxData()){var I=e.collisionArrays[r];if(I){var D=new t.Point(0,0);if(I.textBox||I.verticalTextBox){var R=!0;if(c){var z=i.variableOffsets[g];z?(D=We(z.anchor,z.width,z.height,z.textOffset,z.textBoxScale),u&&D._rotate(f?i.transform.angle:-i.transform.angle)):R=!1}I.textBox&&Xe(e.textCollisionBox.collisionVertexArray,m.text.placed,!R||w,D.x,D.y),I.verticalTextBox&&Xe(e.textCollisionBox.collisionVertexArray,m.text.placed,!R||_,D.x,D.y)}var N=Boolean(!_&&I.verticalIconBox);I.iconBox&&Xe(e.iconCollisionBox.collisionVertexArray,m.icon.placed,N,h?D.x:0,h?D.y:0),I.verticalIconBox&&Xe(e.iconCollisionBox.collisionVertexArray,m.icon.placed,!N,h?D.x:0,h?D.y:0)}}},v=0;vt},$e.prototype.setStale=function(){this.stale=!0};var Ke=Math.pow(2,25),Ze=Math.pow(2,24),Je=Math.pow(2,17),Qe=Math.pow(2,16),tn=Math.pow(2,9),en=Math.pow(2,8),nn=Math.pow(2,1);function rn(t){if(0===t.opacity&&!t.placed)return 0;if(1===t.opacity&&t.placed)return 4294967295;var e=t.placed?1:0,n=Math.floor(127*t.opacity);return n*Ke+e*Ze+n*Je+e*Qe+n*tn+e*en+n*nn+e}var an=0,on=function(t){this._sortAcrossTiles="viewport-y"!==t.layout.get("symbol-z-order")&&void 0!==t.layout.get("symbol-sort-key").constantOr(1),this._currentTileIndex=0,this._currentPartIndex=0,this._seenCrossTileIDs={},this._bucketParts=[]};on.prototype.continuePlacement=function(t,e,n,r,i){for(var a=this._bucketParts;this._currentTileIndex2};this._currentPlacementIndex>=0;){var s=n[e[this._currentPlacementIndex]],l=this.placement.collisionIndex.transform.zoom;if("symbol"===s.type&&(!s.minzoom||s.minzoom<=l)&&(!s.maxzoom||s.maxzoom>l)){if(this._inProgressLayer||(this._inProgressLayer=new on(s)),this._inProgressLayer.continuePlacement(r[s.source],this.placement,this._showCollisionBoxes,s,o))return;delete this._inProgressLayer}this._currentPlacementIndex--}this._done=!0},sn.prototype.commit=function(t){return this.placement.commit(t),this.placement};var ln=512/t.EXTENT/2,cn=function(t,e,n){this.tileID=t,this.indexedSymbolInstances={},this.bucketInstanceId=n;for(var r=0;rt.overscaledZ)for(var s in o){var l=o[s];l.tileID.isChildOf(t)&&l.findMatches(e.symbolInstances,t,i)}else{var c=o[t.scaledTo(Number(a)).key];c&&c.findMatches(e.symbolInstances,t,i)}}for(var u=0;u0)throw new Error("Unimplemented: "+i.map((function(t){return t.command})).join(", ")+".");return r.forEach((function(t){"setTransition"!==t.command&&n[t.command].apply(n,t.args)})),this.stylesheet=e,!0},n.prototype.addImage=function(e,n){if(this.getImage(e))return this.fire(new t.ErrorEvent(new Error("An image with this name already exists.")));this.imageManager.addImage(e,n),this._availableImages=this.imageManager.listImages(),this._changedImages[e]=!0,this._changed=!0,this.fire(new t.Event("data",{dataType:"style"}))},n.prototype.updateImage=function(t,e){this.imageManager.updateImage(t,e)},n.prototype.getImage=function(t){return this.imageManager.getImage(t)},n.prototype.removeImage=function(e){if(!this.getImage(e))return this.fire(new t.ErrorEvent(new Error("No image with this name exists.")));this.imageManager.removeImage(e),this._availableImages=this.imageManager.listImages(),this._changedImages[e]=!0,this._changed=!0,this.fire(new t.Event("data",{dataType:"style"}))},n.prototype.listImages=function(){return this._checkLoaded(),this.imageManager.listImages()},n.prototype.addSource=function(e,n,r){var i=this;if(void 
0===r&&(r={}),this._checkLoaded(),void 0!==this.sourceCaches[e])throw new Error("There is already a source with this ID");if(!n.type)throw new Error("The type property must be defined, but the only the following properties were given: "+Object.keys(n).join(", ")+".");if(!(["vector","raster","geojson","video","image"].indexOf(n.type)>=0)||!this._validate(t.validateStyle.source,"sources."+e,n,null,r)){this.map&&this.map._collectResourceTiming&&(n.collectResourceTiming=!0);var a=this.sourceCaches[e]=new Kt(e,n,this.dispatcher);a.style=this,a.setEventedParent(this,(function(){return{isSourceLoaded:i.loaded(),source:a.serialize(),sourceId:e}})),a.onAdd(this.map),this._changed=!0}},n.prototype.removeSource=function(e){if(this._checkLoaded(),void 0===this.sourceCaches[e])throw new Error("There is no source with this ID");for(var n in this._layers)if(this._layers[n].source===e)return this.fire(new t.ErrorEvent(new Error('Source "'+e+'" cannot be removed while layer "'+n+'" is using it.')));var r=this.sourceCaches[e];delete this.sourceCaches[e],delete this._updatedSources[e],r.fire(new t.Event("data",{sourceDataType:"metadata",dataType:"source",sourceId:e})),r.setEventedParent(null),r.clearTiles(),r.onRemove&&r.onRemove(this.map),this._changed=!0},n.prototype.setGeoJSONSourceData=function(t,e){this._checkLoaded(),this.sourceCaches[t].getSource().setData(e),this._changed=!0},n.prototype.getSource=function(t){return this.sourceCaches[t]&&this.sourceCaches[t].getSource()},n.prototype.addLayer=function(e,n,r){void 0===r&&(r={}),this._checkLoaded();var i=e.id;if(this.getLayer(i))this.fire(new t.ErrorEvent(new Error('Layer with id "'+i+'" already exists on this map')));else{var a;if("custom"===e.type){if(dn(this,t.validateCustomStyleLayer(e)))return;a=t.createStyleLayer(e)}else{if("object"===typeof e.source&&(this.addSource(i,e.source),e=t.clone$1(e),e=t.extend(e,{source:i})),this._validate(t.validateStyle.layer,"layers."+i,e,{arrayIndex:-1},r))return;a=t.createStyleLayer(e),this._validateLayer(a),a.setEventedParent(this,{layer:{id:i}}),this._serializedLayers[a.id]=a.serialize()}var o=n?this._order.indexOf(n):this._order.length;if(n&&-1===o)this.fire(new t.ErrorEvent(new Error('Layer with id "'+n+'" does not exist on this map.')));else{if(this._order.splice(o,0,i),this._layerOrderChanged=!0,this._layers[i]=a,this._removedLayers[i]&&a.source&&"custom"!==a.type){var s=this._removedLayers[i];delete this._removedLayers[i],s.type!==a.type?this._updatedSources[a.source]="clear":(this._updatedSources[a.source]="reload",this.sourceCaches[a.source].pause())}this._updateLayer(a),a.onAdd&&a.onAdd(this.map)}}},n.prototype.moveLayer=function(e,n){if(this._checkLoaded(),this._changed=!0,this._layers[e]){if(e!==n){var r=this._order.indexOf(e);this._order.splice(r,1);var i=n?this._order.indexOf(n):this._order.length;n&&-1===i?this.fire(new t.ErrorEvent(new Error('Layer with id "'+n+'" does not exist on this map.'))):(this._order.splice(i,0,e),this._layerOrderChanged=!0)}}else this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be moved.")))},n.prototype.removeLayer=function(e){this._checkLoaded();var n=this._layers[e];if(n){n.setEventedParent(null);var r=this._order.indexOf(e);this._order.splice(r,1),this._layerOrderChanged=!0,this._changed=!0,this._removedLayers[e]=n,delete this._layers[e],delete this._serializedLayers[e],delete this._updatedLayers[e],delete this._updatedPaintProps[e],n.onRemove&&n.onRemove(this.map)}else this.fire(new t.ErrorEvent(new Error("The layer 
'"+e+"' does not exist in the map's style and cannot be removed.")))},n.prototype.getLayer=function(t){return this._layers[t]},n.prototype.hasLayer=function(t){return t in this._layers},n.prototype.setLayerZoomRange=function(e,n,r){this._checkLoaded();var i=this.getLayer(e);i?i.minzoom===n&&i.maxzoom===r||(null!=n&&(i.minzoom=n),null!=r&&(i.maxzoom=r),this._updateLayer(i)):this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot have zoom extent.")))},n.prototype.setFilter=function(e,n,r){void 0===r&&(r={}),this._checkLoaded();var i=this.getLayer(e);if(i){if(!t.deepEqual(i.filter,n))return null===n||void 0===n?(i.filter=void 0,void this._updateLayer(i)):void(this._validate(t.validateStyle.filter,"layers."+i.id+".filter",n,null,r)||(i.filter=t.clone$1(n),this._updateLayer(i)))}else this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be filtered.")))},n.prototype.getFilter=function(e){return t.clone$1(this.getLayer(e).filter)},n.prototype.setLayoutProperty=function(e,n,r,i){void 0===i&&(i={}),this._checkLoaded();var a=this.getLayer(e);a?t.deepEqual(a.getLayoutProperty(n),r)||(a.setLayoutProperty(n,r,i),this._updateLayer(a)):this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be styled.")))},n.prototype.getLayoutProperty=function(e,n){var r=this.getLayer(e);if(r)return r.getLayoutProperty(n);this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style.")))},n.prototype.setPaintProperty=function(e,n,r,i){void 0===i&&(i={}),this._checkLoaded();var a=this.getLayer(e);a?t.deepEqual(a.getPaintProperty(n),r)||(a.setPaintProperty(n,r,i)&&this._updateLayer(a),this._changed=!0,this._updatedPaintProps[e]=!0):this.fire(new t.ErrorEvent(new Error("The layer '"+e+"' does not exist in the map's style and cannot be styled.")))},n.prototype.getPaintProperty=function(t,e){return this.getLayer(t).getPaintProperty(e)},n.prototype.setFeatureState=function(e,n){this._checkLoaded();var r=e.source,i=e.sourceLayer,a=this.sourceCaches[r];if(void 0!==a){var o=a.getSource().type;"geojson"===o&&i?this.fire(new t.ErrorEvent(new Error("GeoJSON sources cannot have a sourceLayer parameter."))):"vector"!==o||i?(void 0===e.id&&this.fire(new t.ErrorEvent(new Error("The feature id parameter must be provided."))),a.setFeatureState(i,e.id,n)):this.fire(new t.ErrorEvent(new Error("The sourceLayer parameter must be provided for vector source types.")))}else this.fire(new t.ErrorEvent(new Error("The source '"+r+"' does not exist in the map's style.")))},n.prototype.removeFeatureState=function(e,n){this._checkLoaded();var r=e.source,i=this.sourceCaches[r];if(void 0!==i){var a=i.getSource().type,o="vector"===a?e.sourceLayer:void 0;"vector"!==a||o?n&&"string"!==typeof e.id&&"number"!==typeof e.id?this.fire(new t.ErrorEvent(new Error("A feature id is requred to remove its specific state property."))):i.removeFeatureState(o,e.id,n):this.fire(new t.ErrorEvent(new Error("The sourceLayer parameter must be provided for vector source types.")))}else this.fire(new t.ErrorEvent(new Error("The source '"+r+"' does not exist in the map's style.")))},n.prototype.getFeatureState=function(e){this._checkLoaded();var n=e.source,r=e.sourceLayer,i=this.sourceCaches[n];if(void 0!==i){if("vector"!==i.getSource().type||r)return void 0===e.id&&this.fire(new t.ErrorEvent(new Error("The feature id parameter must be provided."))),i.getFeatureState(r,e.id);this.fire(new t.ErrorEvent(new 
Error("The sourceLayer parameter must be provided for vector source types.")))}else this.fire(new t.ErrorEvent(new Error("The source '"+n+"' does not exist in the map's style.")))},n.prototype.getTransition=function(){return t.extend({duration:300,delay:0},this.stylesheet&&this.stylesheet.transition)},n.prototype.serialize=function(){return t.filterObject({version:this.stylesheet.version,name:this.stylesheet.name,metadata:this.stylesheet.metadata,light:this.stylesheet.light,center:this.stylesheet.center,zoom:this.stylesheet.zoom,bearing:this.stylesheet.bearing,pitch:this.stylesheet.pitch,sprite:this.stylesheet.sprite,glyphs:this.stylesheet.glyphs,transition:this.stylesheet.transition,sources:t.mapObject(this.sourceCaches,(function(t){return t.serialize()})),layers:this._serializeLayers(this._order)},(function(t){return void 0!==t}))},n.prototype._updateLayer=function(t){this._updatedLayers[t.id]=!0,t.source&&!this._updatedSources[t.source]&&"raster"!==this.sourceCaches[t.source].getSource().type&&(this._updatedSources[t.source]="reload",this.sourceCaches[t.source].pause()),this._changed=!0},n.prototype._flattenAndSortRenderedFeatures=function(t){for(var e=this,n=function(t){return"fill-extrusion"===e._layers[t].type},r={},i=[],a=this._order.length-1;a>=0;a--){var o=this._order[a];if(n(o)){r[o]=a;for(var s=0,l=t;s=0;p--){var g=this._order[p];if(n(g))for(var v=i.length-1;v>=0;v--){var m=i[v].feature;if(r[m.layer.id] 0.5) {gl_FragColor=vec4(0.0,0.0,1.0,0.5)*alpha;}if (v_notUsed > 0.5) {gl_FragColor*=.1;}}",Ln="attribute vec2 a_pos;attribute vec2 a_anchor_pos;attribute vec2 a_extrude;attribute vec2 a_placed;attribute vec2 a_shift;uniform mat4 u_matrix;uniform vec2 u_extrude_scale;uniform float u_camera_to_center_distance;varying float v_placed;varying float v_notUsed;void main() {vec4 projectedPoint=u_matrix*vec4(a_anchor_pos,0,1);highp float camera_to_anchor_distance=projectedPoint.w;highp float collision_perspective_ratio=clamp(0.5+0.5*(u_camera_to_center_distance/camera_to_anchor_distance),0.0,4.0);gl_Position=u_matrix*vec4(a_pos,0.0,1.0);gl_Position.xy+=(a_extrude+a_shift)*u_extrude_scale*gl_Position.w*collision_perspective_ratio;v_placed=a_placed.x;v_notUsed=a_placed.y;}",In="varying float v_radius;varying vec2 v_extrude;varying float v_perspective_ratio;varying float v_collision;void main() {float alpha=0.5*min(v_perspective_ratio,1.0);float stroke_radius=0.9*max(v_perspective_ratio,1.0);float distance_to_center=length(v_extrude);float distance_to_edge=abs(distance_to_center-v_radius);float opacity_t=smoothstep(-stroke_radius,0.0,-distance_to_edge);vec4 color=mix(vec4(0.0,0.0,1.0,0.5),vec4(1.0,0.0,0.0,1.0),v_collision);gl_FragColor=color*alpha*opacity_t;}",Dn="attribute vec2 a_pos;attribute float a_radius;attribute vec2 a_flags;uniform mat4 u_matrix;uniform mat4 u_inv_matrix;uniform vec2 u_viewport_size;uniform float u_camera_to_center_distance;varying float v_radius;varying vec2 v_extrude;varying float v_perspective_ratio;varying float v_collision;vec3 toTilePosition(vec2 screenPos) {vec4 rayStart=u_inv_matrix*vec4(screenPos,-1.0,1.0);vec4 rayEnd =u_inv_matrix*vec4(screenPos, 1.0,1.0);rayStart.xyz/=rayStart.w;rayEnd.xyz /=rayEnd.w;highp float t=(0.0-rayStart.z)/(rayEnd.z-rayStart.z);return mix(rayStart.xyz,rayEnd.xyz,t);}void main() {vec2 quadCenterPos=a_pos;float radius=a_radius;float collision=a_flags.x;float vertexIdx=a_flags.y;vec2 quadVertexOffset=vec2(mix(-1.0,1.0,float(vertexIdx >=2.0)),mix(-1.0,1.0,float(vertexIdx >=1.0 && vertexIdx <=2.0)));vec2 
quadVertexExtent=quadVertexOffset*radius;vec3 tilePos=toTilePosition(quadCenterPos);vec4 clipPos=u_matrix*vec4(tilePos,1.0);highp float camera_to_anchor_distance=clipPos.w;highp float collision_perspective_ratio=clamp(0.5+0.5*(u_camera_to_center_distance/camera_to_anchor_distance),0.0,4.0);float padding_factor=1.2;v_radius=radius;v_extrude=quadVertexExtent*padding_factor;v_perspective_ratio=collision_perspective_ratio;v_collision=collision;gl_Position=vec4(clipPos.xyz/clipPos.w,1.0)+vec4(quadVertexExtent*padding_factor/u_viewport_size*2.0,0.0,0.0);}",Rn="uniform highp vec4 u_color;uniform sampler2D u_overlay;varying vec2 v_uv;void main() {vec4 overlay_color=texture2D(u_overlay,v_uv);gl_FragColor=mix(u_color,overlay_color,overlay_color.a);}",zn="attribute vec2 a_pos;varying vec2 v_uv;uniform mat4 u_matrix;uniform float u_overlay_scale;void main() {v_uv=a_pos/8192.0;gl_Position=u_matrix*vec4(a_pos*u_overlay_scale,0,1);}",Nn="#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float opacity\ngl_FragColor=color*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",jn="attribute vec2 a_pos;uniform mat4 u_matrix;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float opacity\ngl_Position=u_matrix*vec4(a_pos,0,1);}",Fn="varying vec2 v_pos;\n#pragma mapbox: define highp vec4 outline_color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 outline_color\n#pragma mapbox: initialize lowp float opacity\nfloat dist=length(v_pos-gl_FragCoord.xy);float alpha=1.0-smoothstep(0.0,1.0,dist);gl_FragColor=outline_color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",Bn="attribute vec2 a_pos;uniform mat4 u_matrix;uniform vec2 u_world;varying vec2 v_pos;\n#pragma mapbox: define highp vec4 outline_color\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 outline_color\n#pragma mapbox: initialize lowp float opacity\ngl_Position=u_matrix*vec4(a_pos,0,1);v_pos=(gl_Position.xy/gl_Position.w+1.0)/2.0*u_world;}",Un="uniform vec2 u_texsize;uniform sampler2D u_image;uniform float u_fade;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec2 v_pos;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;vec2 imagecoord=mod(v_pos_a,1.0);vec2 pos=mix(pattern_tl_a/u_texsize,pattern_br_a/u_texsize,imagecoord);vec4 color1=texture2D(u_image,pos);vec2 imagecoord_b=mod(v_pos_b,1.0);vec2 pos2=mix(pattern_tl_b/u_texsize,pattern_br_b/u_texsize,imagecoord_b);vec4 color2=texture2D(u_image,pos2);float dist=length(v_pos-gl_FragCoord.xy);float alpha=1.0-smoothstep(0.0,1.0,dist);gl_FragColor=mix(color1,color2,u_fade)*alpha*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",Hn="uniform mat4 u_matrix;uniform vec2 u_world;uniform vec2 u_pixel_coord_upper;uniform vec2 u_pixel_coord_lower;uniform vec3 u_scale;attribute vec2 a_pos;varying vec2 v_pos_a;varying 
vec2 v_pos_b;varying vec2 v_pos;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;gl_Position=u_matrix*vec4(a_pos,0,1);vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;v_pos_a=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,fromScale*display_size_a,tileRatio,a_pos);v_pos_b=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,toScale*display_size_b,tileRatio,a_pos);v_pos=(gl_Position.xy/gl_Position.w+1.0)/2.0*u_world;}",Vn="uniform vec2 u_texsize;uniform float u_fade;uniform sampler2D u_image;varying vec2 v_pos_a;varying vec2 v_pos_b;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;vec2 imagecoord=mod(v_pos_a,1.0);vec2 pos=mix(pattern_tl_a/u_texsize,pattern_br_a/u_texsize,imagecoord);vec4 color1=texture2D(u_image,pos);vec2 imagecoord_b=mod(v_pos_b,1.0);vec2 pos2=mix(pattern_tl_b/u_texsize,pattern_br_b/u_texsize,imagecoord_b);vec4 color2=texture2D(u_image,pos2);gl_FragColor=mix(color1,color2,u_fade)*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",qn="uniform mat4 u_matrix;uniform vec2 u_pixel_coord_upper;uniform vec2 u_pixel_coord_lower;uniform vec3 u_scale;attribute vec2 a_pos;varying vec2 v_pos_a;varying vec2 v_pos_b;\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileZoomRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;gl_Position=u_matrix*vec4(a_pos,0,1);v_pos_a=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,fromScale*display_size_a,tileZoomRatio,a_pos);v_pos_b=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,toScale*display_size_b,tileZoomRatio,a_pos);}",Gn="varying vec4 v_color;void main() {gl_FragColor=v_color;\n#ifdef 
OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",Wn="uniform mat4 u_matrix;uniform vec3 u_lightcolor;uniform lowp vec3 u_lightpos;uniform lowp float u_lightintensity;uniform float u_vertical_gradient;uniform lowp float u_opacity;attribute vec2 a_pos;attribute vec4 a_normal_ed;varying vec4 v_color;\n#pragma mapbox: define highp float base\n#pragma mapbox: define highp float height\n#pragma mapbox: define highp vec4 color\nvoid main() {\n#pragma mapbox: initialize highp float base\n#pragma mapbox: initialize highp float height\n#pragma mapbox: initialize highp vec4 color\nvec3 normal=a_normal_ed.xyz;base=max(0.0,base);height=max(0.0,height);float t=mod(normal.x,2.0);gl_Position=u_matrix*vec4(a_pos,t > 0.0 ? height : base,1);float colorvalue=color.r*0.2126+color.g*0.7152+color.b*0.0722;v_color=vec4(0.0,0.0,0.0,1.0);vec4 ambientlight=vec4(0.03,0.03,0.03,1.0);color+=ambientlight;float directional=clamp(dot(normal/16384.0,u_lightpos),0.0,1.0);directional=mix((1.0-u_lightintensity),max((1.0-colorvalue+u_lightintensity),1.0),directional);if (normal.y !=0.0) {directional*=((1.0-u_vertical_gradient)+(u_vertical_gradient*clamp((t+base)*pow(height/150.0,0.5),mix(0.7,0.98,1.0-u_lightintensity),1.0)));}v_color.r+=clamp(color.r*directional*u_lightcolor.r,mix(0.0,0.3,1.0-u_lightcolor.r),1.0);v_color.g+=clamp(color.g*directional*u_lightcolor.g,mix(0.0,0.3,1.0-u_lightcolor.g),1.0);v_color.b+=clamp(color.b*directional*u_lightcolor.b,mix(0.0,0.3,1.0-u_lightcolor.b),1.0);v_color*=u_opacity;}",Yn="uniform vec2 u_texsize;uniform float u_fade;uniform sampler2D u_image;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec4 v_lighting;\n#pragma mapbox: define lowp float base\n#pragma mapbox: define lowp float height\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float base\n#pragma mapbox: initialize lowp float height\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;vec2 imagecoord=mod(v_pos_a,1.0);vec2 pos=mix(pattern_tl_a/u_texsize,pattern_br_a/u_texsize,imagecoord);vec4 color1=texture2D(u_image,pos);vec2 imagecoord_b=mod(v_pos_b,1.0);vec2 pos2=mix(pattern_tl_b/u_texsize,pattern_br_b/u_texsize,imagecoord_b);vec4 color2=texture2D(u_image,pos2);vec4 mixedColor=mix(color1,color2,u_fade);gl_FragColor=mixedColor*v_lighting;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",$n="uniform mat4 u_matrix;uniform vec2 u_pixel_coord_upper;uniform vec2 u_pixel_coord_lower;uniform float u_height_factor;uniform vec3 u_scale;uniform float u_vertical_gradient;uniform lowp float u_opacity;uniform vec3 u_lightcolor;uniform lowp vec3 u_lightpos;uniform lowp float u_lightintensity;attribute vec2 a_pos;attribute vec4 a_normal_ed;varying vec2 v_pos_a;varying vec2 v_pos_b;varying vec4 v_lighting;\n#pragma mapbox: define lowp float base\n#pragma mapbox: define lowp float height\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float 
base\n#pragma mapbox: initialize lowp float height\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;vec3 normal=a_normal_ed.xyz;float edgedistance=a_normal_ed.w;vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;base=max(0.0,base);height=max(0.0,height);float t=mod(normal.x,2.0);float z=t > 0.0 ? height : base;gl_Position=u_matrix*vec4(a_pos,z,1);vec2 pos=normal.x==1.0 && normal.y==0.0 && normal.z==16384.0\n? a_pos\n: vec2(edgedistance,z*u_height_factor);v_pos_a=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,fromScale*display_size_a,tileRatio,pos);v_pos_b=get_pattern_pos(u_pixel_coord_upper,u_pixel_coord_lower,toScale*display_size_b,tileRatio,pos);v_lighting=vec4(0.0,0.0,0.0,1.0);float directional=clamp(dot(normal/16383.0,u_lightpos),0.0,1.0);directional=mix((1.0-u_lightintensity),max((0.5+u_lightintensity),1.0),directional);if (normal.y !=0.0) {directional*=((1.0-u_vertical_gradient)+(u_vertical_gradient*clamp((t+base)*pow(height/150.0,0.5),mix(0.7,0.98,1.0-u_lightintensity),1.0)));}v_lighting.rgb+=clamp(directional*u_lightcolor,mix(vec3(0.0),vec3(0.3),1.0-u_lightcolor),vec3(1.0));v_lighting*=u_opacity;}",Xn="#ifdef GL_ES\nprecision highp float;\n#endif\nuniform sampler2D u_image;varying vec2 v_pos;uniform vec2 u_dimension;uniform float u_zoom;uniform float u_maxzoom;uniform vec4 u_unpack;float getElevation(vec2 coord,float bias) {vec4 data=texture2D(u_image,coord)*255.0;data.a=-1.0;return dot(data,u_unpack)/4.0;}void main() {vec2 epsilon=1.0/u_dimension;float a=getElevation(v_pos+vec2(-epsilon.x,-epsilon.y),0.0);float b=getElevation(v_pos+vec2(0,-epsilon.y),0.0);float c=getElevation(v_pos+vec2(epsilon.x,-epsilon.y),0.0);float d=getElevation(v_pos+vec2(-epsilon.x,0),0.0);float e=getElevation(v_pos,0.0);float f=getElevation(v_pos+vec2(epsilon.x,0),0.0);float g=getElevation(v_pos+vec2(-epsilon.x,epsilon.y),0.0);float h=getElevation(v_pos+vec2(0,epsilon.y),0.0);float i=getElevation(v_pos+vec2(epsilon.x,epsilon.y),0.0);float exaggeration=u_zoom < 2.0 ? 0.4 : u_zoom < 4.5 ? 0.35 : 0.3;vec2 deriv=vec2((c+f+f+i)-(a+d+d+g),(g+h+h+i)-(a+b+b+c))/ pow(2.0,(u_zoom-u_maxzoom)*exaggeration+19.2562-u_zoom);gl_FragColor=clamp(vec4(deriv.x/2.0+0.5,deriv.y/2.0+0.5,1.0,1.0),0.0,1.0);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",Kn="uniform mat4 u_matrix;uniform vec2 u_dimension;attribute vec2 a_pos;attribute vec2 a_texture_pos;varying vec2 v_pos;void main() {gl_Position=u_matrix*vec4(a_pos,0,1);highp vec2 epsilon=1.0/u_dimension;float scale=(u_dimension.x-2.0)/u_dimension.x;v_pos=(a_texture_pos/8192.0)*scale+epsilon;}",Zn="uniform sampler2D u_image;varying vec2 v_pos;uniform vec2 u_latrange;uniform vec2 u_light;uniform vec4 u_shadow;uniform vec4 u_highlight;uniform vec4 u_accent;\n#define PI 3.141592653589793\nvoid main() {vec4 pixel=texture2D(u_image,v_pos);vec2 deriv=((pixel.rg*2.0)-1.0);float scaleFactor=cos(radians((u_latrange[0]-u_latrange[1])*(1.0-v_pos.y)+u_latrange[1]));float slope=atan(1.25*length(deriv)/scaleFactor);float aspect=deriv.x !=0.0 ? atan(deriv.y,-deriv.x) : PI/2.0*(deriv.y > 0.0 ? 
1.0 :-1.0);float intensity=u_light.x;float azimuth=u_light.y+PI;float base=1.875-intensity*1.75;float maxValue=0.5*PI;float scaledSlope=intensity !=0.5 ? ((pow(base,slope)-1.0)/(pow(base,maxValue)-1.0))*maxValue : slope;float accent=cos(scaledSlope);vec4 accent_color=(1.0-accent)*u_accent*clamp(intensity*2.0,0.0,1.0);float shade=abs(mod((aspect+azimuth)/PI+0.5,2.0)-1.0);vec4 shade_color=mix(u_shadow,u_highlight,shade)*sin(scaledSlope)*clamp(intensity*2.0,0.0,1.0);gl_FragColor=accent_color*(1.0-shade_color.a)+shade_color;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",Jn="uniform mat4 u_matrix;attribute vec2 a_pos;attribute vec2 a_texture_pos;varying vec2 v_pos;void main() {gl_Position=u_matrix*vec4(a_pos,0,1);v_pos=a_texture_pos/8192.0;}",Qn="uniform lowp float u_device_pixel_ratio;varying vec2 v_width2;varying vec2 v_normal;varying float v_gamma_scale;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\nfloat dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);gl_FragColor=color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",tr="\n#define scale 0.015873016\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform mediump float u_ratio;uniform vec2 u_units_to_pixels;uniform lowp float u_device_pixel_ratio;varying vec2 v_normal;varying vec2 v_width2;varying float v_gamma_scale;varying highp float v_linesofar;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float width\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float width\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;v_linesofar=(floor(a_data.z/4.0)+a_data.w*64.0)*2.0;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 
0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_width2=vec2(outset,inset);}",er="uniform lowp float u_device_pixel_ratio;uniform sampler2D u_image;varying vec2 v_width2;varying vec2 v_normal;varying float v_gamma_scale;varying highp float v_lineprogress;\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\nfloat dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);vec4 color=texture2D(u_image,vec2(v_lineprogress,0.5));gl_FragColor=color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",nr="\n#define MAX_LINE_DISTANCE 32767.0\n#define scale 0.015873016\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform mediump float u_ratio;uniform lowp float u_device_pixel_ratio;uniform vec2 u_units_to_pixels;varying vec2 v_normal;varying vec2 v_width2;varying float v_gamma_scale;varying highp float v_lineprogress;\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float width\nvoid main() {\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float width\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;v_lineprogress=(floor(a_data.z/4.0)+a_data.w*64.0)*2.0/MAX_LINE_DISTANCE;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 
0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_width2=vec2(outset,inset);}",rr="uniform lowp float u_device_pixel_ratio;uniform vec2 u_texsize;uniform float u_fade;uniform mediump vec3 u_scale;uniform sampler2D u_image;varying vec2 v_normal;varying vec2 v_width2;varying float v_linesofar;varying float v_gamma_scale;varying float v_width;\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float pixel_ratio_to\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\nvec2 pattern_tl_a=pattern_from.xy;vec2 pattern_br_a=pattern_from.zw;vec2 pattern_tl_b=pattern_to.xy;vec2 pattern_br_b=pattern_to.zw;float tileZoomRatio=u_scale.x;float fromScale=u_scale.y;float toScale=u_scale.z;vec2 display_size_a=(pattern_br_a-pattern_tl_a)/pixel_ratio_from;vec2 display_size_b=(pattern_br_b-pattern_tl_b)/pixel_ratio_to;vec2 pattern_size_a=vec2(display_size_a.x*fromScale/tileZoomRatio,display_size_a.y);vec2 pattern_size_b=vec2(display_size_b.x*toScale/tileZoomRatio,display_size_b.y);float aspect_a=display_size_a.y/v_width;float aspect_b=display_size_b.y/v_width;float dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);float x_a=mod(v_linesofar/pattern_size_a.x*aspect_a,1.0);float x_b=mod(v_linesofar/pattern_size_b.x*aspect_b,1.0);float y=0.5*v_normal.y+0.5;vec2 texel_size=1.0/u_texsize;vec2 pos_a=mix(pattern_tl_a*texel_size-texel_size,pattern_br_a*texel_size+texel_size,vec2(x_a,y));vec2 pos_b=mix(pattern_tl_b*texel_size-texel_size,pattern_br_b*texel_size+texel_size,vec2(x_b,y));vec4 color=mix(texture2D(u_image,pos_a),texture2D(u_image,pos_b),u_fade);gl_FragColor=color*alpha*opacity;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",ir="\n#define scale 0.015873016\n#define LINE_DISTANCE_SCALE 2.0\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform vec2 u_units_to_pixels;uniform mediump float u_ratio;uniform lowp float u_device_pixel_ratio;varying vec2 v_normal;varying vec2 v_width2;varying float v_linesofar;varying float v_gamma_scale;varying float v_width;\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float offset\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define mediump float width\n#pragma mapbox: define lowp float floorwidth\n#pragma mapbox: define lowp vec4 pattern_from\n#pragma mapbox: define lowp vec4 pattern_to\n#pragma mapbox: define lowp float pixel_ratio_from\n#pragma mapbox: define lowp float 
pixel_ratio_to\nvoid main() {\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize mediump float width\n#pragma mapbox: initialize lowp float floorwidth\n#pragma mapbox: initialize mediump vec4 pattern_from\n#pragma mapbox: initialize mediump vec4 pattern_to\n#pragma mapbox: initialize lowp float pixel_ratio_from\n#pragma mapbox: initialize lowp float pixel_ratio_to\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;float a_linesofar=(floor(a_data.z/4.0)+a_data.w*64.0)*LINE_DISTANCE_SCALE;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_linesofar=a_linesofar;v_width2=vec2(outset,inset);v_width=floorwidth;}",ar="uniform lowp float u_device_pixel_ratio;uniform sampler2D u_image;uniform float u_sdfgamma;uniform float u_mix;varying vec2 v_normal;varying vec2 v_width2;varying vec2 v_tex_a;varying vec2 v_tex_b;varying float v_gamma_scale;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float width\n#pragma mapbox: define lowp float floorwidth\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float width\n#pragma mapbox: initialize lowp float floorwidth\nfloat dist=length(v_normal)*v_width2.s;float blur2=(blur+1.0/u_device_pixel_ratio)*v_gamma_scale;float alpha=clamp(min(dist-(v_width2.t-blur2),v_width2.s-dist)/blur2,0.0,1.0);float sdfdist_a=texture2D(u_image,v_tex_a).a;float sdfdist_b=texture2D(u_image,v_tex_b).a;float sdfdist=mix(sdfdist_a,sdfdist_b,u_mix);alpha*=smoothstep(0.5-u_sdfgamma/floorwidth,0.5+u_sdfgamma/floorwidth,sdfdist);gl_FragColor=color*(alpha*opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",or="\n#define scale 0.015873016\n#define LINE_DISTANCE_SCALE 2.0\nattribute vec2 a_pos_normal;attribute vec4 a_data;uniform mat4 u_matrix;uniform mediump float u_ratio;uniform lowp float u_device_pixel_ratio;uniform vec2 u_patternscale_a;uniform float u_tex_y_a;uniform vec2 u_patternscale_b;uniform float u_tex_y_b;uniform vec2 u_units_to_pixels;varying vec2 v_normal;varying vec2 v_width2;varying vec2 v_tex_a;varying vec2 v_tex_b;varying float v_gamma_scale;\n#pragma mapbox: define highp vec4 color\n#pragma mapbox: define lowp float blur\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define mediump float gapwidth\n#pragma mapbox: define lowp float offset\n#pragma mapbox: 
define mediump float width\n#pragma mapbox: define lowp float floorwidth\nvoid main() {\n#pragma mapbox: initialize highp vec4 color\n#pragma mapbox: initialize lowp float blur\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize mediump float gapwidth\n#pragma mapbox: initialize lowp float offset\n#pragma mapbox: initialize mediump float width\n#pragma mapbox: initialize lowp float floorwidth\nfloat ANTIALIASING=1.0/u_device_pixel_ratio/2.0;vec2 a_extrude=a_data.xy-128.0;float a_direction=mod(a_data.z,4.0)-1.0;float a_linesofar=(floor(a_data.z/4.0)+a_data.w*64.0)*LINE_DISTANCE_SCALE;vec2 pos=floor(a_pos_normal*0.5);mediump vec2 normal=a_pos_normal-2.0*pos;normal.y=normal.y*2.0-1.0;v_normal=normal;gapwidth=gapwidth/2.0;float halfwidth=width/2.0;offset=-1.0*offset;float inset=gapwidth+(gapwidth > 0.0 ? ANTIALIASING : 0.0);float outset=gapwidth+halfwidth*(gapwidth > 0.0 ? 2.0 : 1.0)+(halfwidth==0.0 ? 0.0 : ANTIALIASING);mediump vec2 dist=outset*a_extrude*scale;mediump float u=0.5*a_direction;mediump float t=1.0-abs(u);mediump vec2 offset2=offset*a_extrude*scale*normal.y*mat2(t,-u,u,t);vec4 projected_extrude=u_matrix*vec4(dist/u_ratio,0.0,0.0);gl_Position=u_matrix*vec4(pos+offset2/u_ratio,0.0,1.0)+projected_extrude;float extrude_length_without_perspective=length(dist);float extrude_length_with_perspective=length(projected_extrude.xy/gl_Position.w*u_units_to_pixels);v_gamma_scale=extrude_length_without_perspective/extrude_length_with_perspective;v_tex_a=vec2(a_linesofar*u_patternscale_a.x/floorwidth,normal.y*u_patternscale_a.y+u_tex_y_a);v_tex_b=vec2(a_linesofar*u_patternscale_b.x/floorwidth,normal.y*u_patternscale_b.y+u_tex_y_b);v_width2=vec2(outset,inset);}",sr="uniform float u_fade_t;uniform float u_opacity;uniform sampler2D u_image0;uniform sampler2D u_image1;varying vec2 v_pos0;varying vec2 v_pos1;uniform float u_brightness_low;uniform float u_brightness_high;uniform float u_saturation_factor;uniform float u_contrast_factor;uniform vec3 u_spin_weights;void main() {vec4 color0=texture2D(u_image0,v_pos0);vec4 color1=texture2D(u_image1,v_pos1);if (color0.a > 0.0) {color0.rgb=color0.rgb/color0.a;}if (color1.a > 0.0) {color1.rgb=color1.rgb/color1.a;}vec4 color=mix(color0,color1,u_fade_t);color.a*=u_opacity;vec3 rgb=color.rgb;rgb=vec3(dot(rgb,u_spin_weights.xyz),dot(rgb,u_spin_weights.zxy),dot(rgb,u_spin_weights.yzx));float average=(color.r+color.g+color.b)/3.0;rgb+=(average-rgb)*u_saturation_factor;rgb=(rgb-0.5)*u_contrast_factor+0.5;vec3 u_high_vec=vec3(u_brightness_low,u_brightness_low,u_brightness_low);vec3 u_low_vec=vec3(u_brightness_high,u_brightness_high,u_brightness_high);gl_FragColor=vec4(mix(u_high_vec,u_low_vec,rgb)*color.a,color.a);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",lr="uniform mat4 u_matrix;uniform vec2 u_tl_parent;uniform float u_scale_parent;uniform float u_buffer_scale;attribute vec2 a_pos;attribute vec2 a_texture_pos;varying vec2 v_pos0;varying vec2 v_pos1;void main() {gl_Position=u_matrix*vec4(a_pos,0,1);v_pos0=(((a_texture_pos/8192.0)-0.5)/u_buffer_scale )+0.5;v_pos1=(v_pos0*u_scale_parent)+u_tl_parent;}",cr="uniform sampler2D u_texture;varying vec2 v_tex;varying float v_fade_opacity;\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\nlowp float alpha=opacity*v_fade_opacity;gl_FragColor=texture2D(u_texture,v_tex)*alpha;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",ur="const float PI=3.141592653589793;attribute vec4 a_pos_offset;attribute vec4 
a_data;attribute vec4 a_pixeloffset;attribute vec3 a_projected_pos;attribute float a_fade_opacity;uniform bool u_is_size_zoom_constant;uniform bool u_is_size_feature_constant;uniform highp float u_size_t;uniform highp float u_size;uniform highp float u_camera_to_center_distance;uniform highp float u_pitch;uniform bool u_rotate_symbol;uniform highp float u_aspect_ratio;uniform float u_fade_change;uniform mat4 u_matrix;uniform mat4 u_label_plane_matrix;uniform mat4 u_coord_matrix;uniform bool u_is_text;uniform bool u_pitch_with_map;uniform vec2 u_texsize;varying vec2 v_tex;varying float v_fade_opacity;\n#pragma mapbox: define lowp float opacity\nvoid main() {\n#pragma mapbox: initialize lowp float opacity\nvec2 a_pos=a_pos_offset.xy;vec2 a_offset=a_pos_offset.zw;vec2 a_tex=a_data.xy;vec2 a_size=a_data.zw;float a_size_min=floor(a_size[0]*0.5);vec2 a_pxoffset=a_pixeloffset.xy;vec2 a_minFontScale=a_pixeloffset.zw/256.0;highp float segment_angle=-a_projected_pos[2];float size;if (!u_is_size_zoom_constant && !u_is_size_feature_constant) {size=mix(a_size_min,a_size[1],u_size_t)/128.0;} else if (u_is_size_zoom_constant && !u_is_size_feature_constant) {size=a_size_min/128.0;} else {size=u_size;}vec4 projectedPoint=u_matrix*vec4(a_pos,0,1);highp float camera_to_anchor_distance=projectedPoint.w;highp float distance_ratio=u_pitch_with_map ?\ncamera_to_anchor_distance/u_camera_to_center_distance :\nu_camera_to_center_distance/camera_to_anchor_distance;highp float perspective_ratio=clamp(0.5+0.5*distance_ratio,0.0,4.0);size*=perspective_ratio;float fontScale=u_is_text ? size/24.0 : size;highp float symbol_rotation=0.0;if (u_rotate_symbol) {vec4 offsetProjectedPoint=u_matrix*vec4(a_pos+vec2(1,0),0,1);vec2 a=projectedPoint.xy/projectedPoint.w;vec2 b=offsetProjectedPoint.xy/offsetProjectedPoint.w;symbol_rotation=atan((b.y-a.y)/u_aspect_ratio,b.x-a.x);}highp float angle_sin=sin(segment_angle+symbol_rotation);highp float angle_cos=cos(segment_angle+symbol_rotation);mat2 rotation_matrix=mat2(angle_cos,-1.0*angle_sin,angle_sin,angle_cos);vec4 projected_pos=u_label_plane_matrix*vec4(a_projected_pos.xy,0.0,1.0);gl_Position=u_coord_matrix*vec4(projected_pos.xy/projected_pos.w+rotation_matrix*(a_offset/32.0*max(a_minFontScale,fontScale)+a_pxoffset/16.0),0.0,1.0);v_tex=a_tex/u_texsize;vec2 fade_opacity=unpack_opacity(a_fade_opacity);float fade_change=fade_opacity[1] > 0.5 ? u_fade_change :-u_fade_change;v_fade_opacity=max(0.0,min(1.0,fade_opacity[0]+fade_change));}",fr="#define SDF_PX 8.0\nuniform bool u_is_halo;uniform sampler2D u_texture;uniform highp float u_gamma_scale;uniform lowp float u_device_pixel_ratio;uniform bool u_is_text;varying vec2 v_data0;varying vec3 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nfloat EDGE_GAMMA=0.105/u_device_pixel_ratio;vec2 tex=v_data0.xy;float gamma_scale=v_data1.x;float size=v_data1.y;float fade_opacity=v_data1[2];float fontScale=u_is_text ? 
size/24.0 : size;lowp vec4 color=fill_color;highp float gamma=EDGE_GAMMA/(fontScale*u_gamma_scale);lowp float buff=(256.0-64.0)/256.0;if (u_is_halo) {color=halo_color;gamma=(halo_blur*1.19/SDF_PX+EDGE_GAMMA)/(fontScale*u_gamma_scale);buff=(6.0-halo_width/fontScale)/SDF_PX;}lowp float dist=texture2D(u_texture,tex).a;highp float gamma_scaled=gamma*gamma_scale;highp float alpha=smoothstep(buff-gamma_scaled,buff+gamma_scaled,dist);gl_FragColor=color*(alpha*opacity*fade_opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",hr="const float PI=3.141592653589793;attribute vec4 a_pos_offset;attribute vec4 a_data;attribute vec4 a_pixeloffset;attribute vec3 a_projected_pos;attribute float a_fade_opacity;uniform bool u_is_size_zoom_constant;uniform bool u_is_size_feature_constant;uniform highp float u_size_t;uniform highp float u_size;uniform mat4 u_matrix;uniform mat4 u_label_plane_matrix;uniform mat4 u_coord_matrix;uniform bool u_is_text;uniform bool u_pitch_with_map;uniform highp float u_pitch;uniform bool u_rotate_symbol;uniform highp float u_aspect_ratio;uniform highp float u_camera_to_center_distance;uniform float u_fade_change;uniform vec2 u_texsize;varying vec2 v_data0;varying vec3 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nvec2 a_pos=a_pos_offset.xy;vec2 a_offset=a_pos_offset.zw;vec2 a_tex=a_data.xy;vec2 a_size=a_data.zw;float a_size_min=floor(a_size[0]*0.5);vec2 a_pxoffset=a_pixeloffset.xy;highp float segment_angle=-a_projected_pos[2];float size;if (!u_is_size_zoom_constant && !u_is_size_feature_constant) {size=mix(a_size_min,a_size[1],u_size_t)/128.0;} else if (u_is_size_zoom_constant && !u_is_size_feature_constant) {size=a_size_min/128.0;} else {size=u_size;}vec4 projectedPoint=u_matrix*vec4(a_pos,0,1);highp float camera_to_anchor_distance=projectedPoint.w;highp float distance_ratio=u_pitch_with_map ?\ncamera_to_anchor_distance/u_camera_to_center_distance :\nu_camera_to_center_distance/camera_to_anchor_distance;highp float perspective_ratio=clamp(0.5+0.5*distance_ratio,0.0,4.0);size*=perspective_ratio;float fontScale=u_is_text ? size/24.0 : size;highp float symbol_rotation=0.0;if (u_rotate_symbol) {vec4 offsetProjectedPoint=u_matrix*vec4(a_pos+vec2(1,0),0,1);vec2 a=projectedPoint.xy/projectedPoint.w;vec2 b=offsetProjectedPoint.xy/offsetProjectedPoint.w;symbol_rotation=atan((b.y-a.y)/u_aspect_ratio,b.x-a.x);}highp float angle_sin=sin(segment_angle+symbol_rotation);highp float angle_cos=cos(segment_angle+symbol_rotation);mat2 rotation_matrix=mat2(angle_cos,-1.0*angle_sin,angle_sin,angle_cos);vec4 projected_pos=u_label_plane_matrix*vec4(a_projected_pos.xy,0.0,1.0);gl_Position=u_coord_matrix*vec4(projected_pos.xy/projected_pos.w+rotation_matrix*(a_offset/32.0*fontScale+a_pxoffset),0.0,1.0);float gamma_scale=gl_Position.w;vec2 fade_opacity=unpack_opacity(a_fade_opacity);float fade_change=fade_opacity[1] > 0.5 ? 
u_fade_change :-u_fade_change;float interpolated_fade_opacity=max(0.0,min(1.0,fade_opacity[0]+fade_change));v_data0=a_tex/u_texsize;v_data1=vec3(gamma_scale,size,interpolated_fade_opacity);}",dr="#define SDF_PX 8.0\n#define SDF 1.0\n#define ICON 0.0\nuniform bool u_is_halo;uniform sampler2D u_texture;uniform sampler2D u_texture_icon;uniform highp float u_gamma_scale;uniform lowp float u_device_pixel_ratio;varying vec4 v_data0;varying vec4 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nfloat fade_opacity=v_data1[2];if (v_data1.w==ICON) {vec2 tex_icon=v_data0.zw;lowp float alpha=opacity*fade_opacity;gl_FragColor=texture2D(u_texture_icon,tex_icon)*alpha;\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\nreturn;}vec2 tex=v_data0.xy;float EDGE_GAMMA=0.105/u_device_pixel_ratio;float gamma_scale=v_data1.x;float size=v_data1.y;float fontScale=size/24.0;lowp vec4 color=fill_color;highp float gamma=EDGE_GAMMA/(fontScale*u_gamma_scale);lowp float buff=(256.0-64.0)/256.0;if (u_is_halo) {color=halo_color;gamma=(halo_blur*1.19/SDF_PX+EDGE_GAMMA)/(fontScale*u_gamma_scale);buff=(6.0-halo_width/fontScale)/SDF_PX;}lowp float dist=texture2D(u_texture,tex).a;highp float gamma_scaled=gamma*gamma_scale;highp float alpha=smoothstep(buff-gamma_scaled,buff+gamma_scaled,dist);gl_FragColor=color*(alpha*opacity*fade_opacity);\n#ifdef OVERDRAW_INSPECTOR\ngl_FragColor=vec4(1.0);\n#endif\n}",pr="const float PI=3.141592653589793;attribute vec4 a_pos_offset;attribute vec4 a_data;attribute vec3 a_projected_pos;attribute float a_fade_opacity;uniform bool u_is_size_zoom_constant;uniform bool u_is_size_feature_constant;uniform highp float u_size_t;uniform highp float u_size;uniform mat4 u_matrix;uniform mat4 u_label_plane_matrix;uniform mat4 u_coord_matrix;uniform bool u_is_text;uniform bool u_pitch_with_map;uniform highp float u_pitch;uniform bool u_rotate_symbol;uniform highp float u_aspect_ratio;uniform highp float u_camera_to_center_distance;uniform float u_fade_change;uniform vec2 u_texsize;uniform vec2 u_texsize_icon;varying vec4 v_data0;varying vec4 v_data1;\n#pragma mapbox: define highp vec4 fill_color\n#pragma mapbox: define highp vec4 halo_color\n#pragma mapbox: define lowp float opacity\n#pragma mapbox: define lowp float halo_width\n#pragma mapbox: define lowp float halo_blur\nvoid main() {\n#pragma mapbox: initialize highp vec4 fill_color\n#pragma mapbox: initialize highp vec4 halo_color\n#pragma mapbox: initialize lowp float opacity\n#pragma mapbox: initialize lowp float halo_width\n#pragma mapbox: initialize lowp float halo_blur\nvec2 a_pos=a_pos_offset.xy;vec2 a_offset=a_pos_offset.zw;vec2 a_tex=a_data.xy;vec2 a_size=a_data.zw;float a_size_min=floor(a_size[0]*0.5);float is_sdf=a_size[0]-2.0*a_size_min;highp float segment_angle=-a_projected_pos[2];float size;if (!u_is_size_zoom_constant && !u_is_size_feature_constant) {size=mix(a_size_min,a_size[1],u_size_t)/128.0;} else if (u_is_size_zoom_constant && !u_is_size_feature_constant) {size=a_size_min/128.0;} else {size=u_size;}vec4 projectedPoint=u_matrix*vec4(a_pos,0,1);highp float 
camera_to_anchor_distance=projectedPoint.w;highp float distance_ratio=u_pitch_with_map ?\ncamera_to_anchor_distance/u_camera_to_center_distance :\nu_camera_to_center_distance/camera_to_anchor_distance;highp float perspective_ratio=clamp(0.5+0.5*distance_ratio,0.0,4.0);size*=perspective_ratio;float fontScale=size/24.0;highp float symbol_rotation=0.0;if (u_rotate_symbol) {vec4 offsetProjectedPoint=u_matrix*vec4(a_pos+vec2(1,0),0,1);vec2 a=projectedPoint.xy/projectedPoint.w;vec2 b=offsetProjectedPoint.xy/offsetProjectedPoint.w;symbol_rotation=atan((b.y-a.y)/u_aspect_ratio,b.x-a.x);}highp float angle_sin=sin(segment_angle+symbol_rotation);highp float angle_cos=cos(segment_angle+symbol_rotation);mat2 rotation_matrix=mat2(angle_cos,-1.0*angle_sin,angle_sin,angle_cos);vec4 projected_pos=u_label_plane_matrix*vec4(a_projected_pos.xy,0.0,1.0);gl_Position=u_coord_matrix*vec4(projected_pos.xy/projected_pos.w+rotation_matrix*(a_offset/32.0*fontScale),0.0,1.0);float gamma_scale=gl_Position.w;vec2 fade_opacity=unpack_opacity(a_fade_opacity);float fade_change=fade_opacity[1] > 0.5 ? u_fade_change :-u_fade_change;float interpolated_fade_opacity=max(0.0,min(1.0,fade_opacity[0]+fade_change));v_data0.xy=a_tex/u_texsize;v_data0.zw=a_tex/u_texsize_icon;v_data1=vec4(gamma_scale,size,interpolated_fade_opacity,is_sdf);}",gr=Ur("#ifdef GL_ES\nprecision mediump float;\n#else\n#if !defined(lowp)\n#define lowp\n#endif\n#if !defined(mediump)\n#define mediump\n#endif\n#if !defined(highp)\n#define highp\n#endif\n#endif","#ifdef GL_ES\nprecision highp float;\n#else\n#if !defined(lowp)\n#define lowp\n#endif\n#if !defined(mediump)\n#define mediump\n#endif\n#if !defined(highp)\n#define highp\n#endif\n#endif\nvec2 unpack_float(const float packedValue) {int packedIntValue=int(packedValue);int v0=packedIntValue/256;return vec2(v0,packedIntValue-v0*256);}vec2 unpack_opacity(const float packedOpacity) {int intOpacity=int(packedOpacity)/2;return vec2(float(intOpacity)/127.0,mod(packedOpacity,2.0));}vec4 decode_color(const vec2 encodedColor) {return vec4(unpack_float(encodedColor[0])/255.0,unpack_float(encodedColor[1])/255.0\n);}float unpack_mix_vec2(const vec2 packedValue,const float t) {return mix(packedValue[0],packedValue[1],t);}vec4 unpack_mix_color(const vec4 packedColors,const float t) {vec4 minColor=decode_color(vec2(packedColors[0],packedColors[1]));vec4 maxColor=decode_color(vec2(packedColors[2],packedColors[3]));return mix(minColor,maxColor,t);}vec2 get_pattern_pos(const vec2 pixel_coord_upper,const vec2 pixel_coord_lower,const vec2 pattern_size,const float tile_units_to_pixels,const vec2 pos) {vec2 offset=mod(mod(mod(pixel_coord_upper,pattern_size)*256.0,pattern_size)*256.0+pixel_coord_lower,pattern_size);return (tile_units_to_pixels*pos+offset)/pattern_size;}"),vr=Ur(bn,xn),mr=Ur(wn,_n),yr=Ur(kn,Tn),br=Ur(Mn,An),xr=Ur(Sn,En),wr=Ur(Cn,Pn),_r=Ur(On,Ln),kr=Ur(In,Dn),Tr=Ur(Rn,zn),Mr=Ur(Nn,jn),Ar=Ur(Fn,Bn),Sr=Ur(Un,Hn),Er=Ur(Vn,qn),Cr=Ur(Gn,Wn),Pr=Ur(Yn,$n),Or=Ur(Xn,Kn),Lr=Ur(Zn,Jn),Ir=Ur(Qn,tr),Dr=Ur(er,nr),Rr=Ur(rr,ir),zr=Ur(ar,or),Nr=Ur(sr,lr),jr=Ur(cr,ur),Fr=Ur(fr,hr),Br=Ur(dr,pr);function Ur(t,e){var n=/#pragma mapbox: ([\w]+) ([\w]+) ([\w]+) ([\w]+)/g,r={};return{fragmentSource:t=t.replace(n,(function(t,e,n,i,a){return r[a]=!0,"define"===e?"\n#ifndef HAS_UNIFORM_u_"+a+"\nvarying "+n+" "+i+" "+a+";\n#else\nuniform "+n+" "+i+" u_"+a+";\n#endif\n":"\n#ifdef HAS_UNIFORM_u_"+a+"\n "+n+" "+i+" "+a+" = u_"+a+";\n#endif\n"})),vertexSource:e=e.replace(n,(function(t,e,n,i,a){var 
o="float"===i?"vec2":"vec4",s=a.match(/color/)?"color":o;return r[a]?"define"===e?"\n#ifndef HAS_UNIFORM_u_"+a+"\nuniform lowp float u_"+a+"_t;\nattribute "+n+" "+o+" a_"+a+";\nvarying "+n+" "+i+" "+a+";\n#else\nuniform "+n+" "+i+" u_"+a+";\n#endif\n":"vec4"===s?"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+a+" = a_"+a+";\n#else\n "+n+" "+i+" "+a+" = u_"+a+";\n#endif\n":"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+a+" = unpack_mix_"+s+"(a_"+a+", u_"+a+"_t);\n#else\n "+n+" "+i+" "+a+" = u_"+a+";\n#endif\n":"define"===e?"\n#ifndef HAS_UNIFORM_u_"+a+"\nuniform lowp float u_"+a+"_t;\nattribute "+n+" "+o+" a_"+a+";\n#else\nuniform "+n+" "+i+" u_"+a+";\n#endif\n":"vec4"===s?"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+n+" "+i+" "+a+" = a_"+a+";\n#else\n "+n+" "+i+" "+a+" = u_"+a+";\n#endif\n":"\n#ifndef HAS_UNIFORM_u_"+a+"\n "+n+" "+i+" "+a+" = unpack_mix_"+s+"(a_"+a+", u_"+a+"_t);\n#else\n "+n+" "+i+" "+a+" = u_"+a+";\n#endif\n"}))}}var Hr=Object.freeze({__proto__:null,prelude:gr,background:vr,backgroundPattern:mr,circle:yr,clippingMask:br,heatmap:xr,heatmapTexture:wr,collisionBox:_r,collisionCircle:kr,debug:Tr,fill:Mr,fillOutline:Ar,fillOutlinePattern:Sr,fillPattern:Er,fillExtrusion:Cr,fillExtrusionPattern:Pr,hillshadePrepare:Or,hillshade:Lr,line:Ir,lineGradient:Dr,linePattern:Rr,lineSDF:zr,raster:Nr,symbolIcon:jr,symbolSDF:Fr,symbolTextAndIcon:Br}),Vr=function(){this.boundProgram=null,this.boundLayoutVertexBuffer=null,this.boundPaintVertexBuffers=[],this.boundIndexBuffer=null,this.boundVertexOffset=null,this.boundDynamicVertexBuffer=null,this.vao=null};Vr.prototype.bind=function(t,e,n,r,i,a,o,s){this.context=t;for(var l=this.boundPaintVertexBuffers.length!==r.length,c=0;!l&&c>16,s>>16],u_pixel_coord_lower:[65535&o,65535&s]}}function Wr(t,e,n,r){var i=n.imageManager.getPattern(t.from.toString()),a=n.imageManager.getPattern(t.to.toString()),o=n.imageManager.getPixelSize(),s=o.width,l=o.height,c=Math.pow(2,r.tileID.overscaledZ),u=r.tileSize*Math.pow(2,n.transform.tileZoom)/c,f=u*(r.tileID.canonical.x+r.tileID.wrap*c),h=u*r.tileID.canonical.y;return{u_image:0,u_pattern_tl_a:i.tl,u_pattern_br_a:i.br,u_pattern_tl_b:a.tl,u_pattern_br_b:a.br,u_texsize:[s,l],u_mix:e.t,u_pattern_size_a:i.displaySize,u_pattern_size_b:a.displaySize,u_scale_a:e.fromScale,u_scale_b:e.toScale,u_tile_units_to_pixels:1/Fe(r,1,n.transform.tileZoom),u_pixel_coord_upper:[f>>16,h>>16],u_pixel_coord_lower:[65535&f,65535&h]}}qr.prototype.draw=function(t,e,n,r,i,a,o,s,l,c,u,f,h,d,p,g){var v,m=t.gl;if(!this.failedToCreate){for(var y in t.program.set(this.program),t.setDepthMode(n),t.setStencilMode(r),t.setColorMode(i),t.setCullFace(a),this.fixedUniforms)this.fixedUniforms[y].set(o[y]);d&&d.setUniforms(t,this.binderUniforms,f,{zoom:h});for(var b=(v={},v[m.LINES]=2,v[m.TRIANGLES]=3,v[m.LINE_STRIP]=1,v)[e],x=0,w=u.get();x0?1/(1-t):1+t}function yi(t){return t>0?1-1/(1.001-t):-t}var bi,xi=function(t,e,n,r,i,a,o,s,l,c){var u=i.transform;return{u_is_size_zoom_constant:+("constant"===t||"source"===t),u_is_size_feature_constant:+("constant"===t||"camera"===t),u_size_t:e?e.uSizeT:0,u_size:e?e.uSize:0,u_camera_to_center_distance:u.cameraToCenterDistance,u_pitch:u.pitch/360*2*Math.PI,u_rotate_symbol:+n,u_aspect_ratio:u.width/u.height,u_fade_change:i.options.fadeDuration?i.symbolFadeChange:1,u_matrix:a,u_label_plane_matrix:o,u_coord_matrix:s,u_is_text:+l,u_pitch_with_map:+r,u_texsize:c,u_texture:0}},wi=function(e,n,r,i,a,o,s,l,c,u,f){var h=a.transform;return 
t.extend(xi(e,n,r,i,a,o,s,l,c,u),{u_gamma_scale:i?Math.cos(h._pitch)*h.cameraToCenterDistance:1,u_device_pixel_ratio:t.browser.devicePixelRatio,u_is_halo:+f})},_i=function(e,n,r,i,a,o,s,l,c,u){return t.extend(wi(e,n,r,i,a,o,s,l,!0,c,!0),{u_texsize_icon:u,u_texture_icon:1})},ki=function(t,e,n){return{u_matrix:t,u_opacity:e,u_color:n}},Ti=function(e,n,r,i,a,o){return t.extend(Wr(i,o,r,a),{u_matrix:e,u_opacity:n})},Mi={fillExtrusion:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_lightpos:new t.Uniform3f(e,n.u_lightpos),u_lightintensity:new t.Uniform1f(e,n.u_lightintensity),u_lightcolor:new t.Uniform3f(e,n.u_lightcolor),u_vertical_gradient:new t.Uniform1f(e,n.u_vertical_gradient),u_opacity:new t.Uniform1f(e,n.u_opacity)}},fillExtrusionPattern:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_lightpos:new t.Uniform3f(e,n.u_lightpos),u_lightintensity:new t.Uniform1f(e,n.u_lightintensity),u_lightcolor:new t.Uniform3f(e,n.u_lightcolor),u_vertical_gradient:new t.Uniform1f(e,n.u_vertical_gradient),u_height_factor:new t.Uniform1f(e,n.u_height_factor),u_image:new t.Uniform1i(e,n.u_image),u_texsize:new t.Uniform2f(e,n.u_texsize),u_pixel_coord_upper:new t.Uniform2f(e,n.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,n.u_pixel_coord_lower),u_scale:new t.Uniform3f(e,n.u_scale),u_fade:new t.Uniform1f(e,n.u_fade),u_opacity:new t.Uniform1f(e,n.u_opacity)}},fill:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix)}},fillPattern:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_image:new t.Uniform1i(e,n.u_image),u_texsize:new t.Uniform2f(e,n.u_texsize),u_pixel_coord_upper:new t.Uniform2f(e,n.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,n.u_pixel_coord_lower),u_scale:new t.Uniform3f(e,n.u_scale),u_fade:new t.Uniform1f(e,n.u_fade)}},fillOutline:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_world:new t.Uniform2f(e,n.u_world)}},fillOutlinePattern:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_world:new t.Uniform2f(e,n.u_world),u_image:new t.Uniform1i(e,n.u_image),u_texsize:new t.Uniform2f(e,n.u_texsize),u_pixel_coord_upper:new t.Uniform2f(e,n.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,n.u_pixel_coord_lower),u_scale:new t.Uniform3f(e,n.u_scale),u_fade:new t.Uniform1f(e,n.u_fade)}},circle:function(e,n){return{u_camera_to_center_distance:new t.Uniform1f(e,n.u_camera_to_center_distance),u_scale_with_map:new t.Uniform1i(e,n.u_scale_with_map),u_pitch_with_map:new t.Uniform1i(e,n.u_pitch_with_map),u_extrude_scale:new t.Uniform2f(e,n.u_extrude_scale),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_matrix:new t.UniformMatrix4f(e,n.u_matrix)}},collisionBox:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_camera_to_center_distance:new t.Uniform1f(e,n.u_camera_to_center_distance),u_pixels_to_tile_units:new t.Uniform1f(e,n.u_pixels_to_tile_units),u_extrude_scale:new t.Uniform2f(e,n.u_extrude_scale),u_overscale_factor:new t.Uniform1f(e,n.u_overscale_factor)}},collisionCircle:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_inv_matrix:new t.UniformMatrix4f(e,n.u_inv_matrix),u_camera_to_center_distance:new t.Uniform1f(e,n.u_camera_to_center_distance),u_viewport_size:new t.Uniform2f(e,n.u_viewport_size)}},debug:function(e,n){return{u_color:new t.UniformColor(e,n.u_color),u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_overlay:new t.Uniform1i(e,n.u_overlay),u_overlay_scale:new 
t.Uniform1f(e,n.u_overlay_scale)}},clippingMask:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix)}},heatmap:function(e,n){return{u_extrude_scale:new t.Uniform1f(e,n.u_extrude_scale),u_intensity:new t.Uniform1f(e,n.u_intensity),u_matrix:new t.UniformMatrix4f(e,n.u_matrix)}},heatmapTexture:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_world:new t.Uniform2f(e,n.u_world),u_image:new t.Uniform1i(e,n.u_image),u_color_ramp:new t.Uniform1i(e,n.u_color_ramp),u_opacity:new t.Uniform1f(e,n.u_opacity)}},hillshade:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_image:new t.Uniform1i(e,n.u_image),u_latrange:new t.Uniform2f(e,n.u_latrange),u_light:new t.Uniform2f(e,n.u_light),u_shadow:new t.UniformColor(e,n.u_shadow),u_highlight:new t.UniformColor(e,n.u_highlight),u_accent:new t.UniformColor(e,n.u_accent)}},hillshadePrepare:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_image:new t.Uniform1i(e,n.u_image),u_dimension:new t.Uniform2f(e,n.u_dimension),u_zoom:new t.Uniform1f(e,n.u_zoom),u_maxzoom:new t.Uniform1f(e,n.u_maxzoom),u_unpack:new t.Uniform4f(e,n.u_unpack)}},line:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_ratio:new t.Uniform1f(e,n.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_units_to_pixels:new t.Uniform2f(e,n.u_units_to_pixels)}},lineGradient:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_ratio:new t.Uniform1f(e,n.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_units_to_pixels:new t.Uniform2f(e,n.u_units_to_pixels),u_image:new t.Uniform1i(e,n.u_image)}},linePattern:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_texsize:new t.Uniform2f(e,n.u_texsize),u_ratio:new t.Uniform1f(e,n.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_image:new t.Uniform1i(e,n.u_image),u_units_to_pixels:new t.Uniform2f(e,n.u_units_to_pixels),u_scale:new t.Uniform3f(e,n.u_scale),u_fade:new t.Uniform1f(e,n.u_fade)}},lineSDF:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_ratio:new t.Uniform1f(e,n.u_ratio),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_units_to_pixels:new t.Uniform2f(e,n.u_units_to_pixels),u_patternscale_a:new t.Uniform2f(e,n.u_patternscale_a),u_patternscale_b:new t.Uniform2f(e,n.u_patternscale_b),u_sdfgamma:new t.Uniform1f(e,n.u_sdfgamma),u_image:new t.Uniform1i(e,n.u_image),u_tex_y_a:new t.Uniform1f(e,n.u_tex_y_a),u_tex_y_b:new t.Uniform1f(e,n.u_tex_y_b),u_mix:new t.Uniform1f(e,n.u_mix)}},raster:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_tl_parent:new t.Uniform2f(e,n.u_tl_parent),u_scale_parent:new t.Uniform1f(e,n.u_scale_parent),u_buffer_scale:new t.Uniform1f(e,n.u_buffer_scale),u_fade_t:new t.Uniform1f(e,n.u_fade_t),u_opacity:new t.Uniform1f(e,n.u_opacity),u_image0:new t.Uniform1i(e,n.u_image0),u_image1:new t.Uniform1i(e,n.u_image1),u_brightness_low:new t.Uniform1f(e,n.u_brightness_low),u_brightness_high:new t.Uniform1f(e,n.u_brightness_high),u_saturation_factor:new t.Uniform1f(e,n.u_saturation_factor),u_contrast_factor:new t.Uniform1f(e,n.u_contrast_factor),u_spin_weights:new t.Uniform3f(e,n.u_spin_weights)}},symbolIcon:function(e,n){return{u_is_size_zoom_constant:new t.Uniform1i(e,n.u_is_size_zoom_constant),u_is_size_feature_constant:new t.Uniform1i(e,n.u_is_size_feature_constant),u_size_t:new t.Uniform1f(e,n.u_size_t),u_size:new t.Uniform1f(e,n.u_size),u_camera_to_center_distance:new 
t.Uniform1f(e,n.u_camera_to_center_distance),u_pitch:new t.Uniform1f(e,n.u_pitch),u_rotate_symbol:new t.Uniform1i(e,n.u_rotate_symbol),u_aspect_ratio:new t.Uniform1f(e,n.u_aspect_ratio),u_fade_change:new t.Uniform1f(e,n.u_fade_change),u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_label_plane_matrix:new t.UniformMatrix4f(e,n.u_label_plane_matrix),u_coord_matrix:new t.UniformMatrix4f(e,n.u_coord_matrix),u_is_text:new t.Uniform1i(e,n.u_is_text),u_pitch_with_map:new t.Uniform1i(e,n.u_pitch_with_map),u_texsize:new t.Uniform2f(e,n.u_texsize),u_texture:new t.Uniform1i(e,n.u_texture)}},symbolSDF:function(e,n){return{u_is_size_zoom_constant:new t.Uniform1i(e,n.u_is_size_zoom_constant),u_is_size_feature_constant:new t.Uniform1i(e,n.u_is_size_feature_constant),u_size_t:new t.Uniform1f(e,n.u_size_t),u_size:new t.Uniform1f(e,n.u_size),u_camera_to_center_distance:new t.Uniform1f(e,n.u_camera_to_center_distance),u_pitch:new t.Uniform1f(e,n.u_pitch),u_rotate_symbol:new t.Uniform1i(e,n.u_rotate_symbol),u_aspect_ratio:new t.Uniform1f(e,n.u_aspect_ratio),u_fade_change:new t.Uniform1f(e,n.u_fade_change),u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_label_plane_matrix:new t.UniformMatrix4f(e,n.u_label_plane_matrix),u_coord_matrix:new t.UniformMatrix4f(e,n.u_coord_matrix),u_is_text:new t.Uniform1i(e,n.u_is_text),u_pitch_with_map:new t.Uniform1i(e,n.u_pitch_with_map),u_texsize:new t.Uniform2f(e,n.u_texsize),u_texture:new t.Uniform1i(e,n.u_texture),u_gamma_scale:new t.Uniform1f(e,n.u_gamma_scale),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_is_halo:new t.Uniform1i(e,n.u_is_halo)}},symbolTextAndIcon:function(e,n){return{u_is_size_zoom_constant:new t.Uniform1i(e,n.u_is_size_zoom_constant),u_is_size_feature_constant:new t.Uniform1i(e,n.u_is_size_feature_constant),u_size_t:new t.Uniform1f(e,n.u_size_t),u_size:new t.Uniform1f(e,n.u_size),u_camera_to_center_distance:new t.Uniform1f(e,n.u_camera_to_center_distance),u_pitch:new t.Uniform1f(e,n.u_pitch),u_rotate_symbol:new t.Uniform1i(e,n.u_rotate_symbol),u_aspect_ratio:new t.Uniform1f(e,n.u_aspect_ratio),u_fade_change:new t.Uniform1f(e,n.u_fade_change),u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_label_plane_matrix:new t.UniformMatrix4f(e,n.u_label_plane_matrix),u_coord_matrix:new t.UniformMatrix4f(e,n.u_coord_matrix),u_is_text:new t.Uniform1i(e,n.u_is_text),u_pitch_with_map:new t.Uniform1i(e,n.u_pitch_with_map),u_texsize:new t.Uniform2f(e,n.u_texsize),u_texsize_icon:new t.Uniform2f(e,n.u_texsize_icon),u_texture:new t.Uniform1i(e,n.u_texture),u_texture_icon:new t.Uniform1i(e,n.u_texture_icon),u_gamma_scale:new t.Uniform1f(e,n.u_gamma_scale),u_device_pixel_ratio:new t.Uniform1f(e,n.u_device_pixel_ratio),u_is_halo:new t.Uniform1i(e,n.u_is_halo)}},background:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_opacity:new t.Uniform1f(e,n.u_opacity),u_color:new t.UniformColor(e,n.u_color)}},backgroundPattern:function(e,n){return{u_matrix:new t.UniformMatrix4f(e,n.u_matrix),u_opacity:new t.Uniform1f(e,n.u_opacity),u_image:new t.Uniform1i(e,n.u_image),u_pattern_tl_a:new t.Uniform2f(e,n.u_pattern_tl_a),u_pattern_br_a:new t.Uniform2f(e,n.u_pattern_br_a),u_pattern_tl_b:new t.Uniform2f(e,n.u_pattern_tl_b),u_pattern_br_b:new t.Uniform2f(e,n.u_pattern_br_b),u_texsize:new t.Uniform2f(e,n.u_texsize),u_mix:new t.Uniform1f(e,n.u_mix),u_pattern_size_a:new t.Uniform2f(e,n.u_pattern_size_a),u_pattern_size_b:new t.Uniform2f(e,n.u_pattern_size_b),u_scale_a:new t.Uniform1f(e,n.u_scale_a),u_scale_b:new t.Uniform1f(e,n.u_scale_b),u_pixel_coord_upper:new 
t.Uniform2f(e,n.u_pixel_coord_upper),u_pixel_coord_lower:new t.Uniform2f(e,n.u_pixel_coord_lower),u_tile_units_to_pixels:new t.Uniform1f(e,n.u_tile_units_to_pixels)}}};function Ai(e,n,r,i,a,o,s){for(var l=e.context,c=l.gl,u=e.useProgram("collisionBox"),f=[],h=0,d=0,p=0;p0){var w=t.create(),_=y;t.mul(w,m.placementInvProjMatrix,e.transform.glCoordMatrix),t.mul(w,w,m.placementViewportMatrix),f.push({circleArray:x,circleOffset:d,transform:_,invTransform:w}),d=h+=x.length/4}b&&u.draw(l,c.LINES,jt.disabled,Ut.disabled,e.colorModeForRenderPass(),$t.disabled,ti(y,e.transform,v),r.id,b.layoutVertexBuffer,b.indexBuffer,b.segments,null,e.transform.zoom,null,null,b.collisionVertexBuffer)}}if(s&&f.length){var k=e.useProgram("collisionCircle"),T=new t.StructArrayLayout2f1f2i16;T.resize(4*h),T._trim();for(var M=0,A=0,S=f;A=0&&(g[m.associatedIconIndex]={shiftedAnchor:S,angle:E})}else Re(m.numGlyphs,d)}if(f){p.clear();for(var P=e.icon.placedSymbolArray,O=0;O0){var s=t.browser.now(),l=(s-e.timeAdded)/o,c=n?(s-n.timeAdded)/o:-1,u=r.getSource(),f=a.coveringZoomLevel({tileSize:u.tileSize,roundZoom:u.roundZoom}),h=!n||Math.abs(n.tileID.overscaledZ-f)>Math.abs(e.tileID.overscaledZ-f),d=h&&e.refreshedUponExpiration?1:t.clamp(h?l:1-c,0,1);return e.refreshedUponExpiration&&l>=1&&(e.refreshedUponExpiration=!1),n?{opacity:1,mix:1-d}:{opacity:d,mix:0}}return{opacity:1,mix:0}}function Zi(t,e,n){var r=n.paint.get("background-color"),i=n.paint.get("background-opacity");if(0!==i){var a=t.context,o=a.gl,s=t.transform,l=s.tileSize,c=n.paint.get("background-pattern");if(!t.isPatternMissing(c)){var u=!c&&1===r.a&&1===i&&t.opaquePassEnabledForLayer()?"opaque":"translucent";if(t.renderPass===u){var f=Ut.disabled,h=t.depthModeForSublayer(0,"opaque"===u?jt.ReadWrite:jt.ReadOnly),d=t.colorModeForRenderPass(),p=t.useProgram(c?"backgroundPattern":"background"),g=s.coveringTiles({tileSize:l});c&&(a.activeTexture.set(o.TEXTURE0),t.imageManager.bind(t.context));for(var v=n.getCrossfadeParameters(),m=0,y=g;m "+r.overscaledZ),ua(e,m+" "+p+"kb"),s.draw(i,a.TRIANGLES,l,c,Gt.alphaBlended,$t.disabled,ni(o,t.Color.transparent,v),f,e.debugBuffer,e.quadTriangleIndexBuffer,e.debugSegments)}function ua(t,e){t.initDebugOverlayCanvas();var n=t.debugOverlayCanvas,r=t.context.gl,i=t.debugOverlayCanvas.getContext("2d");i.clearRect(0,0,n.width,n.height),i.shadowColor="white",i.shadowBlur=2,i.lineWidth=1.5,i.strokeStyle="white",i.textBaseline="top",i.font="bold 36px Open Sans, sans-serif",i.fillText(e,5,5),i.strokeText(e,5,5),t.debugOverlayTexture.update(n),t.debugOverlayTexture.bind(r.LINEAR,r.CLAMP_TO_EDGE)}function fa(t,e,n){var r=t.context,i=n.implementation;if("offscreen"===t.renderPass){var a=i.prerender;a&&(t.setCustomLayerDefaults(),r.setColorMode(t.colorModeForRenderPass()),a.call(i,r.gl,t.transform.customLayerMatrix()),r.setDirty(),t.setBaseState())}else if("translucent"===t.renderPass){t.setCustomLayerDefaults(),r.setColorMode(t.colorModeForRenderPass()),r.setStencilMode(Ut.disabled);var o="3d"===i.renderingMode?new jt(t.context.gl.LEQUAL,jt.ReadWrite,t.depthRangeFor3D):t.depthModeForSublayer(0,jt.ReadOnly);r.setDepthMode(o),i.render(r.gl,t.transform.customLayerMatrix()),r.setDirty(),t.setBaseState(),r.bindFramebuffer.set(null)}}var ha={symbol:Ci,circle:zi,heatmap:Ni,line:Ui,fill:Hi,"fill-extrusion":qi,hillshade:Wi,raster:Xi,background:Zi,debug:la,custom:fa},da=function(t,e){this.context=new 
Xt(t),this.transform=e,this._tileTextures={},this.setup(),this.numSublayers=Kt.maxUnderzooming+Kt.maxOverzooming+1,this.depthEpsilon=1/Math.pow(2,16),this.crossTileSymbolIndex=new hn,this.gpuTimers={}};da.prototype.resize=function(e,n){if(this.width=e*t.browser.devicePixelRatio,this.height=n*t.browser.devicePixelRatio,this.context.viewport.set([0,0,this.width,this.height]),this.style)for(var r=0,i=this.style._order;r256&&this.clearStencil(),n.setColorMode(Gt.disabled),n.setDepthMode(jt.disabled);var i=this.useProgram("clippingMask");this._tileClippingMaskIDs={};for(var a=0,o=e;a256&&this.clearStencil();var t=this.nextStencilID++,e=this.context.gl;return new Ut({func:e.NOTEQUAL,mask:255},t,255,e.KEEP,e.KEEP,e.REPLACE)},da.prototype.stencilModeForClipping=function(t){var e=this.context.gl;return new Ut({func:e.EQUAL,mask:255},this._tileClippingMaskIDs[t.key],0,e.KEEP,e.KEEP,e.REPLACE)},da.prototype.stencilConfigForOverlap=function(t){var e,n=this.context.gl,r=t.sort((function(t,e){return e.overscaledZ-t.overscaledZ})),i=r[r.length-1].overscaledZ,a=r[0].overscaledZ-i+1;if(a>1){this.currentStencilSource=void 0,this.nextStencilID+a>256&&this.clearStencil();for(var o={},s=0;s=0;this.currentLayer--){var _=this.style._layers[i[this.currentLayer]],k=a[_.source],T=u[_.source];this._renderTileClippingMasks(_,T),this.renderLayer(this,k,_,T)}for(this.renderPass="translucent",this.currentLayer=0;this.currentLayer0?e.pop():null},da.prototype.isPatternMissing=function(t){if(!t)return!1;if(!t.from||!t.to)return!0;var e=this.imageManager.getPattern(t.from.toString()),n=this.imageManager.getPattern(t.to.toString());return!e||!n},da.prototype.useProgram=function(t,e){this.cache=this.cache||{};var n=""+t+(e?e.cacheKey:"")+(this._showOverdrawInspector?"/overdraw":"");return this.cache[n]||(this.cache[n]=new qr(this.context,Hr[t],e,Mi[t],this._showOverdrawInspector)),this.cache[n]},da.prototype.setCustomLayerDefaults=function(){this.context.unbindVAO(),this.context.cullFace.setDefault(),this.context.activeTexture.setDefault(),this.context.pixelStoreUnpack.setDefault(),this.context.pixelStoreUnpackPremultiplyAlpha.setDefault(),this.context.pixelStoreUnpackFlipY.setDefault()},da.prototype.setBaseState=function(){var t=this.context.gl;this.context.cullFace.set(!1),this.context.viewport.set([0,0,this.width,this.height]),this.context.blendEquation.set(t.FUNC_ADD)},da.prototype.initDebugOverlayCanvas=function(){if(null==this.debugOverlayCanvas){this.debugOverlayCanvas=t.window.document.createElement("canvas"),this.debugOverlayCanvas.width=512,this.debugOverlayCanvas.height=512;var e=this.context.gl;this.debugOverlayTexture=new t.Texture(this.context,this.debugOverlayCanvas,e.RGBA)}},da.prototype.destroy=function(){this.emptyTexture.destroy(),this.debugOverlayTexture&&this.debugOverlayTexture.destroy()};var pa=function(t,e){this.points=t,this.planes=e};pa.fromInvProjectionMatrix=function(e,n,r){var i=[[-1,1,-1,1],[1,1,-1,1],[1,-1,-1,1],[-1,-1,-1,1],[-1,1,1,1],[1,1,1,1],[1,-1,1,1],[-1,-1,1,1]],a=Math.pow(2,r),o=i.map((function(n){return t.transformMat4([],n,e)})).map((function(e){return t.scale$1([],e,1/e[3]/n*a)})),s=[[0,1,2],[6,5,4],[0,3,7],[2,1,5],[3,2,6],[0,4,5]].map((function(e){var n=t.sub([],o[e[0]],o[e[1]]),r=t.sub([],o[e[2]],o[e[1]]),i=t.normalize([],t.cross([],n,r)),a=-t.dot(i,o[e[1]]);return i.concat(a)}));return new pa(o,s)};var ga=function(e,n){this.min=e,this.max=n,this.center=t.scale$2([],t.add([],this.min,this.max),.5)};ga.prototype.quadrant=function(e){for(var 
n=[e%2===0,e<2],r=t.clone$2(this.min),i=t.clone$2(this.max),a=0;a=0;if(0===o)return 0;o!==n.length&&(r=!1)}if(r)return 2;for(var l=0;l<3;l++){for(var c=Number.MAX_VALUE,u=-Number.MAX_VALUE,f=0;fthis.max[l]-this.min[l])return 0}return 1};var va=function(t,e,n,r){if(void 0===t&&(t=0),void 0===e&&(e=0),void 0===n&&(n=0),void 0===r&&(r=0),isNaN(t)||t<0||isNaN(e)||e<0||isNaN(n)||n<0||isNaN(r)||r<0)throw new Error("Invalid value for edge-insets, top, bottom, left and right must all be numbers");this.top=t,this.bottom=e,this.left=n,this.right=r};va.prototype.interpolate=function(e,n,r){return null!=n.top&&null!=e.top&&(this.top=t.number(e.top,n.top,r)),null!=n.bottom&&null!=e.bottom&&(this.bottom=t.number(e.bottom,n.bottom,r)),null!=n.left&&null!=e.left&&(this.left=t.number(e.left,n.left,r)),null!=n.right&&null!=e.right&&(this.right=t.number(e.right,n.right,r)),this},va.prototype.getCenter=function(e,n){var r=t.clamp((this.left+e-this.right)/2,0,e),i=t.clamp((this.top+n-this.bottom)/2,0,n);return new t.Point(r,i)},va.prototype.equals=function(t){return this.top===t.top&&this.bottom===t.bottom&&this.left===t.left&&this.right===t.right},va.prototype.clone=function(){return new va(this.top,this.bottom,this.left,this.right)},va.prototype.toJSON=function(){return{top:this.top,bottom:this.bottom,left:this.left,right:this.right}};var ma=function(e,n,r,i,a){this.tileSize=512,this.maxValidLatitude=85.051129,this._renderWorldCopies=void 0===a||a,this._minZoom=e||0,this._maxZoom=n||22,this._minPitch=void 0===r||null===r?0:r,this._maxPitch=void 0===i||null===i?60:i,this.setMaxBounds(),this.width=0,this.height=0,this._center=new t.LngLat(0,0),this.zoom=0,this.angle=0,this._fov=.6435011087932844,this._pitch=0,this._unmodified=!0,this._edgeInsets=new va,this._posMatrixCache={},this._alignedPosMatrixCache={}},ya={minZoom:{configurable:!0},maxZoom:{configurable:!0},minPitch:{configurable:!0},maxPitch:{configurable:!0},renderWorldCopies:{configurable:!0},worldSize:{configurable:!0},centerOffset:{configurable:!0},size:{configurable:!0},bearing:{configurable:!0},pitch:{configurable:!0},fov:{configurable:!0},zoom:{configurable:!0},center:{configurable:!0},padding:{configurable:!0},centerPoint:{configurable:!0},unmodified:{configurable:!0},point:{configurable:!0}};function ba(t,e){var n=!1,r=null,i=function i(){r=null,n&&(t(),r=setTimeout(i,e),n=!1)};return function(){return n=!0,r||i(),r}}ma.prototype.clone=function(){var t=new ma(this._minZoom,this._maxZoom,this._minPitch,this.maxPitch,this._renderWorldCopies);return t.tileSize=this.tileSize,t.latRange=this.latRange,t.width=this.width,t.height=this.height,t._center=this._center,t.zoom=this.zoom,t.angle=this.angle,t._fov=this._fov,t._pitch=this._pitch,t._unmodified=this._unmodified,t._edgeInsets=this._edgeInsets.clone(),t._calcMatrices(),t},ya.minZoom.get=function(){return this._minZoom},ya.minZoom.set=function(t){this._minZoom!==t&&(this._minZoom=t,this.zoom=Math.max(this.zoom,t))},ya.maxZoom.get=function(){return this._maxZoom},ya.maxZoom.set=function(t){this._maxZoom!==t&&(this._maxZoom=t,this.zoom=Math.min(this.zoom,t))},ya.minPitch.get=function(){return this._minPitch},ya.minPitch.set=function(t){this._minPitch!==t&&(this._minPitch=t,this.pitch=Math.max(this.pitch,t))},ya.maxPitch.get=function(){return this._maxPitch},ya.maxPitch.set=function(t){this._maxPitch!==t&&(this._maxPitch=t,this.pitch=Math.min(this.pitch,t))},ya.renderWorldCopies.get=function(){return this._renderWorldCopies},ya.renderWorldCopies.set=function(t){void 
0===t?t=!0:null===t&&(t=!1),this._renderWorldCopies=t},ya.worldSize.get=function(){return this.tileSize*this.scale},ya.centerOffset.get=function(){return this.centerPoint._sub(this.size._div(2))},ya.size.get=function(){return new t.Point(this.width,this.height)},ya.bearing.get=function(){return-this.angle/Math.PI*180},ya.bearing.set=function(e){var n=-t.wrap(e,-180,180)*Math.PI/180;this.angle!==n&&(this._unmodified=!1,this.angle=n,this._calcMatrices(),this.rotationMatrix=t.create$2(),t.rotate(this.rotationMatrix,this.rotationMatrix,this.angle))},ya.pitch.get=function(){return this._pitch/Math.PI*180},ya.pitch.set=function(e){var n=t.clamp(e,this.minPitch,this.maxPitch)/180*Math.PI;this._pitch!==n&&(this._unmodified=!1,this._pitch=n,this._calcMatrices())},ya.fov.get=function(){return this._fov/Math.PI*180},ya.fov.set=function(t){t=Math.max(.01,Math.min(60,t)),this._fov!==t&&(this._unmodified=!1,this._fov=t/180*Math.PI,this._calcMatrices())},ya.zoom.get=function(){return this._zoom},ya.zoom.set=function(t){var e=Math.min(Math.max(t,this.minZoom),this.maxZoom);this._zoom!==e&&(this._unmodified=!1,this._zoom=e,this.scale=this.zoomScale(e),this.tileZoom=Math.floor(e),this.zoomFraction=e-this.tileZoom,this._constrain(),this._calcMatrices())},ya.center.get=function(){return this._center},ya.center.set=function(t){t.lat===this._center.lat&&t.lng===this._center.lng||(this._unmodified=!1,this._center=t,this._constrain(),this._calcMatrices())},ya.padding.get=function(){return this._edgeInsets.toJSON()},ya.padding.set=function(t){this._edgeInsets.equals(t)||(this._unmodified=!1,this._edgeInsets.interpolate(this._edgeInsets,t,1),this._calcMatrices())},ya.centerPoint.get=function(){return this._edgeInsets.getCenter(this.width,this.height)},ma.prototype.isPaddingEqual=function(t){return this._edgeInsets.equals(t)},ma.prototype.interpolatePadding=function(t,e,n){this._unmodified=!1,this._edgeInsets.interpolate(t,e,n),this._constrain(),this._calcMatrices()},ma.prototype.coveringZoomLevel=function(t){var e=(t.roundZoom?Math.round:Math.floor)(this.zoom+this.scaleZoom(this.tileSize/t.tileSize));return Math.max(0,e)},ma.prototype.getVisibleUnwrappedCoordinates=function(e){var n=[new t.UnwrappedTileID(0,e)];if(this._renderWorldCopies)for(var r=this.pointCoordinate(new t.Point(0,0)),i=this.pointCoordinate(new t.Point(this.width,0)),a=this.pointCoordinate(new t.Point(this.width,this.height)),o=this.pointCoordinate(new t.Point(0,this.height)),s=Math.floor(Math.min(r.x,i.x,a.x,o.x)),l=Math.floor(Math.max(r.x,i.x,a.x,o.x)),c=1,u=s-c;u<=l+c;u++)0!==u&&n.push(new t.UnwrappedTileID(u,e));return n},ma.prototype.coveringTiles=function(e){var n=this.coveringZoomLevel(e),r=n;if(void 0!==e.minzoom&&ne.maxzoom&&(n=e.maxzoom);var i=t.MercatorCoordinate.fromLngLat(this.center),a=Math.pow(2,n),o=[a*i.x,a*i.y,0],s=pa.fromInvProjectionMatrix(this.invProjMatrix,this.worldSize,n),l=e.minzoom||0;this.pitch<=60&&this._edgeInsets.top<.1&&(l=n);var c=3,u=function(t){return{aabb:new ga([t*a,0,0],[(t+1)*a,a,0]),zoom:0,x:0,y:0,wrap:t,fullyVisible:!1}},f=[],h=[],d=n,p=e.reparseOverscaled?r:n;if(this._renderWorldCopies)for(var g=1;g<=3;g++)f.push(u(-g)),f.push(u(g));for(f.push(u(0));f.length>0;){var v=f.pop(),m=v.x,y=v.y,b=v.fullyVisible;if(!b){var x=v.aabb.intersects(s);if(0===x)continue;b=2===x}var w=v.aabb.distanceX(o),_=v.aabb.distanceY(o),k=Math.max(Math.abs(w),Math.abs(_)),T=c+(1<T&&v.zoom>=l)h.push({tileID:new t.OverscaledTileID(v.zoom===d?p:v.zoom,v.wrap,v.zoom,m,y),distanceSq:t.sqrLen([o[0]-.5-m,o[1]-.5-y])});else for(var 
M=0;M<4;M++){var A=(m<<1)+M%2,S=(y<<1)+(M>>1);f.push({aabb:v.aabb.quadrant(M),zoom:v.zoom+1,x:A,y:S,wrap:v.wrap,fullyVisible:b})}}return h.sort((function(t,e){return t.distanceSq-e.distanceSq})).map((function(t){return t.tileID}))},ma.prototype.resize=function(t,e){this.width=t,this.height=e,this.pixelsToGLUnits=[2/t,-2/e],this._constrain(),this._calcMatrices()},ya.unmodified.get=function(){return this._unmodified},ma.prototype.zoomScale=function(t){return Math.pow(2,t)},ma.prototype.scaleZoom=function(t){return Math.log(t)/Math.LN2},ma.prototype.project=function(e){var n=t.clamp(e.lat,-this.maxValidLatitude,this.maxValidLatitude);return new t.Point(t.mercatorXfromLng(e.lng)*this.worldSize,t.mercatorYfromLat(n)*this.worldSize)},ma.prototype.unproject=function(e){return new t.MercatorCoordinate(e.x/this.worldSize,e.y/this.worldSize).toLngLat()},ya.point.get=function(){return this.project(this.center)},ma.prototype.setLocationAtPoint=function(e,n){var r=this.pointCoordinate(n),i=this.pointCoordinate(this.centerPoint),a=this.locationCoordinate(e),o=new t.MercatorCoordinate(a.x-(r.x-i.x),a.y-(r.y-i.y));this.center=this.coordinateLocation(o),this._renderWorldCopies&&(this.center=this.center.wrap())},ma.prototype.locationPoint=function(t){return this.coordinatePoint(this.locationCoordinate(t))},ma.prototype.pointLocation=function(t){return this.coordinateLocation(this.pointCoordinate(t))},ma.prototype.locationCoordinate=function(e){return t.MercatorCoordinate.fromLngLat(e)},ma.prototype.coordinateLocation=function(t){return t.toLngLat()},ma.prototype.pointCoordinate=function(e){var n=0,r=[e.x,e.y,0,1],i=[e.x,e.y,1,1];t.transformMat4(r,r,this.pixelMatrixInverse),t.transformMat4(i,i,this.pixelMatrixInverse);var a=r[3],o=i[3],s=r[0]/a,l=i[0]/o,c=r[1]/a,u=i[1]/o,f=r[2]/a,h=i[2]/o,d=f===h?0:(n-f)/(h-f);return new t.MercatorCoordinate(t.number(s,l,d)/this.worldSize,t.number(c,u,d)/this.worldSize)},ma.prototype.coordinatePoint=function(e){var n=[e.x*this.worldSize,e.y*this.worldSize,0,1];return t.transformMat4(n,n,this.pixelMatrix),new t.Point(n[0]/n[3],n[1]/n[3])},ma.prototype.getBounds=function(){return(new t.LngLatBounds).extend(this.pointLocation(new t.Point(0,0))).extend(this.pointLocation(new t.Point(this.width,0))).extend(this.pointLocation(new t.Point(this.width,this.height))).extend(this.pointLocation(new t.Point(0,this.height)))},ma.prototype.getMaxBounds=function(){return this.latRange&&2===this.latRange.length&&this.lngRange&&2===this.lngRange.length?new t.LngLatBounds([this.lngRange[0],this.latRange[0]],[this.lngRange[1],this.latRange[1]]):null},ma.prototype.setMaxBounds=function(t){t?(this.lngRange=[t.getWest(),t.getEast()],this.latRange=[t.getSouth(),t.getNorth()],this._constrain()):(this.lngRange=null,this.latRange=[-this.maxValidLatitude,this.maxValidLatitude])},ma.prototype.calculatePosMatrix=function(e,n){void 0===n&&(n=!1);var r=e.key,i=n?this._alignedPosMatrixCache:this._posMatrixCache;if(i[r])return i[r];var a=e.canonical,o=this.worldSize/this.zoomScale(a.z),s=a.x+Math.pow(2,a.z)*e.wrap,l=t.identity(new Float64Array(16));return t.translate(l,l,[s*o,a.y*o,0]),t.scale(l,l,[o/t.EXTENT,o/t.EXTENT,1]),t.multiply(l,n?this.alignedProjMatrix:this.projMatrix,l),i[r]=new Float32Array(l),i[r]},ma.prototype.customLayerMatrix=function(){return this.mercatorMatrix.slice()},ma.prototype._constrain=function(){if(this.center&&this.width&&this.height&&!this._constraining){this._constraining=!0;var e,n,r,i,a=-90,o=90,s=-180,l=180,c=this.size,u=this._unmodified;if(this.latRange){var 
f=this.latRange;a=t.mercatorYfromLat(f[1])*this.worldSize,e=(o=t.mercatorYfromLat(f[0])*this.worldSize)-ao&&(i=o-v)}if(this.lngRange){var m=d.x,y=c.x/2;m-yl&&(r=l-y)}void 0===r&&void 0===i||(this.center=this.unproject(new t.Point(void 0!==r?r:d.x,void 0!==i?i:d.y))),this._unmodified=u,this._constraining=!1}},ma.prototype._calcMatrices=function(){if(this.height){var e=this._fov/2,n=this.centerOffset;this.cameraToCenterDistance=.5/Math.tan(e)*this.height;var r=Math.PI/2+this._pitch,i=this._fov*(.5+n.y/this.height),a=Math.sin(i)*this.cameraToCenterDistance/Math.sin(t.clamp(Math.PI-r-i,.01,Math.PI-.01)),o=this.point,s=o.x,l=o.y,c=1.01*(Math.cos(Math.PI/2-this._pitch)*a+this.cameraToCenterDistance),u=this.height/50,f=new Float64Array(16);t.perspective(f,this._fov,this.width/this.height,u,c),f[8]=2*-n.x/this.width,f[9]=2*n.y/this.height,t.scale(f,f,[1,-1,1]),t.translate(f,f,[0,0,-this.cameraToCenterDistance]),t.rotateX(f,f,this._pitch),t.rotateZ(f,f,this.angle),t.translate(f,f,[-s,-l,0]),this.mercatorMatrix=t.scale([],f,[this.worldSize,this.worldSize,this.worldSize]),t.scale(f,f,[1,1,t.mercatorZfromAltitude(1,this.center.lat)*this.worldSize,1]),this.projMatrix=f,this.invProjMatrix=t.invert([],this.projMatrix);var h=this.width%2/2,d=this.height%2/2,p=Math.cos(this.angle),g=Math.sin(this.angle),v=s-Math.round(s)+p*h+g*d,m=l-Math.round(l)+p*d+g*h,y=new Float64Array(f);if(t.translate(y,y,[v>.5?v-1:v,m>.5?m-1:m,0]),this.alignedProjMatrix=y,f=t.create(),t.scale(f,f,[this.width/2,-this.height/2,1]),t.translate(f,f,[1,-1,0]),this.labelPlaneMatrix=f,f=t.create(),t.scale(f,f,[1,-1,1]),t.translate(f,f,[-1,-1,0]),t.scale(f,f,[2/this.width,2/this.height,1]),this.glCoordMatrix=f,this.pixelMatrix=t.multiply(new Float64Array(16),this.labelPlaneMatrix,this.projMatrix),!(f=t.invert(new Float64Array(16),this.pixelMatrix)))throw new Error("failed to invert matrix");this.pixelMatrixInverse=f,this._posMatrixCache={},this._alignedPosMatrixCache={}}},ma.prototype.maxPitchScaleFactor=function(){if(!this.pixelMatrixInverse)return 1;var e=this.pointCoordinate(new t.Point(0,0)),n=[e.x*this.worldSize,e.y*this.worldSize,0,1];return t.transformMat4(n,n,this.pixelMatrix)[3]/this.cameraToCenterDistance},ma.prototype.getCameraPoint=function(){var e=this._pitch,n=Math.tan(e)*(this.cameraToCenterDistance||1);return this.centerPoint.add(new t.Point(0,n))},ma.prototype.getCameraQueryGeometry=function(e){var n=this.getCameraPoint();if(1===e.length)return[e[0],n];for(var r=n.x,i=n.y,a=n.x,o=n.y,s=0,l=e;s=3&&!t.some((function(t){return isNaN(t)}))){var e=this._map.dragRotate.isEnabled()&&this._map.touchZoomRotate.isEnabled()?+(t[3]||0):this._map.getBearing();return this._map.jumpTo({center:[+t[2],+t[1]],zoom:+t[0],bearing:e,pitch:+(t[4]||0)}),!0}return!1},xa.prototype._updateHashUnthrottled=function(){var e=this.getHashString();try{t.window.history.replaceState(t.window.history.state,"",e)}catch(n){}};var wa={linearity:.3,easing:t.bezier(0,0,.3,1)},_a=t.extend({deceleration:2500,maxSpeed:1400},wa),ka=t.extend({deceleration:20,maxSpeed:1400},wa),Ta=t.extend({deceleration:1e3,maxSpeed:360},wa),Ma=t.extend({deceleration:1e3,maxSpeed:90},wa),Aa=function(t){this._map=t,this.clear()};function Sa(t,e){(!t.duration||t.duration0&&n-e[0].time>r;)e.shift()},Aa.prototype._onMoveEnd=function(e){if(this._drainInertiaBuffer(),!(this._inertiaBuffer.length<2)){for(var n={zoom:0,bearing:0,pitch:0,pan:new t.Point(0,0),pinchAround:void 0,around:void 0},r=0,i=this._inertiaBuffer;r=this._clickTolerance||this._map.fire(new 
Ca(t.type,this._map,t))},La.prototype.dblclick=function(t){return this._firePreventable(new Ca(t.type,this._map,t))},La.prototype.mouseover=function(t){this._map.fire(new Ca(t.type,this._map,t))},La.prototype.mouseout=function(t){this._map.fire(new Ca(t.type,this._map,t))},La.prototype.touchstart=function(t){return this._firePreventable(new Pa(t.type,this._map,t))},La.prototype.touchmove=function(t){this._map.fire(new Pa(t.type,this._map,t))},La.prototype.touchend=function(t){this._map.fire(new Pa(t.type,this._map,t))},La.prototype.touchcancel=function(t){this._map.fire(new Pa(t.type,this._map,t))},La.prototype._firePreventable=function(t){if(this._map.fire(t),t.defaultPrevented)return{}},La.prototype.isEnabled=function(){return!0},La.prototype.isActive=function(){return!1},La.prototype.enable=function(){},La.prototype.disable=function(){};var Ia=function(t){this._map=t};Ia.prototype.reset=function(){this._delayContextMenu=!1,delete this._contextMenuEvent},Ia.prototype.mousemove=function(t){this._map.fire(new Ca(t.type,this._map,t))},Ia.prototype.mousedown=function(){this._delayContextMenu=!0},Ia.prototype.mouseup=function(){this._delayContextMenu=!1,this._contextMenuEvent&&(this._map.fire(new Ca("contextmenu",this._map,this._contextMenuEvent)),delete this._contextMenuEvent)},Ia.prototype.contextmenu=function(t){this._delayContextMenu?this._contextMenuEvent=t:this._map.fire(new Ca(t.type,this._map,t)),this._map.listens("contextmenu")&&t.preventDefault()},Ia.prototype.isEnabled=function(){return!0},Ia.prototype.isActive=function(){return!1},Ia.prototype.enable=function(){},Ia.prototype.disable=function(){};var Da=function(t,e){this._map=t,this._el=t.getCanvasContainer(),this._container=t.getContainer(),this._clickTolerance=e.clickTolerance||1};function Ra(t,e){for(var n={},r=0;rthis.numTouches)&&(this.aborted=!0),this.aborted||(void 0===this.startTime&&(this.startTime=t.timeStamp),n.length===this.numTouches&&(this.centroid=za(e),this.touches=Ra(n,e)))},Ba.prototype.touchmove=function(t,e,n){if(!this.aborted&&this.centroid){var r=Ra(n,e);for(var i in this.touches){var a=this.touches[i],o=r[i];(!o||o.dist(a)>Fa)&&(this.aborted=!0)}}},Ba.prototype.touchend=function(t,e,n){if((!this.centroid||t.timeStamp-this.startTime>ja)&&(this.aborted=!0),0===n.length){var r=!this.aborted&&this.centroid;if(this.reset(),r)return r}};var Ua=function(t){this.singleTap=new Ba(t),this.numTaps=t.numTaps,this.reset()};Ua.prototype.reset=function(){this.lastTime=1/0,delete this.lastTap,this.count=0,this.singleTap.reset()},Ua.prototype.touchstart=function(t,e,n){this.singleTap.touchstart(t,e,n)},Ua.prototype.touchmove=function(t,e,n){this.singleTap.touchmove(t,e,n)},Ua.prototype.touchend=function(t,e,n){var r=this.singleTap.touchend(t,e,n);if(r){var i=t.timeStamp-this.lastTime0&&(this._active=!0);var i=Ra(r,n),a=new t.Point(0,0),o=new t.Point(0,0),s=0;for(var l in i){var c=i[l],u=this._touches[l];u&&(a._add(c),o._add(c.sub(u)),s++,i[l]=c)}if(this._touches=i,!(sMath.abs(t.x)}var ao=100,oo=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.reset=function(){t.prototype.reset.call(this),this._valid=void 0,delete this._firstMove,delete this._lastPoints},e.prototype._start=function(t){this._lastPoints=t,io(t[0].sub(t[1]))&&(this._valid=!1)},e.prototype._move=function(t,e,n){var r=t[0].sub(this._lastPoints[0]),i=t[1].sub(this._lastPoints[1]);if(this._valid=this.gestureBeginsVertically(r,i,n.timeStamp),this._valid)return 
this._lastPoints=t,this._active=!0,{pitchDelta:(r.y+i.y)/2*-.5}},e.prototype.gestureBeginsVertically=function(t,e,n){if(void 0!==this._valid)return this._valid;var r=2,i=t.mag()>=r,a=e.mag()>=r;if(i||a){if(!i||!a)return void 0===this._firstMove&&(this._firstMove=n),n-this._firstMove0===e.y>0;return io(t)&&io(e)&&o}},e}(Ka),so={panStep:100,bearingStep:15,pitchStep:10},lo=function(){var t=so;this._panStep=t.panStep,this._bearingStep=t.bearingStep,this._pitchStep=t.pitchStep};function co(t){return t*(2-t)}lo.prototype.reset=function(){this._active=!1},lo.prototype.keydown=function(t){var e=this;if(!(t.altKey||t.ctrlKey||t.metaKey)){var n=0,r=0,i=0,a=0,o=0;switch(t.keyCode){case 61:case 107:case 171:case 187:n=1;break;case 189:case 109:case 173:n=-1;break;case 37:t.shiftKey?r=-1:(t.preventDefault(),a=-1);break;case 39:t.shiftKey?r=1:(t.preventDefault(),a=1);break;case 38:t.shiftKey?i=1:(t.preventDefault(),o=-1);break;case 40:t.shiftKey?i=-1:(t.preventDefault(),o=1);break;default:return}return{cameraAnimation:function(s){var l=s.getZoom();s.easeTo({duration:300,easeId:"keyboardHandler",easing:co,zoom:n?Math.round(l)+n*(t.shiftKey?2:1):l,bearing:s.getBearing()+r*e._bearingStep,pitch:s.getPitch()+i*e._pitchStep,offset:[-a*e._panStep,-o*e._panStep],center:s.getCenter()},{originalEvent:t})}}}},lo.prototype.enable=function(){this._enabled=!0},lo.prototype.disable=function(){this._enabled=!1,this.reset()},lo.prototype.isEnabled=function(){return this._enabled},lo.prototype.isActive=function(){return this._active};var uo=4.000244140625,fo=.01,ho=1/450,po=2,go=function(e,n){this._map=e,this._el=e.getCanvasContainer(),this._handler=n,this._delta=0,this._defaultZoomRate=fo,this._wheelZoomRate=ho,t.bindAll(["_onWheel","_onTimeout","_onScrollFrame","_onScrollFinished"],this)};go.prototype.setZoomRate=function(t){this._defaultZoomRate=t},go.prototype.setWheelZoomRate=function(t){this._wheelZoomRate=t},go.prototype.isEnabled=function(){return!!this._enabled},go.prototype.isActive=function(){return!!this._active||void 0!==this._finishTimeout},go.prototype.isZooming=function(){return!!this._zooming},go.prototype.enable=function(t){this.isEnabled()||(this._enabled=!0,this._aroundCenter=t&&"center"===t.around)},go.prototype.disable=function(){this.isEnabled()&&(this._enabled=!1)},go.prototype.wheel=function(e){if(this.isEnabled()){var n=e.deltaMode===t.window.WheelEvent.DOM_DELTA_LINE?40*e.deltaY:e.deltaY,r=t.browser.now(),i=r-(this._lastWheelEventTime||0);this._lastWheelEventTime=r,0!==n&&n%uo===0?this._type="wheel":0!==n&&Math.abs(n)<4?this._type="trackpad":i>400?(this._type=null,this._lastValue=n,this._timeout=setTimeout(this._onTimeout,40,e)):this._type||(this._type=Math.abs(i*n)<200?"trackpad":"wheel",this._timeout&&(clearTimeout(this._timeout),this._timeout=null,n+=this._lastValue)),e.shiftKey&&n&&(n/=4),this._type&&(this._lastWheelEvent=e,this._delta-=n,this._active||this._start(e)),e.preventDefault()}},go.prototype._onTimeout=function(t){this._type="wheel",this._delta-=this._lastValue,this._active||this._start(t)},go.prototype._start=function(e){if(this._delta){this._frameId&&(this._frameId=null),this._active=!0,this.isZooming()||(this._zooming=!0),this._finishTimeout&&(clearTimeout(this._finishTimeout),delete this._finishTimeout);var 
r=n.mousePos(this._el,e);this._around=t.LngLat.convert(this._aroundCenter?this._map.getCenter():this._map.unproject(r)),this._aroundPoint=this._map.transform.locationPoint(this._around),this._frameId||(this._frameId=!0,this._handler._triggerRenderFrame())}},go.prototype.renderFrame=function(){return this._onScrollFrame()},go.prototype._onScrollFrame=function(){var e=this;if(this._frameId&&(this._frameId=null,this.isActive())){var n=this._map.transform;if(0!==this._delta){var r="wheel"===this._type&&Math.abs(this._delta)>uo?this._wheelZoomRate:this._defaultZoomRate,i=po/(1+Math.exp(-Math.abs(this._delta*r)));this._delta<0&&0!==i&&(i=1/i);var a="number"===typeof this._targetZoom?n.zoomScale(this._targetZoom):n.scale;this._targetZoom=Math.min(n.maxZoom,Math.max(n.minZoom,n.scaleZoom(a*i))),"wheel"===this._type&&(this._startZoom=n.zoom,this._easing=this._smoothOutEasing(200)),this._delta=0}var o,s="number"===typeof this._targetZoom?this._targetZoom:n.zoom,l=this._startZoom,c=this._easing,u=!1;if("wheel"===this._type&&l&&c){var f=Math.min((t.browser.now()-this._lastWheelEventTime)/200,1),h=c(f);o=t.number(l,s,h),f<1?this._frameId||(this._frameId=!0):u=!0}else o=s,u=!0;return this._active=!0,u&&(this._active=!1,this._finishTimeout=setTimeout((function(){e._zooming=!1,e._handler._triggerRenderFrame(),delete e._targetZoom,delete e._finishTimeout}),200)),{noInertia:!0,needsRenderFrame:!u,zoomDelta:o-n.zoom,around:this._aroundPoint,originalEvent:this._lastWheelEvent}}},go.prototype._smoothOutEasing=function(e){var n=t.ease;if(this._prevEase){var r=this._prevEase,i=(t.browser.now()-r.start)/r.duration,a=r.easing(i+.01)-r.easing(i),o=.27/Math.sqrt(a*a+1e-4)*.01,s=Math.sqrt(.0729-o*o);n=t.bezier(o,s,.25,1)}return this._prevEase={start:t.browser.now(),duration:e,easing:n},n},go.prototype.reset=function(){this._active=!1};var vo=function(t,e){this._clickZoom=t,this._tapZoom=e};vo.prototype.enable=function(){this._clickZoom.enable(),this._tapZoom.enable()},vo.prototype.disable=function(){this._clickZoom.disable(),this._tapZoom.disable()},vo.prototype.isEnabled=function(){return this._clickZoom.isEnabled()&&this._tapZoom.isEnabled()},vo.prototype.isActive=function(){return this._clickZoom.isActive()||this._tapZoom.isActive()};var mo=function(){this.reset()};mo.prototype.reset=function(){this._active=!1},mo.prototype.dblclick=function(t,e){return t.preventDefault(),{cameraAnimation:function(n){n.easeTo({duration:300,zoom:n.getZoom()+(t.shiftKey?-1:1),around:n.unproject(e)},{originalEvent:t})}}},mo.prototype.enable=function(){this._enabled=!0},mo.prototype.disable=function(){this._enabled=!1,this.reset()},mo.prototype.isEnabled=function(){return this._enabled},mo.prototype.isActive=function(){return this._active};var yo=function(){this._tap=new Ua({numTouches:1,numTaps:1}),this.reset()};yo.prototype.reset=function(){this._active=!1,delete this._swipePoint,delete this._swipeTouch,delete this._tapTime,this._tap.reset()},yo.prototype.touchstart=function(t,e,n){this._swipePoint||(this._tapTime&&t.timeStamp-this._tapTime>Na&&this.reset(),this._tapTime?n.length>0&&(this._swipePoint=e[0],this._swipeTouch=n[0].identifier):this._tap.touchstart(t,e,n))},yo.prototype.touchmove=function(t,e,n){if(this._tapTime){if(this._swipePoint){if(n[0].identifier!==this._swipeTouch)return;var r=e[0],i=r.y-this._swipePoint.y;return this._swipePoint=r,t.preventDefault(),this._active=!0,{zoomDelta:i/128}}}else 
this._tap.touchmove(t,e,n)},yo.prototype.touchend=function(t,e,n){this._tapTime?this._swipePoint&&0===n.length&&this.reset():this._tap.touchend(t,e,n)&&(this._tapTime=t.timeStamp)},yo.prototype.touchcancel=function(){this.reset()},yo.prototype.enable=function(){this._enabled=!0},yo.prototype.disable=function(){this._enabled=!1,this.reset()},yo.prototype.isEnabled=function(){return this._enabled},yo.prototype.isActive=function(){return this._active};var bo=function(t,e,n){this._el=t,this._mousePan=e,this._touchPan=n};bo.prototype.enable=function(t){this._inertiaOptions=t||{},this._mousePan.enable(),this._touchPan.enable(),this._el.classList.add("mapboxgl-touch-drag-pan")},bo.prototype.disable=function(){this._mousePan.disable(),this._touchPan.disable(),this._el.classList.remove("mapboxgl-touch-drag-pan")},bo.prototype.isEnabled=function(){return this._mousePan.isEnabled()&&this._touchPan.isEnabled()},bo.prototype.isActive=function(){return this._mousePan.isActive()||this._touchPan.isActive()};var xo=function(t,e,n){this._pitchWithRotate=t.pitchWithRotate,this._mouseRotate=e,this._mousePitch=n};xo.prototype.enable=function(){this._mouseRotate.enable(),this._pitchWithRotate&&this._mousePitch.enable()},xo.prototype.disable=function(){this._mouseRotate.disable(),this._mousePitch.disable()},xo.prototype.isEnabled=function(){return this._mouseRotate.isEnabled()&&(!this._pitchWithRotate||this._mousePitch.isEnabled())},xo.prototype.isActive=function(){return this._mouseRotate.isActive()||this._mousePitch.isActive()};var wo=function(t,e,n,r){this._el=t,this._touchZoom=e,this._touchRotate=n,this._tapDragZoom=r,this._rotationDisabled=!1,this._enabled=!0};wo.prototype.enable=function(t){this._touchZoom.enable(t),this._rotationDisabled||this._touchRotate.enable(t),this._tapDragZoom.enable(),this._el.classList.add("mapboxgl-touch-zoom-rotate")},wo.prototype.disable=function(){this._touchZoom.disable(),this._touchRotate.disable(),this._tapDragZoom.disable(),this._el.classList.remove("mapboxgl-touch-zoom-rotate")},wo.prototype.isEnabled=function(){return this._touchZoom.isEnabled()&&(this._rotationDisabled||this._touchRotate.isEnabled())&&this._tapDragZoom.isEnabled()},wo.prototype.isActive=function(){return this._touchZoom.isActive()||this._touchRotate.isActive()||this._tapDragZoom.isActive()},wo.prototype.disableRotation=function(){this._rotationDisabled=!0,this._touchRotate.disable()},wo.prototype.enableRotation=function(){this._rotationDisabled=!1,this._touchZoom.isEnabled()&&this._touchRotate.enable()};var _o=function(t){return t.zoom||t.drag||t.pitch||t.rotate},ko=function(t){function e(){t.apply(this,arguments)}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e}(t.Event);function To(t){return t.panDelta&&t.panDelta.mag()||t.zoomDelta||t.bearingDelta||t.pitchDelta}var Mo=function(e,r){this._map=e,this._el=this._map.getCanvasContainer(),this._handlers=[],this._handlersById={},this._changes=[],this._inertia=new Aa(e),this._bearingSnap=r.bearingSnap,this._previousActiveHandlers={},this._eventsInProgress={},this._addDefaultHandlers(r),t.bindAll(["handleEvent","handleWindowEvent"],this);var i=this._el;this._listeners=[[i,"touchstart",{passive:!1}],[i,"touchmove",{passive:!1}],[i,"touchend",void 0],[i,"touchcancel",void 0],[i,"mousedown",void 0],[i,"mousemove",void 0],[i,"mouseup",void 0],[t.window.document,"mousemove",{capture:!0}],[t.window.document,"mouseup",void 0],[i,"mouseover",void 0],[i,"mouseout",void 0],[i,"dblclick",void 0],[i,"click",void 
0],[i,"keydown",{capture:!1}],[i,"keyup",void 0],[i,"wheel",{passive:!1}],[i,"contextmenu",void 0],[t.window,"blur",void 0]];for(var a=0,o=this._listeners;aa?Math.min(2,w):Math.max(.5,w),_=Math.pow(v,1-e),k=i.unproject(b.add(x.mult(e*_)).mult(g));i.setLocationAtPoint(i.renderWorldCopies?k.wrap():k,p)}r._fireMoveEvents(n)}),(function(t){r._afterEase(n,t)}),e),this},n.prototype._prepareEase=function(e,n,r){void 0===r&&(r={}),this._moving=!0,n||r.moving||this.fire(new t.Event("movestart",e)),this._zooming&&!r.zooming&&this.fire(new t.Event("zoomstart",e)),this._rotating&&!r.rotating&&this.fire(new t.Event("rotatestart",e)),this._pitching&&!r.pitching&&this.fire(new t.Event("pitchstart",e))},n.prototype._fireMoveEvents=function(e){this.fire(new t.Event("move",e)),this._zooming&&this.fire(new t.Event("zoom",e)),this._rotating&&this.fire(new t.Event("rotate",e)),this._pitching&&this.fire(new t.Event("pitch",e))},n.prototype._afterEase=function(e,n){if(!this._easeId||!n||this._easeId!==n){delete this._easeId;var r=this._zooming,i=this._rotating,a=this._pitching;this._moving=!1,this._zooming=!1,this._rotating=!1,this._pitching=!1,this._padding=!1,r&&this.fire(new t.Event("zoomend",e)),i&&this.fire(new t.Event("rotateend",e)),a&&this.fire(new t.Event("pitchend",e)),this.fire(new t.Event("moveend",e))}},n.prototype.flyTo=function(e,n){var r=this;if(!e.essential&&t.browser.prefersReducedMotion){var i=t.pick(e,["center","zoom","bearing","pitch","around"]);return this.jumpTo(i,n)}this.stop(),e=t.extend({offset:[0,0],speed:1.2,curve:1.42,easing:t.ease},e);var a=this.transform,o=this.getZoom(),s=this.getBearing(),l=this.getPitch(),c=this.getPadding(),u="zoom"in e?t.clamp(+e.zoom,a.minZoom,a.maxZoom):o,f="bearing"in e?this._normalizeBearing(e.bearing,s):s,h="pitch"in e?+e.pitch:l,d="padding"in e?e.padding:a.padding,p=a.zoomScale(u-o),g=t.Point.convert(e.offset),v=a.centerPoint.add(g),m=a.pointLocation(v),y=t.LngLat.convert(e.center||m);this._normalizeCenter(y);var b=a.project(m),x=a.project(y).sub(b),w=e.curve,_=Math.max(a.width,a.height),k=_/p,T=x.mag();if("minZoom"in e){var M=t.clamp(Math.min(e.minZoom,o,u),a.minZoom,a.maxZoom),A=_/a.zoomScale(M-o);w=Math.sqrt(A/T*2)}var S=w*w;function E(t){var e=(k*k-_*_+(t?-1:1)*S*S*T*T)/(2*(t?k:_)*S*T);return Math.log(Math.sqrt(e*e+1)-e)}function C(t){return(Math.exp(t)-Math.exp(-t))/2}function P(t){return(Math.exp(t)+Math.exp(-t))/2}function O(t){return C(t)/P(t)}var L=E(0),I=function(t){return P(L)/P(L+w*t)},D=function(t){return _*((P(L)*O(L+w*t)-C(L))/S)/T},R=(E(1)-L)/w;if(Math.abs(T)<1e-6||!isFinite(R)){if(Math.abs(_-k)<1e-6)return this.easeTo(e,n);var z=k<_?-1:1;R=Math.abs(Math.log(k/_))/w,D=function(){return 0},I=function(t){return Math.exp(z*w*t)}}if("duration"in e)e.duration=+e.duration;else{var N="screenSpeed"in e?+e.screenSpeed/w:+e.speed;e.duration=1e3*R/N}return e.maxDuration&&e.duration>e.maxDuration&&(e.duration=0),this._zooming=!0,this._rotating=s!==f,this._pitching=h!==l,this._padding=!a.isPaddingEqual(d),this._prepareEase(n,!1),this._ease((function(e){var i=e*R,p=1/I(i);a.zoom=1===e?u:o+a.scaleZoom(p),r._rotating&&(a.bearing=t.number(s,f,e)),r._pitching&&(a.pitch=t.number(l,h,e)),r._padding&&(a.interpolatePadding(c,d,e),v=a.centerPoint.add(g));var m=1===e?y:a.unproject(b.add(x.mult(D(i))).mult(p));a.setLocationAtPoint(a.renderWorldCopies?m.wrap():m,v),r._fireMoveEvents(n)}),(function(){return r._afterEase(n)}),e),this},n.prototype.isEasing=function(){return!!this._easeFrameId},n.prototype.stop=function(){return 
this._stop()},n.prototype._stop=function(t,e){if(this._easeFrameId&&(this._cancelRenderFrame(this._easeFrameId),delete this._easeFrameId,delete this._onEaseFrame),this._onEaseEnd){var n=this._onEaseEnd;delete this._onEaseEnd,n.call(this,e)}if(!t){var r=this.handlers;r&&r.stop()}return this},n.prototype._ease=function(e,n,r){!1===r.animate||0===r.duration?(e(1),n()):(this._easeStart=t.browser.now(),this._easeOptions=r,this._onEaseFrame=e,this._onEaseEnd=n,this._easeFrameId=this._requestRenderFrame(this._renderFrameCallback))},n.prototype._renderFrameCallback=function(){var e=Math.min((t.browser.now()-this._easeStart)/this._easeOptions.duration,1);this._onEaseFrame(this._easeOptions.easing(e)),e<1?this._easeFrameId=this._requestRenderFrame(this._renderFrameCallback):this.stop()},n.prototype._normalizeBearing=function(e,n){e=t.wrap(e,-180,180);var r=Math.abs(e-n);return Math.abs(e-360-n)180?-360:n<-180?360:0}},n}(t.Evented),So=function(e){void 0===e&&(e={}),this.options=e,t.bindAll(["_updateEditLink","_updateData","_updateCompact"],this)};So.prototype.getDefaultPosition=function(){return"bottom-right"},So.prototype.onAdd=function(t){var e=this.options&&this.options.compact;return this._map=t,this._container=n.create("div","mapboxgl-ctrl mapboxgl-ctrl-attrib"),this._innerContainer=n.create("div","mapboxgl-ctrl-attrib-inner",this._container),e&&this._container.classList.add("mapboxgl-compact"),this._updateAttributions(),this._updateEditLink(),this._map.on("styledata",this._updateData),this._map.on("sourcedata",this._updateData),this._map.on("moveend",this._updateEditLink),void 0===e&&(this._map.on("resize",this._updateCompact),this._updateCompact()),this._container},So.prototype.onRemove=function(){n.remove(this._container),this._map.off("styledata",this._updateData),this._map.off("sourcedata",this._updateData),this._map.off("moveend",this._updateEditLink),this._map.off("resize",this._updateCompact),this._map=void 0,this._attribHTML=void 0},So.prototype._updateEditLink=function(){var e=this._editLink;e||(e=this._editLink=this._container.querySelector(".mapbox-improve-map"));var n=[{key:"owner",value:this.styleOwner},{key:"id",value:this.styleId},{key:"access_token",value:this._map._requestManager._customAccessToken||t.config.ACCESS_TOKEN}];if(e){var r=n.reduce((function(t,e,r){return e.value&&(t+=e.key+"="+e.value+(r=0)return!1;return!0}))).join(" | ");o!==this._attribHTML&&(this._attribHTML=o,t.length?(this._innerContainer.innerHTML=o,this._container.classList.remove("mapboxgl-attrib-empty")):this._container.classList.add("mapboxgl-attrib-empty"),this._editLink=null)}},So.prototype._updateCompact=function(){this._map.getCanvasContainer().offsetWidth<=640?this._container.classList.add("mapboxgl-compact"):this._container.classList.remove("mapboxgl-compact")};var Eo=function(){t.bindAll(["_updateLogo"],this),t.bindAll(["_updateCompact"],this)};Eo.prototype.onAdd=function(t){this._map=t,this._container=n.create("div","mapboxgl-ctrl");var e=n.create("a","mapboxgl-ctrl-logo");return e.target="_blank",e.rel="noopener nofollow",e.href="https://www.mapbox.com/",e.setAttribute("aria-label",this._map._getUIString("LogoControl.Title")),e.setAttribute("rel","noopener 
nofollow"),this._container.appendChild(e),this._container.style.display="none",this._map.on("sourcedata",this._updateLogo),this._updateLogo(),this._map.on("resize",this._updateCompact),this._updateCompact(),this._container},Eo.prototype.onRemove=function(){n.remove(this._container),this._map.off("sourcedata",this._updateLogo),this._map.off("resize",this._updateCompact)},Eo.prototype.getDefaultPosition=function(){return"bottom-left"},Eo.prototype._updateLogo=function(t){t&&"metadata"!==t.sourceDataType||(this._container.style.display=this._logoRequired()?"block":"none")},Eo.prototype._logoRequired=function(){if(this._map.style){var t=this._map.style.sourceCaches;for(var e in t)if(t[e].getSource().mapbox_logo)return!0;return!1}},Eo.prototype._updateCompact=function(){var t=this._container.children;if(t.length){var e=t[0];this._map.getCanvasContainer().offsetWidth<250?e.classList.add("mapboxgl-compact"):e.classList.remove("mapboxgl-compact")}};var Co=function(){this._queue=[],this._id=0,this._cleared=!1,this._currentlyRunning=!1};Co.prototype.add=function(t){var e=++this._id;return this._queue.push({callback:t,id:e,cancelled:!1}),e},Co.prototype.remove=function(t){for(var e=this._currentlyRunning,n=0,r=e?this._queue.concat(e):this._queue;ne.maxZoom)throw new Error("maxZoom must be greater than or equal to minZoom");if(null!=e.minPitch&&null!=e.maxPitch&&e.minPitch>e.maxPitch)throw new Error("maxPitch must be greater than or equal to minPitch");if(null!=e.minPitch&&e.minPitchNo)throw new Error("maxPitch must be less than or equal to "+No);var i=new ma(e.minZoom,e.maxZoom,e.minPitch,e.maxPitch,e.renderWorldCopies);if(r.call(this,i,e),this._interactive=e.interactive,this._maxTileCacheSize=e.maxTileCacheSize,this._failIfMajorPerformanceCaveat=e.failIfMajorPerformanceCaveat,this._preserveDrawingBuffer=e.preserveDrawingBuffer,this._antialias=e.antialias,this._trackResize=e.trackResize,this._bearingSnap=e.bearingSnap,this._refreshExpiredTiles=e.refreshExpiredTiles,this._fadeDuration=e.fadeDuration,this._crossSourceCollisions=e.crossSourceCollisions,this._crossFadingFactor=1,this._collectResourceTiming=e.collectResourceTiming,this._renderTaskQueue=new Co,this._controls=[],this._mapId=t.uniqueId(),this._locale=t.extend({},Po,e.locale),this._requestManager=new t.RequestManager(e.transformRequest,e.accessToken),"string"===typeof e.container){if(this._container=t.window.document.getElementById(e.container),!this._container)throw new Error("Container '"+e.container+"' not found.")}else{if(!(e.container instanceof Lo))throw new Error("Invalid type: 'container' must be a String or HTMLElement.");this._container=e.container}if(e.maxBounds&&this.setMaxBounds(e.maxBounds),t.bindAll(["_onWindowOnline","_onWindowResize","_contextLost","_contextRestored"],this),this._setupContainer(),this._setupPainter(),void 0===this.painter)throw new Error("Failed to initialize WebGL.");this.on("move",(function(){return n._update(!1)})),this.on("moveend",(function(){return n._update(!1)})),this.on("zoom",(function(){return n._update(!0)})),"undefined"!==typeof t.window&&(t.window.addEventListener("online",this._onWindowOnline,!1),t.window.addEventListener("resize",this._onWindowResize,!1)),this.handlers=new Mo(this,e);var a="string"===typeof e.hash&&e.hash||void 0;this._hash=e.hash&&new 
xa(a).addTo(this),this._hash&&this._hash._onHashChange()||(this.jumpTo({center:e.center,zoom:e.zoom,bearing:e.bearing,pitch:e.pitch}),e.bounds&&(this.resize(),this.fitBounds(e.bounds,t.extend({},e.fitBoundsOptions,{duration:0})))),this.resize(),this._localIdeographFontFamily=e.localIdeographFontFamily,e.style&&this.setStyle(e.style,{localIdeographFontFamily:e.localIdeographFontFamily}),e.attributionControl&&this.addControl(new So({customAttribution:e.customAttribution})),this.addControl(new Eo,e.logoPosition),this.on("style.load",(function(){n.transform.unmodified&&n.jumpTo(n.style.stylesheet)})),this.on("data",(function(e){n._update("style"===e.dataType),n.fire(new t.Event(e.dataType+"data",e))})),this.on("dataloading",(function(e){n.fire(new t.Event(e.dataType+"dataloading",e))}))}r&&(i.__proto__=r),i.prototype=Object.create(r&&r.prototype),i.prototype.constructor=i;var a={showTileBoundaries:{configurable:!0},showPadding:{configurable:!0},showCollisionBoxes:{configurable:!0},showOverdrawInspector:{configurable:!0},repaint:{configurable:!0},vertices:{configurable:!0},version:{configurable:!0}};return i.prototype._getMapId=function(){return this._mapId},i.prototype.addControl=function(e,n){if(void 0===n&&e.getDefaultPosition&&(n=e.getDefaultPosition()),void 0===n&&(n="top-right"),!e||!e.onAdd)return this.fire(new t.ErrorEvent(new Error("Invalid argument to map.addControl(). Argument must be a control with onAdd and onRemove methods.")));var r=e.onAdd(this);this._controls.push(e);var i=this._controlPositions[n];return-1!==n.indexOf("bottom")?i.insertBefore(r,i.firstChild):i.appendChild(r),this},i.prototype.removeControl=function(e){if(!e||!e.onRemove)return this.fire(new t.ErrorEvent(new Error("Invalid argument to map.removeControl(). Argument must be a control with onAdd and onRemove methods.")));var n=this._controls.indexOf(e);return n>-1&&this._controls.splice(n,1),e.onRemove(this),this},i.prototype.resize=function(e){var n=this._containerDimensions(),r=n[0],i=n[1];this._resizeCanvas(r,i),this.transform.resize(r,i),this.painter.resize(r,i);var a=!this._moving;return a&&(this.stop(),this.fire(new t.Event("movestart",e)).fire(new t.Event("move",e))),this.fire(new t.Event("resize",e)),a&&this.fire(new t.Event("moveend",e)),this},i.prototype.getBounds=function(){return this.transform.getBounds()},i.prototype.getMaxBounds=function(){return this.transform.getMaxBounds()},i.prototype.setMaxBounds=function(e){return this.transform.setMaxBounds(t.LngLatBounds.convert(e)),this._update()},i.prototype.setMinZoom=function(t){if((t=null===t||void 0===t?Do:t)>=Do&&t<=this.transform.maxZoom)return this.transform.minZoom=t,this._update(),this.getZoom()=this.transform.minZoom)return this.transform.maxZoom=t,this._update(),this.getZoom()>t&&this.setZoom(t),this;throw new Error("maxZoom must be greater than the current minZoom")},i.prototype.getMaxZoom=function(){return this.transform.maxZoom},i.prototype.setMinPitch=function(t){if((t=null===t||void 0===t?zo:t)=zo&&t<=this.transform.maxPitch)return this.transform.minPitch=t,this._update(),this.getPitch()No)throw new Error("maxPitch must be less than or equal to "+No);if(t>=this.transform.minPitch)return this.transform.maxPitch=t,this._update(),this.getPitch()>t&&this.setPitch(t),this;throw new Error("maxPitch must be greater than the current minPitch")},i.prototype.getMaxPitch=function(){return this.transform.maxPitch},i.prototype.getRenderWorldCopies=function(){return this.transform.renderWorldCopies},i.prototype.setRenderWorldCopies=function(t){return 
this.transform.renderWorldCopies=t,this._update()},i.prototype.project=function(e){return this.transform.locationPoint(t.LngLat.convert(e))},i.prototype.unproject=function(e){return this.transform.pointLocation(t.Point.convert(e))},i.prototype.isMoving=function(){return this._moving||this.handlers.isMoving()},i.prototype.isZooming=function(){return this._zooming||this.handlers.isZooming()},i.prototype.isRotating=function(){return this._rotating||this.handlers.isRotating()},i.prototype._createDelegatedListener=function(t,e,n){var r,i=this;if("mouseenter"===t||"mouseover"===t){var a=!1,o=function(r){var o=i.getLayer(e)?i.queryRenderedFeatures(r.point,{layers:[e]}):[];o.length?a||(a=!0,n.call(i,new Ca(t,i,r.originalEvent,{features:o}))):a=!1};return{layer:e,listener:n,delegates:{mousemove:o,mouseout:function(){a=!1}}}}if("mouseleave"===t||"mouseout"===t){var s=!1,l=function(r){(i.getLayer(e)?i.queryRenderedFeatures(r.point,{layers:[e]}):[]).length?s=!0:s&&(s=!1,n.call(i,new Ca(t,i,r.originalEvent)))},c=function(e){s&&(s=!1,n.call(i,new Ca(t,i,e.originalEvent)))};return{layer:e,listener:n,delegates:{mousemove:l,mouseout:c}}}var u=function(t){var r=i.getLayer(e)?i.queryRenderedFeatures(t.point,{layers:[e]}):[];r.length&&(t.features=r,n.call(i,t),delete t.features)};return{layer:e,listener:n,delegates:(r={},r[t]=u,r)}},i.prototype.on=function(t,e,n){if(void 0===n)return r.prototype.on.call(this,t,e);var i=this._createDelegatedListener(t,e,n);for(var a in this._delegatedListeners=this._delegatedListeners||{},this._delegatedListeners[t]=this._delegatedListeners[t]||[],this._delegatedListeners[t].push(i),i.delegates)this.on(a,i.delegates[a]);return this},i.prototype.once=function(t,e,n){if(void 0===n)return r.prototype.once.call(this,t,e);var i=this._createDelegatedListener(t,e,n);for(var a in i.delegates)this.once(a,i.delegates[a]);return this},i.prototype.off=function(t,e,n){var i=this;if(void 0===n)return r.prototype.off.call(this,t,e);var a=function(r){for(var a=r[t],o=0;o180;){var s=r.locationPoint(e);if(s.x>=0&&s.y>=0&&s.x<=r.width&&s.y<=r.height)break;e.lng>r.center.lng?e.lng-=360:e.lng+=360}return e}Vo.prototype.down=function(t,e){this.mouseRotate.mousedown(t,e),this.mousePitch&&this.mousePitch.mousedown(t,e),n.disableDrag()},Vo.prototype.move=function(t,e){var n=this.map,r=this.mouseRotate.mousemoveWindow(t,e);if(r&&r.bearingDelta&&n.setBearing(n.getBearing()+r.bearingDelta),this.mousePitch){var i=this.mousePitch.mousemoveWindow(t,e);i&&i.pitchDelta&&n.setPitch(n.getPitch()+i.pitchDelta)}},Vo.prototype.off=function(){var t=this.element;n.removeEventListener(t,"mousedown",this.mousedown),n.removeEventListener(t,"touchstart",this.touchstart,{passive:!1}),n.removeEventListener(t,"touchmove",this.touchmove),n.removeEventListener(t,"touchend",this.touchend),n.removeEventListener(t,"touchcancel",this.reset),this.offTemp()},Vo.prototype.offTemp=function(){n.enableDrag(),n.removeEventListener(t.window,"mousemove",this.mousemove),n.removeEventListener(t.window,"mouseup",this.mouseup)},Vo.prototype.mousedown=function(e){this.down(t.extend({},e,{ctrlKey:!0,preventDefault:function(){return 
e.preventDefault()}}),n.mousePos(this.element,e)),n.addEventListener(t.window,"mousemove",this.mousemove),n.addEventListener(t.window,"mouseup",this.mouseup)},Vo.prototype.mousemove=function(t){this.move(t,n.mousePos(this.element,t))},Vo.prototype.mouseup=function(t){this.mouseRotate.mouseupWindow(t),this.mousePitch&&this.mousePitch.mouseupWindow(t),this.offTemp()},Vo.prototype.touchstart=function(t){1!==t.targetTouches.length?this.reset():(this._startPos=this._lastPos=n.touchPos(this.element,t.targetTouches)[0],this.down({type:"mousedown",button:0,ctrlKey:!0,preventDefault:function(){return t.preventDefault()}},this._startPos))},Vo.prototype.touchmove=function(t){1!==t.targetTouches.length?this.reset():(this._lastPos=n.touchPos(this.element,t.targetTouches)[0],this.move({preventDefault:function(){return t.preventDefault()}},this._lastPos))},Vo.prototype.touchend=function(t){0===t.targetTouches.length&&this._startPos&&this._lastPos&&this._startPos.dist(this._lastPos)e.getEast()||n.latitudee.getNorth())},r.prototype._setErrorState=function(){switch(this._watchState){case"WAITING_ACTIVE":this._watchState="ACTIVE_ERROR",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active-error");break;case"ACTIVE_LOCK":this._watchState="ACTIVE_ERROR",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting");break;case"BACKGROUND":this._watchState="BACKGROUND_ERROR",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting")}},r.prototype._onSuccess=function(e){if(this._map){if(this._isOutOfMapMaxBounds(e))return this._setErrorState(),this.fire(new t.Event("outofmaxbounds",e)),this._updateMarker(),void this._finish();if(this.options.trackUserLocation)switch(this._lastKnownPosition=e,this._watchState){case"WAITING_ACTIVE":case"ACTIVE_LOCK":case"ACTIVE_ERROR":this._watchState="ACTIVE_LOCK",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active");break;case"BACKGROUND":case"BACKGROUND_ERROR":this._watchState="BACKGROUND",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background-error"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background")}this.options.showUserLocation&&"OFF"!==this._watchState&&this._updateMarker(e),this.options.trackUserLocation&&"ACTIVE_LOCK"!==this._watchState||this._updateCamera(e),this.options.showUserLocation&&this._dotElement.classList.remove("mapboxgl-user-location-dot-stale"),this.fire(new t.Event("geolocate",e)),this._finish()}},r.prototype._updateCamera=function(e){var n=new t.LngLat(e.coords.longitude,e.coords.latitude),r=e.coords.accuracy,i=this._map.getBearing(),a=t.extend({bearing:i},this.options.fitBoundsOptions);this._map.fitBounds(n.toBounds(r),a,{geolocateSource:!0})},r.prototype._updateMarker=function(e){if(e){var n=new 
t.LngLat(e.coords.longitude,e.coords.latitude);this._accuracyCircleMarker.setLngLat(n).addTo(this._map),this._userLocationDotMarker.setLngLat(n).addTo(this._map),this._accuracy=e.coords.accuracy,this.options.showUserLocation&&this.options.showAccuracyCircle&&this._updateCircleRadius()}else this._userLocationDotMarker.remove(),this._accuracyCircleMarker.remove()},r.prototype._updateCircleRadius=function(){var t=this._map._container.clientHeight/2,e=this._map.unproject([0,t]),n=this._map.unproject([1,t]),r=e.distanceTo(n),i=Math.ceil(2*this._accuracy/r);this._circleElement.style.width=i+"px",this._circleElement.style.height=i+"px"},r.prototype._onZoom=function(){this.options.showUserLocation&&this.options.showAccuracyCircle&&this._updateCircleRadius()},r.prototype._onError=function(e){if(this._map){if(this.options.trackUserLocation)if(1===e.code){this._watchState="OFF",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background-error"),this._geolocateButton.disabled=!0;var n=this._map._getUIString("GeolocateControl.LocationNotAvailable");this._geolocateButton.title=n,this._geolocateButton.setAttribute("aria-label",n),void 0!==this._geolocationWatchID&&this._clearWatch()}else{if(3===e.code&&Jo)return;this._setErrorState()}"OFF"!==this._watchState&&this.options.showUserLocation&&this._dotElement.classList.add("mapboxgl-user-location-dot-stale"),this.fire(new t.Event("error",e)),this._finish()}},r.prototype._finish=function(){this._timeoutId&&clearTimeout(this._timeoutId),this._timeoutId=void 0},r.prototype._setupUI=function(e){var r=this;if(this._container.addEventListener("contextmenu",(function(t){return t.preventDefault()})),this._geolocateButton=n.create("button","mapboxgl-ctrl-geolocate",this._container),n.create("span","mapboxgl-ctrl-icon",this._geolocateButton).setAttribute("aria-hidden",!0),this._geolocateButton.type="button",!1===e){t.warnOnce("Geolocation support is not available so the GeolocateControl will be disabled.");var i=this._map._getUIString("GeolocateControl.LocationNotAvailable");this._geolocateButton.disabled=!0,this._geolocateButton.title=i,this._geolocateButton.setAttribute("aria-label",i)}else{var a=this._map._getUIString("GeolocateControl.FindMyLocation");this._geolocateButton.title=a,this._geolocateButton.setAttribute("aria-label",a)}this.options.trackUserLocation&&(this._geolocateButton.setAttribute("aria-pressed","false"),this._watchState="OFF"),this.options.showUserLocation&&(this._dotElement=n.create("div","mapboxgl-user-location-dot"),this._userLocationDotMarker=new $o(this._dotElement),this._circleElement=n.create("div","mapboxgl-user-location-accuracy-circle"),this._accuracyCircleMarker=new $o({element:this._circleElement,pitchAlignment:"map"}),this.options.trackUserLocation&&(this._watchState="OFF"),this._map.on("zoom",this._onZoom)),this._geolocateButton.addEventListener("click",this.trigger.bind(this)),this._setup=!0,this.options.trackUserLocation&&this._map.on("movestart",(function(e){var 
n=e.originalEvent&&"resize"===e.originalEvent.type;e.geolocateSource||"ACTIVE_LOCK"!==r._watchState||n||(r._watchState="BACKGROUND",r._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background"),r._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),r.fire(new t.Event("trackuserlocationend")))}))},r.prototype.trigger=function(){if(!this._setup)return t.warnOnce("Geolocate control triggered before added to a map"),!1;if(this.options.trackUserLocation){switch(this._watchState){case"OFF":this._watchState="WAITING_ACTIVE",this.fire(new t.Event("trackuserlocationstart"));break;case"WAITING_ACTIVE":case"ACTIVE_LOCK":case"ACTIVE_ERROR":case"BACKGROUND_ERROR":Zo--,Jo=!1,this._watchState="OFF",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-active-error"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background-error"),this.fire(new t.Event("trackuserlocationend"));break;case"BACKGROUND":this._watchState="ACTIVE_LOCK",this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-background"),this._lastKnownPosition&&this._updateCamera(this._lastKnownPosition),this.fire(new t.Event("trackuserlocationstart"))}switch(this._watchState){case"WAITING_ACTIVE":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active");break;case"ACTIVE_LOCK":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active");break;case"ACTIVE_ERROR":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-active-error");break;case"BACKGROUND":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background");break;case"BACKGROUND_ERROR":this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-background-error")}if("OFF"===this._watchState&&void 0!==this._geolocationWatchID)this._clearWatch();else if(void 0===this._geolocationWatchID){var e;this._geolocateButton.classList.add("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.setAttribute("aria-pressed","true"),++Zo>1?(e={maximumAge:6e5,timeout:0},Jo=!0):(e=this.options.positionOptions,Jo=!1),this._geolocationWatchID=t.window.navigator.geolocation.watchPosition(this._onSuccess,this._onError,e)}}else t.window.navigator.geolocation.getCurrentPosition(this._onSuccess,this._onError,this.options.positionOptions),this._timeoutId=setTimeout(this._finish,1e4);return!0},r.prototype._clearWatch=function(){t.window.navigator.geolocation.clearWatch(this._geolocationWatchID),this._geolocationWatchID=void 0,this._geolocateButton.classList.remove("mapboxgl-ctrl-geolocate-waiting"),this._geolocateButton.setAttribute("aria-pressed","false"),this.options.showUserLocation&&this._updateMarker(null)},r}(t.Evented),ts={maxWidth:100,unit:"metric"},es=function(e){this.options=t.extend({},ts,e),t.bindAll(["_onMove","setUnit"],this)};function ns(t,e,n){var r=n&&n.maxWidth||100,i=t._container.clientHeight/2,a=t.unproject([0,i]),o=t.unproject([r,i]),s=a.distanceTo(o);if(n&&"imperial"===n.unit){var l=3.2808*s;l>5280?rs(e,r,l/5280,t._getUIString("ScaleControl.Miles")):rs(e,r,l,t._getUIString("ScaleControl.Feet"))}else 
n&&"nautical"===n.unit?rs(e,r,s/1852,t._getUIString("ScaleControl.NauticalMiles")):s>=1e3?rs(e,r,s/1e3,t._getUIString("ScaleControl.Kilometers")):rs(e,r,s,t._getUIString("ScaleControl.Meters"))}function rs(t,e,n,r){var i=as(n),a=i/n;t.style.width=e*a+"px",t.innerHTML=i+" "+r}function is(t){var e=Math.pow(10,Math.ceil(-Math.log(t)/Math.LN10));return Math.round(t*e)/e}function as(t){var e=Math.pow(10,(""+Math.floor(t)).length-1),n=t/e;return e*(n=n>=10?10:n>=5?5:n>=3?3:n>=2?2:n>=1?1:is(n))}es.prototype.getDefaultPosition=function(){return"bottom-left"},es.prototype._onMove=function(){ns(this._map,this._container,this.options)},es.prototype.onAdd=function(t){return this._map=t,this._container=n.create("div","mapboxgl-ctrl mapboxgl-ctrl-scale",t.getContainer()),this._map.on("move",this._onMove),this._onMove(),this._container},es.prototype.onRemove=function(){n.remove(this._container),this._map.off("move",this._onMove),this._map=void 0},es.prototype.setUnit=function(t){this.options.unit=t,ns(this._map,this._container,this.options)};var os=function(e){this._fullscreen=!1,e&&e.container&&(e.container instanceof t.window.HTMLElement?this._container=e.container:t.warnOnce("Full screen control 'container' must be a DOM element.")),t.bindAll(["_onClickFullscreen","_changeIcon"],this),"onfullscreenchange"in t.window.document?this._fullscreenchange="fullscreenchange":"onmozfullscreenchange"in t.window.document?this._fullscreenchange="mozfullscreenchange":"onwebkitfullscreenchange"in t.window.document?this._fullscreenchange="webkitfullscreenchange":"onmsfullscreenchange"in t.window.document&&(this._fullscreenchange="MSFullscreenChange")};os.prototype.onAdd=function(e){return this._map=e,this._container||(this._container=this._map.getContainer()),this._controlContainer=n.create("div","mapboxgl-ctrl mapboxgl-ctrl-group"),this._checkFullscreenSupport()?this._setupUI():(this._controlContainer.style.display="none",t.warnOnce("This device does not support fullscreen mode.")),this._controlContainer},os.prototype.onRemove=function(){n.remove(this._controlContainer),this._map=null,t.window.document.removeEventListener(this._fullscreenchange,this._changeIcon)},os.prototype._checkFullscreenSupport=function(){return!!(t.window.document.fullscreenEnabled||t.window.document.mozFullScreenEnabled||t.window.document.msFullscreenEnabled||t.window.document.webkitFullscreenEnabled)},os.prototype._setupUI=function(){var e=this._fullscreenButton=n.create("button","mapboxgl-ctrl-fullscreen",this._controlContainer);n.create("span","mapboxgl-ctrl-icon",e).setAttribute("aria-hidden",!0),e.type="button",this._updateTitle(),this._fullscreenButton.addEventListener("click",this._onClickFullscreen),t.window.document.addEventListener(this._fullscreenchange,this._changeIcon)},os.prototype._updateTitle=function(){var t=this._getTitle();this._fullscreenButton.setAttribute("aria-label",t),this._fullscreenButton.title=t},os.prototype._getTitle=function(){return this._map._getUIString(this._isFullscreen()?"FullscreenControl.Exit":"FullscreenControl.Enter")},os.prototype._isFullscreen=function(){return 
this._fullscreen},os.prototype._changeIcon=function(){(t.window.document.fullscreenElement||t.window.document.mozFullScreenElement||t.window.document.webkitFullscreenElement||t.window.document.msFullscreenElement)===this._container!==this._fullscreen&&(this._fullscreen=!this._fullscreen,this._fullscreenButton.classList.toggle("mapboxgl-ctrl-shrink"),this._fullscreenButton.classList.toggle("mapboxgl-ctrl-fullscreen"),this._updateTitle())},os.prototype._onClickFullscreen=function(){this._isFullscreen()?t.window.document.exitFullscreen?t.window.document.exitFullscreen():t.window.document.mozCancelFullScreen?t.window.document.mozCancelFullScreen():t.window.document.msExitFullscreen?t.window.document.msExitFullscreen():t.window.document.webkitCancelFullScreen&&t.window.document.webkitCancelFullScreen():this._container.requestFullscreen?this._container.requestFullscreen():this._container.mozRequestFullScreen?this._container.mozRequestFullScreen():this._container.msRequestFullscreen?this._container.msRequestFullscreen():this._container.webkitRequestFullscreen&&this._container.webkitRequestFullscreen()};var ss={closeButton:!0,closeOnClick:!0,className:"",maxWidth:"240px"},ls=function(e){function r(n){e.call(this),this.options=t.extend(Object.create(ss),n),t.bindAll(["_update","_onClose","remove","_onMouseMove","_onMouseUp","_onDrag"],this)}return e&&(r.__proto__=e),r.prototype=Object.create(e&&e.prototype),r.prototype.constructor=r,r.prototype.addTo=function(e){return this._map&&this.remove(),this._map=e,this.options.closeOnClick&&this._map.on("click",this._onClose),this.options.closeOnMove&&this._map.on("move",this._onClose),this._map.on("remove",this.remove),this._update(),this._trackPointer?(this._map.on("mousemove",this._onMouseMove),this._map.on("mouseup",this._onMouseUp),this._container&&this._container.classList.add("mapboxgl-popup-track-pointer"),this._map._canvasContainer.classList.add("mapboxgl-track-pointer")):this._map.on("move",this._update),this.fire(new t.Event("open")),this},r.prototype.isOpen=function(){return!!this._map},r.prototype.remove=function(){return this._content&&n.remove(this._content),this._container&&(n.remove(this._container),delete this._container),this._map&&(this._map.off("move",this._update),this._map.off("move",this._onClose),this._map.off("click",this._onClose),this._map.off("remove",this.remove),this._map.off("mousemove",this._onMouseMove),this._map.off("mouseup",this._onMouseUp),this._map.off("drag",this._onDrag),delete this._map),this.fire(new t.Event("close")),this},r.prototype.getLngLat=function(){return this._lngLat},r.prototype.setLngLat=function(e){return this._lngLat=t.LngLat.convert(e),this._pos=null,this._trackPointer=!1,this._update(),this._map&&(this._map.on("move",this._update),this._map.off("mousemove",this._onMouseMove),this._container&&this._container.classList.remove("mapboxgl-popup-track-pointer"),this._map._canvasContainer.classList.remove("mapboxgl-track-pointer")),this},r.prototype.trackPointer=function(){return this._trackPointer=!0,this._pos=null,this._update(),this._map&&(this._map.off("move",this._update),this._map.on("mousemove",this._onMouseMove),this._map.on("drag",this._onDrag),this._container&&this._container.classList.add("mapboxgl-popup-track-pointer"),this._map._canvasContainer.classList.add("mapboxgl-track-pointer")),this},r.prototype.getElement=function(){return this._container},r.prototype.setText=function(e){return this.setDOMContent(t.window.document.createTextNode(e))},r.prototype.setHTML=function(e){var 
n,r=t.window.document.createDocumentFragment(),i=t.window.document.createElement("body");for(i.innerHTML=e;n=i.firstChild;)r.appendChild(n);return this.setDOMContent(r)},r.prototype.getMaxWidth=function(){return this._container&&this._container.style.maxWidth},r.prototype.setMaxWidth=function(t){return this.options.maxWidth=t,this._update(),this},r.prototype.setDOMContent=function(t){return this._createContent(),this._content.appendChild(t),this._update(),this},r.prototype.addClassName=function(t){this._container&&this._container.classList.add(t)},r.prototype.removeClassName=function(t){this._container&&this._container.classList.remove(t)},r.prototype.toggleClassName=function(t){if(this._container)return this._container.classList.toggle(t)},r.prototype._createContent=function(){this._content&&n.remove(this._content),this._content=n.create("div","mapboxgl-popup-content",this._container),this.options.closeButton&&(this._closeButton=n.create("button","mapboxgl-popup-close-button",this._content),this._closeButton.type="button",this._closeButton.setAttribute("aria-label","Close popup"),this._closeButton.innerHTML="×",this._closeButton.addEventListener("click",this._onClose))},r.prototype._onMouseUp=function(t){this._update(t.point)},r.prototype._onMouseMove=function(t){this._update(t.point)},r.prototype._onDrag=function(t){this._update(t.point)},r.prototype._update=function(t){var e=this,r=this._lngLat||this._trackPointer;if(this._map&&r&&this._content&&(this._container||(this._container=n.create("div","mapboxgl-popup",this._map.getContainer()),this._tip=n.create("div","mapboxgl-popup-tip",this._container),this._container.appendChild(this._content),this.options.className&&this.options.className.split(" ").forEach((function(t){return e._container.classList.add(t)})),this._trackPointer&&this._container.classList.add("mapboxgl-popup-track-pointer")),this.options.maxWidth&&this._container.style.maxWidth!==this.options.maxWidth&&(this._container.style.maxWidth=this.options.maxWidth),this._map.transform.renderWorldCopies&&!this._trackPointer&&(this._lngLat=qo(this._lngLat,this._pos,this._map.transform)),!this._trackPointer||t)){var i=this._pos=this._trackPointer&&t?t:this._map.project(this._lngLat),a=this.options.anchor,o=cs(this.options.offset);if(!a){var s,l=this._container.offsetWidth,c=this._container.offsetHeight;s=i.y+o.bottom.ythis._map.transform.height-c?["bottom"]:[],i.xthis._map.transform.width-l/2&&s.push("right"),a=0===s.length?"bottom":s.join("-")}var u=i.add(o[a]).round();n.setTransform(this._container,Go[a]+" translate("+u.x+"px,"+u.y+"px)"),Wo(this._container,a,"popup")}},r.prototype._onClose=function(){this.remove()},r}(t.Evented);function cs(e){if(e){if("number"===typeof e){var n=Math.round(Math.sqrt(.5*Math.pow(e,2)));return{center:new t.Point(0,0),top:new t.Point(0,e),"top-left":new t.Point(n,n),"top-right":new t.Point(-n,n),bottom:new t.Point(0,-e),"bottom-left":new t.Point(n,-n),"bottom-right":new t.Point(-n,-n),left:new t.Point(e,0),right:new t.Point(-e,0)}}if(e instanceof t.Point||Array.isArray(e)){var 
r=t.Point.convert(e);return{center:r,top:r,"top-left":r,"top-right":r,bottom:r,"bottom-left":r,"bottom-right":r,left:r,right:r}}return{center:t.Point.convert(e.center||[0,0]),top:t.Point.convert(e.top||[0,0]),"top-left":t.Point.convert(e["top-left"]||[0,0]),"top-right":t.Point.convert(e["top-right"]||[0,0]),bottom:t.Point.convert(e.bottom||[0,0]),"bottom-left":t.Point.convert(e["bottom-left"]||[0,0]),"bottom-right":t.Point.convert(e["bottom-right"]||[0,0]),left:t.Point.convert(e.left||[0,0]),right:t.Point.convert(e.right||[0,0])}}return cs(new t.Point(0,0))}var us={version:t.version,supported:e,setRTLTextPlugin:t.setRTLTextPlugin,getRTLTextPluginStatus:t.getRTLTextPluginStatus,Map:Fo,NavigationControl:Ho,GeolocateControl:Qo,AttributionControl:So,ScaleControl:es,FullscreenControl:os,Popup:ls,Marker:$o,Style:mn,LngLat:t.LngLat,LngLatBounds:t.LngLatBounds,Point:t.Point,MercatorCoordinate:t.MercatorCoordinate,Evented:t.Evented,config:t.config,prewarm:ae,clearPrewarmedResources:oe,get accessToken(){return t.config.ACCESS_TOKEN},set accessToken(e){t.config.ACCESS_TOKEN=e},get baseApiUrl(){return t.config.API_URL},set baseApiUrl(e){t.config.API_URL=e},get workerCount(){return ee.workerCount},set workerCount(t){ee.workerCount=t},get maxParallelImageRequests(){return t.config.MAX_PARALLEL_IMAGE_REQUESTS},set maxParallelImageRequests(e){t.config.MAX_PARALLEL_IMAGE_REQUESTS=e},clearStorage:function(e){t.clearTileCache(e)},workerUrl:""};return us})),n}()},27084:function(t){"use strict";t.exports=Math.log2||function(t){return Math.log(t)*Math.LOG2E}},16825:function(t,e,n){"use strict";t.exports=function(t,e){e||(e=t,t=window);var n=0,i=0,a=0,o={shift:!1,alt:!1,control:!1,meta:!1},s=!1;function l(t){var e=!1;return"altKey"in t&&(e=e||t.altKey!==o.alt,o.alt=!!t.altKey),"shiftKey"in t&&(e=e||t.shiftKey!==o.shift,o.shift=!!t.shiftKey),"ctrlKey"in t&&(e=e||t.ctrlKey!==o.control,o.control=!!t.ctrlKey),"metaKey"in t&&(e=e||t.metaKey!==o.meta,o.meta=!!t.metaKey),e}function c(t,s){var c=r.x(s),u=r.y(s);"buttons"in s&&(t=0|s.buttons),(t!==n||c!==i||u!==a||l(s))&&(n=0|t,i=c||0,a=u||0,e&&e(n,i,a,o))}function u(t){c(0,t)}function f(){(n||i||a||o.shift||o.alt||o.meta||o.control)&&(i=a=0,n=0,o.shift=o.alt=o.control=o.meta=!1,e&&e(0,0,0,o))}function h(t){l(t)&&e&&e(n,i,a,o)}function d(t){0===r.buttons(t)?c(0,t):c(n,t)}function p(t){c(n|r.buttons(t),t)}function g(t){c(n&~r.buttons(t),t)}function v(){s||(s=!0,t.addEventListener("mousemove",d),t.addEventListener("mousedown",p),t.addEventListener("mouseup",g),t.addEventListener("mouseleave",u),t.addEventListener("mouseenter",u),t.addEventListener("mouseout",u),t.addEventListener("mouseover",u),t.addEventListener("blur",f),t.addEventListener("keyup",h),t.addEventListener("keydown",h),t.addEventListener("keypress",h),t!==window&&(window.addEventListener("blur",f),window.addEventListener("keyup",h),window.addEventListener("keydown",h),window.addEventListener("keypress",h)))}v();var m={element:t};return Object.defineProperties(m,{enabled:{get:function(){return 
s},set:function(e){e?v():s&&(s=!1,t.removeEventListener("mousemove",d),t.removeEventListener("mousedown",p),t.removeEventListener("mouseup",g),t.removeEventListener("mouseleave",u),t.removeEventListener("mouseenter",u),t.removeEventListener("mouseout",u),t.removeEventListener("mouseover",u),t.removeEventListener("blur",f),t.removeEventListener("keyup",h),t.removeEventListener("keydown",h),t.removeEventListener("keypress",h),t!==window&&(window.removeEventListener("blur",f),window.removeEventListener("keyup",h),window.removeEventListener("keydown",h),window.removeEventListener("keypress",h)))},enumerable:!0},buttons:{get:function(){return n},enumerable:!0},x:{get:function(){return i},enumerable:!0},y:{get:function(){return a},enumerable:!0},mods:{get:function(){return o},enumerable:!0}}),m};var r=n(74311)},48956:function(t){var e={left:0,top:0};t.exports=function(t,n,r){n=n||t.currentTarget||t.srcElement,Array.isArray(r)||(r=[0,0]);var i,a=t.clientX||0,o=t.clientY||0,s=(i=n)===window||i===document||i===document.body?e:i.getBoundingClientRect();return r[0]=a-s.left,r[1]=o-s.top,r}},74311:function(t,e){"use strict";function n(t){return t.target||t.srcElement||window}e.buttons=function(t){if("object"===typeof t){if("buttons"in t)return t.buttons;if("which"in t){if(2===(e=t.which))return 4;if(3===e)return 2;if(e>0)return 1<=0)return 1<0&&a(s,n))}catch(v){u.call(new h(n),v)}}}function u(t){var e=this;e.triggered||(e.triggered=!0,e.def&&(e=e.def),e.msg=t,e.state=2,e.chain.length>0&&a(s,e))}function f(t,e,n,r){for(var i=0;i1&&(i*=m=Math.sqrt(m),s*=m);var y=i*i,b=s*s,x=(c==u?-1:1)*Math.sqrt(Math.abs((y*b-y*v*v-b*g*g)/(y*v*v+b*g*g)));x==1/0&&(x=1);var w=x*i*v/s+(t+f)/2,_=x*-s*g/i+(r+h)/2,k=Math.asin(((r-_)/s).toFixed(9)),T=Math.asin(((h-_)/s).toFixed(9));(k=tT&&(k-=2*e),!u&&T>k&&(T-=2*e)}if(Math.abs(T-k)>n){var M=T,A=f,S=h;T=k+n*(u&&T>k?1:-1);var E=a(f=w+i*Math.cos(T),h=_+s*Math.sin(T),i,s,l,0,u,A,S,[T,M,w,_])}var C=Math.tan((T-k)/4),P=4/3*i*C,O=4/3*s*C,L=[2*t-(t+P*Math.sin(k)),2*r-(r-O*Math.cos(k)),f+P*Math.sin(T),h-O*Math.cos(T),f,h];if(d)return L;E&&(L=L.concat(E));for(var I=0;I7&&(n.push(m.splice(0,7)),m.unshift("C"));break;case"S":var b=d,x=p;"C"!=e&&"S"!=e||(b+=b-o,x+=x-l),m=["C",b,x,m[1],m[2],m[3],m[4]];break;case"T":"Q"==e||"T"==e?(f=2*d-f,h=2*p-h):(f=d,h=p),m=i(d,p,f,h,m[1],m[2]);break;case"Q":f=m[1],h=m[2],m=i(d,p,m[1],m[2],m[3],m[4]);break;case"L":m=r(d,p,m[1],m[2]);break;case"H":m=r(d,p,m[1],p);break;case"V":m=r(d,p,d,m[1]);break;case"Z":m=r(d,p,c,u)}e=y,d=m[m.length-2],p=m[m.length-1],m.length>4?(o=m[m.length-4],l=m[m.length-3]):(o=d,l=p),n.push(m)}return n}},56131:function(t){"use strict";var e=Object.getOwnPropertySymbols,n=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;t.exports=function(){try{if(!Object.assign)return!1;var t=new String("abc");if(t[5]="de","5"===Object.getOwnPropertyNames(t)[0])return!1;for(var e={},n=0;n<10;n++)e["_"+String.fromCharCode(n)]=n;if("0123456789"!==Object.getOwnPropertyNames(e).map((function(t){return e[t]})).join(""))return!1;var r={};return"abcdefghijklmnopqrst".split("").forEach((function(t){r[t]=t})),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},r)).join("")}catch(i){return!1}}()?Object.assign:function(t,i){for(var a,o,s=function(t){if(null===t||void 0===t)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(t)}(t),l=1;l0&&!i.call(t,0))for(var g=0;g0)for(var v=0;v=0&&"[object Function]"===e.call(t.callee)),r}},88641:function(t){"use strict";function e(t,e){if("string"!==typeof 
t)return[t];var n=[t];"string"===typeof e||Array.isArray(e)?e={brackets:e}:e||(e={});var r=e.brackets?Array.isArray(e.brackets)?e.brackets:[e.brackets]:["{}","[]","()"],i=e.escape||"___",a=!!e.flat;r.forEach((function(t){var e=new RegExp(["\\",t[0],"[^\\",t[0],"\\",t[1],"]*\\",t[1]].join("")),r=[];function a(e,a,o){var s=n.push(e.slice(t[0].length,-t[1].length))-1;return r.push(s),i+s+i}n.forEach((function(t,r){for(var i,o=0;t!=i;)if(i=t,t=t.replace(e,a),o++>1e4)throw Error("References have circular dependency. Please, check them.");n[r]=t})),r=r.reverse(),n=n.map((function(e){return r.forEach((function(n){e=e.replace(new RegExp("(\\"+i+n+"\\"+i+")","g"),t[0]+"$1"+t[1])})),e}))}));var o=new RegExp("\\"+i+"([0-9]+)\\"+i);return a?n:function t(e,n,r){for(var i,a=[],s=0;i=o.exec(e);){if(s++>1e4)throw Error("Circular references in parenthesis");a.push(e.slice(0,i.index)),a.push(t(n[i[1]],n)),e=e.slice(i.index+i[0].length)}return a.push(e),a}(n[0],n)}function n(t,e){if(e&&e.flat){var n,r=e&&e.escape||"___",i=t[0];if(!i)return"";for(var a=new RegExp("\\"+r+"([0-9]+)\\"+r),o=0;i!=n;){if(o++>1e4)throw Error("Circular references in "+t);n=i,i=i.replace(a,s)}return i}return t.reduce((function t(e,n){return Array.isArray(n)&&(n=n.reduce(t,"")),e+n}),"");function s(e,n){if(null==t[n])throw Error("Reference "+n+"is undefined");return t[n]}}function r(t,r){return Array.isArray(t)?n(t,r):e(t,r)}r.parse=e,r.stringify=n,t.exports=r},18863:function(t,e,n){"use strict";var r=n(71299);t.exports=function(t){var e;return arguments.length>1&&(t=arguments),"string"===typeof t?t=t.split(/\s/).map(parseFloat):"number"===typeof t&&(t=[t]),t.length&&"number"===typeof t[0]?e=1===t.length?{width:t[0],height:t[0],x:0,y:0}:2===t.length?{width:t[0],height:t[1],x:0,y:0}:{x:t[0],y:t[1],width:t[2]-t[0]||0,height:t[3]-t[1]||0}:t&&(e={x:(t=r(t,{left:"x l left Left",top:"y t top Top",width:"w width W Width",height:"h height W Width",bottom:"b bottom Bottom",right:"r right Right"})).left||0,y:t.top||0},null==t.width?t.right?e.width=t.right-e.x:e.width=0:e.width=t.width,null==t.height?t.bottom?e.height=t.bottom-e.y:e.height=0:e.height=t.height),e}},95616:function(t){t.exports=function(t){var i=[];return t.replace(n,(function(t,n,a){var o=n.toLowerCase();for(a=function(t){var e=t.match(r);return e?e.map(Number):[]}(a),"m"==o&&a.length>2&&(i.push([n].concat(a.splice(0,2))),o="l",n="m"==n?"l":"L");;){if(a.length==e[o])return a.unshift(n),i.push(a);if(a.lengtha!==d>a&&i<(h-u)*(a-f)/(d-f)+u&&(o=!o)}return o}},52142:function(t,e,n){var r,i=n(69444),a=n(29023),o=n(87263),s=n(11328),l=n(55968),c=n(10670),u=!1,f=a();function h(t,e,n){var i=r.segments(t),a=r.segments(e),o=n(r.combine(i,a));return r.polygon(o)}r={buildLog:function(t){return!0===t?u=i():!1===t&&(u=!1),!1!==u&&u.list},epsilon:function(t){return f.epsilon(t)},segments:function(t){var e=o(!0,f,u);return 
t.regions.forEach(e.addRegion),{segments:e.calculate(t.inverted),inverted:t.inverted}},combine:function(t,e){return{combined:o(!1,f,u).calculate(t.segments,t.inverted,e.segments,e.inverted),inverted1:t.inverted,inverted2:e.inverted}},selectUnion:function(t){return{segments:l.union(t.combined,u),inverted:t.inverted1||t.inverted2}},selectIntersect:function(t){return{segments:l.intersect(t.combined,u),inverted:t.inverted1&&t.inverted2}},selectDifference:function(t){return{segments:l.difference(t.combined,u),inverted:t.inverted1&&!t.inverted2}},selectDifferenceRev:function(t){return{segments:l.differenceRev(t.combined,u),inverted:!t.inverted1&&t.inverted2}},selectXor:function(t){return{segments:l.xor(t.combined,u),inverted:t.inverted1!==t.inverted2}},polygon:function(t){return{regions:s(t.segments,f,u),inverted:t.inverted}},polygonFromGeoJSON:function(t){return c.toPolygon(r,t)},polygonToGeoJSON:function(t){return c.fromPolygon(r,f,t)},union:function(t,e){return h(t,e,r.selectUnion)},intersect:function(t,e){return h(t,e,r.selectIntersect)},difference:function(t,e){return h(t,e,r.selectDifference)},differenceRev:function(t,e){return h(t,e,r.selectDifferenceRev)},xor:function(t,e){return h(t,e,r.selectXor)}},"object"===typeof window&&(window.PolyBool=r),t.exports=r},69444:function(t){t.exports=function(){var t,e=0,n=!1;function r(e,n){return t.list.push({type:e,data:n?JSON.parse(JSON.stringify(n)):void 0}),t}return t={list:[],segmentId:function(){return e++},checkIntersection:function(t,e){return r("check",{seg1:t,seg2:e})},segmentChop:function(t,e){return r("div_seg",{seg:t,pt:e}),r("chop",{seg:t,pt:e})},statusRemove:function(t){return r("pop_seg",{seg:t})},segmentUpdate:function(t){return r("seg_update",{seg:t})},segmentNew:function(t,e){return r("new_seg",{seg:t,primary:e})},segmentRemove:function(t){return r("rem_seg",{seg:t})},tempStatus:function(t,e,n){return r("temp_status",{seg:t,above:e,below:n})},rewind:function(t){return r("rewind",{seg:t})},status:function(t,e,n){return r("status",{seg:t,above:e,below:n})},vert:function(e){return e===n?t:(n=e,r("vert",{x:e}))},log:function(t){return"string"!==typeof t&&(t=JSON.stringify(t,!1," ")),r("log",{txt:t})},reset:function(){return r("reset")},selected:function(t){return r("selected",{segs:t})},chainStart:function(t){return r("chain_start",{seg:t})},chainRemoveHead:function(t,e){return r("chain_rem_head",{index:t,pt:e})},chainRemoveTail:function(t,e){return r("chain_rem_tail",{index:t,pt:e})},chainNew:function(t,e){return r("chain_new",{pt1:t,pt2:e})},chainMatch:function(t){return r("chain_match",{index:t})},chainClose:function(t){return r("chain_close",{index:t})},chainAddHead:function(t,e){return r("chain_add_head",{index:t,pt:e})},chainAddTail:function(t,e){return r("chain_add_tail",{index:t,pt:e})},chainConnect:function(t,e){return r("chain_con",{index1:t,index2:e})},chainReverse:function(t){return r("chain_rev",{index:t})},chainJoin:function(t,e){return r("chain_join",{index1:t,index2:e})},done:function(){return r("done")}}}},29023:function(t){t.exports=function(t){"number"!==typeof t&&(t=1e-10);var e={epsilon:function(e){return"number"===typeof e&&(t=e),t},pointAboveOrOnLine:function(e,n,r){var i=n[0],a=n[1],o=r[0],s=r[1],l=e[0];return(o-i)*(e[1]-a)-(s-a)*(l-i)>=-t},pointBetween:function(e,n,r){var i=e[1]-n[1],a=r[0]-n[0],o=e[0]-n[0],s=r[1]-n[1],l=o*a+i*s;return!(l-t)},pointsSameX:function(e,n){return Math.abs(e[0]-n[0])t!=o-i>t&&(a-c)*(i-u)/(o-u)+c-r>t&&(s=!s),a=c,o=u}return s}};return e}},10670:function(t){var 
e={toPolygon:function(t,e){function n(e){if(e.length<=0)return t.segments({inverted:!1,regions:[]});function n(e){var n=e.slice(0,e.length-1);return t.segments({inverted:!1,regions:[n]})}for(var r=n(e[0]),i=1;i0}))}function u(t,r){var i=t.seg,a=r.seg,o=i.start,s=i.end,c=a.start,u=a.end;n&&n.checkIntersection(i,a);var f=e.linesIntersect(o,s,c,u);if(!1===f){if(!e.pointsCollinear(o,s,c))return!1;if(e.pointsSame(o,u)||e.pointsSame(s,c))return!1;var h=e.pointsSame(o,c),d=e.pointsSame(s,u);if(h&&d)return r;var p=!h&&e.pointBetween(o,c,u),g=!d&&e.pointBetween(s,c,u);if(h)return g?l(r,s):l(t,u),r;p&&(d||(g?l(r,s):l(t,u)),l(r,o))}else 0===f.alongA&&(-1===f.alongB?l(t,c):0===f.alongB?l(t,f.pt):1===f.alongB&&l(t,u)),0===f.alongB&&(-1===f.alongA?l(r,o):0===f.alongA?l(r,f.pt):1===f.alongA&&l(r,s));return!1}for(var f=[],h=function(){if(d=a.getHead(),n&&n.vert(d.pt[0]),d.isStart){if(n&&n.segmentNew(d.seg,d.primary),p=c(d),g=p.before?p.before.ev:null,v=p.after?p.after.ev:null,n&&n.tempStatus(d.seg,!!g&&g.seg,!!v&&v.seg),(m=function(){if(g){var t=u(d,g);if(t)return t}return!!v&&u(d,v)}())&&(t?(y=null===d.seg.myFill.below||d.seg.myFill.above!==d.seg.myFill.below)&&(m.seg.myFill.above=!m.seg.myFill.above):m.seg.otherFill=d.seg.myFill,n&&n.segmentUpdate(m.seg),d.other.remove(),d.remove()),a.getHead()!==d)return n&&n.rewind(d.seg),"continue";t?(y=null===d.seg.myFill.below||d.seg.myFill.above!==d.seg.myFill.below,d.seg.myFill.below=v?v.seg.myFill.above:i,d.seg.myFill.above=y?!d.seg.myFill.below:d.seg.myFill.below):null===d.seg.otherFill&&(b=v?d.primary===v.primary?v.seg.otherFill.above:v.seg.myFill.above:d.primary?o:i,d.seg.otherFill={above:b,below:b}),n&&n.status(d.seg,!!g&&g.seg,!!v&&v.seg),d.other.status=p.insert(r.node({ev:d}))}else{if(null===(x=d.status))throw new Error("PolyBool: Zero-length segment detected; your epsilon is probably too small or too large");s.exists(x.prev)&&s.exists(x.next)&&u(x.prev.ev,x.next.ev),n&&n.statusRemove(x.ev.seg),x.remove(),d.primary||(w=d.seg.myFill,d.seg.myFill=d.seg.otherFill,d.seg.otherFill=w),f.push(d.seg)}a.getHead().remove()};!a.isEmpty();){var d,p,g,v,m,y,b,x,w;h()}return n&&n.done(),f}return t?{addRegion:function(t){for(var r,i,a,o=t[t.length-1],l=0;l0&&!this.aborted;){var n=this.ifds_to_read.shift();n.offset&&this.scan_ifd(n.id,n.offset,t)}},r.prototype.read_uint16=function(t){var n=this.input;if(t+2>n.length)throw e("unexpected EOF","EBADDATA");return this.big_endian?256*n[t]+n[t+1]:n[t]+256*n[t+1]},r.prototype.read_uint32=function(t){var n=this.input;if(t+4>n.length)throw e("unexpected EOF","EBADDATA");return this.big_endian?16777216*n[t]+65536*n[t+1]+256*n[t+2]+n[t+3]:n[t]+256*n[t+1]+65536*n[t+2]+16777216*n[t+3]},r.prototype.is_subifd_link=function(t,e){return 0===t&&34665===e||0===t&&34853===e||34665===t&&40965===e},r.prototype.exif_format_length=function(t){switch(t){case 1:case 2:case 6:case 7:return 1;case 3:case 8:return 2;case 4:case 9:case 11:return 4;case 5:case 10:case 12:return 8;default:return 0}},r.prototype.exif_format_read=function(t,e){var n;switch(t){case 1:case 2:return n=this.input[e];case 6:return(n=this.input[e])|33554430*(128&n);case 3:return n=this.read_uint16(e);case 8:return(n=this.read_uint16(e))|131070*(32768&n);case 4:return n=this.read_uint32(e);case 9:return 0|(n=this.read_uint32(e));default:return null}},r.prototype.scan_ifd=function(t,r,i){var a=this.read_uint16(r);r+=2;for(var o=0;othis.input.length)throw e("unexpected EOF","EBADDATA");for(var 
p=[],g=h,v=0;v0&&(this.ifds_to_read.push({id:s,offset:p[0]}),d=!0),!1===i({is_big_endian:this.big_endian,ifd:t,tag:s,format:l,count:c,entry_offset:r+this.start,data_length:f,data_offset:h+this.start,value:p,is_subifd_link:d}))return void(this.aborted=!0);r+=12}0===t&&this.ifds_to_read.push({id:1,offset:this.read_uint32(r)})},t.exports.ExifParser=r,t.exports.get_orientation=function(t){var e=0;try{return new r(t,0,t.length).each((function(t){if(0===t.ifd&&274===t.tag&&Array.isArray(t.value))return e=t.value[0],!1})),e}catch(n){return-1}}},76767:function(t,e,n){"use strict";var r=n(14847).n8,i=n(14847).Ag;function a(t,e){if(t.length<4+e)return null;var n=i(t,e);return t.length>4&15,i=15&t[4],a=t[5]>>4&15,o=r(t,6),l=8,c=0;ce.width||t.width===e.width&&t.height>e.height?t:e})),n=t.reduce((function(t,e){return t.height>e.height||t.height===e.height&&t.width>e.width?t:e}));return e.width>n.height||e.width===n.height&&e.height>n.width?e:n}(e.sizes),r=1;e.transforms.forEach((function(t){var e={1:6,2:5,3:8,4:7,5:4,6:3,7:2,8:1},n={1:4,2:3,3:2,4:1,5:6,6:5,7:8,8:7};if("imir"===t.type&&(r=0===t.value?n[r]:e[r=e[r=n[r]]]),"irot"===t.type)for(var i=0;i1&&(h.variants=f.variants),f.orientation&&(h.orientation=f.orientation),f.exif_location&&f.exif_location.offset+f.exif_location.length<=t.length){var d=a(t,f.exif_location.offset),p=t.slice(f.exif_location.offset+d+4,f.exif_location.offset+f.exif_location.length),g=s.get_orientation(p);g>0&&(h.orientation=g)}return h}}}}}}},2504:function(t,e,n){"use strict";var r=n(14847).eG,i=n(14847).OF,a=n(14847).mP,o=r("BM");t.exports=function(t){if(!(t.length<26)&&i(t,0,o))return{width:a(t,18),height:a(t,22),type:"bmp",mime:"image/bmp",wUnits:"px",hUnits:"px"}}},47342:function(t,e,n){"use strict";var r=n(14847).eG,i=n(14847).OF,a=n(14847).mP,o=r("GIF87a"),s=r("GIF89a");t.exports=function(t){if(!(t.length<10)&&(i(t,0,o)||i(t,0,s)))return{width:a(t,6),height:a(t,8),type:"gif",mime:"image/gif",wUnits:"px",hUnits:"px"}}},31355:function(t,e,n){"use strict";var r=n(14847).mP;t.exports=function(t){var e=r(t,0),n=r(t,2),i=r(t,4);if(0===e&&1===n&&i){for(var a=[],o={width:0,height:0},s=0;so.width||c>o.height)&&(o=u)}return{width:o.width,height:o.height,variants:a,type:"ico",mime:"image/x-icon",wUnits:"px",hUnits:"px"}}}},54261:function(t,e,n){"use strict";var r=n(14847).n8,i=n(14847).eG,a=n(14847).OF,o=n(71371),s=i("Exif\0\0");t.exports=function(t){if(!(t.length<2)&&255===t[0]&&216===t[1]&&255===t[2])for(var e=2;;){for(;;){if(t.length-e<2)return;if(255===t[e++])break}for(var n,i,l=t[e++];255===l;)l=t[e++];if(208<=l&&l<=217||1===l)n=0;else{if(!(192<=l&&l<=254))return;if(t.length-e<2)return;n=r(t,e)-2,e+=2}if(217===l||218===l)return;if(225===l&&n>=10&&a(t,e,s)&&(i=o.get_orientation(t.slice(e+6,e+n))),n>=5&&192<=l&&l<=207&&196!==l&&200!==l&&204!==l){if(t.length-e0&&(c.orientation=i),c}e+=n}}},6303:function(t,e,n){"use strict";var r=n(14847).eG,i=n(14847).OF,a=n(14847).Ag,o=r("\x89PNG\r\n\x1a\n"),s=r("IHDR");t.exports=function(t){if(!(t.length<24)&&i(t,0,o)&&i(t,12,s))return{width:a(t,16),height:a(t,20),type:"png",mime:"image/png",wUnits:"px",hUnits:"px"}}},38689:function(t,e,n){"use strict";var r=n(14847).eG,i=n(14847).OF,a=n(14847).Ag,o=r("8BPS\0\x01");t.exports=function(t){if(!(t.length<22)&&i(t,0,o))return{width:a(t,18),height:a(t,14),type:"psd",mime:"image/vnd.adobe.photoshop",wUnits:"px",hUnits:"px"}}},6881:function(t){"use strict";function e(t){return"number"===typeof t&&isFinite(t)&&t>0}var 
n=/<[-_.:a-zA-Z0-9][^>]*>/,r=/^<([-_.:a-zA-Z0-9]+:)?svg\s/,i=/[^-]\bwidth="([^%]+?)"|[^-]\bwidth='([^%]+?)'/,a=/\bheight="([^%]+?)"|\bheight='([^%]+?)'/,o=/\bview[bB]ox="(.+?)"|\bview[bB]ox='(.+?)'/,s=/in$|mm$|cm$|pt$|pc$|px$|em$|ex$/;function l(t){return s.test(t)?t.match(s)[0]:"px"}t.exports=function(t){if(function(t){var e,n=0,r=t.length;for(239===t[0]&&187===t[1]&&191===t[2]&&(n=3);n>14&16383),type:"webp",mime:"image/webp",wUnits:"px",hUnits:"px"}}}function h(t,e){return{width:1+(t[e+6]<<16|t[e+5]<<8|t[e+4]),height:1+(t[e+9]<t.length)){for(;e+8=10?n=n||u(t,e+8):"VP8L"===d&&p>=9?n=n||f(t,e+8):"VP8X"===d&&p>=10?n=n||h(t,e+8):"EXIF"===d&&(r=s.get_orientation(t.slice(e+8,e+8+p)),e=1/0),e+=8+p}else e++;if(n)return r>0&&(n.orientation=r),n}}}},91497:function(t,e,n){"use strict";t.exports={avif:n(24461),bmp:n(2504),gif:n(47342),ico:n(31355),jpeg:n(54261),png:n(6303),psd:n(38689),svg:n(6881),tiff:n(66278),webp:n(90784)}},33575:function(t,e,n){"use strict";var r=n(91497);t.exports=function(t){return function(t){for(var e=Object.keys(r),n=0;n1)for(var n=1;n1&&(t.scaleRatio=[t.scale[0]*t.viewport.width,t.scale[1]*t.viewport.height],n(t),t.after&&t.after(t))}function k(t){if(t){null!=t.length?"number"===typeof t[0]&&(t=[{positions:t}]):Array.isArray(t)||(t=[t]);var e=0,n=0;if(x.groups=b=t.map((function(t,c){var u=b[c];return t?("function"===typeof t?t={after:t}:"number"===typeof t[0]&&(t={positions:t}),t=o(t,{color:"color colors fill",capSize:"capSize cap capsize cap-size",lineWidth:"lineWidth line-width width line thickness",opacity:"opacity alpha",range:"range dataBox",viewport:"viewport viewBox",errors:"errors error",positions:"positions position data points"}),u||(b[c]=u={id:c,scale:null,translate:null,scaleFract:null,translateFract:null,draw:!0},t=s({},y,t)),a(u,t,[{lineWidth:function(t){return.5*+t},capSize:function(t){return.5*+t},opacity:parseFloat,errors:function(t){return t=l(t),n+=t.length,t},positions:function(t,n){return t=l(t,"float64"),n.count=Math.floor(t.length/2),n.bounds=r(t,2),n.offset=e,e+=n.count,t}},{color:function(t,e){var n=e.count;if(t||(t="transparent"),!Array.isArray(t)||"number"===typeof t[0]){var r=t;t=Array(n);for(var a=0;a 0. && baClipping < length(normalWidth * endBotJoin)) {\n\t\t//handle miter clipping\n\t\tbTopCoord -= normalWidth * endTopJoin;\n\t\tbTopCoord += normalize(endTopJoin * normalWidth) * baClipping;\n\t}\n\n\tif (nextReverse) {\n\t\t//make join rectangular\n\t\tvec2 miterShift = normalWidth * endJoinDirection * miterLimit * .5;\n\t\tfloat normalAdjust = 1. - min(miterLimit / endMiterRatio, 1.);\n\t\tbBotCoord = bCoord + miterShift - normalAdjust * normalWidth * currNormal * .5;\n\t\tbTopCoord = bCoord + miterShift + normalAdjust * normalWidth * currNormal * .5;\n\t}\n\telse if (!prevReverse && abClipping > 0. 
&& abClipping < length(normalWidth * startBotJoin)) {\n\t\t//handle miter clipping\n\t\taBotCoord -= normalWidth * startBotJoin;\n\t\taBotCoord += normalize(startBotJoin * normalWidth) * abClipping;\n\t}\n\n\tvec2 aTopPosition = (aTopCoord) * adjustedScale + translate;\n\tvec2 aBotPosition = (aBotCoord) * adjustedScale + translate;\n\n\tvec2 bTopPosition = (bTopCoord) * adjustedScale + translate;\n\tvec2 bBotPosition = (bBotCoord) * adjustedScale + translate;\n\n\t//position is normalized 0..1 coord on the screen\n\tvec2 position = (aTopPosition * lineTop + aBotPosition * lineBot) * lineStart + (bTopPosition * lineTop + bBotPosition * lineBot) * lineEnd;\n\n\tstartCoord = aCoord * scaleRatio + translate * viewport.zw + viewport.xy;\n\tendCoord = bCoord * scaleRatio + translate * viewport.zw + viewport.xy;\n\n\tgl_Position = vec4(position * 2.0 - 1.0, depth, 1);\n\n\tenableStartMiter = step(dot(currTangent, prevTangent), .5);\n\tenableEndMiter = step(dot(currTangent, nextTangent), .5);\n\n\t//bevel miter cutoffs\n\tif (miterMode == 1.) {\n\t\tif (enableStartMiter == 1.) {\n\t\t\tvec2 startMiterWidth = vec2(startJoinDirection) * thickness * miterLimit * .5;\n\t\t\tstartCutoff = vec4(aCoord, aCoord);\n\t\t\tstartCutoff.zw += vec2(-startJoinDirection.y, startJoinDirection.x) / scaleRatio;\n\t\t\tstartCutoff = startCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tstartCutoff += viewport.xyxy;\n\t\t\tstartCutoff += startMiterWidth.xyxy;\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tvec2 endMiterWidth = vec2(endJoinDirection) * thickness * miterLimit * .5;\n\t\t\tendCutoff = vec4(bCoord, bCoord);\n\t\t\tendCutoff.zw += vec2(-endJoinDirection.y, endJoinDirection.x) / scaleRatio;\n\t\t\tendCutoff = endCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tendCutoff += viewport.xyxy;\n\t\t\tendCutoff += endMiterWidth.xyxy;\n\t\t}\n\t}\n\n\t//round miter cutoffs\n\telse if (miterMode == 2.) {\n\t\tif (enableStartMiter == 1.) {\n\t\t\tvec2 startMiterWidth = vec2(startJoinDirection) * thickness * abs(dot(startJoinDirection, currNormal)) * .5;\n\t\t\tstartCutoff = vec4(aCoord, aCoord);\n\t\t\tstartCutoff.zw += vec2(-startJoinDirection.y, startJoinDirection.x) / scaleRatio;\n\t\t\tstartCutoff = startCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tstartCutoff += viewport.xyxy;\n\t\t\tstartCutoff += startMiterWidth.xyxy;\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tvec2 endMiterWidth = vec2(endJoinDirection) * thickness * abs(dot(endJoinDirection, currNormal)) * .5;\n\t\t\tendCutoff = vec4(bCoord, bCoord);\n\t\t\tendCutoff.zw += vec2(-endJoinDirection.y, endJoinDirection.x) / scaleRatio;\n\t\t\tendCutoff = endCutoff * scaleRatio.xyxy + translate.xyxy * viewport.zwzw;\n\t\t\tendCutoff += viewport.xyxy;\n\t\t\tendCutoff += endMiterWidth.xyxy;\n\t\t}\n\t}\n}\n"]),frag:o(["precision highp float;\n#define GLSLIFY 1\n\nuniform float dashLength, pixelRatio, thickness, opacity, id, miterMode;\nuniform sampler2D dashTexture;\n\nvarying vec4 fragColor;\nvarying vec2 tangent;\nvarying vec4 startCutoff, endCutoff;\nvarying vec2 startCoord, endCoord;\nvarying float enableStartMiter, enableEndMiter;\n\nfloat distToLine(vec2 p, vec2 a, vec2 b) {\n\tvec2 diff = b - a;\n\tvec2 perp = normalize(vec2(-diff.y, diff.x));\n\treturn dot(p - a, perp);\n}\n\nvoid main() {\n\tfloat alpha = 1., distToStart, distToEnd;\n\tfloat cutoff = thickness * .5;\n\n\t//bevel miter\n\tif (miterMode == 1.) {\n\t\tif (enableStartMiter == 1.) 
{\n\t\t\tdistToStart = distToLine(gl_FragCoord.xy, startCutoff.xy, startCutoff.zw);\n\t\t\tif (distToStart < -1.) {\n\t\t\t\tdiscard;\n\t\t\t\treturn;\n\t\t\t}\n\t\t\talpha *= min(max(distToStart + 1., 0.), 1.);\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tdistToEnd = distToLine(gl_FragCoord.xy, endCutoff.xy, endCutoff.zw);\n\t\t\tif (distToEnd < -1.) {\n\t\t\t\tdiscard;\n\t\t\t\treturn;\n\t\t\t}\n\t\t\talpha *= min(max(distToEnd + 1., 0.), 1.);\n\t\t}\n\t}\n\n\t// round miter\n\telse if (miterMode == 2.) {\n\t\tif (enableStartMiter == 1.) {\n\t\t\tdistToStart = distToLine(gl_FragCoord.xy, startCutoff.xy, startCutoff.zw);\n\t\t\tif (distToStart < 0.) {\n\t\t\t\tfloat radius = length(gl_FragCoord.xy - startCoord);\n\n\t\t\t\tif(radius > cutoff + .5) {\n\t\t\t\t\tdiscard;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\talpha -= smoothstep(cutoff - .5, cutoff + .5, radius);\n\t\t\t}\n\t\t}\n\n\t\tif (enableEndMiter == 1.) {\n\t\t\tdistToEnd = distToLine(gl_FragCoord.xy, endCutoff.xy, endCutoff.zw);\n\t\t\tif (distToEnd < 0.) {\n\t\t\t\tfloat radius = length(gl_FragCoord.xy - endCoord);\n\n\t\t\t\tif(radius > cutoff + .5) {\n\t\t\t\t\tdiscard;\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\talpha -= smoothstep(cutoff - .5, cutoff + .5, radius);\n\t\t\t}\n\t\t}\n\t}\n\n\tfloat t = fract(dot(tangent, gl_FragCoord.xy) / dashLength) * .5 + .25;\n\tfloat dash = texture2D(dashTexture, vec2(t, .5)).r;\n\n\tgl_FragColor = fragColor;\n\tgl_FragColor.a *= alpha * opacity * dash;\n}\n"]),attributes:{lineEnd:{buffer:n,divisor:0,stride:8,offset:0},lineTop:{buffer:n,divisor:0,stride:8,offset:4},aColor:{buffer:t.prop("colorBuffer"),stride:4,offset:0,divisor:1},bColor:{buffer:t.prop("colorBuffer"),stride:4,offset:4,divisor:1},prevCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:0,divisor:1},aCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:8,divisor:1},bCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:16,divisor:1},nextCoord:{buffer:t.prop("positionBuffer"),stride:8,offset:24,divisor:1}}},r))}catch(s){e=i}return{fill:t({primitive:"triangle",elements:function(t,e){return e.triangles},offset:0,vert:o(["precision highp float;\n#define GLSLIFY 1\n\nattribute vec2 position, positionFract;\n\nuniform vec4 color;\nuniform vec2 scale, scaleFract, translate, translateFract;\nuniform float pixelRatio, id;\nuniform vec4 viewport;\nuniform float opacity;\n\nvarying vec4 fragColor;\n\nconst float MAX_LINES = 256.;\n\nvoid main() {\n\tfloat depth = (MAX_LINES - 4. 
- id) / (MAX_LINES);\n\n\tvec2 position = position * scale + translate\n + positionFract * scale + translateFract\n + position * scaleFract\n + positionFract * scaleFract;\n\n\tgl_Position = vec4(position * 2.0 - 1.0, depth, 1);\n\n\tfragColor = color / 255.;\n\tfragColor.a *= opacity;\n}\n"]),frag:o(["precision highp float;\n#define GLSLIFY 1\n\nvarying vec4 fragColor;\n\nvoid main() {\n\tgl_FragColor = fragColor;\n}\n"]),uniforms:{scale:t.prop("scale"),color:t.prop("fill"),scaleFract:t.prop("scaleFract"),translateFract:t.prop("translateFract"),translate:t.prop("translate"),opacity:t.prop("opacity"),pixelRatio:t.context("pixelRatio"),id:t.prop("id"),viewport:function(t,e){return[e.viewport.x,e.viewport.y,t.viewportWidth,t.viewportHeight]}},attributes:{position:{buffer:t.prop("positionBuffer"),stride:8,offset:8},positionFract:{buffer:t.prop("positionFractBuffer"),stride:8,offset:8}},blend:r.blend,depth:{enable:!1},scissor:r.scissor,stencil:r.stencil,viewport:r.viewport}),rect:i,miter:e}},m.defaults={dashes:null,join:"miter",miterLimit:1,thickness:10,cap:"square",color:"black",opacity:1,overlay:!1,viewport:null,range:null,close:!1,fill:null},m.prototype.render=function(){for(var t,e=[],n=arguments.length;n--;)e[n]=arguments[n];e.length&&(t=this).update.apply(t,e),this.draw()},m.prototype.draw=function(){for(var t=this,e=[],n=arguments.length;n--;)e[n]=arguments[n];return(e.length?e:this.passes).forEach((function(e,n){var r;if(e&&Array.isArray(e))return(r=t).draw.apply(r,e);"number"===typeof e&&(e=t.passes[e]),e&&e.count>1&&e.opacity&&(t.regl._refresh(),e.fill&&e.triangles&&e.triangles.length>2&&t.shaders.fill(e),e.thickness&&(e.scale[0]*e.viewport.width>m.precisionThreshold||e.scale[1]*e.viewport.height>m.precisionThreshold||"rect"===e.join||!e.join&&(e.thickness<=2||e.count>=m.maxPoints)?t.shaders.rect(e):t.shaders.miter(e)))})),this},m.prototype.update=function(t){var e=this;if(t){null!=t.length?"number"===typeof t[0]&&(t=[{positions:t}]):Array.isArray(t)||(t=[t]);var n=this.regl,o=this.gl;if(t.forEach((function(t,f){var p=e.passes[f];if(void 0!==t)if(null!==t){if("number"===typeof t[0]&&(t={positions:t}),t=s(t,{positions:"positions points data coords",thickness:"thickness lineWidth lineWidths line-width linewidth width stroke-width strokewidth strokeWidth",join:"lineJoin linejoin join type mode",miterLimit:"miterlimit miterLimit",dashes:"dash dashes dasharray dash-array dashArray",color:"color colour stroke colors colours stroke-color strokeColor",fill:"fill fill-color fillColor",opacity:"alpha opacity",overlay:"overlay crease overlap intersect",close:"closed close closed-path closePath",range:"range dataBox",viewport:"viewport viewBox",hole:"holes hole hollow",splitNull:"splitNull"}),p||(e.passes[f]=p={id:f,scale:null,scaleFract:null,translate:null,translateFract:null,count:0,hole:[],depth:0,dashLength:1,dashTexture:n.texture({channels:1,data:new Uint8Array([255]),width:1,height:1,mag:"linear",min:"linear"}),colorBuffer:n.buffer({usage:"dynamic",type:"uint8",data:new Uint8Array}),positionBuffer:n.buffer({usage:"dynamic",type:"float",data:new Uint8Array}),positionFractBuffer:n.buffer({usage:"dynamic",type:"float",data:new Uint8Array})},t=a({},m.defaults,t)),null!=t.thickness&&(p.thickness=parseFloat(t.thickness)),null!=t.opacity&&(p.opacity=parseFloat(t.opacity)),null!=t.miterLimit&&(p.miterLimit=parseFloat(t.miterLimit)),null!=t.overlay&&(p.overlay=!!t.overlay,f=R}));(L=L.slice(0,z)).push(R)}for(var N=function(t){var 
e=T.slice(2*D,2*L[t]).concat(R?T.slice(2*R):[]),n=(p.hole||[]).map((function(e){return e-R+(L[t]-D)})),r=c(e,n);r=r.map((function(e){return e+D+(e+Dt.length)&&(e=t.length);for(var n=0,r=new Array(e);n 1.0 + delta) {\n\t\tdiscard;\n\t}\n\n\talpha -= smoothstep(1.0 - delta, 1.0 + delta, radius);\n\n\tfloat borderRadius = fragBorderRadius;\n\tfloat ratio = smoothstep(borderRadius - delta, borderRadius + delta, radius);\n\tvec4 color = mix(fragColor, fragBorderColor, ratio);\n\tcolor.a *= alpha * opacity;\n\tgl_FragColor = color;\n}\n"]),c.vert=h(["precision highp float;\n#define GLSLIFY 1\n\nattribute float x, y, xFract, yFract;\nattribute float size, borderSize;\nattribute vec4 colorId, borderColorId;\nattribute float isActive;\n\nuniform bool constPointSize;\nuniform float pixelRatio;\nuniform vec2 paletteSize, scale, scaleFract, translate, translateFract;\nuniform sampler2D paletteTexture;\n\nconst float maxSize = 100.;\n\nvarying vec4 fragColor, fragBorderColor;\nvarying float fragBorderRadius, fragWidth;\n\nfloat pointSizeScale = (constPointSize) ? 2. : pixelRatio;\n\nbool isDirect = (paletteSize.x < 1.);\n\nvec4 getColor(vec4 id) {\n return isDirect ? id / 255. : texture2D(paletteTexture,\n vec2(\n (id.x + .5) / paletteSize.x,\n (id.y + .5) / paletteSize.y\n )\n );\n}\n\nvoid main() {\n // ignore inactive points\n if (isActive == 0.) return;\n\n vec2 position = vec2(x, y);\n vec2 positionFract = vec2(xFract, yFract);\n\n vec4 color = getColor(colorId);\n vec4 borderColor = getColor(borderColorId);\n\n float size = size * maxSize / 255.;\n float borderSize = borderSize * maxSize / 255.;\n\n gl_PointSize = (size + borderSize) * pointSizeScale;\n\n vec2 pos = (position + translate) * scale\n + (positionFract + translateFract) * scale\n + (position + translate) * scaleFract\n + (positionFract + translateFract) * scaleFract;\n\n gl_Position = vec4(pos * 2. - 1., 0., 1.);\n\n fragBorderRadius = 1. - 2. * borderSize / (size + borderSize);\n fragColor = color;\n fragBorderColor = borderColor.a == 0. || borderSize == 0. ? vec4(color.rgb, 0.) : borderColor;\n fragWidth = 1. 
/ gl_PointSize;\n}\n"]),v&&(c.frag=c.frag.replace("smoothstep","smoothStep"),l.frag=l.frag.replace("smoothstep","smoothStep")),this.drawCircle=t(c)}x.defaults={color:"black",borderColor:"transparent",borderSize:0,size:12,opacity:1,marker:void 0,viewport:null,range:null,pixelSize:null,count:0,offset:0,bounds:null,positions:[],snap:1e4},x.prototype.render=function(){return arguments.length&&this.update.apply(this,arguments),this.draw(),this},x.prototype.draw=function(){for(var t=this,e=arguments.length,n=new Array(e),r=0;rr)?e.tree=u(t,{bounds:f}):r&&r.length&&(e.tree=r),e.tree){var h={primitive:"points",usage:"static",data:e.tree,type:"uint32"};e.elements?e.elements(h):e.elements=o.elements(h)}var d=m.float32(t);return i({data:d,usage:"dynamic"}),a({data:m.fract32(t,d),usage:"dynamic"}),s({data:new Uint8Array(c),type:"uint8",usage:"stream"}),t}},{marker:function(e,n,r){var i=n.activation;if(i.forEach((function(t){return t&&t.destroy&&t.destroy()})),i.length=0,e&&"number"!==typeof e[0]){for(var a=[],s=0,l=Math.min(e.length,n.count);s=0)return a;if(t instanceof Uint8Array||t instanceof Uint8ClampedArray)e=t;else{e=new Uint8Array(t.length);for(var o=0,s=t.length;o4*r&&(this.tooManyColors=!0),this.updatePalette(n),1===i.length?i[0]:i},x.prototype.updatePalette=function(t){if(!this.tooManyColors){var e=this.maxColors,n=this.paletteTexture,r=Math.ceil(.25*t.length/e);if(r>1)for(var i=.25*(t=t.slice()).length%e;i2?(s[0],s[2],r=s[1],i=s[3]):s.length?(r=s[0],i=s[1]):(s.x,r=s.y,s.x,s.width,i=s.y+s.height),l.length>2?(a=l[0],o=l[2],l[1],l[3]):l.length?(a=l[0],o=l[1]):(a=l.x,l.y,o=l.x+l.width,l.y,l.height),[a,r,o,i]}function d(t){if("number"===typeof t)return[t,t,t,t];if(2===t.length)return[t[0],t[1],t[0],t[1]];var e=l(t);return[e.x,e.y,e.x+e.width,e.y+e.height]}t.exports=u,u.prototype.render=function(){for(var t,e=this,n=[],r=arguments.length;r--;)n[r]=arguments[r];return n.length&&(t=this).update.apply(t,n),this.regl.attributes.preserveDrawingBuffer?this.draw():(this.dirty?null==this.planned&&(this.planned=o((function(){e.draw(),e.dirty=!0,e.planned=null}))):(this.draw(),this.dirty=!0,o((function(){e.dirty=!1}))),this)},u.prototype.update=function(){for(var t,e=[],n=arguments.length;n--;)e[n]=arguments[n];if(e.length){for(var r=0;rT))&&(s.lower||!(k>>=e))<<3,(e|=n=(15<(t>>>=n))<<2)|(n=(3<(t>>>=n))<<1)|t>>>n>>1}function d(){function t(t){t:{for(var e=16;268435456>=e;e*=16)if(t<=e){t=e;break t}t=0}return 0<(e=n[h(t)>>2]).length?e.pop():new ArrayBuffer(t)}function e(t){n[h(t.byteLength)>>2].push(t)}var n=f(8,(function(){return[]}));return{alloc:t,free:e,allocType:function(e,n){var r=null;switch(e){case 5120:r=new Int8Array(t(n),0,n);break;case 5121:r=new Uint8Array(t(n),0,n);break;case 5122:r=new Int16Array(t(2*n),0,n);break;case 5123:r=new Uint16Array(t(2*n),0,n);break;case 5124:r=new Int32Array(t(4*n),0,n);break;case 5125:r=new Uint32Array(t(4*n),0,n);break;case 5126:r=new Float32Array(t(4*n),0,n);break;default:return null}return r.length!==n?r.subarray(0,n):r},freeType:function(t){e(t.buffer)}}}function p(t){return!!t&&"object"===typeof t&&Array.isArray(t.shape)&&Array.isArray(t.stride)&&"number"===typeof t.offset&&t.shape.length===t.stride.length&&(Array.isArray(t.data)||ot(t.data))}function g(t,e,n,r,i,a){for(var o=0;o(i=s)&&(i=r.buffer.byteLength,5123===u?i>>=1:5125===u&&(i>>=2)),r.vertCount=i,i=o,0>o&&(i=4,1===(o=r.buffer.dimension)&&(i=0),2===o&&(i=1),3===o&&(i=4)),r.primType=i}function o(t){r.elementsCount--,delete s[t.id],t.buffer.destroy(),t.buffer=null}var 
s={},l=0,c={uint8:5121,uint16:5123};e.oes_element_index_uint&&(c.uint32=5125),i.prototype.bind=function(){this.buffer.bind()};var u=[];return{create:function(t,e){function s(t){if(t)if("number"===typeof t)l(t),u.primType=4,u.vertCount=0|t,u.type=5121;else{var e=null,n=35044,r=-1,i=-1,o=0,f=0;Array.isArray(t)||ot(t)||p(t)?e=t:("data"in t&&(e=t.data),"usage"in t&&(n=ft[t.usage]),"primitive"in t&&(r=gt[t.primitive]),"count"in t&&(i=0|t.count),"type"in t&&(f=c[t.type]),"length"in t?o=0|t.length:(o=i,5123===f||5122===f?o*=2:5125!==f&&5124!==f||(o*=4))),a(u,e,n,r,i,o,f)}else l(),u.primType=4,u.vertCount=0,u.type=5121;return s}var l=n.create(null,34963,!0),u=new i(l._buffer);return r.elementsCount++,s(t),s._reglType="elements",s._elements=u,s.subdata=function(t,e){return l.subdata(t,e),s},s.destroy=function(){o(u)},s},createStream:function(t){var e=u.pop();return e||(e=new i(n.create(null,34963,!0,!1)._buffer)),a(e,t,35040,-1,-1,0,0),e},destroyStream:function(t){u.push(t)},getElements:function(t){return"function"===typeof t&&t._elements instanceof i?t._elements:null},clear:function(){st(s).forEach(o)}}}function _(t){for(var e=it.allocType(5123,t.length),n=0;n>>31<<15,i=(a<<1>>>24)-127,a=a>>13&1023;e[n]=-24>i?r:-14>i?r+(a+1024>>-14-i):15>=i,n.height>>=i,h(n,r[i]),t.mipmask|=1<e;++e)t.images[e]=null;return t}function T(t){for(var e=t.images,n=0;ne){for(var n=0;n=--this.refCount&&N(this)}}),o.profile&&(a.getTotalTextureSize=function(){var t=0;return Object.keys(at).forEach((function(e){t+=at[e].stats.size})),t}),{create2D:function(e,n){function r(t,e){var n=i.texInfo;O.call(n);var a=w();return"number"===typeof t?y(a,0|t,"number"===typeof e?0|e:0|t):t?(L(n,t),b(a,t)):y(a,1,1),n.genMipmaps&&(a.mipmask=(a.width<<1)-1),i.mipmask=a.mipmask,l(i,a),i.internalformat=a.internalformat,r.width=a.width,r.height=a.height,R(i),x(a,3553),I(n,3553),z(),T(a),o.profile&&(i.stats.size=P(i.internalformat,i.type,a.width,a.height,n.genMipmaps,!1)),r.format=$[i.internalformat],r.type=X[i.type],r.mag=K[n.magFilter],r.min=Z[n.minFilter],r.wrapS=J[n.wrapS],r.wrapT=J[n.wrapT],r}var i=new D(3553);return at[i.id]=i,a.textureCount++,r(e,n),r.subimage=function(t,e,n,a){e|=0,n|=0,a|=0;var o=g();return l(o,i),o.width=0,o.height=0,h(o,t),o.width=o.width||(i.width>>a)-e,o.height=o.height||(i.height>>a)-n,R(i),d(o,3553,e,n,a),z(),v(o),r},r.resize=function(e,n){var a=0|e,s=0|n||a;if(a===i.width&&s===i.height)return r;r.width=i.width=a,r.height=i.height=s,R(i);for(var l=0;i.mipmask>>l;++l){var c=a>>l,u=s>>l;if(!c||!u)break;t.texImage2D(3553,l,i.format,c,u,0,i.format,i.type,null)}return z(),o.profile&&(i.stats.size=P(i.internalformat,i.type,a,s,!1,!1)),r},r._reglType="texture2d",r._texture=i,o.profile&&(r.stats=i.stats),r.destroy=function(){i.decRef()},r},createCube:function(e,n,r,i,s,u){function f(t,e,n,r,i,a){var s,u=p.texInfo;for(O.call(u),s=0;6>s;++s)m[s]=w();if("number"!==typeof t&&t){if("object"===typeof t)if(e)b(m[0],t),b(m[1],e),b(m[2],n),b(m[3],r),b(m[4],i),b(m[5],a);else if(L(u,t),c(p,t),"faces"in t)for(t=t.faces,s=0;6>s;++s)l(m[s],p),b(m[s],t[s]);else for(s=0;6>s;++s)b(m[s],t)}else 
for(t=0|t||1,s=0;6>s;++s)y(m[s],t,t);for(l(p,m[0]),p.mipmask=u.genMipmaps?(m[0].width<<1)-1:m[0].mipmask,p.internalformat=m[0].internalformat,f.width=m[0].width,f.height=m[0].height,R(p),s=0;6>s;++s)x(m[s],34069+s);for(I(u,34067),z(),o.profile&&(p.stats.size=P(p.internalformat,p.type,f.width,f.height,u.genMipmaps,!0)),f.format=$[p.internalformat],f.type=X[p.type],f.mag=K[u.magFilter],f.min=Z[u.minFilter],f.wrapS=J[u.wrapS],f.wrapT=J[u.wrapT],s=0;6>s;++s)T(m[s]);return f}var p=new D(34067);at[p.id]=p,a.cubeCount++;var m=Array(6);return f(e,n,r,i,s,u),f.subimage=function(t,e,n,r,i){n|=0,r|=0,i|=0;var a=g();return l(a,p),a.width=0,a.height=0,h(a,e),a.width=a.width||(p.width>>i)-n,a.height=a.height||(p.height>>i)-r,R(p),d(a,34069+t,n,r,i),z(),v(a),f},f.resize=function(e){if((e|=0)!==p.width){f.width=p.width=e,f.height=p.height=e,R(p);for(var n=0;6>n;++n)for(var r=0;p.mipmask>>r;++r)t.texImage2D(34069+n,r,p.format,e>>r,e>>r,0,p.format,p.type,null);return z(),o.profile&&(p.stats.size=P(p.internalformat,p.type,f.width,f.height,!1,!0)),f}},f._reglType="textureCube",f._texture=p,o.profile&&(f.stats=p.stats),f.destroy=function(){p.decRef()},f},clear:function(){for(var e=0;en;++n)if(0!==(e.mipmask&1<>n,e.height>>n,0,e.internalformat,e.type,null);else for(var r=0;6>r;++r)t.texImage2D(34069+r,n,e.internalformat,e.width>>n,e.height>>n,0,e.internalformat,e.type,null);I(e.texInfo,e.target)}))},refresh:function(){for(var e=0;ei;++i){for(c=0;ct;++t)n[t].resize(r);return e.width=e.height=r,e},_reglType:"framebufferCube",destroy:function(){n.forEach((function(t){t.destroy()}))}})},clear:function(){st(T).forEach(v)},restore:function(){b.cur=null,b.next=null,b.dirty=!0,st(T).forEach((function(e){e.framebuffer=t.createFramebuffer(),m(e)}))}})}function I(){this.w=this.z=this.y=this.x=this.state=0,this.buffer=null,this.size=0,this.normalized=!1,this.type=5126,this.divisor=this.stride=this.offset=0}function D(t,e,n,r,i,a,o){function s(t){if(t!==m.currentVAO){var n=e.oes_vertex_array_object;t?n.bindVertexArrayOES(t.vao):n.bindVertexArrayOES(null),m.currentVAO=t}}function l(n){if(n!==m.currentVAO){if(n)n.bindAttrs();else{for(var r=e.angle_instanced_arrays,i=0;i=f.byteLength?l.subdata(f):(l.destroy(),n.buffers[s]=null)),n.buffers[s]||(l=n.buffers[s]=i.create(c,34962,!1,!0)),u.buffer=i.getBuffer(l),u.size=0|u.buffer.dimension,u.normalized=!1,u.type=u.buffer.dtype,u.offset=0,u.stride=0,u.divisor=0,u.state=1,t[s]=1):i.getBuffer(c)?(u.buffer=i.getBuffer(c),u.size=0|u.buffer.dimension,u.normalized=!1,u.type=u.buffer.dtype,u.offset=0,u.stride=0,u.divisor=0,u.state=1):i.getBuffer(c.buffer)?(u.buffer=i.getBuffer(c.buffer),u.size=0|(+c.size||u.buffer.dimension),u.normalized=!!c.normalized||!1,u.type="type"in c?ut[c.type]:u.buffer.dtype,u.offset=0|(c.offset||0),u.stride=0|(c.stride||0),u.divisor=0|(c.divisor||0),u.state=1):"x"in c&&(u.x=+c.x||0,u.y=+c.y||0,u.z=+c.z||0,u.w=+c.w||0,u.state=2)}for(l=0;lt&&(t=e.stats.uniformsCount)})),t},n.getMaxAttributesCount=function(){var t=0;return h.forEach((function(e){e.stats.attributesCount>t&&(t=e.stats.attributesCount)})),t}),{clear:function(){var e=t.deleteShader.bind(t);st(c).forEach(e),c={},st(u).forEach(e),u={},h.forEach((function(e){t.deleteProgram(e.program)})),h.length=0,f={},n.shaderCount=0},program:function(e,r,i,a){var o=f[r];o||(o=f[r]={});var d=o[e];if(d&&(d.refCount++,!a))return d;var p=new s(r,e);return n.shaderCount++,l(p,i,a),d||(o[e]=p),h.push(p),Q(p,{destroy:function(){if(p.refCount--,0>=p.refCount){t.deleteProgram(p.program);var 
e=h.indexOf(p);h.splice(e,1),n.shaderCount--}0>=o[p.vertId].refCount&&(t.deleteShader(u[p.vertId]),delete u[p.vertId],delete f[p.fragId][p.vertId]),Object.keys(f[p.fragId]).length||(t.deleteShader(c[p.fragId]),delete c[p.fragId],delete f[p.fragId])}})},restore:function(){c={},u={};for(var t=0;t>2),n=0;n>5]|=(255&t.charCodeAt(n/8))<<24-n%32;var r,i,a,o,s,l,c,u,f,h,d,p=8*t.length;for(t=[1779033703,-1150833019,1013904242,-1521486534,1359893119,-1694144372,528734635,1541459225],n=Array(64),e[p>>5]|=128<<24-p%32,e[15+(p+64>>9<<4)]=p,u=0;uf;f++){var g;16>f?n[f]=e[f+u]:(h=f,d=U(d=B(d=n[f-2],17)^B(d,19)^d>>>10,n[f-7]),g=B(g=n[f-15],7)^B(g,18)^g>>>3,n[h]=U(U(d,g),n[f-16])),h=U(U(U(U(c,h=B(h=o,6)^B(h,11)^B(h,25)),o&s^~o&l),Dt[f]),n[f]),d=U(c=B(c=p,2)^B(c,13)^B(c,22),p&r^p&i^r&i),c=l,l=s,s=o,o=U(a,h),a=i,i=r,r=p,p=U(h,d)}t[0]=U(p,t[0]),t[1]=U(r,t[1]),t[2]=U(i,t[2]),t[3]=U(a,t[3]),t[4]=U(o,t[4]),t[5]=U(s,t[5]),t[6]=U(l,t[6]),t[7]=U(c,t[7])}for(e="",n=0;n<32*t.length;n+=8)e+=String.fromCharCode(t[n>>5]>>>24-n%32&255);return e}function j(t){for(var e,n="",r=0;r>>4&15)+"0123456789abcdef".charAt(15&e);return n}function F(t){for(var e,n,r="",i=-1;++i=e&&56320<=n&&57343>=n&&(e=65536+((1023&e)<<10)+(1023&n),i++),127>=e?r+=String.fromCharCode(e):2047>=e?r+=String.fromCharCode(192|e>>>6&31,128|63&e):65535>=e?r+=String.fromCharCode(224|e>>>12&15,128|e>>>6&63,128|63&e):2097151>=e&&(r+=String.fromCharCode(240|e>>>18&7,128|e>>>12&63,128|e>>>6&63,128|63&e));return r}function B(t,e){return t>>>e|t<<32-e}function U(t,e){var n=(65535&t)+(65535&e);return(t>>16)+(e>>16)+(n>>16)<<16|65535&n}function H(t){return Array.prototype.slice.call(t)}function V(t){return H(t).join("")}function q(t){function e(){var t=[],e=[];return Q((function(){t.push.apply(t,H(arguments))}),{def:function(){var n="v"+i++;return e.push(n),0"+e+"?"+i+".constant["+e+"]:0;"})).join(""),"}}else{","if(",s,"(",i,".buffer)){",u,"=",a,".createStream(",34962,",",i,".buffer);","}else{",u,"=",a,".getBuffer(",i,".buffer);","}",f,'="type" in ',i,"?",o.glTypes,"[",i,".type]:",u,".dtype;",l.normalized,"=!!",i,".normalized;"),r("size"),r("offset"),r("stride"),r("divisor"),n("}}"),n.exit("if(",l.isStream,"){",a,".destroyStream(",u,");","}"),l}))})),o}function O(t){var e=t.static,n=t.dynamic,r={};return Object.keys(e).forEach((function(t){var n=e[t];r[t]=X((function(t,e){return"number"===typeof n||"boolean"===typeof n?""+n:t.link(n)}))})),Object.keys(n).forEach((function(t){var e=n[t];r[t]=K(e,(function(t,n){return t.invoke(n,e)}))})),r}function L(t,e,r,i,a){function o(t){var e=l[t];e&&(f[t]=e)}var s=M(t,e),l=T(t,d=_(t,a),a),u=S(t,a),f=E(t,a),h=A(t,a,s);o("viewport"),o(m("scissor.box"));var d,p=0>1)",s],");")}function e(){n(l,".drawArraysInstancedANGLE(",[p,g,v,s],");")}d&&"null"!==d?y?t():(n("if(",d,"){"),t(),n("}else{"),e(),n("}")):e()}function o(){function t(){n(u+".drawElements("+[p,v,m,g+"<<(("+m+"-5121)>>1)"]+");")}function e(){n(u+".drawArrays("+[p,g,v]+");")}d&&"null"!==d?y?t():(n("if(",d,"){"),t(),n("}else{"),e(),n("}")):e()}var s,l,c=t.shared,u=c.gl,f=c.draw,h=r.draw,d=function(){var i=h.elements,a=e;return i?((i.contextDep&&r.contextDynamic||i.propDep)&&(a=n),i=i.append(t,a),h.elementsActive&&a("if("+i+")"+u+".bindBuffer(34963,"+i+".buffer.buffer);")):(i=a.def(),a(i,"=",f,".","elements",";","if(",i,"){",u,".bindBuffer(",34963,",",i,".buffer.buffer);}","else 
if(",c.vao,".currentVAO){",i,"=",t.shared.elements+".getElements("+c.vao,".currentVAO.elements);",ft?"":"if("+i+")"+u+".bindBuffer(34963,"+i+".buffer.buffer);","}")),i}(),p=i("primitive"),g=i("offset"),v=function(){var i=h.count,a=e;return i?((i.contextDep&&r.contextDynamic||i.propDep)&&(a=n),i=i.append(t,a)):i=a.def(f,".","count"),i}();if("number"===typeof v){if(0===v)return}else n("if(",v,"){"),n.exit("}");lt&&(s=i("instances"),l=t.instancing);var m=d+".type",y=h.elements&&$(h.elements)&&!h.vaoActive;lt&&("number"!==typeof s||0<=s)?"string"===typeof s?(n("if(",s,">0){"),a(),n("}else if(",s,"<0){"),o(),n("}")):a():o()}function H(t,e,n,r,i){return i=(e=x()).proc("body",i),lt&&(e.instancing=i.def(e.shared.extensions,".angle_instanced_arrays")),t(e,i,n,r),e.compile().body}function V(t,e,n,r){N(t,e),n.useVAO?n.drawVAO?e(t.shared.vao,".setVAO(",n.drawVAO.append(t,e),");"):e(t.shared.vao,".setVAO(",t.shared.vao,".targetVAO);"):(e(t.shared.vao,".setVAO(null);"),F(t,e,n,r.attributes,(function(){return!0}))),B(t,e,n,r.uniforms,(function(){return!0}),!1),U(t,e,e,n)}function Z(t,e){var n=t.proc("draw",1);N(t,n),I(t,n,e.context),D(t,n,e.framebuffer),R(t,n,e),z(t,n,e.state),j(t,n,e,!1,!0);var r=e.shader.progVar.append(t,n);if(n(t.shared.gl,".useProgram(",r,".program);"),e.shader.program)V(t,n,e,e.shader.program);else{n(t.shared.vao,".setVAO(null);");var i=t.global.def("{}"),a=n.def(r,".id"),o=n.def(i,"[",a,"]");n(t.cond(o).then(o,".call(this,a0);").else(o,"=",i,"[",a,"]=",t.link((function(n){return H(V,t,e,n,1)})),"(",r,");",o,".call(this,a0);"))}0=--this.refCount&&o(this)},i.profile&&(r.getTotalRenderbufferSize=function(){var t=0;return Object.keys(u).forEach((function(e){t+=u[e].stats.size})),t}),{create:function(e,n){function o(e,n){var r=0,a=0,u=32854;if("object"===typeof e&&e?("shape"in e?(r=0|(a=e.shape)[0],a=0|a[1]):("radius"in e&&(r=a=0|e.radius),"width"in e&&(r=0|e.width),"height"in e&&(a=0|e.height)),"format"in e&&(u=s[e.format])):"number"===typeof e?(r=0|e,a="number"===typeof n?0|n:r):e||(r=a=1),r!==c.width||a!==c.height||u!==c.format)return o.width=c.width=r,o.height=c.height=a,c.format=u,t.bindRenderbuffer(36161,c.renderbuffer),t.renderbufferStorage(36161,u,r,a),i.profile&&(c.stats.size=Pt[c.format]*c.width*c.height),o.format=l[c.format],o}var c=new a(t.createRenderbuffer());return u[c.id]=c,r.renderbufferCount++,o(e,n),o.resize=function(e,n){var r=0|e,a=0|n||r;return r===c.width&&a===c.height||(o.width=c.width=r,o.height=c.height=a,t.bindRenderbuffer(36161,c.renderbuffer),t.renderbufferStorage(36161,c.format,r,a),i.profile&&(c.stats.size=Pt[c.format]*c.width*c.height)),o},o._reglType="renderbuffer",o._renderbuffer=c,i.profile&&(o.stats=c.stats),o.destroy=function(){c.decRef()},o},clear:function(){st(u).forEach(o)},restore:function(){st(u).forEach((function(e){e.renderbuffer=t.createRenderbuffer(),t.bindRenderbuffer(36161,e.renderbuffer),t.renderbufferStorage(36161,e.format,e.width,e.height)})),t.bindRenderbuffer(36161,null)}}},Lt=[];Lt[6408]=4,Lt[6407]=3;var It=[];It[5121]=1,It[5126]=4,It[36193]=2;var 
Dt=[1116352408,1899447441,-1245643825,-373957723,961987163,1508970993,-1841331548,-1424204075,-670586216,310598401,607225278,1426881987,1925078388,-2132889090,-1680079193,-1046744716,-459576895,-272742522,264347078,604807628,770255983,1249150122,1555081692,1996064986,-1740746414,-1473132947,-1341970488,-1084653625,-958395405,-710438585,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,-2117940946,-1838011259,-1564481375,-1474664885,-1035236496,-949202525,-778901479,-694614492,-200395387,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,-2067236844,-1933114872,-1866530822,-1538233109,-1090935817,-965641998],Rt=["x","y","z","w"],zt="blend.func blend.equation stencil.func stencil.opFront stencil.opBack sample.coverage viewport scissor.box polygonOffset.offset".split(" "),Nt={0:0,1:1,zero:0,one:1,"src color":768,"one minus src color":769,"src alpha":770,"one minus src alpha":771,"dst color":774,"one minus dst color":775,"dst alpha":772,"one minus dst alpha":773,"constant color":32769,"one minus constant color":32770,"constant alpha":32771,"one minus constant alpha":32772,"src alpha saturate":776},jt={never:512,less:513,"<":513,equal:514,"=":514,"==":514,"===":514,lequal:515,"<=":515,greater:516,">":516,notequal:517,"!=":517,"!==":517,gequal:518,">=":518,always:519},Ft={0:0,zero:0,keep:7680,replace:7681,increment:7682,decrement:7683,"increment wrap":34055,"decrement wrap":34056,invert:5386},Bt={cw:2304,ccw:2305},Ut=new Y(!1,!1,!1,(function(){})),Ht=function(t,e){function n(){this.endQueryIndex=this.startQueryIndex=-1,this.sum=0,this.stats=null}function r(t,e,r){var i=o.pop()||new n;i.startQueryIndex=t,i.endQueryIndex=e,i.sum=0,i.stats=r,s.push(i)}if(!e.ext_disjoint_timer_query)return null;var i=[],a=[],o=[],s=[],l=[],c=[];return{beginQuery:function(t){var n=i.pop()||e.ext_disjoint_timer_query.createQueryEXT();e.ext_disjoint_timer_query.beginQueryEXT(35007,n),a.push(n),r(a.length-1,a.length,t)},endQuery:function(){e.ext_disjoint_timer_query.endQueryEXT(35007)},pushScopeStats:r,update:function(){var t,n;if(0!==(t=a.length)){c.length=Math.max(c.length,t+1),l.length=Math.max(l.length,t+1),l[0]=0;var r=c[0]=0;for(n=t=0;n=W.length&&r()}var n=J(W,t);W[n]=e}}}function h(){var t=q.viewport,e=q.scissor_box;t[0]=t[1]=e[0]=e[1]=0,C.viewportWidth=C.framebufferWidth=C.drawingBufferWidth=t[2]=e[2]=v.drawingBufferWidth,C.viewportHeight=C.framebufferHeight=C.drawingBufferHeight=t[3]=e[3]=v.drawingBufferHeight}function d(){C.tick+=1,C.time=g(),h(),V.procs.poll()}function p(){B.refresh(),h(),V.procs.refresh(),M&&M.update()}function g(){return(rt()-A)/1e3}if(!(t=c(t)))return null;var v=t.gl,m=v.getContextAttributes();v.isContextLost();var y=u(v,t);if(!y)return null;var b=i(),_={vaoCount:0,bufferCount:0,elementsCount:0,framebufferCount:0,shaderCount:0,textureCount:0,cubeCount:0,renderbufferCount:0,maxTextureUnits:0},k=t.cachedCode||{},T=y.extensions,M=Ht(v,T),A=rt(),S=v.drawingBufferWidth,E=v.drawingBufferHeight,C={tick:0,time:0,viewportWidth:S,viewportHeight:E,framebufferWidth:S,framebufferHeight:E,drawingBufferWidth:S,drawingBufferHeight:E,pixelRatio:t.pixelRatio},P=(S={elements:null,primitive:4,count:-1,offset:0,instances:-1},at(v,T)),I=x(v,_,t,(function(t){return 
j.destroyBuffer(t)})),N=w(v,T,I,_),j=D(v,T,P,_,I,N,S),F=R(v,b,_,t),B=O(v,T,P,(function(){V.procs.poll()}),C,_,t),U=Ot(v,T,P,_,t),H=L(v,T,P,B,U,_),V=Z(v,b,T,P,I,N,B,H,{},j,F,S,C,M,k,t),q=(b=z(v,H,V.procs.poll,C,m,T,P),V.next),G=v.canvas,W=[],Y=[],$=[],X=[t.onDestroy],K=null;G&&(G.addEventListener("webglcontextlost",a,!1),G.addEventListener("webglcontextrestored",o,!1));var tt=H.setFBO=s({framebuffer:et.define.call(null,1,"framebuffer")});return p(),m=Q(s,{clear:function(t){if("framebuffer"in t)if(t.framebuffer&&"framebufferCube"===t.framebuffer_reglType)for(var e=0;6>e;++e)tt(Q({framebuffer:t.framebuffer.faces[e]},t),l);else tt(t,l);else l(null,t)},prop:et.define.bind(null,1),context:et.define.bind(null,2),this:et.define.bind(null,3),draw:s({}),buffer:function(t){return I.create(t,34962,!1,!1)},elements:function(t){return N.create(t,!1)},texture:B.create2D,cube:B.createCube,renderbuffer:U.create,framebuffer:H.create,framebufferCube:H.createCube,vao:j.createVAO,attributes:m,frame:f,on:function(t,e){var n;switch(t){case"frame":return f(e);case"lost":n=Y;break;case"restore":n=$;break;case"destroy":n=X}return n.push(e),{cancel:function(){for(var t=0;t2?"one of ".concat(e," ").concat(t.slice(0,n-1).join(", "),", or ")+t[n-1]:2===n?"one of ".concat(e," ").concat(t[0]," or ").concat(t[1]):"of ".concat(e," ").concat(t[0])}return"of ".concat(e," ").concat(String(t))}n("ERR_INVALID_OPT_VALUE",(function(t,e){return'The value "'+e+'" is invalid for option "'+t+'"'}),TypeError),n("ERR_INVALID_ARG_TYPE",(function(t,e,n){var i,a,o,s;if("string"===typeof e&&(a="not ",e.substr(!o||o<0?0:+o,a.length)===a)?(i="must not be",e=e.replace(/^not /,"")):i="must be",function(t,e,n){return(void 0===n||n>t.length)&&(n=t.length),t.substring(n-e.length,n)===e}(t," argument"))s="The ".concat(t," ").concat(i," ").concat(r(e,"type"));else{var l=function(t,e,n){return"number"!==typeof n&&(n=0),!(n+e.length>t.length)&&-1!==t.indexOf(e,n)}(t,".")?"property":"argument";s='The "'.concat(t,'" ').concat(l," ").concat(i," ").concat(r(e,"type"))}return s+=". 
Received type ".concat(typeof n)}),TypeError),n("ERR_STREAM_PUSH_AFTER_EOF","stream.push() after EOF"),n("ERR_METHOD_NOT_IMPLEMENTED",(function(t){return"The "+t+" method is not implemented"})),n("ERR_STREAM_PREMATURE_CLOSE","Premature close"),n("ERR_STREAM_DESTROYED",(function(t){return"Cannot call "+t+" after a stream was destroyed"})),n("ERR_MULTIPLE_CALLBACK","Callback called multiple times"),n("ERR_STREAM_CANNOT_PIPE","Cannot pipe, not readable"),n("ERR_STREAM_WRITE_AFTER_END","write after end"),n("ERR_STREAM_NULL_VALUES","May not write null values to stream",TypeError),n("ERR_UNKNOWN_ENCODING",(function(t){return"Unknown encoding: "+t}),TypeError),n("ERR_STREAM_UNSHIFT_AFTER_END_EVENT","stream.unshift() after end event"),t.exports.q=e},37865:function(t,e,n){"use strict";var r=n(90386),i=Object.keys||function(t){var e=[];for(var n in t)e.push(n);return e};t.exports=u;var a=n(40410),o=n(37493);n(42018)(u,a);for(var s=i(o.prototype),l=0;l0)if("string"===typeof e||s.objectMode||Object.getPrototypeOf(e)===l.prototype||(e=function(t){return l.from(t)}(e)),r)s.endEmitted?_(t,new w):S(t,s,e,!0);else if(s.ended)_(t,new b);else{if(s.destroyed)return!1;s.reading=!1,s.decoder&&!n?(e=s.decoder.write(e),s.objectMode||0!==e.length?S(t,s,e,!1):L(t,s)):S(t,s,e,!1)}else r||(s.reading=!1,L(t,s));return!s.ended&&(s.lengthe.highWaterMark&&(e.highWaterMark=function(t){return t>=E?t=E:(t--,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,t|=t>>>16,t++),t}(t)),t<=e.length?t:e.ended?e.length:(e.needReadable=!0,0))}function P(t){var e=t._readableState;a("emitReadable",e.needReadable,e.emittedReadable),e.needReadable=!1,e.emittedReadable||(a("emitReadable",e.flowing),e.emittedReadable=!0,i.nextTick(O,t))}function O(t){var e=t._readableState;a("emitReadable_",e.destroyed,e.length,e.ended),e.destroyed||!e.length&&!e.ended||(t.emit("readable"),e.emittedReadable=!1),e.needReadable=!e.flowing&&!e.ended&&e.length<=e.highWaterMark,N(t)}function L(t,e){e.readingMore||(e.readingMore=!0,i.nextTick(I,t,e))}function I(t,e){for(;!e.reading&&!e.ended&&(e.length0,e.resumeScheduled&&!e.paused?e.flowing=!0:t.listenerCount("data")>0&&t.resume()}function R(t){a("readable nexttick read 0"),t.read(0)}function z(t,e){a("resume",e.reading),e.reading||t.read(0),e.resumeScheduled=!1,t.emit("resume"),N(t),e.flowing&&!e.reading&&t.read(0)}function N(t){var e=t._readableState;for(a("flow",e.flowing);e.flowing&&null!==t.read(););}function j(t,e){return 0===e.length?null:(e.objectMode?n=e.buffer.shift():!t||t>=e.length?(n=e.decoder?e.buffer.join(""):1===e.buffer.length?e.buffer.first():e.buffer.concat(e.length),e.buffer.clear()):n=e.buffer.consume(t,e.decoder),n);var n}function F(t){var e=t._readableState;a("endReadable",e.endEmitted),e.endEmitted||(e.ended=!0,i.nextTick(B,e,t))}function B(t,e){if(a("endReadableNT",t.endEmitted,t.length),!t.endEmitted&&0===t.length&&(t.endEmitted=!0,e.readable=!1,e.emit("end"),t.autoDestroy)){var n=e._writableState;(!n||n.autoDestroy&&n.finished)&&e.destroy()}}function U(t,e){for(var n=0,r=t.length;n=e.highWaterMark:e.length>0)||e.ended))return a("read: emitReadable",e.length,e.ended),0===e.length&&e.ended?F(this):P(this),null;if(0===(t=C(t,e))&&e.ended)return 0===e.length&&F(this),null;var r,i=e.needReadable;return a("need readable",i),(0===e.length||e.length-t0?j(t,e):null)?(e.needReadable=e.length<=e.highWaterMark,t=0):(e.length-=t,e.awaitDrain=0),0===e.length&&(e.ended||(e.needReadable=!0),n!==t&&e.ended&&F(this)),null!==r&&this.emit("data",r),r},M.prototype._read=function(t){_(this,new 
x("_read()"))},M.prototype.pipe=function(t,e){var n=this,r=this._readableState;switch(r.pipesCount){case 0:r.pipes=t;break;case 1:r.pipes=[r.pipes,t];break;default:r.pipes.push(t)}r.pipesCount+=1,a("pipe count=%d opts=%j",r.pipesCount,e);var s=e&&!1===e.end||t===i.stdout||t===i.stderr?v:c;function l(e,i){a("onunpipe"),e===n&&i&&!1===i.hasUnpiped&&(i.hasUnpiped=!0,a("cleanup"),t.removeListener("close",p),t.removeListener("finish",g),t.removeListener("drain",u),t.removeListener("error",d),t.removeListener("unpipe",l),n.removeListener("end",c),n.removeListener("end",v),n.removeListener("data",h),f=!0,!r.awaitDrain||t._writableState&&!t._writableState.needDrain||u())}function c(){a("onend"),t.end()}r.endEmitted?i.nextTick(s):n.once("end",s),t.on("unpipe",l);var u=function(t){return function(){var e=t._readableState;a("pipeOnDrain",e.awaitDrain),e.awaitDrain&&e.awaitDrain--,0===e.awaitDrain&&o(t,"data")&&(e.flowing=!0,N(t))}}(n);t.on("drain",u);var f=!1;function h(e){a("ondata");var i=t.write(e);a("dest.write",i),!1===i&&((1===r.pipesCount&&r.pipes===t||r.pipesCount>1&&-1!==U(r.pipes,t))&&!f&&(a("false write response, pause",r.awaitDrain),r.awaitDrain++),n.pause())}function d(e){a("onerror",e),v(),t.removeListener("error",d),0===o(t,"error")&&_(t,e)}function p(){t.removeListener("finish",g),v()}function g(){a("onfinish"),t.removeListener("close",p),v()}function v(){a("unpipe"),n.unpipe(t)}return n.on("data",h),function(t,e,n){if("function"===typeof t.prependListener)return t.prependListener(e,n);t._events&&t._events[e]?Array.isArray(t._events[e])?t._events[e].unshift(n):t._events[e]=[n,t._events[e]]:t.on(e,n)}(t,"error",d),t.once("close",p),t.once("finish",g),t.emit("pipe",n),r.flowing||(a("pipe resume"),n.resume()),t},M.prototype.unpipe=function(t){var e=this._readableState,n={hasUnpiped:!1};if(0===e.pipesCount)return this;if(1===e.pipesCount)return t&&t!==e.pipes||(t||(t=e.pipes),e.pipes=null,e.pipesCount=0,e.flowing=!1,t&&t.emit("unpipe",this,n)),this;if(!t){var r=e.pipes,i=e.pipesCount;e.pipes=null,e.pipesCount=0,e.flowing=!1;for(var a=0;a0,!1!==r.flowing&&this.resume()):"readable"===t&&(r.endEmitted||r.readableListening||(r.readableListening=r.needReadable=!0,r.flowing=!1,r.emittedReadable=!1,a("on readable",r.length,r.reading),r.length?P(this):r.reading||i.nextTick(R,this))),n},M.prototype.addListener=M.prototype.on,M.prototype.removeListener=function(t,e){var n=s.prototype.removeListener.call(this,t,e);return"readable"===t&&i.nextTick(D,this),n},M.prototype.removeAllListeners=function(t){var e=s.prototype.removeAllListeners.apply(this,arguments);return"readable"!==t&&void 0!==t||i.nextTick(D,this),e},M.prototype.resume=function(){var t=this._readableState;return t.flowing||(a("resume"),t.flowing=!t.readableListening,function(t,e){e.resumeScheduled||(e.resumeScheduled=!0,i.nextTick(z,t,e))}(this,t)),t.paused=!1,this},M.prototype.pause=function(){return a("call pause flowing=%j",this._readableState.flowing),!1!==this._readableState.flowing&&(a("pause"),this._readableState.flowing=!1,this.emit("pause")),this._readableState.paused=!0,this},M.prototype.wrap=function(t){var e=this,n=this._readableState,r=!1;for(var i in t.on("end",(function(){if(a("wrapped end"),n.decoder&&!n.ended){var t=n.decoder.end();t&&t.length&&e.push(t)}e.push(null)})),t.on("data",(function(i){a("wrapped data"),n.decoder&&(i=n.decoder.write(i)),(!n.objectMode||null!==i&&void 0!==i)&&(n.objectMode||i&&i.length)&&(e.push(i)||(r=!0,t.pause()))})),t)void 0===this[i]&&"function"===typeof t[i]&&(this[i]=function(e){return 
function(){return t[e].apply(t,arguments)}}(i));for(var o=0;o-1))throw new w(t);return this._writableState.defaultEncoding=t,this},Object.defineProperty(M.prototype,"writableBuffer",{enumerable:!1,get:function(){return this._writableState&&this._writableState.getBuffer()}}),Object.defineProperty(M.prototype,"writableHighWaterMark",{enumerable:!1,get:function(){return this._writableState.highWaterMark}}),M.prototype._write=function(t,e,n){n(new g("_write()"))},M.prototype._writev=null,M.prototype.end=function(t,e,n){var r=this._writableState;return"function"===typeof t?(n=t,t=null,e=null):"function"===typeof e&&(n=e,e=null),null!==t&&void 0!==t&&this.write(t,e),r.corked&&(r.corked=1,this.uncork()),r.ending||function(t,e,n){e.ending=!0,O(t,e),n&&(e.finished?i.nextTick(n):t.once("finish",n)),e.ended=!0,t.writable=!1}(this,r,n),this},Object.defineProperty(M.prototype,"writableLength",{enumerable:!1,get:function(){return this._writableState.length}}),Object.defineProperty(M.prototype,"destroyed",{enumerable:!1,get:function(){return void 0!==this._writableState&&this._writableState.destroyed},set:function(t){this._writableState&&(this._writableState.destroyed=t)}}),M.prototype.destroy=f.destroy,M.prototype._undestroy=f.undestroy,M.prototype._destroy=function(t,e){e(t)}},68221:function(t,e,n){"use strict";var r,i=n(90386);function a(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}var o=n(12726),s=Symbol("lastResolve"),l=Symbol("lastReject"),c=Symbol("error"),u=Symbol("ended"),f=Symbol("lastPromise"),h=Symbol("handlePromise"),d=Symbol("stream");function p(t,e){return{value:t,done:e}}function g(t){var e=t[s];if(null!==e){var n=t[d].read();null!==n&&(t[f]=null,t[s]=null,t[l]=null,e(p(n,!1)))}}function v(t){i.nextTick(g,t)}var m=Object.getPrototypeOf((function(){})),y=Object.setPrototypeOf((a(r={get stream(){return this[d]},next:function(){var t=this,e=this[c];if(null!==e)return Promise.reject(e);if(this[u])return Promise.resolve(p(void 0,!0));if(this[d].destroyed)return new Promise((function(e,n){i.nextTick((function(){t[c]?n(t[c]):e(p(void 0,!0))}))}));var n,r=this[f];if(r)n=new Promise(function(t,e){return function(n,r){t.then((function(){e[u]?n(p(void 0,!0)):e[h](n,r)}),r)}}(r,this));else{var a=this[d].read();if(null!==a)return Promise.resolve(p(a,!1));n=new Promise(this[h])}return this[f]=n,n}},Symbol.asyncIterator,(function(){return this})),a(r,"return",(function(){var t=this;return new Promise((function(e,n){t[d].destroy(null,(function(t){t?n(t):e(p(void 0,!0))}))}))})),r),m);t.exports=function(t){var e,n=Object.create(y,(a(e={},d,{value:t,writable:!0}),a(e,s,{value:null,writable:!0}),a(e,l,{value:null,writable:!0}),a(e,c,{value:null,writable:!0}),a(e,u,{value:t._readableState.endEmitted,writable:!0}),a(e,h,{value:function(t,e){var r=n[d].read();r?(n[f]=null,n[s]=null,n[l]=null,t(p(r,!1))):(n[s]=t,n[l]=e)},writable:!0}),e));return n[f]=null,o(t,(function(t){if(t&&"ERR_STREAM_PREMATURE_CLOSE"!==t.code){var e=n[l];return null!==e&&(n[f]=null,n[s]=null,n[l]=null,e(t)),void(n[c]=t)}var r=n[s];null!==r&&(n[f]=null,n[s]=null,n[l]=null,r(p(void 0,!0))),n[u]=!0})),t.on("readable",v.bind(null,n)),n}},31125:function(t,e,n){"use strict";function r(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(t);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),n.push.apply(n,r)}return n}function i(t,e,n){return e in 
t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function a(t,e){for(var n=0;n0?this.tail.next=e:this.head=e,this.tail=e,++this.length}},{key:"unshift",value:function(t){var e={data:t,next:this.head};0===this.length&&(this.tail=e),this.head=e,++this.length}},{key:"shift",value:function(){if(0!==this.length){var t=this.head.data;return 1===this.length?this.head=this.tail=null:this.head=this.head.next,--this.length,t}}},{key:"clear",value:function(){this.head=this.tail=null,this.length=0}},{key:"join",value:function(t){if(0===this.length)return"";for(var e=this.head,n=""+e.data;e=e.next;)n+=t+e.data;return n}},{key:"concat",value:function(t){if(0===this.length)return o.alloc(0);for(var e,n,r,i=o.allocUnsafe(t>>>0),a=this.head,s=0;a;)e=a.data,n=i,r=s,o.prototype.copy.call(e,n,r),s+=a.data.length,a=a.next;return i}},{key:"consume",value:function(t,e){var n;return ti.length?i.length:t;if(a===i.length?r+=i:r+=i.slice(0,t),0===(t-=a)){a===i.length?(++n,e.next?this.head=e.next:this.head=this.tail=null):(this.head=e,e.data=i.slice(a));break}++n}return this.length-=n,r}},{key:"_getBuffer",value:function(t){var e=o.allocUnsafe(t),n=this.head,r=1;for(n.data.copy(e),t-=n.data.length;n=n.next;){var i=n.data,a=t>i.length?i.length:t;if(i.copy(e,e.length-t,0,a),0===(t-=a)){a===i.length?(++r,n.next?this.head=n.next:this.head=this.tail=null):(this.head=n,n.data=i.slice(a));break}++r}return this.length-=r,e}},{key:l,value:function(t,e){return s(this,function(t){for(var e=1;e0,(function(t){u||(u=t),t&&h.forEach(l),a||(h.forEach(l),f(u))}))}));return e.reduce(c)}},56306:function(t,e,n){"use strict";var r=n(74322).q.ERR_INVALID_OPT_VALUE;t.exports={getHighWaterMark:function(t,e,n,i){var a=function(t,e,n){return null!=t.highWaterMark?t.highWaterMark:e?t[n]:null}(e,i,n);if(null!=a){if(!isFinite(a)||Math.floor(a)!==a||a<0)throw new r(i?n:"highWaterMark",a);return Math.floor(a)}return t.objectMode?16:16384}}},71405:function(t,e,n){t.exports=n(15398).EventEmitter},68019:function(t,e,n){"use strict";var r=n(71665).Buffer,i=r.isEncoding||function(t){switch((t=""+t)&&t.toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":case"raw":return!0;default:return!1}};function a(t){var e;switch(this.encoding=function(t){var e=function(t){if(!t)return"utf8";for(var e;;)switch(t){case"utf8":case"utf-8":return"utf8";case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return"utf16le";case"latin1":case"binary":return"latin1";case"base64":case"ascii":case"hex":return t;default:if(e)return;t=(""+t).toLowerCase(),e=!0}}(t);if("string"!==typeof e&&(r.isEncoding===i||!i(t)))throw new Error("Unknown encoding: "+t);return e||t}(t),this.encoding){case"utf16le":this.text=l,this.end=c,e=4;break;case"utf8":this.fillLast=s,e=4;break;case"base64":this.text=u,this.end=f,e=3;break;default:return this.write=h,void(this.end=d)}this.lastNeed=0,this.lastTotal=0,this.lastChar=r.allocUnsafe(e)}function o(t){return t<=127?0:t>>5===6?2:t>>4===14?3:t>>3===30?4:t>>6===2?-1:-2}function s(t){var e=this.lastTotal-this.lastNeed,n=function(t,e,n){if(128!==(192&e[0]))return t.lastNeed=0,"\ufffd";if(t.lastNeed>1&&e.length>1){if(128!==(192&e[1]))return t.lastNeed=1,"\ufffd";if(t.lastNeed>2&&e.length>2&&128!==(192&e[2]))return t.lastNeed=2,"\ufffd"}}(this,t);return void 
0!==n?n:this.lastNeed<=t.length?(t.copy(this.lastChar,e,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal)):(t.copy(this.lastChar,e,0,t.length),void(this.lastNeed-=t.length))}function l(t,e){if((t.length-e)%2===0){var n=t.toString("utf16le",e);if(n){var r=n.charCodeAt(n.length-1);if(r>=55296&&r<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1],n.slice(0,-1)}return n}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=t[t.length-1],t.toString("utf16le",e,t.length-1)}function c(t){var e=t&&t.length?this.write(t):"";if(this.lastNeed){var n=this.lastTotal-this.lastNeed;return e+this.lastChar.toString("utf16le",0,n)}return e}function u(t,e){var n=(t.length-e)%3;return 0===n?t.toString("base64",e):(this.lastNeed=3-n,this.lastTotal=3,1===n?this.lastChar[0]=t[t.length-1]:(this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1]),t.toString("base64",e,t.length-n))}function f(t){var e=t&&t.length?this.write(t):"";return this.lastNeed?e+this.lastChar.toString("base64",0,3-this.lastNeed):e}function h(t){return t.toString(this.encoding)}function d(t){return t&&t.length?this.write(t):""}e.s=a,a.prototype.write=function(t){if(0===t.length)return"";var e,n;if(this.lastNeed){if(void 0===(e=this.fillLast(t)))return"";n=this.lastNeed,this.lastNeed=0}else n=0;return n=0?(i>0&&(t.lastNeed=i-1),i):--r=0?(i>0&&(t.lastNeed=i-2),i):--r=0?(i>0&&(2===i?i=0:t.lastNeed=i-3),i):0}(this,t,e);if(!this.lastNeed)return t.toString("utf8",e);this.lastTotal=n;var r=t.length-(n-this.lastNeed);return t.copy(this.lastChar,0,r),t.toString("utf8",e,r)},a.prototype.fillLast=function(t){if(this.lastNeed<=t.length)return t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,t.length),this.lastNeed-=t.length}},90715:function(t,e,n){var r=n(32791),i=n(41633)("stream-parser");t.exports=function(t){var e=t&&"function"==typeof t._transform,n=t&&"function"==typeof t._write;if(!e&&!n)throw new Error("must pass a Writable or Transform stream in");i("extending Parser into stream"),t._bytes=u,t._skipBytes=f,e&&(t._passthrough=h),e?t._transform=p:t._write=d};var a=-1,o=0,s=1,l=2;function c(t){i("initializing parser stream"),t._parserBytesLeft=0,t._parserBuffers=[],t._parserBuffered=0,t._parserState=a,t._parserCallback=null,"function"==typeof t.push&&(t._parserOutput=t.push.bind(t)),t._parserInit=!0}function u(t,e){r(!this._parserCallback,'there is already a "callback" set!'),r(isFinite(t)&&t>0,'can only buffer a finite number of bytes > 0, got "'+t+'"'),this._parserInit||c(this),i("buffering %o bytes",t),this._parserBytesLeft=t,this._parserCallback=e,this._parserState=o}function f(t,e){r(!this._parserCallback,'there is already a "callback" set!'),r(t>0,'can only skip > 0 bytes, got "'+t+'"'),this._parserInit||c(this),i("skipping %o bytes",t),this._parserBytesLeft=t,this._parserCallback=e,this._parserState=s}function h(t,e){r(!this._parserCallback,'There is already a "callback" set!'),r(t>0,'can only pass through > 0 bytes, got "'+t+'"'),this._parserInit||c(this),i("passing through %o bytes",t),this._parserBytesLeft=t,this._parserCallback=e,this._parserState=l}function d(t,e,n){this._parserInit||c(this),i("write(%o bytes)",t.length),"function"==typeof e&&(n=e),v(this,t,null,n)}function p(t,e,n){this._parserInit||c(this),i("transform(%o bytes)",t.length),"function"!=typeof e&&(e=this._parserOutput),v(this,t,e,n)}function 
g(t,e,n,r){if(t._parserBytesLeft-=e.length,i("%o bytes left for stream piece",t._parserBytesLeft),t._parserState===o?(t._parserBuffers.push(e),t._parserBuffered+=e.length):t._parserState===l&&n(e),0!==t._parserBytesLeft)return r;var s=t._parserCallback;if(s&&t._parserState===o&&t._parserBuffers.length>1&&(e=Buffer.concat(t._parserBuffers,t._parserBuffered)),t._parserState!==o&&(e=null),t._parserCallback=null,t._parserBuffered=0,t._parserState=a,t._parserBuffers.splice(0),s){var c=[];e&&c.push(e),n&&c.push(n);var u=s.length>c.length;u&&c.push(m(r));var f=s.apply(t,c);if(!u||r===f)return r}}var v=m((function t(e,n,r,i){return e._parserBytesLeft<=0?i(new Error("got data but not currently parsing anything")):n.length<=e._parserBytesLeft?function(){return g(e,n,r,i)}:function(){var a=n.slice(0,e._parserBytesLeft);return g(e,a,r,(function(o){return o?i(o):n.length>a.length?function(){return t(e,n.slice(a.length),r,i)}:void 0}))}}));function m(t){return function(){for(var e=t.apply(this,arguments);"function"==typeof e;)e=e();return e}}},41633:function(t,e,n){var r=n(90386);function i(){var t;try{t=e.storage.debug}catch(n){}return!t&&"undefined"!==typeof r&&"env"in r&&(t=r.env.DEBUG),t}(e=t.exports=n(74469)).log=function(){return"object"===typeof console&&console.log&&Function.prototype.apply.call(console.log,console,arguments)},e.formatArgs=function(t){var n=this.useColors;if(t[0]=(n?"%c":"")+this.namespace+(n?" %c":" ")+t[0]+(n?"%c ":" ")+"+"+e.humanize(this.diff),n){var r="color: "+this.color;t.splice(1,0,r,"color: inherit");var i=0,a=0;t[0].replace(/%[a-zA-Z%]/g,(function(t){"%%"!==t&&(i++,"%c"===t&&(a=i))})),t.splice(a,0,r)}},e.save=function(t){try{null==t?e.storage.removeItem("debug"):e.storage.debug=t}catch(n){}},e.load=i,e.useColors=function(){return!("undefined"===typeof window||!window.process||"renderer"!==window.process.type)||("undefined"!==typeof document&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||"undefined"!==typeof window&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||"undefined"!==typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/)&&parseInt(RegExp.$1,10)>=31||"undefined"!==typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/))},e.storage="undefined"!=typeof chrome&&"undefined"!=typeof chrome.storage?chrome.storage.local:function(){try{return window.localStorage}catch(t){}}(),e.colors=["lightseagreen","forestgreen","goldenrod","dodgerblue","darkorchid","crimson"],e.formatters.j=function(t){try{return JSON.stringify(t)}catch(e){return"[UnexpectedJSONParseError]: "+e.message}},e.enable(i())},74469:function(t,e,n){var r;function i(t){function n(){if(n.enabled){var t=n,i=+new Date,a=i-(r||i);t.diff=a,t.prev=r,t.curr=i,r=i;for(var o=new Array(arguments.length),s=0;s0)return function(t){if(!((t=String(t)).length>100)){var o=/^((?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|years?|yrs?|y)?$/i.exec(t);if(o){var s=parseFloat(o[1]);switch((o[2]||"ms").toLowerCase()){case"years":case"year":case"yrs":case"yr":case"y":return s*a;case"days":case"day":case"d":return s*i;case"hours":case"hour":case"hrs":case"hr":case"h":return s*r;case"minutes":case"minute":case"mins":case"min":case"m":return s*n;case"seconds":case"second":case"secs":case"sec":case"s":return s*e;case"milliseconds":case"millisecond":case"msecs":case"msec":case"ms":return 
s;default:return}}}}(t);if("number"===c&&!1===isNaN(t))return s.long?o(l=t,i,"day")||o(l,r,"hour")||o(l,n,"minute")||o(l,e,"second")||l+" ms":function(t){return t>=i?Math.round(t/i)+"d":t>=r?Math.round(t/r)+"h":t>=n?Math.round(t/n)+"m":t>=e?Math.round(t/e)+"s":t+"ms"}(t);throw new Error("val is not a non-empty string or a valid number. val="+JSON.stringify(t))}},99011:function(t,e,n){"use strict";var r=n(88641);t.exports=function(t,e,n){if(null==t)throw Error("First argument should be a string");if(null==e)throw Error("Separator should be a string or a RegExp");n?("string"===typeof n||Array.isArray(n))&&(n={ignore:n}):n={},null==n.escape&&(n.escape=!0),null==n.ignore?n.ignore=["[]","()","{}","<>",'""',"''","``","\u201c\u201d","\xab\xbb"]:("string"===typeof n.ignore&&(n.ignore=[n.ignore]),n.ignore=n.ignore.map((function(t){return 1===t.length&&(t+=t),t})));var i=r.parse(t,{flat:!0,brackets:n.ignore}),a=i[0].split(e);if(n.escape){for(var o=[],s=0;s0;){e=c[c.length-1];var d=t[e];if(a[e]=0&&s[e].push(o[g])}a[e]=p}else{if(r[e]===n[e]){var v=[],m=[],y=0;for(p=l.length-1;p>=0;--p){var b=l[p];if(i[b]=!1,v.push(b),m.push(s[b]),y+=s[b].length,o[b]=f.length,b===e){l.length=p;break}}f.push(v);var x=new Array(y);for(p=0;p1&&(i=1),i<-1&&(i=-1),(t*r-e*n<0?-1:1)*Math.acos(i)};e.default=function(t){var e=t.px,n=t.py,l=t.cx,c=t.cy,u=t.rx,f=t.ry,h=t.xAxisRotation,d=void 0===h?0:h,p=t.largeArcFlag,g=void 0===p?0:p,v=t.sweepFlag,m=void 0===v?0:v,y=[];if(0===u||0===f)return[];var b=Math.sin(d*i/360),x=Math.cos(d*i/360),w=x*(e-l)/2+b*(n-c)/2,_=-b*(e-l)/2+x*(n-c)/2;if(0===w&&0===_)return[];u=Math.abs(u),f=Math.abs(f);var k=Math.pow(w,2)/Math.pow(u,2)+Math.pow(_,2)/Math.pow(f,2);k>1&&(u*=Math.sqrt(k),f*=Math.sqrt(k));var T=function(t,e,n,r,a,o,l,c,u,f,h,d){var p=Math.pow(a,2),g=Math.pow(o,2),v=Math.pow(h,2),m=Math.pow(d,2),y=p*g-p*m-g*v;y<0&&(y=0),y/=p*m+g*v;var b=(y=Math.sqrt(y)*(l===c?-1:1))*a/o*d,x=y*-o/a*h,w=f*b-u*x+(t+n)/2,_=u*b+f*x+(e+r)/2,k=(h-b)/a,T=(d-x)/o,M=(-h-b)/a,A=(-d-x)/o,S=s(1,0,k,T),E=s(k,T,M,A);return 0===c&&E>0&&(E-=i),1===c&&E<0&&(E+=i),[w,_,S,E]}(e,n,l,c,u,f,g,m,b,x,w,_),M=r(T,4),A=M[0],S=M[1],E=M[2],C=M[3],P=Math.abs(C)/(i/4);Math.abs(1-P)<1e-7&&(P=1);var O=Math.max(Math.ceil(P),1);C/=O;for(var L=0;Le[2]&&(e[2]=c[u+0]),c[u+1]>e[3]&&(e[3]=c[u+1]);return e}},29988:function(t,e,n){"use strict";t.exports=function(t){for(var e,n=[],o=0,s=0,l=0,c=0,u=null,f=null,h=0,d=0,p=0,g=t.length;p4?(o=v[v.length-4],s=v[v.length-3]):(o=h,s=d),n.push(v)}return n};var r=n(7095);function i(t,e,n,r){return["C",t,e,n,r,n,r]}function a(t,e,n,r,i,a){return["C",t/3+2/3*n,e/3+2/3*r,i/3+2/3*n,a/3+2/3*r,i,a]}},82019:function(t,e,n){"use strict";var r,i=n(1750),a=n(95616),o=n(31457),s=n(89546),l=n(44781),c=document.createElement("canvas"),u=c.getContext("2d");t.exports=function(t,e){if(!s(t))throw Error("Argument should be valid svg path string");var n,f;e||(e={}),e.shape?(n=e.shape[0],f=e.shape[1]):(n=c.width=e.w||e.width||200,f=c.height=e.h||e.height||200);var h=Math.min(n,f),d=e.stroke||0,p=e.viewbox||e.viewBox||i(t),g=[n/(p[2]-p[0]),f/(p[3]-p[1])],v=Math.min(g[0]||0,g[1]||0)/2;if(u.fillStyle="black",u.fillRect(0,0,n,f),u.fillStyle="white",d&&("number"!=typeof d&&(d=1),u.strokeStyle=d>0?"white":"black",u.lineWidth=Math.abs(d)),u.translate(.5*n,.5*f),u.scale(v,v),function(){if(null!=r)return r;var t=document.createElement("canvas").getContext("2d");if(t.canvas.width=t.canvas.height=1,!window.Path2D)return r=!1;var e=new Path2D("M0,0h1v1h-1v-1Z");t.fillStyle="black",t.fill(e);var n=t.getImageData(0,0,1,1);return 
r=n&&n.data&&255===n.data[3]}()){var m=new Path2D(t);u.fill(m),d&&u.stroke(m)}else{var y=a(t);o(u,y),u.fill(),d&&u.stroke()}return u.setTransform(1,0,0,1,0,0),l(u,{cutoff:null!=e.cutoff?e.cutoff:.5,radius:null!=e.radius?e.radius:.5*h})}},84267:function(t,e,n){var r;!function(i){var a=/^\s+/,o=/\s+$/,s=0,l=i.round,c=i.min,u=i.max,f=i.random;function h(t,e){if(e=e||{},(t=t||"")instanceof h)return t;if(!(this instanceof h))return new h(t,e);var n=function(t){var e,n,r,s={r:0,g:0,b:0},l=1,f=null,h=null,d=null,p=!1,g=!1;return"string"==typeof t&&(t=function(t){t=t.replace(a,"").replace(o,"").toLowerCase();var e,n=!1;if(P[t])t=P[t],n=!0;else if("transparent"==t)return{r:0,g:0,b:0,a:0,format:"name"};return(e=B.rgb.exec(t))?{r:e[1],g:e[2],b:e[3]}:(e=B.rgba.exec(t))?{r:e[1],g:e[2],b:e[3],a:e[4]}:(e=B.hsl.exec(t))?{h:e[1],s:e[2],l:e[3]}:(e=B.hsla.exec(t))?{h:e[1],s:e[2],l:e[3],a:e[4]}:(e=B.hsv.exec(t))?{h:e[1],s:e[2],v:e[3]}:(e=B.hsva.exec(t))?{h:e[1],s:e[2],v:e[3],a:e[4]}:(e=B.hex8.exec(t))?{r:R(e[1]),g:R(e[2]),b:R(e[3]),a:F(e[4]),format:n?"name":"hex8"}:(e=B.hex6.exec(t))?{r:R(e[1]),g:R(e[2]),b:R(e[3]),format:n?"name":"hex"}:(e=B.hex4.exec(t))?{r:R(e[1]+""+e[1]),g:R(e[2]+""+e[2]),b:R(e[3]+""+e[3]),a:F(e[4]+""+e[4]),format:n?"name":"hex8"}:!!(e=B.hex3.exec(t))&&{r:R(e[1]+""+e[1]),g:R(e[2]+""+e[2]),b:R(e[3]+""+e[3]),format:n?"name":"hex"}}(t)),"object"==typeof t&&(U(t.r)&&U(t.g)&&U(t.b)?(e=t.r,n=t.g,r=t.b,s={r:255*I(e,255),g:255*I(n,255),b:255*I(r,255)},p=!0,g="%"===String(t.r).substr(-1)?"prgb":"rgb"):U(t.h)&&U(t.s)&&U(t.v)?(f=N(t.s),h=N(t.v),s=function(t,e,n){t=6*I(t,360),e=I(e,100),n=I(n,100);var r=i.floor(t),a=t-r,o=n*(1-e),s=n*(1-a*e),l=n*(1-(1-a)*e),c=r%6,u=[n,s,o,o,l,n][c],f=[l,n,n,s,o,o][c],h=[o,o,l,n,n,s][c];return{r:255*u,g:255*f,b:255*h}}(t.h,f,h),p=!0,g="hsv"):U(t.h)&&U(t.s)&&U(t.l)&&(f=N(t.s),d=N(t.l),s=function(t,e,n){var r,i,a;function o(t,e,n){return n<0&&(n+=1),n>1&&(n-=1),n<1/6?t+6*(e-t)*n:n<.5?e:n<2/3?t+(e-t)*(2/3-n)*6:t}if(t=I(t,360),e=I(e,100),n=I(n,100),0===e)r=i=a=n;else{var s=n<.5?n*(1+e):n+e-n*e,l=2*n-s;r=o(l,s,t+1/3),i=o(l,s,t),a=o(l,s,t-1/3)}return{r:255*r,g:255*i,b:255*a}}(t.h,f,d),p=!0,g="hsl"),t.hasOwnProperty("a")&&(l=t.a)),l=L(l),{ok:p,format:t.format||g,r:c(255,u(s.r,0)),g:c(255,u(s.g,0)),b:c(255,u(s.b,0)),a:l}}(t);this._originalInput=t,this._r=n.r,this._g=n.g,this._b=n.b,this._a=n.a,this._roundA=l(100*this._a)/100,this._format=e.format||n.format,this._gradientType=e.gradientType,this._r<1&&(this._r=l(this._r)),this._g<1&&(this._g=l(this._g)),this._b<1&&(this._b=l(this._b)),this._ok=n.ok,this._tc_id=s++}function d(t,e,n){t=I(t,255),e=I(e,255),n=I(n,255);var r,i,a=u(t,e,n),o=c(t,e,n),s=(a+o)/2;if(a==o)r=i=0;else{var l=a-o;switch(i=s>.5?l/(2-a-o):l/(a+o),a){case t:r=(e-n)/l+(e>1)+720)%360;--e;)r.h=(r.h+i)%360,a.push(h(r));return a}function C(t,e){e=e||6;for(var n=h(t).toHsv(),r=n.h,i=n.s,a=n.v,o=[],s=1/e;e--;)o.push(h({h:r,s:i,v:a})),a=(a+s)%1;return o}h.prototype={isDark:function(){return this.getBrightness()<128},isLight:function(){return!this.isDark()},isValid:function(){return this._ok},getOriginalInput:function(){return this._originalInput},getFormat:function(){return this._format},getAlpha:function(){return this._a},getBrightness:function(){var t=this.toRgb();return(299*t.r+587*t.g+114*t.b)/1e3},getLuminance:function(){var t,e,n,r=this.toRgb();return t=r.r/255,e=r.g/255,n=r.b/255,.2126*(t<=.03928?t/12.92:i.pow((t+.055)/1.055,2.4))+.7152*(e<=.03928?e/12.92:i.pow((e+.055)/1.055,2.4))+.0722*(n<=.03928?n/12.92:i.pow((n+.055)/1.055,2.4))},setAlpha:function(t){return 
this._a=L(t),this._roundA=l(100*this._a)/100,this},toHsv:function(){var t=p(this._r,this._g,this._b);return{h:360*t.h,s:t.s,v:t.v,a:this._a}},toHsvString:function(){var t=p(this._r,this._g,this._b),e=l(360*t.h),n=l(100*t.s),r=l(100*t.v);return 1==this._a?"hsv("+e+", "+n+"%, "+r+"%)":"hsva("+e+", "+n+"%, "+r+"%, "+this._roundA+")"},toHsl:function(){var t=d(this._r,this._g,this._b);return{h:360*t.h,s:t.s,l:t.l,a:this._a}},toHslString:function(){var t=d(this._r,this._g,this._b),e=l(360*t.h),n=l(100*t.s),r=l(100*t.l);return 1==this._a?"hsl("+e+", "+n+"%, "+r+"%)":"hsla("+e+", "+n+"%, "+r+"%, "+this._roundA+")"},toHex:function(t){return g(this._r,this._g,this._b,t)},toHexString:function(t){return"#"+this.toHex(t)},toHex8:function(t){return function(t,e,n,r,i){var a=[z(l(t).toString(16)),z(l(e).toString(16)),z(l(n).toString(16)),z(j(r))];return i&&a[0].charAt(0)==a[0].charAt(1)&&a[1].charAt(0)==a[1].charAt(1)&&a[2].charAt(0)==a[2].charAt(1)&&a[3].charAt(0)==a[3].charAt(1)?a[0].charAt(0)+a[1].charAt(0)+a[2].charAt(0)+a[3].charAt(0):a.join("")}(this._r,this._g,this._b,this._a,t)},toHex8String:function(t){return"#"+this.toHex8(t)},toRgb:function(){return{r:l(this._r),g:l(this._g),b:l(this._b),a:this._a}},toRgbString:function(){return 1==this._a?"rgb("+l(this._r)+", "+l(this._g)+", "+l(this._b)+")":"rgba("+l(this._r)+", "+l(this._g)+", "+l(this._b)+", "+this._roundA+")"},toPercentageRgb:function(){return{r:l(100*I(this._r,255))+"%",g:l(100*I(this._g,255))+"%",b:l(100*I(this._b,255))+"%",a:this._a}},toPercentageRgbString:function(){return 1==this._a?"rgb("+l(100*I(this._r,255))+"%, "+l(100*I(this._g,255))+"%, "+l(100*I(this._b,255))+"%)":"rgba("+l(100*I(this._r,255))+"%, "+l(100*I(this._g,255))+"%, "+l(100*I(this._b,255))+"%, "+this._roundA+")"},toName:function(){return 0===this._a?"transparent":!(this._a<1)&&(O[g(this._r,this._g,this._b,!0)]||!1)},toFilter:function(t){var e="#"+v(this._r,this._g,this._b,this._a),n=e,r=this._gradientType?"GradientType = 1, ":"";if(t){var i=h(t);n="#"+v(i._r,i._g,i._b,i._a)}return"progid:DXImageTransform.Microsoft.gradient("+r+"startColorstr="+e+",endColorstr="+n+")"},toString:function(t){var e=!!t;t=t||this._format;var n=!1,r=this._a<1&&this._a>=0;return e||!r||"hex"!==t&&"hex6"!==t&&"hex3"!==t&&"hex4"!==t&&"hex8"!==t&&"name"!==t?("rgb"===t&&(n=this.toRgbString()),"prgb"===t&&(n=this.toPercentageRgbString()),"hex"!==t&&"hex6"!==t||(n=this.toHexString()),"hex3"===t&&(n=this.toHexString(!0)),"hex4"===t&&(n=this.toHex8String(!0)),"hex8"===t&&(n=this.toHex8String()),"name"===t&&(n=this.toName()),"hsl"===t&&(n=this.toHslString()),"hsv"===t&&(n=this.toHsvString()),n||this.toHexString()):"name"===t&&0===this._a?this.toName():this.toRgbString()},clone:function(){return h(this.toString())},_applyModification:function(t,e){var n=t.apply(null,[this].concat([].slice.call(e)));return this._r=n._r,this._g=n._g,this._b=n._b,this.setAlpha(n._a),this},lighten:function(){return this._applyModification(x,arguments)},brighten:function(){return this._applyModification(w,arguments)},darken:function(){return this._applyModification(_,arguments)},desaturate:function(){return this._applyModification(m,arguments)},saturate:function(){return this._applyModification(y,arguments)},greyscale:function(){return this._applyModification(b,arguments)},spin:function(){return this._applyModification(k,arguments)},_applyCombination:function(t,e){return t.apply(null,[this].concat([].slice.call(e)))},analogous:function(){return this._applyCombination(E,arguments)},complement:function(){return 
this._applyCombination(T,arguments)},monochromatic:function(){return this._applyCombination(C,arguments)},splitcomplement:function(){return this._applyCombination(S,arguments)},triad:function(){return this._applyCombination(M,arguments)},tetrad:function(){return this._applyCombination(A,arguments)}},h.fromRatio=function(t,e){if("object"==typeof t){var n={};for(var r in t)t.hasOwnProperty(r)&&(n[r]="a"===r?t[r]:N(t[r]));t=n}return h(t,e)},h.equals=function(t,e){return!(!t||!e)&&h(t).toRgbString()==h(e).toRgbString()},h.random=function(){return h.fromRatio({r:f(),g:f(),b:f()})},h.mix=function(t,e,n){n=0===n?0:n||50;var r=h(t).toRgb(),i=h(e).toRgb(),a=n/100;return h({r:(i.r-r.r)*a+r.r,g:(i.g-r.g)*a+r.g,b:(i.b-r.b)*a+r.b,a:(i.a-r.a)*a+r.a})},h.readability=function(t,e){var n=h(t),r=h(e);return(i.max(n.getLuminance(),r.getLuminance())+.05)/(i.min(n.getLuminance(),r.getLuminance())+.05)},h.isReadable=function(t,e,n){var r,i,a=h.readability(t,e);switch(i=!1,(r=function(t){var e,n;return e=((t=t||{level:"AA",size:"small"}).level||"AA").toUpperCase(),n=(t.size||"small").toLowerCase(),"AA"!==e&&"AAA"!==e&&(e="AA"),"small"!==n&&"large"!==n&&(n="small"),{level:e,size:n}}(n)).level+r.size){case"AAsmall":case"AAAlarge":i=a>=4.5;break;case"AAlarge":i=a>=3;break;case"AAAsmall":i=a>=7}return i},h.mostReadable=function(t,e,n){var r,i,a,o,s=null,l=0;i=(n=n||{}).includeFallbackColors,a=n.level,o=n.size;for(var c=0;cl&&(l=r,s=h(e[c]));return h.isReadable(t,s,{level:a,size:o})||!i?s:(n.includeFallbackColors=!1,h.mostReadable(t,["#fff","#000"],n))};var P=h.names={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"0ff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000",blanchedalmond:"ffebcd",blue:"00f",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",burntsienna:"ea7e5d",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"0ff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkgrey:"a9a9a9",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkslategrey:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dimgrey:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"f0f",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",grey:"808080",honeydew:"f0fff0",hotpink:"ff69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgray:"d3d3d3",lightgreen:"90ee90",lightgrey:"d3d3d3",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"789",lightslategrey:"789",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"0f0",limegreen:"32cd32",linen:"faf0e6",magenta:"f0f",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370db",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:
"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"db7093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",rebeccapurple:"663399",red:"f00",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",slategrey:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"fff",whitesmoke:"f5f5f5",yellow:"ff0",yellowgreen:"9acd32"},O=h.hexNames=function(t){var e={};for(var n in t)t.hasOwnProperty(n)&&(e[t[n]]=n);return e}(P);function L(t){return t=parseFloat(t),(isNaN(t)||t<0||t>1)&&(t=1),t}function I(t,e){(function(t){return"string"==typeof t&&-1!=t.indexOf(".")&&1===parseFloat(t)})(t)&&(t="100%");var n=function(t){return"string"===typeof t&&-1!=t.indexOf("%")}(t);return t=c(e,u(0,parseFloat(t))),n&&(t=parseInt(t*e,10)/100),i.abs(t-e)<1e-6?1:t%e/parseFloat(e)}function D(t){return c(1,u(0,t))}function R(t){return parseInt(t,16)}function z(t){return 1==t.length?"0"+t:""+t}function N(t){return t<=1&&(t=100*t+"%"),t}function j(t){return i.round(255*parseFloat(t)).toString(16)}function F(t){return R(t)/255}var B=function(){var t="(?:[-\\+]?\\d*\\.\\d+%?)|(?:[-\\+]?\\d+%?)",e="[\\s|\\(]+("+t+")[,|\\s]+("+t+")[,|\\s]+("+t+")\\s*\\)?",n="[\\s|\\(]+("+t+")[,|\\s]+("+t+")[,|\\s]+("+t+")[,|\\s]+("+t+")\\s*\\)?";return{CSS_UNIT:new RegExp(t),rgb:new RegExp("rgb"+e),rgba:new RegExp("rgba"+n),hsl:new RegExp("hsl"+e),hsla:new RegExp("hsla"+n),hsv:new RegExp("hsv"+e),hsva:new RegExp("hsva"+n),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/}}();function U(t){return!!B.CSS_UNIT.exec(t)}t.exports?t.exports=h:void 0===(r=function(){return h}.call(e,n,e,t))||(t.exports=r)}(Math)},57060:function(t){"use strict";t.exports=n,t.exports.float32=t.exports.float=n,t.exports.fract32=t.exports.fract=function(t,e){if(t.length){if(t instanceof Float32Array)return new Float32Array(t.length);e instanceof Float32Array||(e=n(t));for(var r=0,i=e.length;r":(e.length>100&&(e=e.slice(0,99)+"\u2026"),e=e.replace(i,(function(t){switch(t){case"\n":return"\\n";case"\r":return"\\r";case"\u2028":return"\\u2028";case"\u2029":return"\\u2029";default:throw new Error("Unexpected character")}})))}},47403:function(t,e,n){"use strict";var r=n(24582),i={object:!0,function:!0,undefined:!0};t.exports=function(t){return!!r(t)&&hasOwnProperty.call(i,typeof t)}},82527:function(t,e,n){"use strict";var r=n(69190),i=n(84985);t.exports=function(t){return i(t)?t:r(t,"%v is not a plain function",arguments[1])}},84985:function(t,e,n){"use strict";var r=n(73116),i=/^\s*class[\s{/}]/,a=Function.prototype.toString;t.exports=function(t){return!!r(t)&&!i.test(a.call(t))}},24511:function(t,e,n){"use strict";var r=n(47403);t.exports=function(t){if(!r(t))return!1;try{return!!t.constructor&&t.constructor.prototype===t}catch(e){return!1}}},9234:function(t,e,n){"use strict";var r=n(24582),i=n(47403),a=Object.prototype.toString;t.exports=function(t){if(!r(t))return null;if(i(t)){var 
e=t.toString;if("function"!==typeof e)return null;if(e===a)return null}try{return""+t}catch(n){return null}}},10424:function(t,e,n){"use strict";var r=n(69190),i=n(24582);t.exports=function(t){return i(t)?t:r(t,"Cannot use %v",arguments[1])}},24582:function(t){"use strict";t.exports=function(t){return void 0!==t&&null!==t}},58404:function(t,e,n){"use strict";var r=n(13547),i=n(12129),a=n(12856).Buffer;n.g.__TYPEDARRAY_POOL||(n.g.__TYPEDARRAY_POOL={UINT8:i([32,0]),UINT16:i([32,0]),UINT32:i([32,0]),BIGUINT64:i([32,0]),INT8:i([32,0]),INT16:i([32,0]),INT32:i([32,0]),BIGINT64:i([32,0]),FLOAT:i([32,0]),DOUBLE:i([32,0]),DATA:i([32,0]),UINT8C:i([32,0]),BUFFER:i([32,0])});var o="undefined"!==typeof Uint8ClampedArray,s="undefined"!==typeof BigUint64Array,l="undefined"!==typeof BigInt64Array,c=n.g.__TYPEDARRAY_POOL;c.UINT8C||(c.UINT8C=i([32,0])),c.BIGUINT64||(c.BIGUINT64=i([32,0])),c.BIGINT64||(c.BIGINT64=i([32,0])),c.BUFFER||(c.BUFFER=i([32,0]));var u=c.DATA,f=c.BUFFER;function h(t){if(t){var e=t.length||t.byteLength,n=r.log2(e);u[n].push(t)}}function d(t){t=r.nextPow2(t);var e=r.log2(t),n=u[e];return n.length>0?n.pop():new ArrayBuffer(t)}function p(t){return new Uint8Array(d(t),0,t)}function g(t){return new Uint16Array(d(2*t),0,t)}function v(t){return new Uint32Array(d(4*t),0,t)}function m(t){return new Int8Array(d(t),0,t)}function y(t){return new Int16Array(d(2*t),0,t)}function b(t){return new Int32Array(d(4*t),0,t)}function x(t){return new Float32Array(d(4*t),0,t)}function w(t){return new Float64Array(d(8*t),0,t)}function _(t){return o?new Uint8ClampedArray(d(t),0,t):p(t)}function k(t){return s?new BigUint64Array(d(8*t),0,t):null}function T(t){return l?new BigInt64Array(d(8*t),0,t):null}function M(t){return new DataView(d(t),0,t)}function A(t){t=r.nextPow2(t);var e=r.log2(t),n=f[e];return n.length>0?n.pop():new a(t)}e.free=function(t){if(a.isBuffer(t))f[r.log2(t.length)].push(t);else{if("[object ArrayBuffer]"!==Object.prototype.toString.call(t)&&(t=t.buffer),!t)return;var e=t.length||t.byteLength,n=0|r.log2(e);u[n].push(t)}},e.freeUint8=e.freeUint16=e.freeUint32=e.freeBigUint64=e.freeInt8=e.freeInt16=e.freeInt32=e.freeBigInt64=e.freeFloat32=e.freeFloat=e.freeFloat64=e.freeDouble=e.freeUint8Clamped=e.freeDataView=function(t){h(t.buffer)},e.freeArrayBuffer=h,e.freeBuffer=function(t){f[r.log2(t.length)].push(t)},e.malloc=function(t,e){if(void 0===e||"arraybuffer"===e)return d(t);switch(e){case"uint8":return p(t);case"uint16":return g(t);case"uint32":return v(t);case"int8":return m(t);case"int16":return y(t);case"int32":return b(t);case"float":case"float32":return x(t);case"double":case"float64":return w(t);case"uint8_clamped":return _(t);case"bigint64":return T(t);case"biguint64":return k(t);case"buffer":return A(t);case"data":case"dataview":return M(t);default:return null}return null},e.mallocArrayBuffer=d,e.mallocUint8=p,e.mallocUint16=g,e.mallocUint32=v,e.mallocInt8=m,e.mallocInt16=y,e.mallocInt32=b,e.mallocFloat32=e.mallocFloat=x,e.mallocFloat64=e.mallocDouble=w,e.mallocUint8Clamped=_,e.mallocBigUint64=k,e.mallocBigInt64=T,e.mallocDataView=M,e.mallocBuffer=A,e.clearCache=function(){for(var t=0;t<32;++t)c.UINT8[t].length=0,c.UINT16[t].length=0,c.UINT32[t].length=0,c.INT8[t].length=0,c.INT16[t].length=0,c.INT32[t].length=0,c.FLOAT[t].length=0,c.DOUBLE[t].length=0,c.BIGUINT64[t].length=0,c.BIGINT64[t].length=0,c.UINT8C[t].length=0,u[t].length=0,f[t].length=0}},90448:function(t){var e=/[\'\"]/;t.exports=function(t){return 
t?(e.test(t.charAt(0))&&(t=t.substr(1)),e.test(t.charAt(t.length-1))&&(t=t.substr(0,t.length-1)),t):""}},93447:function(t){"use strict";t.exports=function(t,e,n){Array.isArray(n)||(n=[].slice.call(arguments,2));for(var r=0,i=n.length;r=i)return t;switch(t){case"%s":return String(r[n++]);case"%d":return Number(r[n++]);case"%j":try{return JSON.stringify(r[n++])}catch(e){return"[Circular]"}default:return t}})),s=r[n];n=3&&(r.depth=arguments[2]),arguments.length>=4&&(r.colors=arguments[3]),v(n)?r.showHidden=n:n&&e._extend(r,n),x(r.showHidden)&&(r.showHidden=!1),x(r.depth)&&(r.depth=2),x(r.colors)&&(r.colors=!1),x(r.customInspect)&&(r.customInspect=!0),r.colors&&(r.stylize=u),h(r,t,r.depth)}function u(t,e){var n=c.styles[e];return n?"\x1b["+c.colors[n][0]+"m"+t+"\x1b["+c.colors[n][1]+"m":t}function f(t,e){return t}function h(t,n,r){if(t.customInspect&&n&&M(n.inspect)&&n.inspect!==e.inspect&&(!n.constructor||n.constructor.prototype!==n)){var i=n.inspect(r,t);return b(i)||(i=h(t,i,r)),i}var a=function(t,e){if(x(e))return t.stylize("undefined","undefined");if(b(e)){var n="'"+JSON.stringify(e).replace(/^"|"$/g,"").replace(/'/g,"\\'").replace(/\\"/g,'"')+"'";return t.stylize(n,"string")}return y(e)?t.stylize(""+e,"number"):v(e)?t.stylize(""+e,"boolean"):m(e)?t.stylize("null","null"):void 0}(t,n);if(a)return a;var o=Object.keys(n),s=function(t){var e={};return t.forEach((function(t,n){e[t]=!0})),e}(o);if(t.showHidden&&(o=Object.getOwnPropertyNames(n)),T(n)&&(o.indexOf("message")>=0||o.indexOf("description")>=0))return d(n);if(0===o.length){if(M(n)){var l=n.name?": "+n.name:"";return t.stylize("[Function"+l+"]","special")}if(w(n))return t.stylize(RegExp.prototype.toString.call(n),"regexp");if(k(n))return t.stylize(Date.prototype.toString.call(n),"date");if(T(n))return d(n)}var c,u="",f=!1,_=["{","}"];return g(n)&&(f=!0,_=["[","]"]),M(n)&&(u=" [Function"+(n.name?": "+n.name:"")+"]"),w(n)&&(u=" "+RegExp.prototype.toString.call(n)),k(n)&&(u=" "+Date.prototype.toUTCString.call(n)),T(n)&&(u=" "+d(n)),0!==o.length||f&&0!=n.length?r<0?w(n)?t.stylize(RegExp.prototype.toString.call(n),"regexp"):t.stylize("[Object]","special"):(t.seen.push(n),c=f?function(t,e,n,r,i){for(var a=[],o=0,s=e.length;o60?n[0]+(""===e?"":e+"\n ")+" "+t.join(",\n ")+" "+n[1]:n[0]+e+" "+t.join(", ")+" "+n[1]}(c,u,_)):_[0]+u+_[1]}function d(t){return"["+Error.prototype.toString.call(t)+"]"}function p(t,e,n,r,i,a){var o,s,l;if((l=Object.getOwnPropertyDescriptor(e,i)||{value:e[i]}).get?s=l.set?t.stylize("[Getter/Setter]","special"):t.stylize("[Getter]","special"):l.set&&(s=t.stylize("[Setter]","special")),C(r,i)||(o="["+i+"]"),s||(t.seen.indexOf(l.value)<0?(s=m(n)?h(t,l.value,null):h(t,l.value,n-1)).indexOf("\n")>-1&&(s=a?s.split("\n").map((function(t){return" "+t})).join("\n").slice(2):"\n"+s.split("\n").map((function(t){return" "+t})).join("\n")):s=t.stylize("[Circular]","special")),x(o)){if(a&&i.match(/^\d+$/))return s;(o=JSON.stringify(""+i)).match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)?(o=o.slice(1,-1),o=t.stylize(o,"name")):(o=o.replace(/'/g,"\\'").replace(/\\"/g,'"').replace(/(^"|"$)/g,"'"),o=t.stylize(o,"string"))}return o+": "+s}function g(t){return Array.isArray(t)}function v(t){return"boolean"===typeof t}function m(t){return null===t}function y(t){return"number"===typeof t}function b(t){return"string"===typeof t}function x(t){return void 0===t}function w(t){return _(t)&&"[object RegExp]"===A(t)}function _(t){return"object"===typeof t&&null!==t}function k(t){return _(t)&&"[object Date]"===A(t)}function T(t){return _(t)&&("[object 
Error]"===A(t)||t instanceof Error)}function M(t){return"function"===typeof t}function A(t){return Object.prototype.toString.call(t)}function S(t){return t<10?"0"+t.toString(10):t.toString(10)}e.debuglog=function(t){if(t=t.toUpperCase(),!o[t])if(s.test(t)){var n=r.pid;o[t]=function(){var r=e.format.apply(e,arguments);console.error("%s %d: %s",t,n,r)}}else o[t]=function(){};return o[t]},e.inspect=c,c.colors={bold:[1,22],italic:[3,23],underline:[4,24],inverse:[7,27],white:[37,39],grey:[90,39],black:[30,39],blue:[34,39],cyan:[36,39],green:[32,39],magenta:[35,39],red:[31,39],yellow:[33,39]},c.styles={special:"cyan",number:"yellow",boolean:"yellow",undefined:"grey",null:"bold",string:"green",date:"magenta",regexp:"red"},e.types=n(4936),e.isArray=g,e.isBoolean=v,e.isNull=m,e.isNullOrUndefined=function(t){return null==t},e.isNumber=y,e.isString=b,e.isSymbol=function(t){return"symbol"===typeof t},e.isUndefined=x,e.isRegExp=w,e.types.isRegExp=w,e.isObject=_,e.isDate=k,e.types.isDate=k,e.isError=T,e.types.isNativeError=T,e.isFunction=M,e.isPrimitive=function(t){return null===t||"boolean"===typeof t||"number"===typeof t||"string"===typeof t||"symbol"===typeof t||"undefined"===typeof t},e.isBuffer=n(45920);var E=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];function C(t,e){return Object.prototype.hasOwnProperty.call(t,e)}e.log=function(){console.log("%s - %s",function(){var t=new Date,e=[S(t.getHours()),S(t.getMinutes()),S(t.getSeconds())].join(":");return[t.getDate(),E[t.getMonth()],e].join(" ")}(),e.format.apply(e,arguments))},e.inherits=n(42018),e._extend=function(t,e){if(!e||!_(e))return t;for(var n=Object.keys(e),r=n.length;r--;)t[n[r]]=e[n[r]];return t};var P="undefined"!==typeof Symbol?Symbol("util.promisify.custom"):void 0;function O(t,e){if(!t){var n=new Error("Promise was rejected with a falsy value");n.reason=t,t=n}return e(t)}e.promisify=function(t){if("function"!==typeof t)throw new TypeError('The "original" argument must be of type Function');if(P&&t[P]){var e;if("function"!==typeof(e=t[P]))throw new TypeError('The "util.promisify.custom" argument must be of type Function');return Object.defineProperty(e,P,{value:e,enumerable:!1,writable:!1,configurable:!0}),e}function e(){for(var e,n,r=new Promise((function(t,r){e=t,n=r})),i=[],a=0;a2111)throw e.replace(/\{0\}/,this.local.name);return t},toMonthIndex:function(t,e,n){var i=this.intercalaryMonth(t);if(n&&e!==i||e<1||e>12)throw r.local.invalidMonth.replace(/\{0\}/,this.local.name);return i?!n&&e<=i?e-1:e:e-1},toChineseMonth:function(t,e){t.year&&(e=(t=t.year()).month());var n=this.intercalaryMonth(t);if(e<0||e>(n?12:11))throw r.local.invalidMonth.replace(/\{0\}/,this.local.name);return n?e>13;return e},isIntercalaryMonth:function(t,e){t.year&&(e=(t=t.year()).month());var n=this.intercalaryMonth(t);return!!n&&n===e},leapYear:function(t){return 0!==this.intercalaryMonth(t)},weekOfYear:function(t,e,n){var i,o=this._validateYear(t,r.local.invalidyear),s=h[o-h[0]],l=s>>9&4095,c=s>>5&15,u=31&s;(i=a.newDate(l,c,u)).add(4-(i.dayOfWeek()||7),"d");var f=this.toJD(t,e,n)-i.toJD();return 1+Math.floor(f/7)},monthsInYear:function(t){return this.leapYear(t)?13:12},daysInMonth:function(t,e){t.year&&(e=t.month(),t=t.year()),t=this._validateYear(t);var n=f[t-f[0]];if(e>(n>>13?12:11))throw r.local.invalidMonth.replace(/\{0\}/,this.local.name);var i=n&1<<12-e?30:29;return i},weekDay:function(t,e,n){return(this.dayOfWeek(t,e,n)||7)<6},toJD:function(t,e,n){var 
i=this._validate(t,s,n,r.local.invalidDate);t=this._validateYear(i.year()),e=i.month(),n=i.day();var o=this.isIntercalaryMonth(t,e),s=this.toChineseMonth(t,e),l=function(t,e,n,r,i){var a,o,s;if("object"===typeof t)o=t,a=e||{};else{var l;if(!("number"===typeof t&&t>=1888&&t<=2111))throw new Error("Lunar year outside range 1888-2111");if(!("number"===typeof e&&e>=1&&e<=12))throw new Error("Lunar month outside range 1 - 12");if(!("number"===typeof n&&n>=1&&n<=30))throw new Error("Lunar day outside range 1 - 30");"object"===typeof r?(l=!1,a=r):(l=!!r,a=i||{}),o={year:t,month:e,day:n,isIntercalary:l}}s=o.day-1;var c,u=f[o.year-f[0]],d=u>>13;c=d&&(o.month>d||o.isIntercalary)?o.month:o.month-1;for(var p=0;p>9&4095,(g>>5&15)-1,(31&g)+s);return a.year=v.getFullYear(),a.month=1+v.getMonth(),a.day=v.getDate(),a}(t,s,n,o);return a.toJD(l.year,l.month,l.day)},fromJD:function(t){var e=a.fromJD(t),n=function(t,e,n,r){var i,a;if("object"===typeof t)i=t,a=e||{};else{if(!("number"===typeof t&&t>=1888&&t<=2111))throw new Error("Solar year outside range 1888-2111");if(!("number"===typeof e&&e>=1&&e<=12))throw new Error("Solar month outside range 1 - 12");if(!("number"===typeof n&&n>=1&&n<=31))throw new Error("Solar day outside range 1 - 31");i={year:t,month:e,day:n},a=r||{}}var o=h[i.year-h[0]],s=i.year<<9|i.month<<5|i.day;a.year=s>=o?i.year:i.year-1,o=h[a.year-h[0]];var l,c=new Date(o>>9&4095,(o>>5&15)-1,31&o),u=new Date(i.year,i.month-1,i.day);l=Math.round((u-c)/864e5);var d,p=f[a.year-f[0]];for(d=0;d<13;d++){var g=p&1<<12-d?30:29;if(l>13;return!v||d=2&&r<=6},extraInfo:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return{century:o[Math.floor((i.year()-1)/100)+1]||""}},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return t=i.year()+(i.year()<0?1:0),e=i.month(),(n=i.day())+(e>1?16:0)+(e>2?32*(e-2):0)+400*(t-1)+this.jdEpoch-1},fromJD:function(t){t=Math.floor(t+.5)-Math.floor(this.jdEpoch)-1;var e=Math.floor(t/400)+1;t-=400*(e-1),t+=t>15?16:0;var n=Math.floor(t/32)+1,r=t-32*(n-1)+1;return this.newDate(e<=0?e-1:e,n,r)}});var o={20:"Fruitbat",21:"Anchovy"};r.calendars.discworld=a},37715:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new r.baseCalendar,i(a.prototype,{name:"Ethiopian",jdEpoch:1724220.5,daysPerMonth:[30,30,30,30,30,30,30,30,30,30,30,30,5],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Ethiopian",epochs:["BEE","EE"],monthNames:["Meskerem","Tikemet","Hidar","Tahesas","Tir","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehase","Pagume"],monthNamesShort:["Mes","Tik","Hid","Tah","Tir","Yek","Meg","Mia","Gen","Sen","Ham","Neh","Pag"],dayNames:["Ehud","Segno","Maksegno","Irob","Hamus","Arb","Kidame"],dayNamesShort:["Ehu","Seg","Mak","Iro","Ham","Arb","Kid"],dayNamesMin:["Eh","Se","Ma","Ir","Ha","Ar","Ki"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return(t=e.year()+(e.year()<0?1:0))%4===3||t%4===-1},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,r.local.invalidYear||r.regionalOptions[""].invalidYear),13},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(-r.dayOfWeek(),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return 
this.daysPerMonth[n.month()-1]+(13===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return(this.dayOfWeek(t,e,n)||7)<6},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return(t=i.year())<0&&t++,i.day()+30*(i.month()-1)+365*(t-1)+Math.floor(t/4)+this.jdEpoch-1},fromJD:function(t){var e=Math.floor(t)+.5-this.jdEpoch,n=Math.floor((e-Math.floor((e+366)/1461))/365)+1;n<=0&&n--,e=Math.floor(t)+.5-this.newDate(n,1,1).toJD();var r=Math.floor(e/30)+1,i=e-30*(r-1)+1;return this.newDate(n,r,i)}}),r.calendars.ethiopian=a},99384:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}function o(t,e){return t-e*Math.floor(t/e)}a.prototype=new r.baseCalendar,i(a.prototype,{name:"Hebrew",jdEpoch:347995.5,daysPerMonth:[30,29,30,29,30,29,30,29,30,29,30,29,29],hasYearZero:!1,minMonth:1,firstMonth:7,minDay:1,regionalOptions:{"":{name:"Hebrew",epochs:["BAM","AM"],monthNames:["Nisan","Iyar","Sivan","Tammuz","Av","Elul","Tishrei","Cheshvan","Kislev","Tevet","Shevat","Adar","Adar II"],monthNamesShort:["Nis","Iya","Siv","Tam","Av","Elu","Tis","Che","Kis","Tev","She","Ada","Ad2"],dayNames:["Yom Rishon","Yom Sheni","Yom Shlishi","Yom Revi'i","Yom Chamishi","Yom Shishi","Yom Shabbat"],dayNamesShort:["Ris","She","Shl","Rev","Cha","Shi","Sha"],dayNamesMin:["Ri","She","Shl","Re","Ch","Shi","Sha"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return this._leapYear(e.year())},_leapYear:function(t){return o(7*(t=t<0?t+1:t)+1,19)<7},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,r.local.invalidYear),this._leapYear(t.year?t.year():t)?13:12},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(-r.dayOfWeek(),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInYear:function(t){return t=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear).year(),this.toJD(-1===t?1:t+1,7,1)-this.toJD(t,7,1)},daysInMonth:function(t,e){return t.year&&(e=t.month(),t=t.year()),this._validate(t,e,this.minDay,r.local.invalidMonth),12===e&&this.leapYear(t)||8===e&&5===o(this.daysInYear(t),10)?30:9===e&&3===o(this.daysInYear(t),10)?29:this.daysPerMonth[e-1]},weekDay:function(t,e,n){return 6!==this.dayOfWeek(t,e,n)},extraInfo:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return{yearType:(this.leapYear(i)?"embolismic":"common")+" "+["deficient","regular","complete"][this.daysInYear(i)%10-3]}},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);t=i.year(),e=i.month(),n=i.day();var a=t<=0?t+1:t,o=this.jdEpoch+this._delay1(a)+this._delay2(a)+n+1;if(e<7){for(var s=7;s<=this.monthsInYear(t);s++)o+=this.daysInMonth(t,s);for(s=1;s=this.toJD(-1===e?1:e+1,7,1);)e++;for(var n=tthis.toJD(e,n,this.daysInMonth(e,n));)n++;var r=t-this.toJD(e,n,1)+1;return this.newDate(e,n,r)}}),r.calendars.hebrew=a},43805:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new r.baseCalendar,i(a.prototype,{name:"Islamic",jdEpoch:1948439.5,daysPerMonth:[30,29,30,29,30,29,30,29,30,29,30,29],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Islamic",epochs:["BH","AH"],monthNames:["Muharram","Safar","Rabi' al-awwal","Rabi' al-thani","Jumada al-awwal","Jumada al-thani","Rajab","Sha'aban","Ramadan","Shawwal","Dhu al-Qi'dah","Dhu 
al-Hijjah"],monthNamesShort:["Muh","Saf","Rab1","Rab2","Jum1","Jum2","Raj","Sha'","Ram","Shaw","DhuQ","DhuH"],dayNames:["Yawm al-ahad","Yawm al-ithnayn","Yawm ath-thulaathaa'","Yawm al-arbi'aa'","Yawm al-kham\u012bs","Yawm al-jum'a","Yawm as-sabt"],dayNamesShort:["Aha","Ith","Thu","Arb","Kha","Jum","Sab"],dayNamesMin:["Ah","It","Th","Ar","Kh","Ju","Sa"],digits:null,dateFormat:"yyyy/mm/dd",firstDay:6,isRTL:!1}},leapYear:function(t){return(11*this._validate(t,this.minMonth,this.minDay,r.local.invalidYear).year()+14)%30<11},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(-r.dayOfWeek(),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInYear:function(t){return this.leapYear(t)?355:354},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return this.daysPerMonth[n.month()-1]+(12===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return 5!==this.dayOfWeek(t,e,n)},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return t=i.year(),e=i.month(),t=t<=0?t+1:t,(n=i.day())+Math.ceil(29.5*(e-1))+354*(t-1)+Math.floor((3+11*t)/30)+this.jdEpoch-1},fromJD:function(t){t=Math.floor(t)+.5;var e=Math.floor((30*(t-this.jdEpoch)+10646)/10631);e=e<=0?e-1:e;var n=Math.min(12,Math.ceil((t-29-this.toJD(e,1,1))/29.5)+1),r=t-this.toJD(e,n,1)+1;return this.newDate(e,n,r)}}),r.calendars.islamic=a},88874:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new r.baseCalendar,i(a.prototype,{name:"Julian",jdEpoch:1721423.5,daysPerMonth:[31,28,31,30,31,30,31,31,30,31,30,31],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Julian",epochs:["BC","AD"],monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],digits:null,dateFormat:"mm/dd/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return(t=e.year()<0?e.year()+1:e.year())%4===0},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(4-(r.dayOfWeek()||7),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return this.daysPerMonth[n.month()-1]+(2===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return(this.dayOfWeek(t,e,n)||7)<6},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return t=i.year(),e=i.month(),n=i.day(),t<0&&t++,e<=2&&(t--,e+=12),Math.floor(365.25*(t+4716))+Math.floor(30.6001*(e+1))+n-1524.5},fromJD:function(t){var e=Math.floor(t+.5)+1524,n=Math.floor((e-122.1)/365.25),r=Math.floor(365.25*n),i=Math.floor((e-r)/30.6001),a=i-Math.floor(i<14?1:13),o=n-Math.floor(a>2?4716:4715),s=e-r-Math.floor(30.6001*i);return o<=0&&o--,this.newDate(o,a,s)}}),r.calendars.julian=a},83290:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}function o(t,e){return t-e*Math.floor(t/e)}function s(t,e){return o(t-1,e)+1}a.prototype=new 
r.baseCalendar,i(a.prototype,{name:"Mayan",jdEpoch:584282.5,hasYearZero:!0,minMonth:0,firstMonth:0,minDay:0,regionalOptions:{"":{name:"Mayan",epochs:["",""],monthNames:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17"],monthNamesShort:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17"],dayNames:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19"],dayNamesShort:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19"],dayNamesMin:["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19"],digits:null,dateFormat:"YYYY.m.d",firstDay:0,isRTL:!1,haabMonths:["Pop","Uo","Zip","Zotz","Tzec","Xul","Yaxkin","Mol","Chen","Yax","Zac","Ceh","Mac","Kankin","Muan","Pax","Kayab","Cumku","Uayeb"],tzolkinMonths:["Imix","Ik","Akbal","Kan","Chicchan","Cimi","Manik","Lamat","Muluc","Oc","Chuen","Eb","Ben","Ix","Men","Cib","Caban","Etznab","Cauac","Ahau"]}},leapYear:function(t){return this._validate(t,this.minMonth,this.minDay,r.local.invalidYear),!1},formatYear:function(t){t=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear).year();var e=Math.floor(t/400);return t%=400,t+=t<0?400:0,e+"."+Math.floor(t/20)+"."+t%20},forYear:function(t){if((t=t.split(".")).length<3)throw"Invalid Mayan year";for(var e=0,n=0;n19||n>0&&r<0)throw"Invalid Mayan year";e=20*e+r}return e},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,r.local.invalidYear),18},weekOfYear:function(t,e,n){return this._validate(t,e,n,r.local.invalidDate),0},daysInYear:function(t){return this._validate(t,this.minMonth,this.minDay,r.local.invalidYear),360},daysInMonth:function(t,e){return this._validate(t,e,this.minDay,r.local.invalidMonth),20},daysInWeek:function(){return 5},dayOfWeek:function(t,e,n){return this._validate(t,e,n,r.local.invalidDate).day()},weekDay:function(t,e,n){return this._validate(t,e,n,r.local.invalidDate),!0},extraInfo:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate).toJD(),a=this._toHaab(i),o=this._toTzolkin(i);return{haabMonthName:this.local.haabMonths[a[0]-1],haabMonth:a[0],haabDay:a[1],tzolkinDayName:this.local.tzolkinMonths[o[0]-1],tzolkinDay:o[0],tzolkinTrecena:o[1]}},_toHaab:function(t){var e=o(8+(t-=this.jdEpoch)+340,365);return[Math.floor(e/20)+1,o(e,20)]},_toTzolkin:function(t){return[s(20+(t-=this.jdEpoch),20),s(t+4,13)]},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return i.day()+20*i.month()+360*i.year()+this.jdEpoch},fromJD:function(t){t=Math.floor(t)+.5-this.jdEpoch;var e=Math.floor(t/360);t%=360,t+=t<0?360:0;var n=Math.floor(t/20),r=t%20;return this.newDate(e,n,r)}}),r.calendars.mayan=a},29108:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new r.baseCalendar;var 
o=r.instance("gregorian");i(a.prototype,{name:"Nanakshahi",jdEpoch:2257673.5,daysPerMonth:[31,31,31,31,31,30,30,30,30,30,30,30],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Nanakshahi",epochs:["BN","AN"],monthNames:["Chet","Vaisakh","Jeth","Harh","Sawan","Bhadon","Assu","Katak","Maghar","Poh","Magh","Phagun"],monthNamesShort:["Che","Vai","Jet","Har","Saw","Bha","Ass","Kat","Mgr","Poh","Mgh","Pha"],dayNames:["Somvaar","Mangalvar","Budhvaar","Veervaar","Shukarvaar","Sanicharvaar","Etvaar"],dayNamesShort:["Som","Mangal","Budh","Veer","Shukar","Sanichar","Et"],dayNamesMin:["So","Ma","Bu","Ve","Sh","Sa","Et"],digits:null,dateFormat:"dd-mm-yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear||r.regionalOptions[""].invalidYear);return o.leapYear(e.year()+(e.year()<1?1:0)+1469)},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(1-(r.dayOfWeek()||7),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return this.daysPerMonth[n.month()-1]+(12===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return(this.dayOfWeek(t,e,n)||7)<6},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidMonth);(t=i.year())<0&&t++;for(var a=i.day(),s=1;s=this.toJD(e+1,1,1);)e++;for(var n=t-Math.floor(this.toJD(e,1,1)+.5)+1,r=1;n>this.daysInMonth(e,r);)n-=this.daysInMonth(e,r),r++;return this.newDate(e,r,n)}}),r.calendars.nanakshahi=a},55422:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new r.baseCalendar,i(a.prototype,{name:"Nepali",jdEpoch:1700709.5,daysPerMonth:[31,31,32,32,31,30,30,29,30,29,30,30],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,daysPerYear:365,regionalOptions:{"":{name:"Nepali",epochs:["BBS","ABS"],monthNames:["Baisakh","Jestha","Ashadh","Shrawan","Bhadra","Ashwin","Kartik","Mangsir","Paush","Mangh","Falgun","Chaitra"],monthNamesShort:["Bai","Je","As","Shra","Bha","Ash","Kar","Mang","Pau","Ma","Fal","Chai"],dayNames:["Aaitabaar","Sombaar","Manglbaar","Budhabaar","Bihibaar","Shukrabaar","Shanibaar"],dayNamesShort:["Aaita","Som","Mangl","Budha","Bihi","Shukra","Shani"],dayNamesMin:["Aai","So","Man","Bu","Bi","Shu","Sha"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:1,isRTL:!1}},leapYear:function(t){return this.daysInYear(t)!==this.daysPerYear},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(-r.dayOfWeek(),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInYear:function(t){if(t=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear).year(),"undefined"===typeof this.NEPALI_CALENDAR_DATA[t])return this.daysPerYear;for(var e=0,n=this.minMonth;n<=12;n++)e+=this.NEPALI_CALENDAR_DATA[t][n];return e},daysInMonth:function(t,e){return t.year&&(e=t.month(),t=t.year()),this._validate(t,e,this.minDay,r.local.invalidMonth),"undefined"===typeof this.NEPALI_CALENDAR_DATA[t]?this.daysPerMonth[e-1]:this.NEPALI_CALENDAR_DATA[t][e]},weekDay:function(t,e,n){return 6!==this.dayOfWeek(t,e,n)},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);t=i.year(),e=i.month(),n=i.day();var a=r.instance(),o=0,s=e,l=t;this._createMissingCalendarData(t);var c=t-(s>9||9===s&&n>=this.NEPALI_CALENDAR_DATA[l][0]?56:57);for(9!==e&&(o=n,s--);9!==s;)s<=0&&(s=12,l--),o+=this.NEPALI_CALENDAR_DATA[l][s],s--;return 
9===e?(o+=n-this.NEPALI_CALENDAR_DATA[l][0])<0&&(o+=a.daysInYear(c)):o+=this.NEPALI_CALENDAR_DATA[l][9]-this.NEPALI_CALENDAR_DATA[l][0],a.newDate(c,1,1).add(o,"d").toJD()},fromJD:function(t){var e=r.instance().fromJD(t),n=e.year(),i=e.dayOfYear(),a=n+56;this._createMissingCalendarData(a);for(var o=9,s=this.NEPALI_CALENDAR_DATA[a][0],l=this.NEPALI_CALENDAR_DATA[a][o]-s+1;i>l;)++o>12&&(o=1,a++),l+=this.NEPALI_CALENDAR_DATA[a][o];var c=this.NEPALI_CALENDAR_DATA[a][o]-(l-i);return this.newDate(a,o,c)},_createMissingCalendarData:function(t){var e=this.daysPerMonth.slice(0);e.unshift(17);for(var n=t-1;n0?474:473))%2820+474+38)%2816<682},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(-(r.dayOfWeek()+1)%7,"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return this.daysPerMonth[n.month()-1]+(12===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return 5!==this.dayOfWeek(t,e,n)},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);t=i.year(),e=i.month(),n=i.day();var a=t-(t>=0?474:473),s=474+o(a,2820);return n+(e<=7?31*(e-1):30*(e-1)+6)+Math.floor((682*s-110)/2816)+365*(s-1)+1029983*Math.floor(a/2820)+this.jdEpoch-1},fromJD:function(t){var e=(t=Math.floor(t)+.5)-this.toJD(475,1,1),n=Math.floor(e/1029983),r=o(e,1029983),i=2820;if(1029982!==r){var a=Math.floor(r/366),s=o(r,366);i=Math.floor((2134*a+2816*s+2815)/1028522)+a+1}var l=i+2820*n+474;l=l<=0?l-1:l;var c=t-this.toJD(l,1,1)+1,u=c<=186?Math.ceil(c/31):Math.ceil((c-6)/30),f=t-this.toJD(l,u,1)+1;return this.newDate(l,u,f)}}),r.calendars.persian=a,r.calendars.jalali=a},31320:function(t,e,n){var r=n(63489),i=n(56131),a=r.instance();function o(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}o.prototype=new r.baseCalendar,i(o.prototype,{name:"Taiwan",jdEpoch:2419402.5,yearsOffset:1911,daysPerMonth:[31,28,31,30,31,30,31,31,30,31,30,31],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Taiwan",epochs:["BROC","ROC"],monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],digits:null,dateFormat:"yyyy/mm/dd",firstDay:1,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return t=this._t2gYear(e.year()),a.leapYear(t)},weekOfYear:function(t,e,n){var i=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return t=this._t2gYear(i.year()),a.weekOfYear(t,i.month(),i.day())},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return this.daysPerMonth[n.month()-1]+(2===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return(this.dayOfWeek(t,e,n)||7)<6},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return t=this._t2gYear(i.year()),a.toJD(t,i.month(),i.day())},fromJD:function(t){var e=a.fromJD(t),n=this._g2tYear(e.year());return this.newDate(n,e.month(),e.day())},_t2gYear:function(t){return t+this.yearsOffset+(t>=-this.yearsOffset&&t<=-1?1:0)},_g2tYear:function(t){return t-this.yearsOffset-(t>=1&&t<=this.yearsOffset?1:0)}}),r.calendars.taiwan=o},51367:function(t,e,n){var r=n(63489),i=n(56131),a=r.instance();function 
o(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}o.prototype=new r.baseCalendar,i(o.prototype,{name:"Thai",jdEpoch:1523098.5,yearsOffset:543,daysPerMonth:[31,28,31,30,31,30,31,31,30,31,30,31],hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Thai",epochs:["BBE","BE"],monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],digits:null,dateFormat:"dd/mm/yyyy",firstDay:0,isRTL:!1}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return t=this._t2gYear(e.year()),a.leapYear(t)},weekOfYear:function(t,e,n){var i=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return t=this._t2gYear(i.year()),a.weekOfYear(t,i.month(),i.day())},daysInMonth:function(t,e){var n=this._validate(t,e,this.minDay,r.local.invalidMonth);return this.daysPerMonth[n.month()-1]+(2===n.month()&&this.leapYear(n.year())?1:0)},weekDay:function(t,e,n){return(this.dayOfWeek(t,e,n)||7)<6},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate);return t=this._t2gYear(i.year()),a.toJD(t,i.month(),i.day())},fromJD:function(t){var e=a.fromJD(t),n=this._g2tYear(e.year());return this.newDate(n,e.month(),e.day())},_t2gYear:function(t){return t-this.yearsOffset-(t>=1&&t<=this.yearsOffset?1:0)},_g2tYear:function(t){return t+this.yearsOffset+(t>=-this.yearsOffset&&t<=-1?1:0)}}),r.calendars.thai=o},21457:function(t,e,n){var r=n(63489),i=n(56131);function a(t){this.local=this.regionalOptions[t||""]||this.regionalOptions[""]}a.prototype=new r.baseCalendar,i(a.prototype,{name:"UmmAlQura",hasYearZero:!1,minMonth:1,firstMonth:1,minDay:1,regionalOptions:{"":{name:"Umm al-Qura",epochs:["BH","AH"],monthNames:["Al-Muharram","Safar","Rabi' al-awwal","Rabi' Al-Thani","Jumada Al-Awwal","Jumada Al-Thani","Rajab","Sha'aban","Ramadan","Shawwal","Dhu al-Qi'dah","Dhu al-Hijjah"],monthNamesShort:["Muh","Saf","Rab1","Rab2","Jum1","Jum2","Raj","Sha'","Ram","Shaw","DhuQ","DhuH"],dayNames:["Yawm al-Ahad","Yawm al-Ithnain","Yawm al-Thal\u0101th\u0101\u2019","Yawm al-Arba\u2018\u0101\u2019","Yawm al-Kham\u012bs","Yawm al-Jum\u2018a","Yawm al-Sabt"],dayNamesMin:["Ah","Ith","Th","Ar","Kh","Ju","Sa"],digits:null,dateFormat:"yyyy/mm/dd",firstDay:6,isRTL:!0}},leapYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,r.local.invalidYear);return 355===this.daysInYear(e.year())},weekOfYear:function(t,e,n){var r=this.newDate(t,e,n);return r.add(-r.dayOfWeek(),"d"),Math.floor((r.dayOfYear()-1)/7)+1},daysInYear:function(t){for(var e=0,n=1;n<=12;n++)e+=this.daysInMonth(t,n);return e},daysInMonth:function(t,e){for(var n=this._validate(t,e,this.minDay,r.local.invalidMonth).toJD()-24e5+.5,i=0,a=0;an)return o[i]-o[i-1];i++}return 30},weekDay:function(t,e,n){return 5!==this.dayOfWeek(t,e,n)},toJD:function(t,e,n){var i=this._validate(t,e,n,r.local.invalidDate),a=12*(i.year()-1)+i.month()-15292;return i.day()+o[a-1]-1+24e5-.5},fromJD:function(t){for(var e=t-24e5+.5,n=0,r=0;re);r++)n++;var i=n+15292,a=Math.floor((i-1)/12),s=a+1,l=i-12*a,c=e-o[n-1]+1;return this.newDate(s,l,c)},isValid:function(t,e,n){var i=r.baseCalendar.prototype.isValid.apply(this,arguments);return 
i&&(i=(t=null!=t.year?t.year:t)>=1276&&t<=1500),i},_validate:function(t,e,n,i){var a=r.baseCalendar.prototype._validate.apply(this,arguments);if(a.year<1276||a.year>1500)throw i.replace(/\{0\}/,this.local.name);return a}}),r.calendars.ummalqura=a;var o=[20,50,79,109,138,168,197,227,256,286,315,345,374,404,433,463,492,522,551,581,611,641,670,700,729,759,788,818,847,877,906,936,965,995,1024,1054,1083,1113,1142,1172,1201,1231,1260,1290,1320,1350,1379,1409,1438,1468,1497,1527,1556,1586,1615,1645,1674,1704,1733,1763,1792,1822,1851,1881,1910,1940,1969,1999,2028,2058,2087,2117,2146,2176,2205,2235,2264,2294,2323,2353,2383,2413,2442,2472,2501,2531,2560,2590,2619,2649,2678,2708,2737,2767,2796,2826,2855,2885,2914,2944,2973,3003,3032,3062,3091,3121,3150,3180,3209,3239,3268,3298,3327,3357,3386,3416,3446,3476,3505,3535,3564,3594,3623,3653,3682,3712,3741,3771,3800,3830,3859,3889,3918,3948,3977,4007,4036,4066,4095,4125,4155,4185,4214,4244,4273,4303,4332,4362,4391,4421,4450,4480,4509,4539,4568,4598,4627,4657,4686,4716,4745,4775,4804,4834,4863,4893,4922,4952,4981,5011,5040,5070,5099,5129,5158,5188,5218,5248,5277,5307,5336,5366,5395,5425,5454,5484,5513,5543,5572,5602,5631,5661,5690,5720,5749,5779,5808,5838,5867,5897,5926,5956,5985,6015,6044,6074,6103,6133,6162,6192,6221,6251,6281,6311,6340,6370,6399,6429,6458,6488,6517,6547,6576,6606,6635,6665,6694,6724,6753,6783,6812,6842,6871,6901,6930,6960,6989,7019,7048,7078,7107,7137,7166,7196,7225,7255,7284,7314,7344,7374,7403,7433,7462,7492,7521,7551,7580,7610,7639,7669,7698,7728,7757,7787,7816,7846,7875,7905,7934,7964,7993,8023,8053,8083,8112,8142,8171,8201,8230,8260,8289,8319,8348,8378,8407,8437,8466,8496,8525,8555,8584,8614,8643,8673,8702,8732,8761,8791,8821,8850,8880,8909,8938,8968,8997,9027,9056,9086,9115,9145,9175,9205,9234,9264,9293,9322,9352,9381,9410,9440,9470,9499,9529,9559,9589,9618,9648,9677,9706,9736,9765,9794,9824,9853,9883,9913,9943,9972,10002,10032,10061,10090,10120,10149,10178,10208,10237,10267,10297,10326,10356,10386,10415,10445,10474,10504,10533,10562,10592,10621,10651,10680,10710,10740,10770,10799,10829,10858,10888,10917,10947,10976,11005,11035,11064,11094,11124,11153,11183,11213,11242,11272,11301,11331,11360,11389,11419,11448,11478,11507,11537,11567,11596,11626,11655,11685,11715,11744,11774,11803,11832,11862,11891,11921,11950,11980,12010,12039,12069,12099,12128,12158,12187,12216,12246,12275,12304,12334,12364,12393,12423,12453,12483,12512,12542,12571,12600,12630,12659,12688,12718,12747,12777,12807,12837,12866,12896,12926,12955,12984,13014,13043,13072,13102,13131,13161,13191,13220,13250,13280,13310,13339,13368,13398,13427,13456,13486,13515,13545,13574,13604,13634,13664,13693,13723,13752,13782,13811,13840,13870,13899,13929,13958,13988,14018,14047,14077,14107,14136,14166,14195,14224,14254,14283,14313,14342,14372,14401,14431,14461,14490,14520,14550,14579,14609,14638,14667,14697,14726,14756,14785,14815,14844,14874,14904,14933,14963,14993,15021,15051,15081,15110,15140,15169,15199,15228,15258,15287,15317,15347,15377,15406,15436,15465,15494,15524,15553,15582,15612,15641,15671,15701,15731,15760,15790,15820,15849,15878,15908,15937,15966,15996,16025,16055,16085,16114,16144,16174,16204,16233,16262,16292,16321,16350,16380,16409,16439,16468,16498,16528,16558,16587,16617,16646,16676,16705,16734,16764,16793,16823,16852,16882,16912,16941,16971,17001,17030,17060,17089,17118,17148,17177,17207,17236,17266,17295,17325,17355,17384,17414,17444,17473,17502,17532,17561,17591,17620,17650,17679,17709,17738,17768,17798,17827,17857,17886,17916,17945,17975,18004,18034,18063,1809
3,18122,18152,18181,18211,18241,18270,18300,18330,18359,18388,18418,18447,18476,18506,18535,18565,18595,18625,18654,18684,18714,18743,18772,18802,18831,18860,18890,18919,18949,18979,19008,19038,19068,19098,19127,19156,19186,19215,19244,19274,19303,19333,19362,19392,19422,19452,19481,19511,19540,19570,19599,19628,19658,19687,19717,19746,19776,19806,19836,19865,19895,19924,19954,19983,20012,20042,20071,20101,20130,20160,20190,20219,20249,20279,20308,20338,20367,20396,20426,20455,20485,20514,20544,20573,20603,20633,20662,20692,20721,20751,20780,20810,20839,20869,20898,20928,20957,20987,21016,21046,21076,21105,21135,21164,21194,21223,21253,21282,21312,21341,21371,21400,21430,21459,21489,21519,21548,21578,21607,21637,21666,21696,21725,21754,21784,21813,21843,21873,21902,21932,21962,21991,22021,22050,22080,22109,22138,22168,22197,22227,22256,22286,22316,22346,22375,22405,22434,22464,22493,22522,22552,22581,22611,22640,22670,22700,22730,22759,22789,22818,22848,22877,22906,22936,22965,22994,23024,23054,23083,23113,23143,23173,23202,23232,23261,23290,23320,23349,23379,23408,23438,23467,23497,23527,23556,23586,23616,23645,23674,23704,23733,23763,23792,23822,23851,23881,23910,23940,23970,23999,24029,24058,24088,24117,24147,24176,24206,24235,24265,24294,24324,24353,24383,24413,24442,24472,24501,24531,24560,24590,24619,24648,24678,24707,24737,24767,24796,24826,24856,24885,24915,24944,24974,25003,25032,25062,25091,25121,25150,25180,25210,25240,25269,25299,25328,25358,25387,25416,25446,25475,25505,25534,25564,25594,25624,25653,25683,25712,25742,25771,25800,25830,25859,25888,25918,25948,25977,26007,26037,26067,26096,26126,26155,26184,26214,26243,26272,26302,26332,26361,26391,26421,26451,26480,26510,26539,26568,26598,26627,26656,26686,26715,26745,26775,26805,26834,26864,26893,26923,26952,26982,27011,27041,27070,27099,27129,27159,27188,27218,27248,27277,27307,27336,27366,27395,27425,27454,27484,27513,27542,27572,27602,27631,27661,27691,27720,27750,27779,27809,27838,27868,27897,27926,27956,27985,28015,28045,28074,28104,28134,28163,28193,28222,28252,28281,28310,28340,28369,28399,28428,28458,28488,28517,28547,28577,28607,28636,28665,28695,28724,28754,28783,28813,28843,28872,28901,28931,28960,28990,29019,29049,29078,29108,29137,29167,29196,29226,29255,29285,29315,29345,29375,29404,29434,29463,29492,29522,29551,29580,29610,29640,29669,29699,29729,29759,29788,29818,29847,29876,29906,29935,29964,29994,30023,30053,30082,30112,30141,30171,30200,30230,30259,30289,30318,30348,30378,30408,30437,30467,30496,30526,30555,30585,30614,30644,30673,30703,30732,30762,30791,30821,30850,30880,30909,30939,30968,30998,31027,31057,31086,31116,31145,31175,31204,31234,31263,31293,31322,31352,31381,31411,31441,31471,31500,31530,31559,31589,31618,31648,31676,31706,31736,31766,31795,31825,31854,31884,31913,31943,31972,32002,32031,32061,32090,32120,32150,32180,32209,32239,32268,32298,32327,32357,32386,32416,32445,32475,32504,32534,32563,32593,32622,32652,32681,32711,32740,32770,32799,32829,32858,32888,32917,32947,32976,33006,33035,33065,33094,33124,33153,33183,33213,33243,33272,33302,33331,33361,33390,33420,33450,33479,33509,33539,33568,33598,33627,33657,33686,33716,33745,33775,33804,33834,33863,33893,33922,33952,33981,34011,34040,34069,34099,34128,34158,34187,34217,34247,34277,34306,34336,34365,34395,34424,34454,34483,34512,34542,34571,34601,34631,34660,34690,34719,34749,34778,34808,34837,34867,34896,34926,34955,34985,35015,35044,35074,35103,35133,35162,35192,35222,35251,35280,35310,35340,35370,35399,35429,35458,35488,35517,35547,35576,3
5605,35635,35665,35694,35723,35753,35782,35811,35841,35871,35901,35930,35960,35989,36019,36048,36078,36107,36136,36166,36195,36225,36254,36284,36314,36343,36373,36403,36433,36462,36492,36521,36551,36580,36610,36639,36669,36698,36728,36757,36786,36816,36845,36875,36904,36934,36963,36993,37022,37052,37081,37111,37141,37170,37200,37229,37259,37288,37318,37347,37377,37406,37436,37465,37495,37524,37554,37584,37613,37643,37672,37701,37731,37760,37790,37819,37849,37878,37908,37938,37967,37997,38027,38056,38085,38115,38144,38174,38203,38233,38262,38292,38322,38351,38381,38410,38440,38469,38499,38528,38558,38587,38617,38646,38676,38705,38735,38764,38794,38823,38853,38882,38912,38941,38971,39001,39030,39059,39089,39118,39148,39178,39208,39237,39267,39297,39326,39355,39385,39414,39444,39473,39503,39532,39562,39592,39621,39650,39680,39709,39739,39768,39798,39827,39857,39886,39916,39946,39975,40005,40035,40064,40094,40123,40153,40182,40212,40241,40271,40300,40330,40359,40389,40418,40448,40477,40507,40536,40566,40595,40625,40655,40685,40714,40744,40773,40803,40832,40862,40892,40921,40951,40980,41009,41039,41068,41098,41127,41157,41186,41216,41245,41275,41304,41334,41364,41393,41422,41452,41481,41511,41540,41570,41599,41629,41658,41688,41718,41748,41777,41807,41836,41865,41894,41924,41953,41983,42012,42042,42072,42102,42131,42161,42190,42220,42249,42279,42308,42337,42367,42397,42426,42456,42485,42515,42545,42574,42604,42633,42662,42692,42721,42751,42780,42810,42839,42869,42899,42929,42958,42988,43017,43046,43076,43105,43135,43164,43194,43223,43253,43283,43312,43342,43371,43401,43430,43460,43489,43519,43548,43578,43607,43637,43666,43696,43726,43755,43785,43814,43844,43873,43903,43932,43962,43991,44021,44050,44080,44109,44139,44169,44198,44228,44258,44287,44317,44346,44375,44405,44434,44464,44493,44523,44553,44582,44612,44641,44671,44700,44730,44759,44788,44818,44847,44877,44906,44936,44966,44996,45025,45055,45084,45114,45143,45172,45202,45231,45261,45290,45320,45350,45380,45409,45439,45468,45498,45527,45556,45586,45615,45644,45674,45704,45733,45763,45793,45823,45852,45882,45911,45940,45970,45999,46028,46058,46088,46117,46147,46177,46206,46236,46265,46295,46324,46354,46383,46413,46442,46472,46501,46531,46560,46590,46620,46649,46679,46708,46738,46767,46797,46826,46856,46885,46915,46944,46974,47003,47033,47063,47092,47122,47151,47181,47210,47240,47269,47298,47328,47357,47387,47417,47446,47476,47506,47535,47565,47594,47624,47653,47682,47712,47741,47771,47800,47830,47860,47890,47919,47949,47978,48008,48037,48066,48096,48125,48155,48184,48214,48244,48273,48303,48333,48362,48392,48421,48450,48480,48509,48538,48568,48598,48627,48657,48687,48717,48746,48776,48805,48834,48864,48893,48922,48952,48982,49011,49041,49071,49100,49130,49160,49189,49218,49248,49277,49306,49336,49365,49395,49425,49455,49484,49514,49543,49573,49602,49632,49661,49690,49720,49749,49779,49809,49838,49868,49898,49927,49957,49986,50016,50045,50075,50104,50133,50163,50192,50222,50252,50281,50311,50340,50370,50400,50429,50459,50488,50518,50547,50576,50606,50635,50665,50694,50724,50754,50784,50813,50843,50872,50902,50931,50960,50990,51019,51049,51078,51108,51138,51167,51197,51227,51256,51286,51315,51345,51374,51403,51433,51462,51492,51522,51552,51582,51611,51641,51670,51699,51729,51758,51787,51816,51846,51876,51906,51936,51965,51995,52025,52054,52083,52113,52142,52171,52200,52230,52260,52290,52319,52349,52379,52408,52438,52467,52497,52526,52555,52585,52614,52644,52673,52703,52733,52762,52792,52822,52851,52881,52910,52939,52969,52998,53028,53057,5308
7,53116,53146,53176,53205,53235,53264,53294,53324,53353,53383,53412,53441,53471,53500,53530,53559,53589,53619,53648,53678,53708,53737,53767,53796,53825,53855,53884,53913,53943,53973,54003,54032,54062,54092,54121,54151,54180,54209,54239,54268,54297,54327,54357,54387,54416,54446,54476,54505,54535,54564,54593,54623,54652,54681,54711,54741,54770,54800,54830,54859,54889,54919,54948,54977,55007,55036,55066,55095,55125,55154,55184,55213,55243,55273,55302,55332,55361,55391,55420,55450,55479,55508,55538,55567,55597,55627,55657,55686,55716,55745,55775,55804,55834,55863,55892,55922,55951,55981,56011,56040,56070,56100,56129,56159,56188,56218,56247,56276,56306,56335,56365,56394,56424,56454,56483,56513,56543,56572,56601,56631,56660,56690,56719,56749,56778,56808,56837,56867,56897,56926,56956,56985,57015,57044,57074,57103,57133,57162,57192,57221,57251,57280,57310,57340,57369,57399,57429,57458,57487,57517,57546,57576,57605,57634,57664,57694,57723,57753,57783,57813,57842,57871,57901,57930,57959,57989,58018,58048,58077,58107,58137,58167,58196,58226,58255,58285,58314,58343,58373,58402,58432,58461,58491,58521,58551,58580,58610,58639,58669,58698,58727,58757,58786,58816,58845,58875,58905,58934,58964,58994,59023,59053,59082,59111,59141,59170,59200,59229,59259,59288,59318,59348,59377,59407,59436,59466,59495,59525,59554,59584,59613,59643,59672,59702,59731,59761,59791,59820,59850,59879,59909,59939,59968,59997,60027,60056,60086,60115,60145,60174,60204,60234,60264,60293,60323,60352,60381,60411,60440,60469,60499,60528,60558,60588,60618,60648,60677,60707,60736,60765,60795,60824,60853,60883,60912,60942,60972,61002,61031,61061,61090,61120,61149,61179,61208,61237,61267,61296,61326,61356,61385,61415,61445,61474,61504,61533,61563,61592,61621,61651,61680,61710,61739,61769,61799,61828,61858,61888,61917,61947,61976,62006,62035,62064,62094,62123,62153,62182,62212,62242,62271,62301,62331,62360,62390,62419,62448,62478,62507,62537,62566,62596,62625,62655,62685,62715,62744,62774,62803,62832,62862,62891,62921,62950,62980,63009,63039,63069,63099,63128,63157,63187,63216,63246,63275,63305,63334,63363,63393,63423,63453,63482,63512,63541,63571,63600,63630,63659,63689,63718,63747,63777,63807,63836,63866,63895,63925,63955,63984,64014,64043,64073,64102,64131,64161,64190,64220,64249,64279,64309,64339,64368,64398,64427,64457,64486,64515,64545,64574,64603,64633,64663,64692,64722,64752,64782,64811,64841,64870,64899,64929,64958,64987,65017,65047,65076,65106,65136,65166,65195,65225,65254,65283,65313,65342,65371,65401,65431,65460,65490,65520,65549,65579,65608,65638,65667,65697,65726,65755,65785,65815,65844,65874,65903,65933,65963,65992,66022,66051,66081,66110,66140,66169,66199,66228,66258,66287,66317,66346,66376,66405,66435,66465,66494,66524,66553,66583,66612,66641,66671,66700,66730,66760,66789,66819,66849,66878,66908,66937,66967,66996,67025,67055,67084,67114,67143,67173,67203,67233,67262,67292,67321,67351,67380,67409,67439,67468,67497,67527,67557,67587,67617,67646,67676,67705,67735,67764,67793,67823,67852,67882,67911,67941,67971,68e3,68030,68060,68089,68119,68148,68177,68207,68236,68266,68295,68325,68354,68384,68414,68443,68473,68502,68532,68561,68591,68620,68650,68679,68708,68738,68768,68797,68827,68857,68886,68916,68946,68975,69004,69034,69063,69092,69122,69152,69181,69211,69240,69270,69300,69330,69359,69388,69418,69447,69476,69506,69535,69565,69595,69624,69654,69684,69713,69743,69772,69802,69831,69861,69890,69919,69949,69978,70008,70038,70067,70097,70126,70156,70186,70215,70245,70274,70303,70333,70362,70392,70421,70451,70481,70510,70540,70570,70
599,70629,70658,70687,70717,70746,70776,70805,70835,70864,70894,70924,70954,70983,71013,71042,71071,71101,71130,71159,71189,71218,71248,71278,71308,71337,71367,71397,71426,71455,71485,71514,71543,71573,71602,71632,71662,71691,71721,71751,71781,71810,71839,71869,71898,71927,71957,71986,72016,72046,72075,72105,72135,72164,72194,72223,72253,72282,72311,72341,72370,72400,72429,72459,72489,72518,72548,72577,72607,72637,72666,72695,72725,72754,72784,72813,72843,72872,72902,72931,72961,72991,73020,73050,73080,73109,73139,73168,73197,73227,73256,73286,73315,73345,73375,73404,73434,73464,73493,73523,73552,73581,73611,73640,73669,73699,73729,73758,73788,73818,73848,73877,73907,73936,73965,73995,74024,74053,74083,74113,74142,74172,74202,74231,74261,74291,74320,74349,74379,74408,74437,74467,74497,74526,74556,74586,74615,74645,74675,74704,74733,74763,74792,74822,74851,74881,74910,74940,74969,74999,75029,75058,75088,75117,75147,75176,75206,75235,75264,75294,75323,75353,75383,75412,75442,75472,75501,75531,75560,75590,75619,75648,75678,75707,75737,75766,75796,75826,75856,75885,75915,75944,75974,76003,76032,76062,76091,76121,76150,76180,76210,76239,76269,76299,76328,76358,76387,76416,76446,76475,76505,76534,76564,76593,76623,76653,76682,76712,76741,76771,76801,76830,76859,76889,76918,76948,76977,77007,77036,77066,77096,77125,77155,77185,77214,77243,77273,77302,77332,77361,77390,77420,77450,77479,77509,77539,77569,77598,77627,77657,77686,77715,77745,77774,77804,77833,77863,77893,77923,77952,77982,78011,78041,78070,78099,78129,78158,78188,78217,78247,78277,78307,78336,78366,78395,78425,78454,78483,78513,78542,78572,78601,78631,78661,78690,78720,78750,78779,78808,78838,78867,78897,78926,78956,78985,79015,79044,79074,79104,79133,79163,79192,79222,79251,79281,79310,79340,79369,79399,79428,79458,79487,79517,79546,79576,79606,79635,79665,79695,79724,79753,79783,79812,79841,79871,79900,79930,79960,79990]},63489:function(t,e,n){var r=n(56131);function i(){this.regionalOptions=[],this.regionalOptions[""]={invalidCalendar:"Calendar {0} not found",invalidDate:"Invalid {0} date",invalidMonth:"Invalid {0} month",invalidYear:"Invalid {0} year",differentCalendars:"Cannot mix {0} and {1} dates"},this.local=this.regionalOptions[""],this.calendars={},this._localCals={}}function a(t,e,n,r){if(this._calendar=t,this._year=e,this._month=n,this._day=r,0===this._calendar._validateLevel&&!this._calendar.isValid(this._year,this._month,this._day))throw(c.local.invalidDate||c.regionalOptions[""].invalidDate).replace(/\{0\}/,this._calendar.local.name)}function o(t,e){return"000000".substring(0,e-(t=""+t).length)+t}function s(){this.shortYearCutoff="+10"}function l(t){this.local=this.regionalOptions[t]||this.regionalOptions[""]}r(i.prototype,{instance:function(t,e){t=(t||"gregorian").toLowerCase(),e=e||"";var n=this._localCals[t+"-"+e];if(!n&&this.calendars[t]&&(n=new this.calendars[t](e),this._localCals[t+"-"+e]=n),!n)throw(this.local.invalidCalendar||this.regionalOptions[""].invalidCalendar).replace(/\{0\}/,t);return n},newDate:function(t,e,n,r,i){return(r=(null!=t&&t.year?t.calendar():"string"===typeof r?this.instance(r,i):r)||this.instance()).newDate(t,e,n)},substituteDigits:function(t){return function(e){return(e+"").replace(/[0-9]/g,(function(e){return t[e]}))}},substituteChineseDigits:function(t,e){return function(n){for(var r="",i=0;n>0;){var a=n%10;r=(0===a?"":t[a]+e[i])+r,i++,n=Math.floor(n/10)}return 0===r.indexOf(t[1]+e[1])&&(r=r.substr(1)),r||t[0]}}}),r(a.prototype,{newDate:function(t,e,n){return 
this._calendar.newDate(null==t?this:t,e,n)},year:function(t){return 0===arguments.length?this._year:this.set(t,"y")},month:function(t){return 0===arguments.length?this._month:this.set(t,"m")},day:function(t){return 0===arguments.length?this._day:this.set(t,"d")},date:function(t,e,n){if(!this._calendar.isValid(t,e,n))throw(c.local.invalidDate||c.regionalOptions[""].invalidDate).replace(/\{0\}/,this._calendar.local.name);return this._year=t,this._month=e,this._day=n,this},leapYear:function(){return this._calendar.leapYear(this)},epoch:function(){return this._calendar.epoch(this)},formatYear:function(){return this._calendar.formatYear(this)},monthOfYear:function(){return this._calendar.monthOfYear(this)},weekOfYear:function(){return this._calendar.weekOfYear(this)},daysInYear:function(){return this._calendar.daysInYear(this)},dayOfYear:function(){return this._calendar.dayOfYear(this)},daysInMonth:function(){return this._calendar.daysInMonth(this)},dayOfWeek:function(){return this._calendar.dayOfWeek(this)},weekDay:function(){return this._calendar.weekDay(this)},extraInfo:function(){return this._calendar.extraInfo(this)},add:function(t,e){return this._calendar.add(this,t,e)},set:function(t,e){return this._calendar.set(this,t,e)},compareTo:function(t){if(this._calendar.name!==t._calendar.name)throw(c.local.differentCalendars||c.regionalOptions[""].differentCalendars).replace(/\{0\}/,this._calendar.local.name).replace(/\{1\}/,t._calendar.local.name);var e=this._year!==t._year?this._year-t._year:this._month!==t._month?this.monthOfYear()-t.monthOfYear():this._day-t._day;return 0===e?0:e<0?-1:1},calendar:function(){return this._calendar},toJD:function(){return this._calendar.toJD(this)},fromJD:function(t){return this._calendar.fromJD(t)},toJSDate:function(){return this._calendar.toJSDate(this)},fromJSDate:function(t){return this._calendar.fromJSDate(t)},toString:function(){return(this.year()<0?"-":"")+o(Math.abs(this.year()),4)+"-"+o(this.month(),2)+"-"+o(this.day(),2)}}),r(s.prototype,{_validateLevel:0,newDate:function(t,e,n){return null==t?this.today():(t.year&&(this._validate(t,e,n,c.local.invalidDate||c.regionalOptions[""].invalidDate),n=t.day(),e=t.month(),t=t.year()),new a(this,t,e,n))},today:function(){return this.fromJSDate(new Date)},epoch:function(t){return this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear).year()<0?this.local.epochs[0]:this.local.epochs[1]},formatYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear);return(e.year()<0?"-":"")+o(Math.abs(e.year()),4)},monthsInYear:function(t){return this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear),12},monthOfYear:function(t,e){var n=this._validate(t,e,this.minDay,c.local.invalidMonth||c.regionalOptions[""].invalidMonth);return(n.month()+this.monthsInYear(n)-this.firstMonth)%this.monthsInYear(n)+this.minMonth},fromMonthOfYear:function(t,e){var n=(e+this.firstMonth-2*this.minMonth)%this.monthsInYear(t)+this.minMonth;return this._validate(t,n,this.minDay,c.local.invalidMonth||c.regionalOptions[""].invalidMonth),n},daysInYear:function(t){var e=this._validate(t,this.minMonth,this.minDay,c.local.invalidYear||c.regionalOptions[""].invalidYear);return this.leapYear(e)?366:365},dayOfYear:function(t,e,n){var r=this._validate(t,e,n,c.local.invalidDate||c.regionalOptions[""].invalidDate);return 
r.toJD()-this.newDate(r.year(),this.fromMonthOfYear(r.year(),this.minMonth),this.minDay).toJD()+1},daysInWeek:function(){return 7},dayOfWeek:function(t,e,n){var r=this._validate(t,e,n,c.local.invalidDate||c.regionalOptions[""].invalidDate);return(Math.floor(this.toJD(r))+2)%this.daysInWeek()},extraInfo:function(t,e,n){return this._validate(t,e,n,c.local.invalidDate||c.regionalOptions[""].invalidDate),{}},add:function(t,e,n){return this._validate(t,this.minMonth,this.minDay,c.local.invalidDate||c.regionalOptions[""].invalidDate),this._correctAdd(t,this._add(t,e,n),e,n)},_add:function(t,e,n){if(this._validateLevel++,"d"===n||"w"===n){var r=t.toJD()+e*("w"===n?this.daysInWeek():1),i=t.calendar().fromJD(r);return this._validateLevel--,[i.year(),i.month(),i.day()]}try{var a=t.year()+("y"===n?e:0),o=t.monthOfYear()+("m"===n?e:0);i=t.day(),"y"===n?(t.month()!==this.fromMonthOfYear(a,o)&&(o=this.newDate(a,t.month(),this.minDay).monthOfYear()),o=Math.min(o,this.monthsInYear(a)),i=Math.min(i,this.daysInMonth(a,this.fromMonthOfYear(a,o)))):"m"===n&&(function(t){for(;oe-1+t.minMonth;)a++,o-=e,e=t.monthsInYear(a)}(this),i=Math.min(i,this.daysInMonth(a,this.fromMonthOfYear(a,o))));var s=[a,this.fromMonthOfYear(a,o),i];return this._validateLevel--,s}catch(l){throw this._validateLevel--,l}},_correctAdd:function(t,e,n,r){if(!this.hasYearZero&&("y"===r||"m"===r)&&(0===e[0]||t.year()>0!==e[0]>0)){var i={y:[1,1,"y"],m:[1,this.monthsInYear(-1),"m"],w:[this.daysInWeek(),this.daysInYear(-1),"d"],d:[1,this.daysInYear(-1),"d"]}[r],a=n<0?-1:1;e=this._add(t,n*i[0]+a*i[1],i[2])}return t.date(e[0],e[1],e[2])},set:function(t,e,n){this._validate(t,this.minMonth,this.minDay,c.local.invalidDate||c.regionalOptions[""].invalidDate);var r="y"===n?e:t.year(),i="m"===n?e:t.month(),a="d"===n?e:t.day();return"y"!==n&&"m"!==n||(a=Math.min(a,this.daysInMonth(r,i))),t.date(r,i,a)},isValid:function(t,e,n){this._validateLevel++;var r=this.hasYearZero||0!==t;if(r){var i=this.newDate(t,e,this.minDay);r=e>=this.minMonth&&e-this.minMonth=this.minDay&&n-this.minDay13.5?13:1),c=i-(l>2.5?4716:4715);return c<=0&&c--,this.newDate(c,l,s)},toJSDate:function(t,e,n){var r=this._validate(t,e,n,c.local.invalidDate||c.regionalOptions[""].invalidDate),i=new Date(r.year(),r.month()-1,r.day());return i.setHours(0),i.setMinutes(0),i.setSeconds(0),i.setMilliseconds(0),i.setHours(i.getHours()>12?i.getHours()+2:0),i},fromJSDate:function(t){return this.newDate(t.getFullYear(),t.getMonth()+1,t.getDate())}});var c=t.exports=new i;c.cdate=a,c.baseCalendar=s,c.calendars.gregorian=l},94338:function(t,e,n){var r=n(56131),i=n(63489);r(i.regionalOptions[""],{invalidArguments:"Invalid arguments",invalidFormat:"Cannot format a date from another calendar",missingNumberAt:"Missing number at position {0}",unknownNameAt:"Unknown name at position {0}",unexpectedLiteralAt:"Unexpected literal at position {0}",unexpectedText:"Additional text found at end"}),i.local=i.regionalOptions[""],r(i.cdate.prototype,{formatDate:function(t,e){return"string"!==typeof t&&(e=t,t=""),this._calendar.formatDate(t||"",this,e)}}),r(i.baseCalendar.prototype,{UNIX_EPOCH:i.instance().newDate(1970,1,1).toJD(),SECS_PER_DAY:86400,TICKS_EPOCH:i.instance().jdEpoch,TICKS_PER_DAY:864e9,ATOM:"yyyy-mm-dd",COOKIE:"D, dd M yyyy",FULL:"DD, MM d, yyyy",ISO_8601:"yyyy-mm-dd",JULIAN:"J",RFC_822:"D, d M yy",RFC_850:"DD, dd-M-yy",RFC_1036:"D, d M yy",RFC_1123:"D, d M yyyy",RFC_2822:"D, d M yyyy",RSS:"D, d M yy",TICKS:"!",TIMESTAMP:"@",W3C:"yyyy-mm-dd",formatDate:function(t,e,n){if("string"!==typeof 
t&&(n=e,e=t,t=""),!e)return"";if(e.calendar()!==this)throw i.local.invalidFormat||i.regionalOptions[""].invalidFormat;t=t||this.local.dateFormat;for(var r,a,o,s,l=(n=n||{}).dayNamesShort||this.local.dayNamesShort,c=n.dayNames||this.local.dayNames,u=n.monthNumbers||this.local.monthNumbers,f=n.monthNamesShort||this.local.monthNamesShort,h=n.monthNames||this.local.monthNames,d=(n.calculateWeek||this.local.calculateWeek,function(e,n){for(var r=1;_+r1}),p=function(t,e,n,r){var i=""+e;if(d(t,r))for(;i.length1},b=function(t,n){var r=y(t,n),a=[2,3,r?4:2,r?4:2,10,11,20]["oyYJ@!".indexOf(t)+1],o=new RegExp("^-?\\d{1,"+a+"}"),s=e.substring(M).match(o);if(!s)throw(i.local.missingNumberAt||i.regionalOptions[""].missingNumberAt).replace(/\{0\}/,M);return M+=s[0].length,parseInt(s[0],10)},x=this,w=function(){if("function"===typeof l){y("m");var t=l.call(x,e.substring(M));return M+=t.length,t}return b("m")},_=function(t,n,r,a){for(var o=y(t,a)?r:n,s=0;s-1){d=1,p=g;for(var E=this.daysInMonth(h,d);p>E;E=this.daysInMonth(h,d))d++,p-=E}return f>-1?this.fromJD(f):this.newDate(h,d,p)},determineDate:function(t,e,n,r,i){n&&"object"!==typeof n&&(i=r,r=n,n=null),"string"!==typeof r&&(i=r,r="");var a=this;return e=e?e.newDate():null,t=null==t?e:"string"===typeof t?function(t){try{return a.parseDate(r,t,i)}catch(l){}for(var e=((t=t.toLowerCase()).match(/^c/)&&n?n.newDate():null)||a.today(),o=/([+-]?[0-9]+)\s*(d|w|m|y)?/g,s=o.exec(t);s;)e.add(parseInt(s[1],10),s[2]||"d"),s=o.exec(t);return e}(t):"number"===typeof t?isNaN(t)||t===1/0||t===-1/0?e:a.today().add(t,"d"):a.newDate(t)}})},69862:function(){},40964:function(){},72077:function(t,e,n){"use strict";var r=["BigInt64Array","BigUint64Array","Float32Array","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Uint8Array","Uint8ClampedArray"],i="undefined"===typeof globalThis?n.g:globalThis;t.exports=function(){for(var t=[],e=0;e>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?k(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?k(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=h.exec(t))?new M(e[1],e[2],e[3],1):(e=d.exec(t))?new M(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=p.exec(t))?k(e[1],e[2],e[3],e[4]):(e=g.exec(t))?k(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=v.exec(t))?O(e[1],e[2]/100,e[3]/100,1):(e=m.exec(t))?O(e[1],e[2]/100,e[3]/100,e[4]):y.hasOwnProperty(t)?_(y[t]):"transparent"===t?new M(NaN,NaN,NaN,0):null}function _(t){return new M(t>>16&255,t>>8&255,255&t,1)}function k(t,e,n,r){return r<=0&&(t=e=n=NaN),new M(t,e,n,r)}function T(t,e,n,r){return 1===arguments.length?((i=t)instanceof a||(i=w(i)),i?new M((i=i.rgb()).r,i.g,i.b,i.opacity):new M):new M(t,e,n,null==r?1:r);var i}function M(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function A(){return"#".concat(P(this.r)).concat(P(this.g)).concat(P(this.b))}function S(){var t=E(this.opacity);return"".concat(1===t?"rgb(":"rgba(").concat(C(this.r),", ").concat(C(this.g),", ").concat(C(this.b)).concat(1===t?")":", ".concat(t,")"))}function E(t){return isNaN(t)?1:Math.max(0,Math.min(1,t))}function C(t){return Math.max(0,Math.min(255,Math.round(t)||0))}function P(t){return((t=C(t))<16?"0":"")+t.toString(16)}function O(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new I(t,e,n,r)}function L(t){if(t instanceof I)return new I(t.h,t.s,t.l,t.opacity);if(t instanceof a||(t=w(t)),!t)return new I;if(t instanceof I)return t;var e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),o=Math.max(e,n,r),s=NaN,l=o-i,c=(o+i)/2;return 
l?(s=e===o?(n-r)/l+6*(n0&&c<1?0:s,new I(s,l,c,t.opacity)}function I(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function D(t){return(t=(t||0)%360)<0?t+360:t}function R(t){return Math.max(0,Math.min(1,t||0))}function z(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function N(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}r(a,w,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:b,formatHex:b,formatHex8:function(){return this.rgb().formatHex8()},formatHsl:function(){return L(this).formatHsl()},formatRgb:x,toString:x}),r(M,T,i(a,{brighter:function(t){return t=null==t?s:Math.pow(s,t),new M(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?o:Math.pow(o,t),new M(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},clamp:function(){return new M(C(this.r),C(this.g),C(this.b),E(this.opacity))},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:A,formatHex:A,formatHex8:function(){return"#".concat(P(this.r)).concat(P(this.g)).concat(P(this.b)).concat(P(255*(isNaN(this.opacity)?1:this.opacity)))},formatRgb:S,toString:S})),r(I,(function(t,e,n,r){return 1===arguments.length?L(t):new I(t,e,n,null==r?1:r)}),i(a,{brighter:function(t){return t=null==t?s:Math.pow(s,t),new I(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?o:Math.pow(o,t),new I(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new M(z(t>=240?t-240:t+120,i,r),z(t,i,r),z(t<120?t+240:t-120,i,r),this.opacity)},clamp:function(){return new I(D(this.h),R(this.s),R(this.l),E(this.opacity))},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=E(this.opacity);return"".concat(1===t?"hsl(":"hsla(").concat(D(this.h),", ").concat(100*R(this.s),"%, ").concat(100*R(this.l),"%").concat(1===t?")":", ".concat(t,")"))}}));var j=function(t){return function(){return t}};function F(t,e){return function(n){return t+n*e}}function B(t){return 1===(t=+t)?U:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):j(isNaN(e)?n:e)}}function U(t,e){var n=e-t;return n?F(t,n):j(isNaN(t)?e:t)}var H=function t(e){var n=B(e);function r(t,e){var r=n((t=T(t)).r,(e=T(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=U(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function V(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;n=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=ra&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,l.push({i:o,x:W(n,r)})),a=K.lastIndex;return a
\n * );\n * }\n * ```\n *\n * When the button is clicked the component will shift to the `'entering'` state\n * and stay there for 500ms (the value of `timeout`) before it finally switches\n * to `'entered'`.\n *\n * When `in` is `false` the same thing happens except the state moves from\n * `'exiting'` to `'exited'`.\n */\n\nvar Transition = /*#__PURE__*/function (_React$Component) {\n _inheritsLoose(Transition, _React$Component);\n\n function Transition(props, context) {\n var _this;\n\n _this = _React$Component.call(this, props, context) || this;\n var parentGroup = context; // In the context of a TransitionGroup all enters are really appears\n\n var appear = parentGroup && !parentGroup.isMounting ? props.enter : props.appear;\n var initialStatus;\n _this.appearStatus = null;\n\n if (props.in) {\n if (appear) {\n initialStatus = EXITED;\n _this.appearStatus = ENTERING;\n } else {\n initialStatus = ENTERED;\n }\n } else {\n if (props.unmountOnExit || props.mountOnEnter) {\n initialStatus = UNMOUNTED;\n } else {\n initialStatus = EXITED;\n }\n }\n\n _this.state = {\n status: initialStatus\n };\n _this.nextCallback = null;\n return _this;\n }\n\n Transition.getDerivedStateFromProps = function getDerivedStateFromProps(_ref, prevState) {\n var nextIn = _ref.in;\n\n if (nextIn && prevState.status === UNMOUNTED) {\n return {\n status: EXITED\n };\n }\n\n return null;\n } // getSnapshotBeforeUpdate(prevProps) {\n // let nextStatus = null\n // if (prevProps !== this.props) {\n // const { status } = this.state\n // if (this.props.in) {\n // if (status !== ENTERING && status !== ENTERED) {\n // nextStatus = ENTERING\n // }\n // } else {\n // if (status === ENTERING || status === ENTERED) {\n // nextStatus = EXITING\n // }\n // }\n // }\n // return { nextStatus }\n // }\n ;\n\n var _proto = Transition.prototype;\n\n _proto.componentDidMount = function componentDidMount() {\n this.updateStatus(true, this.appearStatus);\n };\n\n _proto.componentDidUpdate = function componentDidUpdate(prevProps) {\n var nextStatus = null;\n\n if (prevProps !== this.props) {\n var status = this.state.status;\n\n if (this.props.in) {\n if (status !== ENTERING && status !== ENTERED) {\n nextStatus = ENTERING;\n }\n } else {\n if (status === ENTERING || status === ENTERED) {\n nextStatus = EXITING;\n }\n }\n }\n\n this.updateStatus(false, nextStatus);\n };\n\n _proto.componentWillUnmount = function componentWillUnmount() {\n this.cancelNextCallback();\n };\n\n _proto.getTimeouts = function getTimeouts() {\n var timeout = this.props.timeout;\n var exit, enter, appear;\n exit = enter = appear = timeout;\n\n if (timeout != null && typeof timeout !== 'number') {\n exit = timeout.exit;\n enter = timeout.enter; // TODO: remove fallback for next major\n\n appear = timeout.appear !== undefined ? timeout.appear : enter;\n }\n\n return {\n exit: exit,\n enter: enter,\n appear: appear\n };\n };\n\n _proto.updateStatus = function updateStatus(mounting, nextStatus) {\n if (mounting === void 0) {\n mounting = false;\n }\n\n if (nextStatus !== null) {\n // nextStatus will always be ENTERING or EXITING.\n this.cancelNextCallback();\n\n if (nextStatus === ENTERING) {\n if (this.props.unmountOnExit || this.props.mountOnEnter) {\n var node = this.props.nodeRef ? 
this.props.nodeRef.current : ReactDOM.findDOMNode(this); // https://github.com/reactjs/react-transition-group/pull/749\n // With unmountOnExit or mountOnEnter, the enter animation should happen at the transition between `exited` and `entering`.\n // To make the animation happen, we have to separate each rendering and avoid being processed as batched.\n\n if (node) forceReflow(node);\n }\n\n this.performEnter(mounting);\n } else {\n this.performExit();\n }\n } else if (this.props.unmountOnExit && this.state.status === EXITED) {\n this.setState({\n status: UNMOUNTED\n });\n }\n };\n\n _proto.performEnter = function performEnter(mounting) {\n var _this2 = this;\n\n var enter = this.props.enter;\n var appearing = this.context ? this.context.isMounting : mounting;\n\n var _ref2 = this.props.nodeRef ? [appearing] : [ReactDOM.findDOMNode(this), appearing],\n maybeNode = _ref2[0],\n maybeAppearing = _ref2[1];\n\n var timeouts = this.getTimeouts();\n var enterTimeout = appearing ? timeouts.appear : timeouts.enter; // no enter animation skip right to ENTERED\n // if we are mounting and running this it means appear _must_ be set\n\n if (!mounting && !enter || config.disabled) {\n this.safeSetState({\n status: ENTERED\n }, function () {\n _this2.props.onEntered(maybeNode);\n });\n return;\n }\n\n this.props.onEnter(maybeNode, maybeAppearing);\n this.safeSetState({\n status: ENTERING\n }, function () {\n _this2.props.onEntering(maybeNode, maybeAppearing);\n\n _this2.onTransitionEnd(enterTimeout, function () {\n _this2.safeSetState({\n status: ENTERED\n }, function () {\n _this2.props.onEntered(maybeNode, maybeAppearing);\n });\n });\n });\n };\n\n _proto.performExit = function performExit() {\n var _this3 = this;\n\n var exit = this.props.exit;\n var timeouts = this.getTimeouts();\n var maybeNode = this.props.nodeRef ? undefined : ReactDOM.findDOMNode(this); // no exit animation skip right to EXITED\n\n if (!exit || config.disabled) {\n this.safeSetState({\n status: EXITED\n }, function () {\n _this3.props.onExited(maybeNode);\n });\n return;\n }\n\n this.props.onExit(maybeNode);\n this.safeSetState({\n status: EXITING\n }, function () {\n _this3.props.onExiting(maybeNode);\n\n _this3.onTransitionEnd(timeouts.exit, function () {\n _this3.safeSetState({\n status: EXITED\n }, function () {\n _this3.props.onExited(maybeNode);\n });\n });\n });\n };\n\n _proto.cancelNextCallback = function cancelNextCallback() {\n if (this.nextCallback !== null) {\n this.nextCallback.cancel();\n this.nextCallback = null;\n }\n };\n\n _proto.safeSetState = function safeSetState(nextState, callback) {\n // This shouldn't be necessary, but there are weird race conditions with\n // setState callbacks and unmounting in testing, so always make sure that\n // we can cancel any pending setState callbacks after we unmount.\n callback = this.setNextCallback(callback);\n this.setState(nextState, callback);\n };\n\n _proto.setNextCallback = function setNextCallback(callback) {\n var _this4 = this;\n\n var active = true;\n\n this.nextCallback = function (event) {\n if (active) {\n active = false;\n _this4.nextCallback = null;\n callback(event);\n }\n };\n\n this.nextCallback.cancel = function () {\n active = false;\n };\n\n return this.nextCallback;\n };\n\n _proto.onTransitionEnd = function onTransitionEnd(timeout, handler) {\n this.setNextCallback(handler);\n var node = this.props.nodeRef ? 
this.props.nodeRef.current : ReactDOM.findDOMNode(this);\n var doesNotHaveTimeoutOrListener = timeout == null && !this.props.addEndListener;\n\n if (!node || doesNotHaveTimeoutOrListener) {\n setTimeout(this.nextCallback, 0);\n return;\n }\n\n if (this.props.addEndListener) {\n var _ref3 = this.props.nodeRef ? [this.nextCallback] : [node, this.nextCallback],\n maybeNode = _ref3[0],\n maybeNextCallback = _ref3[1];\n\n this.props.addEndListener(maybeNode, maybeNextCallback);\n }\n\n if (timeout != null) {\n setTimeout(this.nextCallback, timeout);\n }\n };\n\n _proto.render = function render() {\n var status = this.state.status;\n\n if (status === UNMOUNTED) {\n return null;\n }\n\n var _this$props = this.props,\n children = _this$props.children,\n _in = _this$props.in,\n _mountOnEnter = _this$props.mountOnEnter,\n _unmountOnExit = _this$props.unmountOnExit,\n _appear = _this$props.appear,\n _enter = _this$props.enter,\n _exit = _this$props.exit,\n _timeout = _this$props.timeout,\n _addEndListener = _this$props.addEndListener,\n _onEnter = _this$props.onEnter,\n _onEntering = _this$props.onEntering,\n _onEntered = _this$props.onEntered,\n _onExit = _this$props.onExit,\n _onExiting = _this$props.onExiting,\n _onExited = _this$props.onExited,\n _nodeRef = _this$props.nodeRef,\n childProps = _objectWithoutPropertiesLoose(_this$props, [\"children\", \"in\", \"mountOnEnter\", \"unmountOnExit\", \"appear\", \"enter\", \"exit\", \"timeout\", \"addEndListener\", \"onEnter\", \"onEntering\", \"onEntered\", \"onExit\", \"onExiting\", \"onExited\", \"nodeRef\"]);\n\n return (\n /*#__PURE__*/\n // allows for nested Transitions\n React.createElement(TransitionGroupContext.Provider, {\n value: null\n }, typeof children === 'function' ? children(status, childProps) : React.cloneElement(React.Children.only(children), childProps))\n );\n };\n\n return Transition;\n}(React.Component);\n\nTransition.contextType = TransitionGroupContext;\nTransition.propTypes = process.env.NODE_ENV !== \"production\" ? {\n /**\n * A React reference to DOM element that need to transition:\n * https://stackoverflow.com/a/51127130/4671932\n *\n * - When `nodeRef` prop is used, `node` is not passed to callback functions\n * (e.g. `onEnter`) because user already has direct access to the node.\n * - When changing `key` prop of `Transition` in a `TransitionGroup` a new\n * `nodeRef` need to be provided to `Transition` with changed `key` prop\n * (see\n * [test/CSSTransition-test.js](https://github.com/reactjs/react-transition-group/blob/13435f897b3ab71f6e19d724f145596f5910581c/test/CSSTransition-test.js#L362-L437)).\n */\n nodeRef: PropTypes.shape({\n current: typeof Element === 'undefined' ? PropTypes.any : function (propValue, key, componentName, location, propFullName, secret) {\n var value = propValue[key];\n return PropTypes.instanceOf(value && 'ownerDocument' in value ? value.ownerDocument.defaultView.Element : Element)(propValue, key, componentName, location, propFullName, secret);\n }\n }),\n\n /**\n * A `function` child can be used instead of a React element. 
This function is\n * called with the current transition status (`'entering'`, `'entered'`,\n * `'exiting'`, `'exited'`), which can be used to apply context\n * specific props to a component.\n *\n * ```jsx\n * \n * {state => (\n * \n * )}\n * \n * ```\n */\n children: PropTypes.oneOfType([PropTypes.func.isRequired, PropTypes.element.isRequired]).isRequired,\n\n /**\n * Show the component; triggers the enter or exit states\n */\n in: PropTypes.bool,\n\n /**\n * By default the child component is mounted immediately along with\n * the parent `Transition` component. If you want to \"lazy mount\" the component on the\n * first `in={true}` you can set `mountOnEnter`. After the first enter transition the component will stay\n * mounted, even on \"exited\", unless you also specify `unmountOnExit`.\n */\n mountOnEnter: PropTypes.bool,\n\n /**\n * By default the child component stays mounted after it reaches the `'exited'` state.\n * Set `unmountOnExit` if you'd prefer to unmount the component after it finishes exiting.\n */\n unmountOnExit: PropTypes.bool,\n\n /**\n * By default the child component does not perform the enter transition when\n * it first mounts, regardless of the value of `in`. If you want this\n * behavior, set both `appear` and `in` to `true`.\n *\n * > **Note**: there are no special appear states like `appearing`/`appeared`, this prop\n * > only adds an additional enter transition. However, in the\n * > `` component that first enter transition does result in\n * > additional `.appear-*` classes, that way you can choose to style it\n * > differently.\n */\n appear: PropTypes.bool,\n\n /**\n * Enable or disable enter transitions.\n */\n enter: PropTypes.bool,\n\n /**\n * Enable or disable exit transitions.\n */\n exit: PropTypes.bool,\n\n /**\n * The duration of the transition, in milliseconds.\n * Required unless `addEndListener` is provided.\n *\n * You may specify a single timeout for all transitions:\n *\n * ```jsx\n * timeout={500}\n * ```\n *\n * or individually:\n *\n * ```jsx\n * timeout={{\n * appear: 500,\n * enter: 300,\n * exit: 500,\n * }}\n * ```\n *\n * - `appear` defaults to the value of `enter`\n * - `enter` defaults to `0`\n * - `exit` defaults to `0`\n *\n * @type {number | { enter?: number, exit?: number, appear?: number }}\n */\n timeout: function timeout(props) {\n var pt = timeoutsShape;\n if (!props.addEndListener) pt = pt.isRequired;\n\n for (var _len = arguments.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {\n args[_key - 1] = arguments[_key];\n }\n\n return pt.apply(void 0, [props].concat(args));\n },\n\n /**\n * Add a custom transition end trigger. Called with the transitioning\n * DOM node and a `done` callback. Allows for more fine grained transition end\n * logic. Timeouts are still used as a fallback if provided.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * ```jsx\n * addEndListener={(node, done) => {\n * // use the css transitionend event to mark the finish of a transition\n * node.addEventListener('transitionend', done, false);\n * }}\n * ```\n */\n addEndListener: PropTypes.func,\n\n /**\n * Callback fired before the \"entering\" status is applied. 
An extra parameter\n * `isAppearing` is supplied to indicate if the enter stage is occurring on the initial mount\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement, isAppearing: bool) -> void\n */\n onEnter: PropTypes.func,\n\n /**\n * Callback fired after the \"entering\" status is applied. An extra parameter\n * `isAppearing` is supplied to indicate if the enter stage is occurring on the initial mount\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement, isAppearing: bool)\n */\n onEntering: PropTypes.func,\n\n /**\n * Callback fired after the \"entered\" status is applied. An extra parameter\n * `isAppearing` is supplied to indicate if the enter stage is occurring on the initial mount\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement, isAppearing: bool) -> void\n */\n onEntered: PropTypes.func,\n\n /**\n * Callback fired before the \"exiting\" status is applied.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement) -> void\n */\n onExit: PropTypes.func,\n\n /**\n * Callback fired after the \"exiting\" status is applied.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement) -> void\n */\n onExiting: PropTypes.func,\n\n /**\n * Callback fired after the \"exited\" status is applied.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed\n *\n * @type Function(node: HtmlElement) -> void\n */\n onExited: PropTypes.func\n} : {}; // Name the function so it is clearer in the documentation\n\nfunction noop() {}\n\nTransition.defaultProps = {\n in: false,\n mountOnEnter: false,\n unmountOnExit: false,\n appear: false,\n enter: true,\n exit: true,\n onEnter: noop,\n onEntering: noop,\n onEntered: noop,\n onExit: noop,\n onExiting: noop,\n onExited: noop\n};\nTransition.UNMOUNTED = UNMOUNTED;\nTransition.EXITED = EXITED;\nTransition.ENTERING = ENTERING;\nTransition.ENTERED = ENTERED;\nTransition.EXITING = EXITING;\nexport default Transition;","import setPrototypeOf from \"./setPrototypeOf.js\";\nexport default function _inheritsLoose(subClass, superClass) {\n subClass.prototype = Object.create(superClass.prototype);\n subClass.prototype.constructor = subClass;\n setPrototypeOf(subClass, superClass);\n}","export var forceReflow = function forceReflow(node) {\n return node.scrollTop;\n};","import ownerWindow from './ownerWindow';\n/**\n * Returns one or all computed style properties of an element.\n * \n * @param node the element\n * @param psuedoElement the style property\n */\n\nexport default function getComputedStyle(node, psuedoElement) {\n return ownerWindow(node).getComputedStyle(node, psuedoElement);\n}","import ownerDocument from './ownerDocument';\n/**\n * Returns the owner window of a given element.\n * \n * @param node the element\n */\n\nexport default function ownerWindow(node) {\n var doc = ownerDocument(node);\n return doc && doc.defaultView || window;\n}","var rUpper = /([A-Z])/g;\nexport default function hyphenate(string) {\n return string.replace(rUpper, '-$1').toLowerCase();\n}","/**\n * Copyright 2013-2014, Facebook, Inc.\n * All rights reserved.\n * https://github.com/facebook/react/blob/2aeb8a2a6beb00617a4217f7f8284924fa2ad819/src/vendor/core/hyphenateStyleName.js\n */\nimport hyphenate from './hyphenate';\nvar msPattern = /^ms-/;\nexport default function 
hyphenateStyleName(string) {\n return hyphenate(string).replace(msPattern, '-ms-');\n}","var supportedTransforms = /^((translate|rotate|scale)(X|Y|Z|3d)?|matrix(3d)?|perspective|skew(X|Y)?)$/i;\nexport default function isTransform(value) {\n return !!(value && supportedTransforms.test(value));\n}","import getComputedStyle from './getComputedStyle';\nimport hyphenate from './hyphenateStyle';\nimport isTransform from './isTransform';\n\nfunction style(node, property) {\n var css = '';\n var transforms = '';\n\n if (typeof property === 'string') {\n return node.style.getPropertyValue(hyphenate(property)) || getComputedStyle(node).getPropertyValue(hyphenate(property));\n }\n\n Object.keys(property).forEach(function (key) {\n var value = property[key];\n\n if (!value && value !== 0) {\n node.style.removeProperty(hyphenate(key));\n } else if (isTransform(key)) {\n transforms += key + \"(\" + value + \") \";\n } else {\n css += hyphenate(key) + \": \" + value + \";\";\n }\n });\n\n if (transforms) {\n css += \"transform: \" + transforms + \";\";\n }\n\n node.style.cssText += \";\" + css;\n}\n\nexport default style;","import css from './css';\nimport listen from './listen';\nimport triggerEvent from './triggerEvent';\n\nfunction parseDuration(node) {\n var str = css(node, 'transitionDuration') || '';\n var mult = str.indexOf('ms') === -1 ? 1000 : 1;\n return parseFloat(str) * mult;\n}\n\nfunction emulateTransitionEnd(element, duration, padding) {\n if (padding === void 0) {\n padding = 5;\n }\n\n var called = false;\n var handle = setTimeout(function () {\n if (!called) triggerEvent(element, 'transitionend', true);\n }, duration + padding);\n var remove = listen(element, 'transitionend', function () {\n called = true;\n }, {\n once: true\n });\n return function () {\n clearTimeout(handle);\n remove();\n };\n}\n\nexport default function transitionEnd(element, handler, duration, padding) {\n if (duration == null) duration = parseDuration(element) || 0;\n var removeEmulate = emulateTransitionEnd(element, duration, padding);\n var remove = listen(element, 'transitionend', handler);\n return function () {\n removeEmulate();\n remove();\n };\n}","/**\n * Triggers an event on a given element.\n * \n * @param node the element\n * @param eventName the event name to trigger\n * @param bubbles whether the event should bubble up\n * @param cancelable whether the event should be cancelable\n */\nexport default function triggerEvent(node, eventName, bubbles, cancelable) {\n if (bubbles === void 0) {\n bubbles = false;\n }\n\n if (cancelable === void 0) {\n cancelable = true;\n }\n\n if (node) {\n var event = document.createEvent('HTMLEvents');\n event.initEvent(eventName, bubbles, cancelable);\n node.dispatchEvent(event);\n }\n}","import css from 'dom-helpers/css';\nimport transitionEnd from 'dom-helpers/transitionEnd';\nfunction parseDuration(node, property) {\n const str = css(node, property) || '';\n const mult = str.indexOf('ms') === -1 ? 
1000 : 1;\n return parseFloat(str) * mult;\n}\nexport default function transitionEndListener(element, handler) {\n const duration = parseDuration(element, 'transitionDuration');\n const delay = parseDuration(element, 'transitionDelay');\n const remove = transitionEnd(element, e => {\n if (e.target === element) {\n remove();\n handler(e);\n }\n }, duration + delay);\n}","import ReactDOM from 'react-dom';\nexport default function safeFindDOMNode(componentOrElement) {\n if (componentOrElement && 'setState' in componentOrElement) {\n return ReactDOM.findDOMNode(componentOrElement);\n }\n return componentOrElement != null ? componentOrElement : null;\n}","import React, { useCallback, useRef } from 'react';\nimport Transition from 'react-transition-group/Transition';\nimport useMergedRefs from '@restart/hooks/useMergedRefs';\nimport safeFindDOMNode from './safeFindDOMNode';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\n// Normalizes Transition callbacks when nodeRef is used.\nconst TransitionWrapper = /*#__PURE__*/React.forwardRef(({\n onEnter,\n onEntering,\n onEntered,\n onExit,\n onExiting,\n onExited,\n addEndListener,\n children,\n childRef,\n ...props\n}, ref) => {\n const nodeRef = useRef(null);\n const mergedRef = useMergedRefs(nodeRef, childRef);\n const attachRef = r => {\n mergedRef(safeFindDOMNode(r));\n };\n const normalize = callback => param => {\n if (callback && nodeRef.current) {\n callback(nodeRef.current, param);\n }\n };\n\n /* eslint-disable react-hooks/exhaustive-deps */\n const handleEnter = useCallback(normalize(onEnter), [onEnter]);\n const handleEntering = useCallback(normalize(onEntering), [onEntering]);\n const handleEntered = useCallback(normalize(onEntered), [onEntered]);\n const handleExit = useCallback(normalize(onExit), [onExit]);\n const handleExiting = useCallback(normalize(onExiting), [onExiting]);\n const handleExited = useCallback(normalize(onExited), [onExited]);\n const handleAddEndListener = useCallback(normalize(addEndListener), [addEndListener]);\n /* eslint-enable react-hooks/exhaustive-deps */\n\n return /*#__PURE__*/_jsx(Transition, {\n ref: ref,\n ...props,\n onEnter: handleEnter,\n onEntered: handleEntered,\n onEntering: handleEntering,\n onExit: handleExit,\n onExited: handleExited,\n onExiting: handleExiting,\n addEndListener: handleAddEndListener,\n nodeRef: nodeRef,\n children: typeof children === 'function' ? (status, innerProps) =>\n // TODO: Types for RTG missing innerProps, so need to cast.\n children(status, {\n ...innerProps,\n ref: attachRef\n }) : /*#__PURE__*/React.cloneElement(children, {\n ref: attachRef\n })\n });\n});\nexport default TransitionWrapper;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useCallback } from 'react';\nimport { ENTERED, ENTERING } from 'react-transition-group/Transition';\nimport transitionEndListener from './transitionEndListener';\nimport triggerBrowserReflow from './triggerBrowserReflow';\nimport TransitionWrapper from './TransitionWrapper';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst fadeStyles = {\n [ENTERING]: 'show',\n [ENTERED]: 'show'\n};\nconst Fade = /*#__PURE__*/React.forwardRef(({\n className,\n children,\n transitionClasses = {},\n onEnter,\n ...rest\n}, ref) => {\n const props = {\n in: false,\n timeout: 300,\n mountOnEnter: false,\n unmountOnExit: false,\n appear: false,\n ...rest\n };\n const handleEnter = useCallback((node, isAppearing) => {\n triggerBrowserReflow(node);\n onEnter == null ? 
void 0 : onEnter(node, isAppearing);\n }, [onEnter]);\n return /*#__PURE__*/_jsx(TransitionWrapper, {\n ref: ref,\n addEndListener: transitionEndListener,\n ...props,\n onEnter: handleEnter,\n childRef: children.ref,\n children: (status, innerProps) => /*#__PURE__*/React.cloneElement(children, {\n ...innerProps,\n className: classNames('fade', className, children.props.className, fadeStyles[status], transitionClasses[status])\n })\n });\n});\nFade.displayName = 'Fade';\nexport default Fade;","// reading a dimension prop will cause the browser to recalculate,\n// which will let our animations work\nexport default function triggerBrowserReflow(node) {\n // eslint-disable-next-line @typescript-eslint/no-unused-expressions\n node.offsetHeight;\n}","import * as React from 'react';\nimport { useEffect, useRef, useState } from 'react';\nimport classNames from 'classnames';\nimport BaseOverlay from '@restart/ui/Overlay';\nimport useEventCallback from '@restart/hooks/useEventCallback';\nimport useIsomorphicEffect from '@restart/hooks/useIsomorphicEffect';\nimport useMergedRefs from '@restart/hooks/useMergedRefs';\nimport useOverlayOffset from './useOverlayOffset';\nimport Fade from './Fade';\nimport safeFindDOMNode from './safeFindDOMNode';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction wrapRefs(props, arrowProps) {\n const {\n ref\n } = props;\n const {\n ref: aRef\n } = arrowProps;\n props.ref = ref.__wrapped || (ref.__wrapped = r => ref(safeFindDOMNode(r)));\n arrowProps.ref = aRef.__wrapped || (aRef.__wrapped = r => aRef(safeFindDOMNode(r)));\n}\nconst Overlay = /*#__PURE__*/React.forwardRef(({\n children: overlay,\n transition = Fade,\n popperConfig = {},\n rootClose = false,\n placement = 'top',\n show: outerShow = false,\n ...outerProps\n}, outerRef) => {\n const popperRef = useRef({});\n const [firstRenderedState, setFirstRenderedState] = useState(null);\n const [ref, modifiers] = useOverlayOffset(outerProps.offset);\n const mergedRef = useMergedRefs(outerRef, ref);\n const actualTransition = transition === true ? Fade : transition || undefined;\n const handleFirstUpdate = useEventCallback(state => {\n setFirstRenderedState(state);\n popperConfig == null ? void 0 : popperConfig.onFirstUpdate == null ? void 0 : popperConfig.onFirstUpdate(state);\n });\n useIsomorphicEffect(() => {\n if (firstRenderedState) {\n popperRef.current.scheduleUpdate == null ? void 0 : popperRef.current.scheduleUpdate();\n }\n }, [firstRenderedState]);\n useEffect(() => {\n if (!outerShow) {\n setFirstRenderedState(null);\n }\n }, [outerShow]);\n return /*#__PURE__*/_jsx(BaseOverlay, {\n ...outerProps,\n ref: mergedRef,\n popperConfig: {\n ...popperConfig,\n modifiers: modifiers.concat(popperConfig.modifiers || []),\n onFirstUpdate: handleFirstUpdate\n },\n transition: actualTransition,\n rootClose: rootClose,\n placement: placement,\n show: outerShow,\n children: (overlayProps, {\n arrowProps,\n popper: popperObj,\n show\n }) => {\n var _popperObj$state, _popperObj$state$modi;\n wrapRefs(overlayProps, arrowProps);\n // Need to get placement from popper object, handling case when overlay is flipped using 'flip' prop\n const updatedPlacement = popperObj == null ? void 0 : popperObj.placement;\n const popper = Object.assign(popperRef.current, {\n state: popperObj == null ? void 0 : popperObj.state,\n scheduleUpdate: popperObj == null ? void 0 : popperObj.update,\n placement: updatedPlacement,\n outOfBoundaries: (popperObj == null ? void 0 : (_popperObj$state = popperObj.state) == null ? 
void 0 : (_popperObj$state$modi = _popperObj$state.modifiersData.hide) == null ? void 0 : _popperObj$state$modi.isReferenceHidden) || false,\n strategy: popperConfig.strategy\n });\n const hasDoneInitialMeasure = !!firstRenderedState;\n if (typeof overlay === 'function') return overlay({\n ...overlayProps,\n placement: updatedPlacement,\n show,\n ...(!transition && show && {\n className: 'show'\n }),\n popper,\n arrowProps,\n hasDoneInitialMeasure\n });\n return /*#__PURE__*/React.cloneElement(overlay, {\n ...overlayProps,\n placement: updatedPlacement,\n arrowProps,\n popper,\n hasDoneInitialMeasure,\n className: classNames(overlay.props.className, !transition && show && 'show'),\n style: {\n ...overlay.props.style,\n ...overlayProps.style\n }\n });\n }\n });\n});\nOverlay.displayName = 'Overlay';\nexport default Overlay;","import { useMemo, useRef } from 'react';\nimport hasClass from 'dom-helpers/hasClass';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport Popover from './Popover';\nimport Tooltip from './Tooltip';\n\n// This is meant for internal use.\n// This applies a custom offset to the overlay if it's a popover or tooltip.\nexport default function useOverlayOffset(customOffset) {\n const overlayRef = useRef(null);\n const popoverClass = useBootstrapPrefix(undefined, 'popover');\n const tooltipClass = useBootstrapPrefix(undefined, 'tooltip');\n const offset = useMemo(() => ({\n name: 'offset',\n options: {\n offset: () => {\n if (customOffset) {\n return customOffset;\n }\n if (overlayRef.current) {\n if (hasClass(overlayRef.current, popoverClass)) {\n return Popover.POPPER_OFFSET;\n }\n if (hasClass(overlayRef.current, tooltipClass)) {\n return Tooltip.TOOLTIP_OFFSET;\n }\n }\n return [0, 0];\n }\n }\n }), [customOffset, popoverClass, tooltipClass]);\n return [overlayRef, [offset]];\n}","import contains from 'dom-helpers/contains';\nimport * as React from 'react';\nimport { cloneElement, useCallback, useRef } from 'react';\nimport useTimeout from '@restart/hooks/useTimeout';\nimport warning from 'warning';\nimport { useUncontrolledProp } from 'uncontrollable';\nimport useMergedRefs from '@restart/hooks/useMergedRefs';\nimport Overlay from './Overlay';\nimport safeFindDOMNode from './safeFindDOMNode';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { Fragment as _Fragment } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nfunction normalizeDelay(delay) {\n return delay && typeof delay === 'object' ? 
delay : {\n show: delay,\n hide: delay\n };\n}\n\n// Simple implementation of mouseEnter and mouseLeave.\n// React's built version is broken: https://github.com/facebook/react/issues/4251\n// for cases when the trigger is disabled and mouseOut/Over can cause flicker\n// moving from one child element to another.\nfunction handleMouseOverOut(\n// eslint-disable-next-line @typescript-eslint/no-shadow\nhandler, args, relatedNative) {\n const [e] = args;\n const target = e.currentTarget;\n const related = e.relatedTarget || e.nativeEvent[relatedNative];\n if ((!related || related !== target) && !contains(target, related)) {\n handler(...args);\n }\n}\nfunction OverlayTrigger({\n trigger = ['hover', 'focus'],\n overlay,\n children,\n popperConfig = {},\n show: propsShow,\n defaultShow = false,\n onToggle,\n delay: propsDelay,\n placement,\n flip = placement && placement.indexOf('auto') !== -1,\n ...props\n}) {\n const triggerNodeRef = useRef(null);\n const mergedRef = useMergedRefs(triggerNodeRef, children.ref);\n const timeout = useTimeout();\n const hoverStateRef = useRef('');\n const [show, setShow] = useUncontrolledProp(propsShow, defaultShow, onToggle);\n const delay = normalizeDelay(propsDelay);\n const {\n onFocus,\n onBlur,\n onClick\n } = typeof children !== 'function' ? React.Children.only(children).props : {};\n const attachRef = r => {\n mergedRef(safeFindDOMNode(r));\n };\n const handleShow = useCallback(() => {\n timeout.clear();\n hoverStateRef.current = 'show';\n if (!delay.show) {\n setShow(true);\n return;\n }\n timeout.set(() => {\n if (hoverStateRef.current === 'show') setShow(true);\n }, delay.show);\n }, [delay.show, setShow, timeout]);\n const handleHide = useCallback(() => {\n timeout.clear();\n hoverStateRef.current = 'hide';\n if (!delay.hide) {\n setShow(false);\n return;\n }\n timeout.set(() => {\n if (hoverStateRef.current === 'hide') setShow(false);\n }, delay.hide);\n }, [delay.hide, setShow, timeout]);\n const handleFocus = useCallback((...args) => {\n handleShow();\n onFocus == null ? void 0 : onFocus(...args);\n }, [handleShow, onFocus]);\n const handleBlur = useCallback((...args) => {\n handleHide();\n onBlur == null ? void 0 : onBlur(...args);\n }, [handleHide, onBlur]);\n const handleClick = useCallback((...args) => {\n setShow(!show);\n onClick == null ? void 0 : onClick(...args);\n }, [onClick, setShow, show]);\n const handleMouseOver = useCallback((...args) => {\n handleMouseOverOut(handleShow, args, 'fromElement');\n }, [handleShow]);\n const handleMouseOut = useCallback((...args) => {\n handleMouseOverOut(handleHide, args, 'toElement');\n }, [handleHide]);\n const triggers = trigger == null ? [] : [].concat(trigger);\n const triggerProps = {\n ref: attachRef\n };\n if (triggers.indexOf('click') !== -1) {\n triggerProps.onClick = handleClick;\n }\n if (triggers.indexOf('focus') !== -1) {\n triggerProps.onFocus = handleFocus;\n triggerProps.onBlur = handleBlur;\n }\n if (triggers.indexOf('hover') !== -1) {\n process.env.NODE_ENV !== \"production\" ? warning(triggers.length > 1, '[react-bootstrap] Specifying only the `\"hover\"` trigger limits the visibility of the overlay to just mouse users. Consider also including the `\"focus\"` trigger so that touch and keyboard only users can see the overlay as well.') : void 0;\n triggerProps.onMouseOver = handleMouseOver;\n triggerProps.onMouseOut = handleMouseOut;\n }\n return /*#__PURE__*/_jsxs(_Fragment, {\n children: [typeof children === 'function' ? 
children(triggerProps) : /*#__PURE__*/cloneElement(children, triggerProps), /*#__PURE__*/_jsx(Overlay, {\n ...props,\n show: show,\n onHide: handleHide,\n flip: flip,\n placement: placement,\n popperConfig: popperConfig,\n target: triggerNodeRef.current,\n children: overlay\n })]\n });\n}\nexport default OverlayTrigger;","// -* - coding: utf - 8 -* -\n// Copyright(c) 2023 Intel Corporation\n//\n// Licensed under the Apache License, Version 2.0(the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\nimport React, { useState } from 'react';\nimport Image from 'react-bootstrap/Image';\nimport Tooltip from 'react-bootstrap/Tooltip';\nimport OverlayTrigger from 'react-bootstrap/OverlayTrigger';\nimport './WorkloadDetails.scss';\nimport { api } from '../../App';\nimport { getLabel } from '../Diagnosis/Diagnosis';\nimport Workloads from './../Workloads/Workloads';\n\nexport default function WorkloadDetails({ selectedWorkload, setWarningText, setSelectedOp }) {\n const [spinner, setSpinner] = useState(true);\n\n let deleteWorkload = (selectedWorkload) => {\n api.post('api/workloads/delete?token=' + localStorage.getItem('token'), { workload_id: selectedWorkload.uuid })\n .then(\n response => {\n Workloads.getWorkloads(true);\n }\n )\n .catch(error => {\n setWarningText(error.message);\n setSpinner(false);\n });\n }\n\n const tooltipDelete = (\n \n Delete this workload\n \n );\n\n const tooltipCopy = (\n \n Copy full model path\n \n );\n\n const tooltipFullPath = (\n \n {selectedWorkload?.model_path}\n \n );\n\n return (\n
\n {selectedWorkload &&\n
\n

Details\n \n
{ deleteWorkload(selectedWorkload); setSelectedOp(null); }}>\n (e.currentTarget.src = \"icons/057a-trash-solid.svg\")}\n onMouseOut={e => (e.currentTarget.src = \"icons/057a-trash-solid-red.svg\")}\n />\n
\n
\n

\n \n \n \n \n \n \n \n \n \n \n \n \n
Framework:{selectedWorkload?.framework}
\n Model path:\n \n \n
{getLabel(selectedWorkload?.model_path)}
\n
\n
\n {selectedWorkload?.framework === 'TensorFlow' &&\n \n
{ navigator.clipboard.writeText(selectedWorkload.model_path) }}>\n (e.currentTarget.src = \"icons/146b-copy-outlined-gray.svg\")}\n onMouseOut={e => (e.currentTarget.src = \"icons/146b-copy-outlined.svg\")}\n />\n
\n
\n }\n
\n
\n }\n
\n )\n}","// -* - coding: utf - 8 -* -\n// Copyright(c) 2023 Intel Corporation\n//\n// Licensed under the Apache License, Version 2.0(the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\nimport React, { useEffect, useState } from 'react';\nimport './ModelSummary.scss';\nimport { api } from './../../App';\nimport Spinner from 'react-bootstrap/Spinner';\n\nexport default function ModelSummary({ selectedWorkload, setWarningText }) {\n const [summary, setSummary] = useState(null);\n useEffect(() => {\n if (selectedWorkload) {\n setSummary(null);\n const payload = {\n workload_id: selectedWorkload.uuid,\n };\n api.post('api/model/summary?token=' + localStorage.getItem('token'), payload)\n .then(\n response => {\n setSummary(response);\n })\n .catch(error => {\n setWarningText(error.message);\n });\n }\n }, [selectedWorkload]);\n\n return (\n
\n {!summary &&\n
\n \n
\n }\n
\n        \n          {summary?.data?.summary.replaceAll('\\n\\n', '\\n')}\n        \n      
\n
\n )\n}","// -* - coding: utf - 8 -* -\n// Copyright(c) 2023 Intel Corporation\n//\n// Licensed under the Apache License, Version 2.0(the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\nimport React, { useEffect, useState, useMemo } from 'react';\nimport './Profiling.scss';\nimport { api } from './../../App';\nimport Plot from 'react-plotly.js';\nimport { getLabel } from './../Diagnosis/Diagnosis';\nimport Table from 'react-bootstrap/Table';\n\nexport default function Profiling({ selectedWorkload, setWarningText }) {\n const [profilingTable, setProfilingTable] = useState([]);\n const [profilingChartData, setProfilingChartData] = useState([]);\n return (\n
\n \n \n
\n )\n}\n\nfunction ProfilingTable({ selectedWorkload, profilingTable, setProfilingTable, setProfilingChartData, setWarningText }) {\n\n const [checked, setChecked] = useState({});\n const [sorting, setSorting] = useState({ field: 'node_name', direction: 1 });\n\n useEffect(() => {\n if (selectedWorkload) {\n api.post('api/profiling?token=' + localStorage.getItem('token'), { workload_id: selectedWorkload.uuid })\n .then(\n response => {\n setProfilingTable(response.data);\n setSorting({ field: 'total_execution_time', direction: 1 });\n const showOnChart = {};\n const chartData = [];\n response.data.forEach((node, index) => {\n if (index < 10) {\n showOnChart[node.node_name] = true;\n chartData.push(node);\n } else {\n showOnChart[node.node_name] = false;\n }\n });\n setChecked(showOnChart);\n setProfilingChartData(chartData);\n })\n .catch(error => {\n setWarningText(error.message);\n });\n }\n }, [selectedWorkload]);\n\n let sortedProfiling = useMemo(() => {\n let sortedTable = [...profilingTable];\n if (sorting !== null) {\n sortedTable.sort((a, b) => {\n if (a[sorting.field] < b[sorting.field]) {\n return sorting.direction;\n }\n if (a[sorting.field] > b[sorting.field]) {\n return -sorting.direction;\n }\n return 0;\n });\n }\n return sortedTable;\n }, [sorting]);\n\n const requestSorting = field => {\n let direction = -sorting.direction;\n setSorting({ field, direction });\n };\n\n const getSortingClass = (name) => {\n let classes = 'header clickable';\n if (sorting.field === name) {\n return classes + (sorting.direction === 1 ? ' ascending' : ' descending');\n }\n return 'header clickable';\n };\n\n const requestChartCheck = (nodeName, value) => {\n let chartCheck = checked;\n chartCheck[nodeName] = value;\n setChecked(chartCheck);\n const newProfilingChartData = profilingTable.filter(node => checked[node.node_name] === true);\n setProfilingChartData(newProfilingChartData);\n };\n\n const tableContent = sortedProfiling?.map(profiling => {\n return (\n \n {profiling.node_name}\n {profiling.accelerator_execution_time}\n {profiling.cpu_execution_time}\n {profiling.op_defined}\n {profiling.op_run}\n {profiling.total_execution_time}\n \n {\n requestChartCheck(profiling.node_name, e.target.checked);\n }}\n />\n \n \n );\n });\n\n return (\n
\n \n \n \n \n \n \n \n \n \n \n \n {tableContent}\n \n
requestSorting('node_name')}>Name requestSorting('accelerator_execution_time')}>Accelerator execution time [μs] requestSorting('cpu_execution_time')}>CPU execution time [μs] requestSorting('op_defined')}>Op defined requestSorting('op_run')}>Op run requestSorting('total_execution_time')}>Total execution time [μs]Show on chart
\n
\n );\n}\n\nfunction ProfilingChart({ profilingChartData }) {\n return (
\n \n
)\n};\n\nconst getChartData = (profilingData) => {\n let data = [];\n if (Object.keys(profilingData).length) {\n const colorPalette = generateColor(profilingData.length);\n profilingData.forEach((node, index) => {\n data.push({\n name: getLabel(node.node_name),\n x: [node.node_name],\n y: [node.total_execution_time],\n type: 'bar',\n marker: {\n color: colorPalette[index]\n }\n });\n });\n }\n return data;\n}\n\nconst layout = {\n responsive: true,\n xaxis: {\n title: 'Total execution time [μs]',\n showticklabels: false\n },\n yaxis: {\n showgrid: true,\n },\n legend: {\n tracegroupgap: 0,\n },\n opacity: 1,\n};\n\nconst generateColor = (num) => {\n const colorPalette = [];\n const step = 100 / num;\n for (let i = num; i > 0; --i) {\n colorPalette.push(`rgb(${20 + (step * i)}, ${100 - (step * i * 0.1)}, ${200 - (step * i * 0.1)})`);\n }\n return colorPalette;\n}\n\n","import { useState } from 'react';\nimport useEffect from './useIsomorphicEffect';\nvar targetMap = new WeakMap();\nvar resizeObserver;\n\nfunction getResizeObserver() {\n // eslint-disable-next-line no-return-assign\n return resizeObserver = resizeObserver || new window.ResizeObserver(function (entries) {\n entries.forEach(function (entry) {\n var handler = targetMap.get(entry.target);\n if (handler) handler(entry.contentRect);\n });\n });\n}\n/**\n * Efficiently observe size changes on an element. Depends on the `ResizeObserver` api,\n * and polyfills are needed in older browsers.\n *\n * ```ts\n * const [ref, attachRef] = useCallbackRef(null);\n *\n * const rect = useResizeObserver(ref);\n *\n * return (\n *
\n * {JSON.stringify(rect)}\n *
\n * )\n * ```\n *\n * @param element The DOM element to observe\n */\n\n\nexport default function useResizeObserver(element) {\n var _useState = useState(null),\n rect = _useState[0],\n setRect = _useState[1];\n\n useEffect(function () {\n if (!element) return;\n getResizeObserver().observe(element);\n setRect(element.getBoundingClientRect());\n targetMap.set(element, function (rect) {\n setRect(rect);\n });\n return function () {\n targetMap.delete(element);\n };\n }, [element]);\n return rect;\n}","const _excluded = [\"onKeyDown\"];\nfunction _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }\n/* eslint-disable jsx-a11y/no-static-element-interactions */\n/* eslint-disable jsx-a11y/anchor-has-content */\n\nimport * as React from 'react';\nimport { useEventCallback } from '@restart/hooks';\nimport { useButtonProps } from './Button';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport function isTrivialHref(href) {\n return !href || href.trim() === '#';\n}\n/**\n * An generic `
` component that covers a few A11y cases, ensuring that\n * cases where the `href` is missing or trivial like \"#\" are treated like buttons.\n */\nconst Anchor = /*#__PURE__*/React.forwardRef((_ref, ref) => {\n let {\n onKeyDown\n } = _ref,\n props = _objectWithoutPropertiesLoose(_ref, _excluded);\n const [buttonProps] = useButtonProps(Object.assign({\n tagName: 'a'\n }, props));\n const handleKeyDown = useEventCallback(e => {\n buttonProps.onKeyDown(e);\n onKeyDown == null ? void 0 : onKeyDown(e);\n });\n if (isTrivialHref(props.href) || props.role === 'button') {\n return /*#__PURE__*/_jsx(\"a\", Object.assign({\n ref: ref\n }, props, buttonProps, {\n onKeyDown: handleKeyDown\n }));\n }\n return /*#__PURE__*/_jsx(\"a\", Object.assign({\n ref: ref\n }, props, {\n onKeyDown: onKeyDown\n }));\n});\nAnchor.displayName = 'Anchor';\nexport default Anchor;","import PropTypes from 'prop-types';\nimport * as React from 'react';\nimport classNames from 'classnames';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst propTypes = {\n /** An accessible label indicating the relevant information about the Close Button. */\n 'aria-label': PropTypes.string,\n /** A callback fired after the Close Button is clicked. */\n onClick: PropTypes.func,\n /**\n * Render different color variant for the button.\n *\n * Omitting this will render the default dark color.\n */\n variant: PropTypes.oneOf(['white'])\n};\nconst CloseButton = /*#__PURE__*/React.forwardRef(({\n className,\n variant,\n 'aria-label': ariaLabel = 'Close',\n ...props\n}, ref) => /*#__PURE__*/_jsx(\"button\", {\n ref: ref,\n type: \"button\",\n className: classNames('btn-close', variant && `btn-close-${variant}`, className),\n \"aria-label\": ariaLabel,\n ...props\n}));\nCloseButton.displayName = 'CloseButton';\nCloseButton.propTypes = propTypes;\nexport default CloseButton;","import * as React from 'react';\nimport classNames from 'classnames';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default (className => /*#__PURE__*/React.forwardRef((p, ref) => /*#__PURE__*/_jsx(\"div\", {\n ...p,\n ref: ref,\n className: classNames(p.className, className)\n})));","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useUncontrolled } from 'uncontrollable';\nimport useEventCallback from '@restart/hooks/useEventCallback';\nimport Anchor from '@restart/ui/Anchor';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport Fade from './Fade';\nimport CloseButton from './CloseButton';\nimport divWithClassName from './divWithClassName';\nimport createWithBsPrefix from './createWithBsPrefix';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst DivStyledAsH4 = divWithClassName('h4');\nDivStyledAsH4.displayName = 'DivStyledAsH4';\nconst AlertHeading = createWithBsPrefix('alert-heading', {\n Component: DivStyledAsH4\n});\nconst AlertLink = createWithBsPrefix('alert-link', {\n Component: Anchor\n});\nconst Alert = /*#__PURE__*/React.forwardRef((uncontrolledProps, ref) => {\n const {\n bsPrefix,\n show = true,\n closeLabel = 'Close alert',\n closeVariant,\n className,\n children,\n variant = 'primary',\n onClose,\n dismissible,\n transition = Fade,\n ...props\n } = useUncontrolled(uncontrolledProps, {\n show: 'onClose'\n });\n const prefix = useBootstrapPrefix(bsPrefix, 'alert');\n const handleClose = useEventCallback(e => {\n if (onClose) {\n onClose(false, e);\n }\n });\n const Transition = transition === true ? 
Fade : transition;\n const alert = /*#__PURE__*/_jsxs(\"div\", {\n role: \"alert\",\n ...(!Transition ? props : undefined),\n ref: ref,\n className: classNames(className, prefix, variant && `${prefix}-${variant}`, dismissible && `${prefix}-dismissible`),\n children: [dismissible && /*#__PURE__*/_jsx(CloseButton, {\n onClick: handleClose,\n \"aria-label\": closeLabel,\n variant: closeVariant\n }), children]\n });\n if (!Transition) return show ? alert : null;\n return /*#__PURE__*/_jsx(Transition, {\n unmountOnExit: true,\n ...props,\n ref: undefined,\n in: show,\n children: alert\n });\n});\nAlert.displayName = 'Alert';\nexport default Object.assign(Alert, {\n Link: AlertLink,\n Heading: AlertHeading\n});","// -* - coding: utf - 8 -* -\n// Copyright(c) 2023 Intel Corporation\n//\n// Licensed under the Apache License, Version 2.0(the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\nimport React from 'react';\nimport Alert from 'react-bootstrap/Alert';\nimport Button from 'react-bootstrap/Button';\nimport './Warning.scss';\n\nexport default function Warning({ warningText, setWarningText }) {\n\n if (warningText.length) {\n return (\n \n Error\n

\n {warningText}\n

\n
\n \n
\n
\n );\n }\n return;\n}\n","import classNames from 'classnames';\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst propTypes = {\n /**\n * Specify whether the feedback is for valid or invalid fields\n *\n * @type {('valid'|'invalid')}\n */\n type: PropTypes.string,\n /** Display feedback as a tooltip. */\n tooltip: PropTypes.bool,\n as: PropTypes.elementType\n};\nconst Feedback = /*#__PURE__*/React.forwardRef(\n// Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n({\n as: Component = 'div',\n className,\n type = 'valid',\n tooltip = false,\n ...props\n}, ref) => /*#__PURE__*/_jsx(Component, {\n ...props,\n ref: ref,\n className: classNames(className, `${type}-${tooltip ? 'tooltip' : 'feedback'}`)\n}));\nFeedback.displayName = 'Feedback';\nFeedback.propTypes = propTypes;\nexport default Feedback;","import * as React from 'react';\n\n// TODO\n\nconst FormContext = /*#__PURE__*/React.createContext({});\nexport default FormContext;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext } from 'react';\nimport FormContext from './FormContext';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormCheckInput = /*#__PURE__*/React.forwardRef(({\n id,\n bsPrefix,\n className,\n type = 'checkbox',\n isValid = false,\n isInvalid = false,\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as: Component = 'input',\n ...props\n}, ref) => {\n const {\n controlId\n } = useContext(FormContext);\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-check-input');\n return /*#__PURE__*/_jsx(Component, {\n ...props,\n ref: ref,\n type: type,\n id: id || controlId,\n className: classNames(className, bsPrefix, isValid && 'is-valid', isInvalid && 'is-invalid')\n });\n});\nFormCheckInput.displayName = 'FormCheckInput';\nexport default FormCheckInput;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext } from 'react';\nimport FormContext from './FormContext';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormCheckLabel = /*#__PURE__*/React.forwardRef(({\n bsPrefix,\n className,\n htmlFor,\n ...props\n}, ref) => {\n const {\n controlId\n } = useContext(FormContext);\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-check-label');\n return /*#__PURE__*/_jsx(\"label\", {\n ...props,\n ref: ref,\n htmlFor: htmlFor || controlId,\n className: classNames(className, bsPrefix)\n });\n});\nFormCheckLabel.displayName = 'FormCheckLabel';\nexport default FormCheckLabel;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext, useMemo } from 'react';\nimport Feedback from './Feedback';\nimport FormCheckInput from './FormCheckInput';\nimport FormCheckLabel from './FormCheckLabel';\nimport FormContext from './FormContext';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { hasChildOfType } from './ElementChildren';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { Fragment as _Fragment } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst FormCheck = /*#__PURE__*/React.forwardRef(({\n id,\n bsPrefix,\n bsSwitchPrefix,\n inline = false,\n 
reverse = false,\n disabled = false,\n isValid = false,\n isInvalid = false,\n feedbackTooltip = false,\n feedback,\n feedbackType,\n className,\n style,\n title = '',\n type = 'checkbox',\n label,\n children,\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as = 'input',\n ...props\n}, ref) => {\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-check');\n bsSwitchPrefix = useBootstrapPrefix(bsSwitchPrefix, 'form-switch');\n const {\n controlId\n } = useContext(FormContext);\n const innerFormContext = useMemo(() => ({\n controlId: id || controlId\n }), [controlId, id]);\n const hasLabel = !children && label != null && label !== false || hasChildOfType(children, FormCheckLabel);\n const input = /*#__PURE__*/_jsx(FormCheckInput, {\n ...props,\n type: type === 'switch' ? 'checkbox' : type,\n ref: ref,\n isValid: isValid,\n isInvalid: isInvalid,\n disabled: disabled,\n as: as\n });\n return /*#__PURE__*/_jsx(FormContext.Provider, {\n value: innerFormContext,\n children: /*#__PURE__*/_jsx(\"div\", {\n style: style,\n className: classNames(className, hasLabel && bsPrefix, inline && `${bsPrefix}-inline`, reverse && `${bsPrefix}-reverse`, type === 'switch' && bsSwitchPrefix),\n children: children || /*#__PURE__*/_jsxs(_Fragment, {\n children: [input, hasLabel && /*#__PURE__*/_jsx(FormCheckLabel, {\n title: title,\n children: label\n }), feedback && /*#__PURE__*/_jsx(Feedback, {\n type: feedbackType,\n tooltip: feedbackTooltip,\n children: feedback\n })]\n })\n })\n });\n});\nFormCheck.displayName = 'FormCheck';\nexport default Object.assign(FormCheck, {\n Input: FormCheckInput,\n Label: FormCheckLabel\n});","import * as React from 'react';\n\n/**\n * Iterates through children that are typically specified as `props.children`,\n * but only maps over children that are \"valid elements\".\n *\n * The mapFunction provided index will be normalised to the components mapped,\n * so an invalid component would not increase the index.\n *\n */\nfunction map(children, func) {\n let index = 0;\n return React.Children.map(children, child => /*#__PURE__*/React.isValidElement(child) ? 
func(child, index++) : child);\n}\n\n/**\n * Iterates through children that are \"valid elements\".\n *\n * The provided forEachFunc(child, index) will be called for each\n * leaf child with the index reflecting the position relative to \"valid components\".\n */\nfunction forEach(children, func) {\n let index = 0;\n React.Children.forEach(children, child => {\n if ( /*#__PURE__*/React.isValidElement(child)) func(child, index++);\n });\n}\n\n/**\n * Finds whether a component's `children` prop includes a React element of the\n * specified type.\n */\nfunction hasChildOfType(children, type) {\n return React.Children.toArray(children).some(child => /*#__PURE__*/React.isValidElement(child) && child.type === type);\n}\nexport { map, forEach, hasChildOfType };","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext } from 'react';\nimport warning from 'warning';\nimport Feedback from './Feedback';\nimport FormContext from './FormContext';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormControl = /*#__PURE__*/React.forwardRef(({\n bsPrefix,\n type,\n size,\n htmlSize,\n id,\n className,\n isValid = false,\n isInvalid = false,\n plaintext,\n readOnly,\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as: Component = 'input',\n ...props\n}, ref) => {\n const {\n controlId\n } = useContext(FormContext);\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-control');\n let classes;\n if (plaintext) {\n classes = {\n [`${bsPrefix}-plaintext`]: true\n };\n } else {\n classes = {\n [bsPrefix]: true,\n [`${bsPrefix}-${size}`]: size\n };\n }\n process.env.NODE_ENV !== \"production\" ? 
warning(controlId == null || !id, '`controlId` is ignored on `` when `id` is specified.') : void 0;\n return /*#__PURE__*/_jsx(Component, {\n ...props,\n type: type,\n size: htmlSize,\n ref: ref,\n readOnly: readOnly,\n id: id || controlId,\n className: classNames(className, classes, isValid && `is-valid`, isInvalid && `is-invalid`, type === 'color' && `${bsPrefix}-color`)\n });\n});\nFormControl.displayName = 'FormControl';\nexport default Object.assign(FormControl, {\n Feedback\n});","import createWithBsPrefix from './createWithBsPrefix';\nexport default createWithBsPrefix('form-floating');","import * as React from 'react';\nimport { useMemo } from 'react';\nimport FormContext from './FormContext';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormGroup = /*#__PURE__*/React.forwardRef(({\n controlId,\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as: Component = 'div',\n ...props\n}, ref) => {\n const context = useMemo(() => ({\n controlId\n }), [controlId]);\n return /*#__PURE__*/_jsx(FormContext.Provider, {\n value: context,\n children: /*#__PURE__*/_jsx(Component, {\n ...props,\n ref: ref\n })\n });\n});\nFormGroup.displayName = 'FormGroup';\nexport default FormGroup;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useBootstrapPrefix, useBootstrapBreakpoints, useBootstrapMinBreakpoint } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport function useCol({\n as,\n bsPrefix,\n className,\n ...props\n}) {\n bsPrefix = useBootstrapPrefix(bsPrefix, 'col');\n const breakpoints = useBootstrapBreakpoints();\n const minBreakpoint = useBootstrapMinBreakpoint();\n const spans = [];\n const classes = [];\n breakpoints.forEach(brkPoint => {\n const propValue = props[brkPoint];\n delete props[brkPoint];\n let span;\n let offset;\n let order;\n if (typeof propValue === 'object' && propValue != null) {\n ({\n span,\n offset,\n order\n } = propValue);\n } else {\n span = propValue;\n }\n const infix = brkPoint !== minBreakpoint ? `-${brkPoint}` : '';\n if (span) spans.push(span === true ? 
`${bsPrefix}${infix}` : `${bsPrefix}${infix}-${span}`);\n if (order != null) classes.push(`order${infix}-${order}`);\n if (offset != null) classes.push(`offset${infix}-${offset}`);\n });\n return [{\n ...props,\n className: classNames(className, ...spans, ...classes)\n }, {\n as,\n bsPrefix,\n spans\n }];\n}\nconst Col = /*#__PURE__*/React.forwardRef(\n// Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n(props, ref) => {\n const [{\n className,\n ...colProps\n }, {\n as: Component = 'div',\n bsPrefix,\n spans\n }] = useCol(props);\n return /*#__PURE__*/_jsx(Component, {\n ...colProps,\n ref: ref,\n className: classNames(className, !spans.length && bsPrefix)\n });\n});\nCol.displayName = 'Col';\nexport default Col;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext } from 'react';\nimport warning from 'warning';\nimport Col from './Col';\nimport FormContext from './FormContext';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormLabel = /*#__PURE__*/React.forwardRef(({\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as: Component = 'label',\n bsPrefix,\n column = false,\n visuallyHidden = false,\n className,\n htmlFor,\n ...props\n}, ref) => {\n const {\n controlId\n } = useContext(FormContext);\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-label');\n let columnClass = 'col-form-label';\n if (typeof column === 'string') columnClass = `${columnClass} ${columnClass}-${column}`;\n const classes = classNames(className, bsPrefix, visuallyHidden && 'visually-hidden', column && columnClass);\n process.env.NODE_ENV !== \"production\" ? 
warning(controlId == null || !htmlFor, '`controlId` is ignored on `` when `htmlFor` is specified.') : void 0;\n htmlFor = htmlFor || controlId;\n if (column) return /*#__PURE__*/_jsx(Col, {\n ref: ref,\n as: \"label\",\n className: classes,\n htmlFor: htmlFor,\n ...props\n });\n return (\n /*#__PURE__*/\n // eslint-disable-next-line jsx-a11y/label-has-for, jsx-a11y/label-has-associated-control\n _jsx(Component, {\n ref: ref,\n className: classes,\n htmlFor: htmlFor,\n ...props\n })\n );\n});\nFormLabel.displayName = 'FormLabel';\nexport default FormLabel;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext } from 'react';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport FormContext from './FormContext';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormRange = /*#__PURE__*/React.forwardRef(({\n bsPrefix,\n className,\n id,\n ...props\n}, ref) => {\n const {\n controlId\n } = useContext(FormContext);\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-range');\n return /*#__PURE__*/_jsx(\"input\", {\n ...props,\n type: \"range\",\n ref: ref,\n className: classNames(className, bsPrefix),\n id: id || controlId\n });\n});\nFormRange.displayName = 'FormRange';\nexport default FormRange;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useContext } from 'react';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport FormContext from './FormContext';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormSelect = /*#__PURE__*/React.forwardRef(({\n bsPrefix,\n size,\n htmlSize,\n className,\n isValid = false,\n isInvalid = false,\n id,\n ...props\n}, ref) => {\n const {\n controlId\n } = useContext(FormContext);\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-select');\n return /*#__PURE__*/_jsx(\"select\", {\n ...props,\n size: htmlSize,\n ref: ref,\n className: classNames(className, bsPrefix, size && `${bsPrefix}-${size}`, isValid && `is-valid`, isInvalid && `is-invalid`),\n id: id || controlId\n });\n});\nFormSelect.displayName = 'FormSelect';\nexport default FormSelect;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst FormText = /*#__PURE__*/React.forwardRef(\n// Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n({\n bsPrefix,\n className,\n as: Component = 'small',\n muted,\n ...props\n}, ref) => {\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-text');\n return /*#__PURE__*/_jsx(Component, {\n ...props,\n ref: ref,\n className: classNames(className, bsPrefix, muted && 'text-muted')\n });\n});\nFormText.displayName = 'FormText';\nexport default FormText;","import * as React from 'react';\nimport FormCheck from './FormCheck';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst Switch = /*#__PURE__*/React.forwardRef((props, ref) => /*#__PURE__*/_jsx(FormCheck, {\n ...props,\n ref: ref,\n type: \"switch\"\n}));\nSwitch.displayName = 'Switch';\nexport default Object.assign(Switch, {\n Input: FormCheck.Input,\n Label: FormCheck.Label\n});","import classNames from 'classnames';\nimport * as React from 'react';\nimport FormGroup from './FormGroup';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst FloatingLabel = 
/*#__PURE__*/React.forwardRef(({\n bsPrefix,\n className,\n children,\n controlId,\n label,\n ...props\n}, ref) => {\n bsPrefix = useBootstrapPrefix(bsPrefix, 'form-floating');\n return /*#__PURE__*/_jsxs(FormGroup, {\n ref: ref,\n className: classNames(className, bsPrefix),\n controlId: controlId,\n ...props,\n children: [children, /*#__PURE__*/_jsx(\"label\", {\n htmlFor: controlId,\n children: label\n })]\n });\n});\nFloatingLabel.displayName = 'FloatingLabel';\nexport default FloatingLabel;","import classNames from 'classnames';\nimport PropTypes from 'prop-types';\nimport * as React from 'react';\nimport FormCheck from './FormCheck';\nimport FormControl from './FormControl';\nimport FormFloating from './FormFloating';\nimport FormGroup from './FormGroup';\nimport FormLabel from './FormLabel';\nimport FormRange from './FormRange';\nimport FormSelect from './FormSelect';\nimport FormText from './FormText';\nimport Switch from './Switch';\nimport FloatingLabel from './FloatingLabel';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst propTypes = {\n /**\n * The Form `ref` will be forwarded to the underlying element,\n * which means, unless it's rendered `as` a composite component,\n * it will be a DOM node, when resolved.\n *\n * @type {ReactRef}\n * @alias ref\n */\n _ref: PropTypes.any,\n /**\n * Mark a form as having been validated. Setting it to `true` will\n * toggle any validation styles on the forms elements.\n */\n validated: PropTypes.bool,\n as: PropTypes.elementType\n};\nconst Form = /*#__PURE__*/React.forwardRef(({\n className,\n validated,\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as: Component = 'form',\n ...props\n}, ref) => /*#__PURE__*/_jsx(Component, {\n ...props,\n ref: ref,\n className: classNames(className, validated && 'was-validated')\n}));\nForm.displayName = 'Form';\nForm.propTypes = propTypes;\nexport default Object.assign(Form, {\n Group: FormGroup,\n Control: FormControl,\n Floating: FormFloating,\n Check: FormCheck,\n Switch,\n Label: FormLabel,\n Text: FormText,\n Range: FormRange,\n Select: FormSelect,\n FloatingLabel\n});","import * as React from 'react';\nconst context = /*#__PURE__*/React.createContext(null);\ncontext.displayName = 'InputGroupContext';\nexport default context;","import classNames from 'classnames';\nimport * as React from 'react';\nimport { useMemo } from 'react';\nimport createWithBsPrefix from './createWithBsPrefix';\nimport { useBootstrapPrefix } from './ThemeProvider';\nimport FormCheckInput from './FormCheckInput';\nimport InputGroupContext from './InputGroupContext';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst InputGroupText = createWithBsPrefix('input-group-text', {\n Component: 'span'\n});\nconst InputGroupCheckbox = props => /*#__PURE__*/_jsx(InputGroupText, {\n children: /*#__PURE__*/_jsx(FormCheckInput, {\n type: \"checkbox\",\n ...props\n })\n});\nconst InputGroupRadio = props => /*#__PURE__*/_jsx(InputGroupText, {\n children: /*#__PURE__*/_jsx(FormCheckInput, {\n type: \"radio\",\n ...props\n })\n});\nconst InputGroup = /*#__PURE__*/React.forwardRef(({\n bsPrefix,\n size,\n hasValidation,\n className,\n // Need to define the default \"as\" during prop destructuring to be compatible with styled-components github.com/react-bootstrap/react-bootstrap/issues/3595\n as: Component = 'div',\n ...props\n}, ref) => {\n bsPrefix = useBootstrapPrefix(bsPrefix, 'input-group');\n\n // Intentionally 
an empty object. Used in detecting if a dropdown\n // exists under an input group.\n const contextValue = useMemo(() => ({}), []);\n return /*#__PURE__*/_jsx(InputGroupContext.Provider, {\n value: contextValue,\n children: /*#__PURE__*/_jsx(Component, {\n ref: ref,\n ...props,\n className: classNames(className, bsPrefix, size && `${bsPrefix}-${size}`, hasValidation && 'has-validation')\n })\n });\n});\nInputGroup.displayName = 'InputGroup';\nexport default Object.assign(InputGroup, {\n Text: InputGroupText,\n Radio: InputGroupRadio,\n Checkbox: InputGroupCheckbox\n});","// -* - coding: utf - 8 -* -\n// Copyright(c) 2023 Intel Corporation\n//\n// Licensed under the Apache License, Version 2.0(the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\nimport React, { useState } from 'react';\nimport './Diagnosis.scss';\nimport Graph from './../Graph/Graph';\nimport OpDetails from './../OpDetails/OpDetails';\nimport OpList from './../OpList/OpList';\nimport Histogram from './../Histogram/Histogram';\nimport Workloads from './../Workloads/Workloads';\nimport WorkloadDetails from './../WorkloadDetails/WorkloadDetails';\nimport ModelSummary from './../ModelSummary/ModelSummary';\nimport Profiling from './../Profiling/Profiling';\nimport Warning from './../Warning/Warning';\nimport Form from 'react-bootstrap/Form';\nimport InputGroup from 'react-bootstrap/InputGroup';\nimport Button from 'react-bootstrap/esm/Button';\nimport Spinner from 'react-bootstrap/Spinner';\n\nfunction Diagnosis() {\n const [selectedNode, setSelectedNode] = useState(null);\n const [selectedWorkload, setSelectedWorkload] = useState(null);\n const [selectedOp, setSelectedOp] = useState(null);\n const [selectedPattern, setSelectedPattern] = useState([]);\n const [histogramType, setHistogramType] = useState(null);\n const [warningText, setWarningText] = useState('');\n\n return (\n
\n \n
\n
\n
\n \n
\n \n {selectedWorkload?.mode === 'quantization' &&\n \n }\n
\n {selectedWorkload?.mode === 'benchmark' &&\n
\n \n
\n }\n {selectedWorkload?.mode === 'quantization' &&\n
\n {selectedWorkload.framework !== 'PyTorch' &&\n \n }\n {selectedWorkload.framework === 'PyTorch' &&\n \n }\n
\n }\n {selectedWorkload?.mode === 'quantization' &&\n
\n \n \n
\n }\n
\n {selectedWorkload?.mode === 'quantization' && selectedOp &&\n
\n
\n \n
\n
\n {histogramType && }\n
\n
\n }\n
\n )\n};\n\nfunction NodeProperties({ selectedNode }) {\n if (selectedNode) {\n const propertyList = Object.entries(selectedNode.properties).map(([key, value]) => {\n return (\n \n {key}\n {getLabel(value)}\n \n )\n });\n\n const attributeList = selectedNode.attributes?.map(attribute => {\n return (\n \n {attribute.name}\n {attribute.attribute_type}\n {attribute.attribute_type !== \"float32\" &&\n {attribute.value?.toString()}\n }\n {attribute.attribute_type === \"float32\" &&\n {attribute.value.toExponential(2)}\n }\n \n )\n });\n\n return (\n
\n

Node details

\n \n \n \n \n \n {propertyList}\n \n {attributeList && }\n \n {attributeList}\n \n
Properties
Attributes
\n
\n );\n } else {\n return;\n }\n}\n\nclass NodeSearch extends React.Component {\n render() {\n return (\n
\n

Node search

\n \n \n \n \n
\n )\n }\n}\n\nfunction AccuracyResults({ selectedWorkload }) {\n return (\n
\n {selectedWorkload.status === 'wip' &&\n

Quantization is in progress.\n

\n \n
\n

\n }\n {selectedWorkload.status !== 'wip' &&\n !selectedWorkload.accuracy_data.ratio &&\n \n \n \n \n \n \n \n \n \n
Accuracy
results
\n
N/A
\n
FP32
\n
\n
N/A
\n
INT8
\n
\n
N/A
\n
Ratio
\n
\n }\n {selectedWorkload.status !== 'wip' &&\n selectedWorkload.accuracy_data.ratio &&\n \n \n \n \n \n \n \n \n \n
Accuracy
results
\n
{(selectedWorkload.accuracy_data.baseline_accuracy * 100).toPrecision(3)}%
\n
FP32
\n
\n
{(selectedWorkload.accuracy_data.optimized_accuracy * 100).toPrecision(3)}%
\n
INT8
\n
\n
{(selectedWorkload.accuracy_data.ratio * 100).toPrecision(2)}%
\n
Ratio
\n
\n }\n
\n )\n}\n\nexport const getLabel = (label) => {\n if (label.includes('/')) {\n return label.replace(/^.*[\\\\\\/]/, '');\n } else {\n return label;\n }\n}\n\nexport const customColor = [\n '#5B69FF',\n '#FF848A',\n '#EDB200',\n '#1E2EB8',\n '#FF5662',\n '#C98F00',\n '#000F8A',\n '#C81326',\n '#000864',\n '#9D79BC',\n '#A14DA0',\n];\n\nexport default Diagnosis;\n","'use strict';\n\nexport default function bind(fn, thisArg) {\n return function wrap() {\n return fn.apply(thisArg, arguments);\n };\n}\n","'use strict';\n\nimport bind from './helpers/bind.js';\n\n// utils is a library of generic helper functions non-specific to axios\n\nconst {toString} = Object.prototype;\nconst {getPrototypeOf} = Object;\n\nconst kindOf = (cache => thing => {\n const str = toString.call(thing);\n return cache[str] || (cache[str] = str.slice(8, -1).toLowerCase());\n})(Object.create(null));\n\nconst kindOfTest = (type) => {\n type = type.toLowerCase();\n return (thing) => kindOf(thing) === type\n}\n\nconst typeOfTest = type => thing => typeof thing === type;\n\n/**\n * Determine if a value is an Array\n *\n * @param {Object} val The value to test\n *\n * @returns {boolean} True if value is an Array, otherwise false\n */\nconst {isArray} = Array;\n\n/**\n * Determine if a value is undefined\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if the value is undefined, otherwise false\n */\nconst isUndefined = typeOfTest('undefined');\n\n/**\n * Determine if a value is a Buffer\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a Buffer, otherwise false\n */\nfunction isBuffer(val) {\n return val !== null && !isUndefined(val) && val.constructor !== null && !isUndefined(val.constructor)\n && isFunction(val.constructor.isBuffer) && val.constructor.isBuffer(val);\n}\n\n/**\n * Determine if a value is an ArrayBuffer\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is an ArrayBuffer, otherwise false\n */\nconst isArrayBuffer = kindOfTest('ArrayBuffer');\n\n\n/**\n * Determine if a value is a view on an ArrayBuffer\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a view on an ArrayBuffer, otherwise false\n */\nfunction isArrayBufferView(val) {\n let result;\n if ((typeof ArrayBuffer !== 'undefined') && (ArrayBuffer.isView)) {\n result = ArrayBuffer.isView(val);\n } else {\n result = (val) && (val.buffer) && (isArrayBuffer(val.buffer));\n }\n return result;\n}\n\n/**\n * Determine if a value is a String\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a String, otherwise false\n */\nconst isString = typeOfTest('string');\n\n/**\n * Determine if a value is a Function\n *\n * @param {*} val The value to test\n * @returns {boolean} True if value is a Function, otherwise false\n */\nconst isFunction = typeOfTest('function');\n\n/**\n * Determine if a value is a Number\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a Number, otherwise false\n */\nconst isNumber = typeOfTest('number');\n\n/**\n * Determine if a value is an Object\n *\n * @param {*} thing The value to test\n *\n * @returns {boolean} True if value is an Object, otherwise false\n */\nconst isObject = (thing) => thing !== null && typeof thing === 'object';\n\n/**\n * Determine if a value is a Boolean\n *\n * @param {*} thing The value to test\n * @returns {boolean} True if value is a Boolean, otherwise false\n */\nconst isBoolean = thing => thing === true || thing === 
false;\n\n/**\n * Determine if a value is a plain Object\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a plain Object, otherwise false\n */\nconst isPlainObject = (val) => {\n if (kindOf(val) !== 'object') {\n return false;\n }\n\n const prototype = getPrototypeOf(val);\n return (prototype === null || prototype === Object.prototype || Object.getPrototypeOf(prototype) === null) && !(Symbol.toStringTag in val) && !(Symbol.iterator in val);\n}\n\n/**\n * Determine if a value is a Date\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a Date, otherwise false\n */\nconst isDate = kindOfTest('Date');\n\n/**\n * Determine if a value is a File\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a File, otherwise false\n */\nconst isFile = kindOfTest('File');\n\n/**\n * Determine if a value is a Blob\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a Blob, otherwise false\n */\nconst isBlob = kindOfTest('Blob');\n\n/**\n * Determine if a value is a FileList\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a File, otherwise false\n */\nconst isFileList = kindOfTest('FileList');\n\n/**\n * Determine if a value is a Stream\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a Stream, otherwise false\n */\nconst isStream = (val) => isObject(val) && isFunction(val.pipe);\n\n/**\n * Determine if a value is a FormData\n *\n * @param {*} thing The value to test\n *\n * @returns {boolean} True if value is an FormData, otherwise false\n */\nconst isFormData = (thing) => {\n let kind;\n return thing && (\n (typeof FormData === 'function' && thing instanceof FormData) || (\n isFunction(thing.append) && (\n (kind = kindOf(thing)) === 'formdata' ||\n // detect form-data instance\n (kind === 'object' && isFunction(thing.toString) && thing.toString() === '[object FormData]')\n )\n )\n )\n}\n\n/**\n * Determine if a value is a URLSearchParams object\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a URLSearchParams object, otherwise false\n */\nconst isURLSearchParams = kindOfTest('URLSearchParams');\n\n/**\n * Trim excess whitespace off the beginning and end of a string\n *\n * @param {String} str The String to trim\n *\n * @returns {String} The String freed of excess whitespace\n */\nconst trim = (str) => str.trim ?\n str.trim() : str.replace(/^[\\s\\uFEFF\\xA0]+|[\\s\\uFEFF\\xA0]+$/g, '');\n\n/**\n * Iterate over an Array or an Object invoking a function for each item.\n *\n * If `obj` is an Array callback will be called passing\n * the value, index, and complete array for each item.\n *\n * If 'obj' is an Object callback will be called passing\n * the value, key, and complete object for each property.\n *\n * @param {Object|Array} obj The object to iterate\n * @param {Function} fn The callback to invoke for each item\n *\n * @param {Boolean} [allOwnKeys = false]\n * @returns {any}\n */\nfunction forEach(obj, fn, {allOwnKeys = false} = {}) {\n // Don't bother if no value provided\n if (obj === null || typeof obj === 'undefined') {\n return;\n }\n\n let i;\n let l;\n\n // Force an array if not already something iterable\n if (typeof obj !== 'object') {\n /*eslint no-param-reassign:0*/\n obj = [obj];\n }\n\n if (isArray(obj)) {\n // Iterate over array values\n for (i = 0, l = obj.length; i < l; i++) {\n fn.call(null, obj[i], i, obj);\n }\n } else {\n // Iterate over 
object keys\n const keys = allOwnKeys ? Object.getOwnPropertyNames(obj) : Object.keys(obj);\n const len = keys.length;\n let key;\n\n for (i = 0; i < len; i++) {\n key = keys[i];\n fn.call(null, obj[key], key, obj);\n }\n }\n}\n\nfunction findKey(obj, key) {\n key = key.toLowerCase();\n const keys = Object.keys(obj);\n let i = keys.length;\n let _key;\n while (i-- > 0) {\n _key = keys[i];\n if (key === _key.toLowerCase()) {\n return _key;\n }\n }\n return null;\n}\n\nconst _global = (() => {\n /*eslint no-undef:0*/\n if (typeof globalThis !== \"undefined\") return globalThis;\n return typeof self !== \"undefined\" ? self : (typeof window !== 'undefined' ? window : global)\n})();\n\nconst isContextDefined = (context) => !isUndefined(context) && context !== _global;\n\n/**\n * Accepts varargs expecting each argument to be an object, then\n * immutably merges the properties of each object and returns result.\n *\n * When multiple objects contain the same key the later object in\n * the arguments list will take precedence.\n *\n * Example:\n *\n * ```js\n * var result = merge({foo: 123}, {foo: 456});\n * console.log(result.foo); // outputs 456\n * ```\n *\n * @param {Object} obj1 Object to merge\n *\n * @returns {Object} Result of all merge properties\n */\nfunction merge(/* obj1, obj2, obj3, ... */) {\n const {caseless} = isContextDefined(this) && this || {};\n const result = {};\n const assignValue = (val, key) => {\n const targetKey = caseless && findKey(result, key) || key;\n if (isPlainObject(result[targetKey]) && isPlainObject(val)) {\n result[targetKey] = merge(result[targetKey], val);\n } else if (isPlainObject(val)) {\n result[targetKey] = merge({}, val);\n } else if (isArray(val)) {\n result[targetKey] = val.slice();\n } else {\n result[targetKey] = val;\n }\n }\n\n for (let i = 0, l = arguments.length; i < l; i++) {\n arguments[i] && forEach(arguments[i], assignValue);\n }\n return result;\n}\n\n/**\n * Extends object a by mutably adding to it the properties of object b.\n *\n * @param {Object} a The object to be extended\n * @param {Object} b The object to copy properties from\n * @param {Object} thisArg The object to bind function to\n *\n * @param {Boolean} [allOwnKeys]\n * @returns {Object} The resulting value of object a\n */\nconst extend = (a, b, thisArg, {allOwnKeys}= {}) => {\n forEach(b, (val, key) => {\n if (thisArg && isFunction(val)) {\n a[key] = bind(val, thisArg);\n } else {\n a[key] = val;\n }\n }, {allOwnKeys});\n return a;\n}\n\n/**\n * Remove byte order marker. 
This catches EF BB BF (the UTF-8 BOM)\n *\n * @param {string} content with BOM\n *\n * @returns {string} content value without BOM\n */\nconst stripBOM = (content) => {\n if (content.charCodeAt(0) === 0xFEFF) {\n content = content.slice(1);\n }\n return content;\n}\n\n/**\n * Inherit the prototype methods from one constructor into another\n * @param {function} constructor\n * @param {function} superConstructor\n * @param {object} [props]\n * @param {object} [descriptors]\n *\n * @returns {void}\n */\nconst inherits = (constructor, superConstructor, props, descriptors) => {\n constructor.prototype = Object.create(superConstructor.prototype, descriptors);\n constructor.prototype.constructor = constructor;\n Object.defineProperty(constructor, 'super', {\n value: superConstructor.prototype\n });\n props && Object.assign(constructor.prototype, props);\n}\n\n/**\n * Resolve object with deep prototype chain to a flat object\n * @param {Object} sourceObj source object\n * @param {Object} [destObj]\n * @param {Function|Boolean} [filter]\n * @param {Function} [propFilter]\n *\n * @returns {Object}\n */\nconst toFlatObject = (sourceObj, destObj, filter, propFilter) => {\n let props;\n let i;\n let prop;\n const merged = {};\n\n destObj = destObj || {};\n // eslint-disable-next-line no-eq-null,eqeqeq\n if (sourceObj == null) return destObj;\n\n do {\n props = Object.getOwnPropertyNames(sourceObj);\n i = props.length;\n while (i-- > 0) {\n prop = props[i];\n if ((!propFilter || propFilter(prop, sourceObj, destObj)) && !merged[prop]) {\n destObj[prop] = sourceObj[prop];\n merged[prop] = true;\n }\n }\n sourceObj = filter !== false && getPrototypeOf(sourceObj);\n } while (sourceObj && (!filter || filter(sourceObj, destObj)) && sourceObj !== Object.prototype);\n\n return destObj;\n}\n\n/**\n * Determines whether a string ends with the characters of a specified string\n *\n * @param {String} str\n * @param {String} searchString\n * @param {Number} [position= 0]\n *\n * @returns {boolean}\n */\nconst endsWith = (str, searchString, position) => {\n str = String(str);\n if (position === undefined || position > str.length) {\n position = str.length;\n }\n position -= searchString.length;\n const lastIndex = str.indexOf(searchString, position);\n return lastIndex !== -1 && lastIndex === position;\n}\n\n\n/**\n * Returns new array from array like object or null if failed\n *\n * @param {*} [thing]\n *\n * @returns {?Array}\n */\nconst toArray = (thing) => {\n if (!thing) return null;\n if (isArray(thing)) return thing;\n let i = thing.length;\n if (!isNumber(i)) return null;\n const arr = new Array(i);\n while (i-- > 0) {\n arr[i] = thing[i];\n }\n return arr;\n}\n\n/**\n * Checking if the Uint8Array exists and if it does, it returns a function that checks if the\n * thing passed in is an instance of Uint8Array\n *\n * @param {TypedArray}\n *\n * @returns {Array}\n */\n// eslint-disable-next-line func-names\nconst isTypedArray = (TypedArray => {\n // eslint-disable-next-line func-names\n return thing => {\n return TypedArray && thing instanceof TypedArray;\n };\n})(typeof Uint8Array !== 'undefined' && getPrototypeOf(Uint8Array));\n\n/**\n * For each entry in the object, call the function with the key and value.\n *\n * @param {Object} obj - The object to iterate over.\n * @param {Function} fn - The function to call for each entry.\n *\n * @returns {void}\n */\nconst forEachEntry = (obj, fn) => {\n const generator = obj && obj[Symbol.iterator];\n\n const iterator = generator.call(obj);\n\n let result;\n\n while 
((result = iterator.next()) && !result.done) {\n const pair = result.value;\n fn.call(obj, pair[0], pair[1]);\n }\n}\n\n/**\n * It takes a regular expression and a string, and returns an array of all the matches\n *\n * @param {string} regExp - The regular expression to match against.\n * @param {string} str - The string to search.\n *\n * @returns {Array}\n */\nconst matchAll = (regExp, str) => {\n let matches;\n const arr = [];\n\n while ((matches = regExp.exec(str)) !== null) {\n arr.push(matches);\n }\n\n return arr;\n}\n\n/* Checking if the kindOfTest function returns true when passed an HTMLFormElement. */\nconst isHTMLForm = kindOfTest('HTMLFormElement');\n\nconst toCamelCase = str => {\n return str.toLowerCase().replace(/[-_\\s]([a-z\\d])(\\w*)/g,\n function replacer(m, p1, p2) {\n return p1.toUpperCase() + p2;\n }\n );\n};\n\n/* Creating a function that will check if an object has a property. */\nconst hasOwnProperty = (({hasOwnProperty}) => (obj, prop) => hasOwnProperty.call(obj, prop))(Object.prototype);\n\n/**\n * Determine if a value is a RegExp object\n *\n * @param {*} val The value to test\n *\n * @returns {boolean} True if value is a RegExp object, otherwise false\n */\nconst isRegExp = kindOfTest('RegExp');\n\nconst reduceDescriptors = (obj, reducer) => {\n const descriptors = Object.getOwnPropertyDescriptors(obj);\n const reducedDescriptors = {};\n\n forEach(descriptors, (descriptor, name) => {\n if (reducer(descriptor, name, obj) !== false) {\n reducedDescriptors[name] = descriptor;\n }\n });\n\n Object.defineProperties(obj, reducedDescriptors);\n}\n\n/**\n * Makes all methods read-only\n * @param {Object} obj\n */\n\nconst freezeMethods = (obj) => {\n reduceDescriptors(obj, (descriptor, name) => {\n // skip restricted props in strict mode\n if (isFunction(obj) && ['arguments', 'caller', 'callee'].indexOf(name) !== -1) {\n return false;\n }\n\n const value = obj[name];\n\n if (!isFunction(value)) return;\n\n descriptor.enumerable = false;\n\n if ('writable' in descriptor) {\n descriptor.writable = false;\n return;\n }\n\n if (!descriptor.set) {\n descriptor.set = () => {\n throw Error('Can not rewrite read-only method \\'' + name + '\\'');\n };\n }\n });\n}\n\nconst toObjectSet = (arrayOrString, delimiter) => {\n const obj = {};\n\n const define = (arr) => {\n arr.forEach(value => {\n obj[value] = true;\n });\n }\n\n isArray(arrayOrString) ? define(arrayOrString) : define(String(arrayOrString).split(delimiter));\n\n return obj;\n}\n\nconst noop = () => {}\n\nconst toFiniteNumber = (value, defaultValue) => {\n value = +value;\n return Number.isFinite(value) ? 
value : defaultValue;\n}\n\nconst ALPHA = 'abcdefghijklmnopqrstuvwxyz'\n\nconst DIGIT = '0123456789';\n\nconst ALPHABET = {\n DIGIT,\n ALPHA,\n ALPHA_DIGIT: ALPHA + ALPHA.toUpperCase() + DIGIT\n}\n\nconst generateString = (size = 16, alphabet = ALPHABET.ALPHA_DIGIT) => {\n let str = '';\n const {length} = alphabet;\n while (size--) {\n str += alphabet[Math.random() * length|0]\n }\n\n return str;\n}\n\n/**\n * If the thing is a FormData object, return true, otherwise return false.\n *\n * @param {unknown} thing - The thing to check.\n *\n * @returns {boolean}\n */\nfunction isSpecCompliantForm(thing) {\n return !!(thing && isFunction(thing.append) && thing[Symbol.toStringTag] === 'FormData' && thing[Symbol.iterator]);\n}\n\nconst toJSONObject = (obj) => {\n const stack = new Array(10);\n\n const visit = (source, i) => {\n\n if (isObject(source)) {\n if (stack.indexOf(source) >= 0) {\n return;\n }\n\n if(!('toJSON' in source)) {\n stack[i] = source;\n const target = isArray(source) ? [] : {};\n\n forEach(source, (value, key) => {\n const reducedValue = visit(value, i + 1);\n !isUndefined(reducedValue) && (target[key] = reducedValue);\n });\n\n stack[i] = undefined;\n\n return target;\n }\n }\n\n return source;\n }\n\n return visit(obj, 0);\n}\n\nconst isAsyncFn = kindOfTest('AsyncFunction');\n\nconst isThenable = (thing) =>\n thing && (isObject(thing) || isFunction(thing)) && isFunction(thing.then) && isFunction(thing.catch);\n\nexport default {\n isArray,\n isArrayBuffer,\n isBuffer,\n isFormData,\n isArrayBufferView,\n isString,\n isNumber,\n isBoolean,\n isObject,\n isPlainObject,\n isUndefined,\n isDate,\n isFile,\n isBlob,\n isRegExp,\n isFunction,\n isStream,\n isURLSearchParams,\n isTypedArray,\n isFileList,\n forEach,\n merge,\n extend,\n trim,\n stripBOM,\n inherits,\n toFlatObject,\n kindOf,\n kindOfTest,\n endsWith,\n toArray,\n forEachEntry,\n matchAll,\n isHTMLForm,\n hasOwnProperty,\n hasOwnProp: hasOwnProperty, // an alias to avoid ESLint no-prototype-builtins detection\n reduceDescriptors,\n freezeMethods,\n toObjectSet,\n toCamelCase,\n noop,\n toFiniteNumber,\n findKey,\n global: _global,\n isContextDefined,\n ALPHABET,\n generateString,\n isSpecCompliantForm,\n toJSONObject,\n isAsyncFn,\n isThenable\n};\n","'use strict';\n\nimport utils from '../utils.js';\n\n/**\n * Create an Error with the specified message, config, error code, request and response.\n *\n * @param {string} message The error message.\n * @param {string} [code] The error code (for example, 'ECONNABORTED').\n * @param {Object} [config] The config.\n * @param {Object} [request] The request.\n * @param {Object} [response] The response.\n *\n * @returns {Error} The created error.\n */\nfunction AxiosError(message, code, config, request, response) {\n Error.call(this);\n\n if (Error.captureStackTrace) {\n Error.captureStackTrace(this, this.constructor);\n } else {\n this.stack = (new Error()).stack;\n }\n\n this.message = message;\n this.name = 'AxiosError';\n code && (this.code = code);\n config && (this.config = config);\n request && (this.request = request);\n response && (this.response = response);\n}\n\nutils.inherits(AxiosError, Error, {\n toJSON: function toJSON() {\n return {\n // Standard\n message: this.message,\n name: this.name,\n // Microsoft\n description: this.description,\n number: this.number,\n // Mozilla\n fileName: this.fileName,\n lineNumber: this.lineNumber,\n columnNumber: this.columnNumber,\n stack: this.stack,\n // Axios\n config: utils.toJSONObject(this.config),\n code: this.code,\n 
status: this.response && this.response.status ? this.response.status : null\n };\n }\n});\n\nconst prototype = AxiosError.prototype;\nconst descriptors = {};\n\n[\n 'ERR_BAD_OPTION_VALUE',\n 'ERR_BAD_OPTION',\n 'ECONNABORTED',\n 'ETIMEDOUT',\n 'ERR_NETWORK',\n 'ERR_FR_TOO_MANY_REDIRECTS',\n 'ERR_DEPRECATED',\n 'ERR_BAD_RESPONSE',\n 'ERR_BAD_REQUEST',\n 'ERR_CANCELED',\n 'ERR_NOT_SUPPORT',\n 'ERR_INVALID_URL'\n// eslint-disable-next-line func-names\n].forEach(code => {\n descriptors[code] = {value: code};\n});\n\nObject.defineProperties(AxiosError, descriptors);\nObject.defineProperty(prototype, 'isAxiosError', {value: true});\n\n// eslint-disable-next-line func-names\nAxiosError.from = (error, code, config, request, response, customProps) => {\n const axiosError = Object.create(prototype);\n\n utils.toFlatObject(error, axiosError, function filter(obj) {\n return obj !== Error.prototype;\n }, prop => {\n return prop !== 'isAxiosError';\n });\n\n AxiosError.call(axiosError, error.message, code, config, request, response);\n\n axiosError.cause = error;\n\n axiosError.name = error.name;\n\n customProps && Object.assign(axiosError, customProps);\n\n return axiosError;\n};\n\nexport default AxiosError;\n","'use strict';\n\nimport utils from '../utils.js';\nimport AxiosError from '../core/AxiosError.js';\n// temporary hotfix to avoid circular references until AxiosURLSearchParams is refactored\nimport PlatformFormData from '../platform/node/classes/FormData.js';\n\n/**\n * Determines if the given thing is a array or js object.\n *\n * @param {string} thing - The object or array to be visited.\n *\n * @returns {boolean}\n */\nfunction isVisitable(thing) {\n return utils.isPlainObject(thing) || utils.isArray(thing);\n}\n\n/**\n * It removes the brackets from the end of a string\n *\n * @param {string} key - The key of the parameter.\n *\n * @returns {string} the key without the brackets.\n */\nfunction removeBrackets(key) {\n return utils.endsWith(key, '[]') ? key.slice(0, -2) : key;\n}\n\n/**\n * It takes a path, a key, and a boolean, and returns a string\n *\n * @param {string} path - The path to the current key.\n * @param {string} key - The key of the current object being iterated over.\n * @param {string} dots - If true, the key will be rendered with dots instead of brackets.\n *\n * @returns {string} The path to the current key.\n */\nfunction renderKey(path, key, dots) {\n if (!path) return key;\n return path.concat(key).map(function each(token, i) {\n // eslint-disable-next-line no-param-reassign\n token = removeBrackets(token);\n return !dots && i ? '[' + token + ']' : token;\n }).join(dots ? '.' 
: '');\n}\n\n/**\n * If the array is an array and none of its elements are visitable, then it's a flat array.\n *\n * @param {Array} arr - The array to check\n *\n * @returns {boolean}\n */\nfunction isFlatArray(arr) {\n return utils.isArray(arr) && !arr.some(isVisitable);\n}\n\nconst predicates = utils.toFlatObject(utils, {}, null, function filter(prop) {\n return /^is[A-Z]/.test(prop);\n});\n\n/**\n * Convert a data object to FormData\n *\n * @param {Object} obj\n * @param {?Object} [formData]\n * @param {?Object} [options]\n * @param {Function} [options.visitor]\n * @param {Boolean} [options.metaTokens = true]\n * @param {Boolean} [options.dots = false]\n * @param {?Boolean} [options.indexes = false]\n *\n * @returns {Object}\n **/\n\n/**\n * It converts an object into a FormData object\n *\n * @param {Object} obj - The object to convert to form data.\n * @param {string} formData - The FormData object to append to.\n * @param {Object} options\n *\n * @returns\n */\nfunction toFormData(obj, formData, options) {\n if (!utils.isObject(obj)) {\n throw new TypeError('target must be an object');\n }\n\n // eslint-disable-next-line no-param-reassign\n formData = formData || new (PlatformFormData || FormData)();\n\n // eslint-disable-next-line no-param-reassign\n options = utils.toFlatObject(options, {\n metaTokens: true,\n dots: false,\n indexes: false\n }, false, function defined(option, source) {\n // eslint-disable-next-line no-eq-null,eqeqeq\n return !utils.isUndefined(source[option]);\n });\n\n const metaTokens = options.metaTokens;\n // eslint-disable-next-line no-use-before-define\n const visitor = options.visitor || defaultVisitor;\n const dots = options.dots;\n const indexes = options.indexes;\n const _Blob = options.Blob || typeof Blob !== 'undefined' && Blob;\n const useBlob = _Blob && utils.isSpecCompliantForm(formData);\n\n if (!utils.isFunction(visitor)) {\n throw new TypeError('visitor must be a function');\n }\n\n function convertValue(value) {\n if (value === null) return '';\n\n if (utils.isDate(value)) {\n return value.toISOString();\n }\n\n if (!useBlob && utils.isBlob(value)) {\n throw new AxiosError('Blob is not supported. Use a Buffer instead.');\n }\n\n if (utils.isArrayBuffer(value) || utils.isTypedArray(value)) {\n return useBlob && typeof Blob === 'function' ? new Blob([value]) : Buffer.from(value);\n }\n\n return value;\n }\n\n /**\n * Default visitor.\n *\n * @param {*} value\n * @param {String|Number} key\n * @param {Array} path\n * @this {FormData}\n *\n * @returns {boolean} return true to visit the each prop of the value recursively\n */\n function defaultVisitor(value, key, path) {\n let arr = value;\n\n if (value && !path && typeof value === 'object') {\n if (utils.endsWith(key, '{}')) {\n // eslint-disable-next-line no-param-reassign\n key = metaTokens ? key : key.slice(0, -2);\n // eslint-disable-next-line no-param-reassign\n value = JSON.stringify(value);\n } else if (\n (utils.isArray(value) && isFlatArray(value)) ||\n ((utils.isFileList(value) || utils.endsWith(key, '[]')) && (arr = utils.toArray(value))\n )) {\n // eslint-disable-next-line no-param-reassign\n key = removeBrackets(key);\n\n arr.forEach(function each(el, index) {\n !(utils.isUndefined(el) || el === null) && formData.append(\n // eslint-disable-next-line no-nested-ternary\n indexes === true ? renderKey([key], index, dots) : (indexes === null ? 
key : key + '[]'),\n convertValue(el)\n );\n });\n return false;\n }\n }\n\n if (isVisitable(value)) {\n return true;\n }\n\n formData.append(renderKey(path, key, dots), convertValue(value));\n\n return false;\n }\n\n const stack = [];\n\n const exposedHelpers = Object.assign(predicates, {\n defaultVisitor,\n convertValue,\n isVisitable\n });\n\n function build(value, path) {\n if (utils.isUndefined(value)) return;\n\n if (stack.indexOf(value) !== -1) {\n throw Error('Circular reference detected in ' + path.join('.'));\n }\n\n stack.push(value);\n\n utils.forEach(value, function each(el, key) {\n const result = !(utils.isUndefined(el) || el === null) && visitor.call(\n formData, el, utils.isString(key) ? key.trim() : key, path, exposedHelpers\n );\n\n if (result === true) {\n build(el, path ? path.concat(key) : [key]);\n }\n });\n\n stack.pop();\n }\n\n if (!utils.isObject(obj)) {\n throw new TypeError('data must be an object');\n }\n\n build(obj);\n\n return formData;\n}\n\nexport default toFormData;\n","'use strict';\n\nimport toFormData from './toFormData.js';\n\n/**\n * It encodes a string by replacing all characters that are not in the unreserved set with\n * their percent-encoded equivalents\n *\n * @param {string} str - The string to encode.\n *\n * @returns {string} The encoded string.\n */\nfunction encode(str) {\n const charMap = {\n '!': '%21',\n \"'\": '%27',\n '(': '%28',\n ')': '%29',\n '~': '%7E',\n '%20': '+',\n '%00': '\\x00'\n };\n return encodeURIComponent(str).replace(/[!'()~]|%20|%00/g, function replacer(match) {\n return charMap[match];\n });\n}\n\n/**\n * It takes a params object and converts it to a FormData object\n *\n * @param {Object} params - The parameters to be converted to a FormData object.\n * @param {Object} options - The options object passed to the Axios constructor.\n *\n * @returns {void}\n */\nfunction AxiosURLSearchParams(params, options) {\n this._pairs = [];\n\n params && toFormData(params, this, options);\n}\n\nconst prototype = AxiosURLSearchParams.prototype;\n\nprototype.append = function append(name, value) {\n this._pairs.push([name, value]);\n};\n\nprototype.toString = function toString(encoder) {\n const _encode = encoder ? 
function(value) {\n return encoder.call(this, value, encode);\n } : encode;\n\n return this._pairs.map(function each(pair) {\n return _encode(pair[0]) + '=' + _encode(pair[1]);\n }, '').join('&');\n};\n\nexport default AxiosURLSearchParams;\n","'use strict';\n\nimport utils from '../utils.js';\nimport AxiosURLSearchParams from '../helpers/AxiosURLSearchParams.js';\n\n/**\n * It replaces all instances of the characters `:`, `$`, `,`, `+`, `[`, and `]` with their\n * URI encoded counterparts\n *\n * @param {string} val The value to be encoded.\n *\n * @returns {string} The encoded value.\n */\nfunction encode(val) {\n return encodeURIComponent(val).\n replace(/%3A/gi, ':').\n replace(/%24/g, '$').\n replace(/%2C/gi, ',').\n replace(/%20/g, '+').\n replace(/%5B/gi, '[').\n replace(/%5D/gi, ']');\n}\n\n/**\n * Build a URL by appending params to the end\n *\n * @param {string} url The base of the url (e.g., http://www.google.com)\n * @param {object} [params] The params to be appended\n * @param {?object} options\n *\n * @returns {string} The formatted url\n */\nexport default function buildURL(url, params, options) {\n /*eslint no-param-reassign:0*/\n if (!params) {\n return url;\n }\n \n const _encode = options && options.encode || encode;\n\n const serializeFn = options && options.serialize;\n\n let serializedParams;\n\n if (serializeFn) {\n serializedParams = serializeFn(params, options);\n } else {\n serializedParams = utils.isURLSearchParams(params) ?\n params.toString() :\n new AxiosURLSearchParams(params, options).toString(_encode);\n }\n\n if (serializedParams) {\n const hashmarkIndex = url.indexOf(\"#\");\n\n if (hashmarkIndex !== -1) {\n url = url.slice(0, hashmarkIndex);\n }\n url += (url.indexOf('?') === -1 ? '?' : '&') + serializedParams;\n }\n\n return url;\n}\n","'use strict';\n\nimport utils from './../utils.js';\n\nclass InterceptorManager {\n constructor() {\n this.handlers = [];\n }\n\n /**\n * Add a new interceptor to the stack\n *\n * @param {Function} fulfilled The function to handle `then` for a `Promise`\n * @param {Function} rejected The function to handle `reject` for a `Promise`\n *\n * @return {Number} An ID used to remove interceptor later\n */\n use(fulfilled, rejected, options) {\n this.handlers.push({\n fulfilled,\n rejected,\n synchronous: options ? options.synchronous : false,\n runWhen: options ? 
options.runWhen : null\n });\n return this.handlers.length - 1;\n }\n\n /**\n * Remove an interceptor from the stack\n *\n * @param {Number} id The ID that was returned by `use`\n *\n * @returns {Boolean} `true` if the interceptor was removed, `false` otherwise\n */\n eject(id) {\n if (this.handlers[id]) {\n this.handlers[id] = null;\n }\n }\n\n /**\n * Clear all interceptors from the stack\n *\n * @returns {void}\n */\n clear() {\n if (this.handlers) {\n this.handlers = [];\n }\n }\n\n /**\n * Iterate over all the registered interceptors\n *\n * This method is particularly useful for skipping over any\n * interceptors that may have become `null` calling `eject`.\n *\n * @param {Function} fn The function to call for each interceptor\n *\n * @returns {void}\n */\n forEach(fn) {\n utils.forEach(this.handlers, function forEachHandler(h) {\n if (h !== null) {\n fn(h);\n }\n });\n }\n}\n\nexport default InterceptorManager;\n","'use strict';\n\nexport default {\n silentJSONParsing: true,\n forcedJSONParsing: true,\n clarifyTimeoutError: false\n};\n","import URLSearchParams from './classes/URLSearchParams.js'\nimport FormData from './classes/FormData.js'\nimport Blob from './classes/Blob.js'\n\n/**\n * Determine if we're running in a standard browser environment\n *\n * This allows axios to run in a web worker, and react-native.\n * Both environments support XMLHttpRequest, but not fully standard globals.\n *\n * web workers:\n * typeof window -> undefined\n * typeof document -> undefined\n *\n * react-native:\n * navigator.product -> 'ReactNative'\n * nativescript\n * navigator.product -> 'NativeScript' or 'NS'\n *\n * @returns {boolean}\n */\nconst isStandardBrowserEnv = (() => {\n let product;\n if (typeof navigator !== 'undefined' && (\n (product = navigator.product) === 'ReactNative' ||\n product === 'NativeScript' ||\n product === 'NS')\n ) {\n return false;\n }\n\n return typeof window !== 'undefined' && typeof document !== 'undefined';\n})();\n\n/**\n * Determine if we're running in a standard browser webWorker environment\n *\n * Although the `isStandardBrowserEnv` method indicates that\n * `allows axios to run in a web worker`, the WebWorker will still be\n * filtered out due to its judgment standard\n * `typeof window !== 'undefined' && typeof document !== 'undefined'`.\n * This leads to a problem when axios post `FormData` in webWorker\n */\n const isStandardBrowserWebWorkerEnv = (() => {\n return (\n typeof WorkerGlobalScope !== 'undefined' &&\n // eslint-disable-next-line no-undef\n self instanceof WorkerGlobalScope &&\n typeof self.importScripts === 'function'\n );\n})();\n\n\nexport default {\n isBrowser: true,\n classes: {\n URLSearchParams,\n FormData,\n Blob\n },\n isStandardBrowserEnv,\n isStandardBrowserWebWorkerEnv,\n protocols: ['http', 'https', 'file', 'blob', 'url', 'data']\n};\n","'use strict';\n\nimport AxiosURLSearchParams from '../../../helpers/AxiosURLSearchParams.js';\nexport default typeof URLSearchParams !== 'undefined' ? URLSearchParams : AxiosURLSearchParams;\n","'use strict';\n\nexport default typeof FormData !== 'undefined' ? FormData : null;\n","'use strict'\n\nexport default typeof Blob !== 'undefined' ? 
Blob : null\n","'use strict';\n\nimport utils from '../utils.js';\n\n/**\n * It takes a string like `foo[x][y][z]` and returns an array like `['foo', 'x', 'y', 'z']\n *\n * @param {string} name - The name of the property to get.\n *\n * @returns An array of strings.\n */\nfunction parsePropPath(name) {\n // foo[x][y][z]\n // foo.x.y.z\n // foo-x-y-z\n // foo x y z\n return utils.matchAll(/\\w+|\\[(\\w*)]/g, name).map(match => {\n return match[0] === '[]' ? '' : match[1] || match[0];\n });\n}\n\n/**\n * Convert an array to an object.\n *\n * @param {Array} arr - The array to convert to an object.\n *\n * @returns An object with the same keys and values as the array.\n */\nfunction arrayToObject(arr) {\n const obj = {};\n const keys = Object.keys(arr);\n let i;\n const len = keys.length;\n let key;\n for (i = 0; i < len; i++) {\n key = keys[i];\n obj[key] = arr[key];\n }\n return obj;\n}\n\n/**\n * It takes a FormData object and returns a JavaScript object\n *\n * @param {string} formData The FormData object to convert to JSON.\n *\n * @returns {Object | null} The converted object.\n */\nfunction formDataToJSON(formData) {\n function buildPath(path, value, target, index) {\n let name = path[index++];\n const isNumericKey = Number.isFinite(+name);\n const isLast = index >= path.length;\n name = !name && utils.isArray(target) ? target.length : name;\n\n if (isLast) {\n if (utils.hasOwnProp(target, name)) {\n target[name] = [target[name], value];\n } else {\n target[name] = value;\n }\n\n return !isNumericKey;\n }\n\n if (!target[name] || !utils.isObject(target[name])) {\n target[name] = [];\n }\n\n const result = buildPath(path, value, target[name], index);\n\n if (result && utils.isArray(target[name])) {\n target[name] = arrayToObject(target[name]);\n }\n\n return !isNumericKey;\n }\n\n if (utils.isFormData(formData) && utils.isFunction(formData.entries)) {\n const obj = {};\n\n utils.forEachEntry(formData, (name, value) => {\n buildPath(parsePropPath(name), value, obj, 0);\n });\n\n return obj;\n }\n\n return null;\n}\n\nexport default formDataToJSON;\n","'use strict';\n\nimport utils from '../utils.js';\nimport AxiosError from '../core/AxiosError.js';\nimport transitionalDefaults from './transitional.js';\nimport toFormData from '../helpers/toFormData.js';\nimport toURLEncodedForm from '../helpers/toURLEncodedForm.js';\nimport platform from '../platform/index.js';\nimport formDataToJSON from '../helpers/formDataToJSON.js';\n\nconst DEFAULT_CONTENT_TYPE = {\n 'Content-Type': undefined\n};\n\n/**\n * It takes a string, tries to parse it, and if it fails, it returns the stringified version\n * of the input\n *\n * @param {any} rawValue - The value to be stringified.\n * @param {Function} parser - A function that parses a string into a JavaScript object.\n * @param {Function} encoder - A function that takes a value and returns a string.\n *\n * @returns {string} A stringified version of the rawValue.\n */\nfunction stringifySafely(rawValue, parser, encoder) {\n if (utils.isString(rawValue)) {\n try {\n (parser || JSON.parse)(rawValue);\n return utils.trim(rawValue);\n } catch (e) {\n if (e.name !== 'SyntaxError') {\n throw e;\n }\n }\n }\n\n return (encoder || JSON.stringify)(rawValue);\n}\n\nconst defaults = {\n\n transitional: transitionalDefaults,\n\n adapter: ['xhr', 'http'],\n\n transformRequest: [function transformRequest(data, headers) {\n const contentType = headers.getContentType() || '';\n const hasJSONContentType = contentType.indexOf('application/json') > -1;\n const isObjectPayload 
= utils.isObject(data);\n\n if (isObjectPayload && utils.isHTMLForm(data)) {\n data = new FormData(data);\n }\n\n const isFormData = utils.isFormData(data);\n\n if (isFormData) {\n if (!hasJSONContentType) {\n return data;\n }\n return hasJSONContentType ? JSON.stringify(formDataToJSON(data)) : data;\n }\n\n if (utils.isArrayBuffer(data) ||\n utils.isBuffer(data) ||\n utils.isStream(data) ||\n utils.isFile(data) ||\n utils.isBlob(data)\n ) {\n return data;\n }\n if (utils.isArrayBufferView(data)) {\n return data.buffer;\n }\n if (utils.isURLSearchParams(data)) {\n headers.setContentType('application/x-www-form-urlencoded;charset=utf-8', false);\n return data.toString();\n }\n\n let isFileList;\n\n if (isObjectPayload) {\n if (contentType.indexOf('application/x-www-form-urlencoded') > -1) {\n return toURLEncodedForm(data, this.formSerializer).toString();\n }\n\n if ((isFileList = utils.isFileList(data)) || contentType.indexOf('multipart/form-data') > -1) {\n const _FormData = this.env && this.env.FormData;\n\n return toFormData(\n isFileList ? {'files[]': data} : data,\n _FormData && new _FormData(),\n this.formSerializer\n );\n }\n }\n\n if (isObjectPayload || hasJSONContentType ) {\n headers.setContentType('application/json', false);\n return stringifySafely(data);\n }\n\n return data;\n }],\n\n transformResponse: [function transformResponse(data) {\n const transitional = this.transitional || defaults.transitional;\n const forcedJSONParsing = transitional && transitional.forcedJSONParsing;\n const JSONRequested = this.responseType === 'json';\n\n if (data && utils.isString(data) && ((forcedJSONParsing && !this.responseType) || JSONRequested)) {\n const silentJSONParsing = transitional && transitional.silentJSONParsing;\n const strictJSONParsing = !silentJSONParsing && JSONRequested;\n\n try {\n return JSON.parse(data);\n } catch (e) {\n if (strictJSONParsing) {\n if (e.name === 'SyntaxError') {\n throw AxiosError.from(e, AxiosError.ERR_BAD_RESPONSE, this, null, this.response);\n }\n throw e;\n }\n }\n }\n\n return data;\n }],\n\n /**\n * A timeout in milliseconds to abort a request. If set to 0 (default) a\n * timeout is not created.\n */\n timeout: 0,\n\n xsrfCookieName: 'XSRF-TOKEN',\n xsrfHeaderName: 'X-XSRF-TOKEN',\n\n maxContentLength: -1,\n maxBodyLength: -1,\n\n env: {\n FormData: platform.classes.FormData,\n Blob: platform.classes.Blob\n },\n\n validateStatus: function validateStatus(status) {\n return status >= 200 && status < 300;\n },\n\n headers: {\n common: {\n 'Accept': 'application/json, text/plain, */*'\n }\n }\n};\n\nutils.forEach(['delete', 'get', 'head'], function forEachMethodNoData(method) {\n defaults.headers[method] = {};\n});\n\nutils.forEach(['post', 'put', 'patch'], function forEachMethodWithData(method) {\n defaults.headers[method] = utils.merge(DEFAULT_CONTENT_TYPE);\n});\n\nexport default defaults;\n","'use strict';\n\nimport utils from '../utils.js';\nimport toFormData from './toFormData.js';\nimport platform from '../platform/index.js';\n\nexport default function toURLEncodedForm(data, options) {\n return toFormData(data, new platform.classes.URLSearchParams(), Object.assign({\n visitor: function(value, key, path, helpers) {\n if (platform.isNode && utils.isBuffer(value)) {\n this.append(key, value.toString('base64'));\n return false;\n }\n\n return helpers.defaultVisitor.apply(this, arguments);\n }\n }, options));\n}\n","'use strict';\n\nimport utils from './../utils.js';\n\n// RawAxiosHeaders whose duplicates are ignored by node\n// c.f. 
https://nodejs.org/api/http.html#http_message_headers\nconst ignoreDuplicateOf = utils.toObjectSet([\n 'age', 'authorization', 'content-length', 'content-type', 'etag',\n 'expires', 'from', 'host', 'if-modified-since', 'if-unmodified-since',\n 'last-modified', 'location', 'max-forwards', 'proxy-authorization',\n 'referer', 'retry-after', 'user-agent'\n]);\n\n/**\n * Parse headers into an object\n *\n * ```\n * Date: Wed, 27 Aug 2014 08:58:49 GMT\n * Content-Type: application/json\n * Connection: keep-alive\n * Transfer-Encoding: chunked\n * ```\n *\n * @param {String} rawHeaders Headers needing to be parsed\n *\n * @returns {Object} Headers parsed into an object\n */\nexport default rawHeaders => {\n const parsed = {};\n let key;\n let val;\n let i;\n\n rawHeaders && rawHeaders.split('\\n').forEach(function parser(line) {\n i = line.indexOf(':');\n key = line.substring(0, i).trim().toLowerCase();\n val = line.substring(i + 1).trim();\n\n if (!key || (parsed[key] && ignoreDuplicateOf[key])) {\n return;\n }\n\n if (key === 'set-cookie') {\n if (parsed[key]) {\n parsed[key].push(val);\n } else {\n parsed[key] = [val];\n }\n } else {\n parsed[key] = parsed[key] ? parsed[key] + ', ' + val : val;\n }\n });\n\n return parsed;\n};\n","'use strict';\n\nimport utils from '../utils.js';\nimport parseHeaders from '../helpers/parseHeaders.js';\n\nconst $internals = Symbol('internals');\n\nfunction normalizeHeader(header) {\n return header && String(header).trim().toLowerCase();\n}\n\nfunction normalizeValue(value) {\n if (value === false || value == null) {\n return value;\n }\n\n return utils.isArray(value) ? value.map(normalizeValue) : String(value);\n}\n\nfunction parseTokens(str) {\n const tokens = Object.create(null);\n const tokensRE = /([^\\s,;=]+)\\s*(?:=\\s*([^,;]+))?/g;\n let match;\n\n while ((match = tokensRE.exec(str))) {\n tokens[match[1]] = match[2];\n }\n\n return tokens;\n}\n\nconst isValidHeaderName = (str) => /^[-_a-zA-Z0-9^`|~,!#$%&'*+.]+$/.test(str.trim());\n\nfunction matchHeaderValue(context, value, header, filter, isHeaderNameFilter) {\n if (utils.isFunction(filter)) {\n return filter.call(this, value, header);\n }\n\n if (isHeaderNameFilter) {\n value = header;\n }\n\n if (!utils.isString(value)) return;\n\n if (utils.isString(filter)) {\n return value.indexOf(filter) !== -1;\n }\n\n if (utils.isRegExp(filter)) {\n return filter.test(value);\n }\n}\n\nfunction formatHeader(header) {\n return header.trim()\n .toLowerCase().replace(/([a-z\\d])(\\w*)/g, (w, char, str) => {\n return char.toUpperCase() + str;\n });\n}\n\nfunction buildAccessors(obj, header) {\n const accessorName = utils.toCamelCase(' ' + header);\n\n ['get', 'set', 'has'].forEach(methodName => {\n Object.defineProperty(obj, methodName + accessorName, {\n value: function(arg1, arg2, arg3) {\n return this[methodName].call(this, header, arg1, arg2, arg3);\n },\n configurable: true\n });\n });\n}\n\nclass AxiosHeaders {\n constructor(headers) {\n headers && this.set(headers);\n }\n\n set(header, valueOrRewrite, rewrite) {\n const self = this;\n\n function setHeader(_value, _header, _rewrite) {\n const lHeader = normalizeHeader(_header);\n\n if (!lHeader) {\n throw new Error('header name must be a non-empty string');\n }\n\n const key = utils.findKey(self, lHeader);\n\n if(!key || self[key] === undefined || _rewrite === true || (_rewrite === undefined && self[key] !== false)) {\n self[key || _header] = normalizeValue(_value);\n }\n }\n\n const setHeaders = (headers, _rewrite) =>\n utils.forEach(headers, (_value, 
_header) => setHeader(_value, _header, _rewrite));\n\n if (utils.isPlainObject(header) || header instanceof this.constructor) {\n setHeaders(header, valueOrRewrite)\n } else if(utils.isString(header) && (header = header.trim()) && !isValidHeaderName(header)) {\n setHeaders(parseHeaders(header), valueOrRewrite);\n } else {\n header != null && setHeader(valueOrRewrite, header, rewrite);\n }\n\n return this;\n }\n\n get(header, parser) {\n header = normalizeHeader(header);\n\n if (header) {\n const key = utils.findKey(this, header);\n\n if (key) {\n const value = this[key];\n\n if (!parser) {\n return value;\n }\n\n if (parser === true) {\n return parseTokens(value);\n }\n\n if (utils.isFunction(parser)) {\n return parser.call(this, value, key);\n }\n\n if (utils.isRegExp(parser)) {\n return parser.exec(value);\n }\n\n throw new TypeError('parser must be boolean|regexp|function');\n }\n }\n }\n\n has(header, matcher) {\n header = normalizeHeader(header);\n\n if (header) {\n const key = utils.findKey(this, header);\n\n return !!(key && this[key] !== undefined && (!matcher || matchHeaderValue(this, this[key], key, matcher)));\n }\n\n return false;\n }\n\n delete(header, matcher) {\n const self = this;\n let deleted = false;\n\n function deleteHeader(_header) {\n _header = normalizeHeader(_header);\n\n if (_header) {\n const key = utils.findKey(self, _header);\n\n if (key && (!matcher || matchHeaderValue(self, self[key], key, matcher))) {\n delete self[key];\n\n deleted = true;\n }\n }\n }\n\n if (utils.isArray(header)) {\n header.forEach(deleteHeader);\n } else {\n deleteHeader(header);\n }\n\n return deleted;\n }\n\n clear(matcher) {\n const keys = Object.keys(this);\n let i = keys.length;\n let deleted = false;\n\n while (i--) {\n const key = keys[i];\n if(!matcher || matchHeaderValue(this, this[key], key, matcher, true)) {\n delete this[key];\n deleted = true;\n }\n }\n\n return deleted;\n }\n\n normalize(format) {\n const self = this;\n const headers = {};\n\n utils.forEach(this, (value, header) => {\n const key = utils.findKey(headers, header);\n\n if (key) {\n self[key] = normalizeValue(value);\n delete self[header];\n return;\n }\n\n const normalized = format ? formatHeader(header) : String(header).trim();\n\n if (normalized !== header) {\n delete self[header];\n }\n\n self[normalized] = normalizeValue(value);\n\n headers[normalized] = true;\n });\n\n return this;\n }\n\n concat(...targets) {\n return this.constructor.concat(this, ...targets);\n }\n\n toJSON(asStrings) {\n const obj = Object.create(null);\n\n utils.forEach(this, (value, header) => {\n value != null && value !== false && (obj[header] = asStrings && utils.isArray(value) ? value.join(', ') : value);\n });\n\n return obj;\n }\n\n [Symbol.iterator]() {\n return Object.entries(this.toJSON())[Symbol.iterator]();\n }\n\n toString() {\n return Object.entries(this.toJSON()).map(([header, value]) => header + ': ' + value).join('\\n');\n }\n\n get [Symbol.toStringTag]() {\n return 'AxiosHeaders';\n }\n\n static from(thing) {\n return thing instanceof this ? 
thing : new this(thing);\n }\n\n static concat(first, ...targets) {\n const computed = new this(first);\n\n targets.forEach((target) => computed.set(target));\n\n return computed;\n }\n\n static accessor(header) {\n const internals = this[$internals] = (this[$internals] = {\n accessors: {}\n });\n\n const accessors = internals.accessors;\n const prototype = this.prototype;\n\n function defineAccessor(_header) {\n const lHeader = normalizeHeader(_header);\n\n if (!accessors[lHeader]) {\n buildAccessors(prototype, _header);\n accessors[lHeader] = true;\n }\n }\n\n utils.isArray(header) ? header.forEach(defineAccessor) : defineAccessor(header);\n\n return this;\n }\n}\n\nAxiosHeaders.accessor(['Content-Type', 'Content-Length', 'Accept', 'Accept-Encoding', 'User-Agent', 'Authorization']);\n\nutils.freezeMethods(AxiosHeaders.prototype);\nutils.freezeMethods(AxiosHeaders);\n\nexport default AxiosHeaders;\n","'use strict';\n\nimport utils from './../utils.js';\nimport defaults from '../defaults/index.js';\nimport AxiosHeaders from '../core/AxiosHeaders.js';\n\n/**\n * Transform the data for a request or a response\n *\n * @param {Array|Function} fns A single function or Array of functions\n * @param {?Object} response The response object\n *\n * @returns {*} The resulting transformed data\n */\nexport default function transformData(fns, response) {\n const config = this || defaults;\n const context = response || config;\n const headers = AxiosHeaders.from(context.headers);\n let data = context.data;\n\n utils.forEach(fns, function transform(fn) {\n data = fn.call(config, data, headers.normalize(), response ? response.status : undefined);\n });\n\n headers.normalize();\n\n return data;\n}\n","'use strict';\n\nexport default function isCancel(value) {\n return !!(value && value.__CANCEL__);\n}\n","'use strict';\n\nimport AxiosError from '../core/AxiosError.js';\nimport utils from '../utils.js';\n\n/**\n * A `CanceledError` is an object that is thrown when an operation is canceled.\n *\n * @param {string=} message The message.\n * @param {Object=} config The config.\n * @param {Object=} request The request.\n *\n * @returns {CanceledError} The created error.\n */\nfunction CanceledError(message, config, request) {\n // eslint-disable-next-line no-eq-null,eqeqeq\n AxiosError.call(this, message == null ? 'canceled' : message, AxiosError.ERR_CANCELED, config, request);\n this.name = 'CanceledError';\n}\n\nutils.inherits(CanceledError, AxiosError, {\n __CANCEL__: true\n});\n\nexport default CanceledError;\n","'use strict';\n\nimport utils from './../utils.js';\nimport platform from '../platform/index.js';\n\nexport default platform.isStandardBrowserEnv ?\n\n// Standard browser envs support document.cookie\n (function standardBrowserEnv() {\n return {\n write: function write(name, value, expires, path, domain, secure) {\n const cookie = [];\n cookie.push(name + '=' + encodeURIComponent(value));\n\n if (utils.isNumber(expires)) {\n cookie.push('expires=' + new Date(expires).toGMTString());\n }\n\n if (utils.isString(path)) {\n cookie.push('path=' + path);\n }\n\n if (utils.isString(domain)) {\n cookie.push('domain=' + domain);\n }\n\n if (secure === true) {\n cookie.push('secure');\n }\n\n document.cookie = cookie.join('; ');\n },\n\n read: function read(name) {\n const match = document.cookie.match(new RegExp('(^|;\\\\s*)(' + name + ')=([^;]*)'));\n return (match ? 
decodeURIComponent(match[3]) : null);\n },\n\n remove: function remove(name) {\n this.write(name, '', Date.now() - 86400000);\n }\n };\n })() :\n\n// Non standard browser env (web workers, react-native) lack needed support.\n (function nonStandardBrowserEnv() {\n return {\n write: function write() {},\n read: function read() { return null; },\n remove: function remove() {}\n };\n })();\n","'use strict';\n\nimport isAbsoluteURL from '../helpers/isAbsoluteURL.js';\nimport combineURLs from '../helpers/combineURLs.js';\n\n/**\n * Creates a new URL by combining the baseURL with the requestedURL,\n * only when the requestedURL is not already an absolute URL.\n * If the requestURL is absolute, this function returns the requestedURL untouched.\n *\n * @param {string} baseURL The base URL\n * @param {string} requestedURL Absolute or relative URL to combine\n *\n * @returns {string} The combined full path\n */\nexport default function buildFullPath(baseURL, requestedURL) {\n if (baseURL && !isAbsoluteURL(requestedURL)) {\n return combineURLs(baseURL, requestedURL);\n }\n return requestedURL;\n}\n","'use strict';\n\n/**\n * Determines whether the specified URL is absolute\n *\n * @param {string} url The URL to test\n *\n * @returns {boolean} True if the specified URL is absolute, otherwise false\n */\nexport default function isAbsoluteURL(url) {\n // A URL is considered absolute if it begins with \"://\" or \"//\" (protocol-relative URL).\n // RFC 3986 defines scheme name as a sequence of characters beginning with a letter and followed\n // by any combination of letters, digits, plus, period, or hyphen.\n return /^([a-z][a-z\\d+\\-.]*:)?\\/\\//i.test(url);\n}\n","'use strict';\n\n/**\n * Creates a new URL by combining the specified URLs\n *\n * @param {string} baseURL The base URL\n * @param {string} relativeURL The relative URL\n *\n * @returns {string} The combined URL\n */\nexport default function combineURLs(baseURL, relativeURL) {\n return relativeURL\n ? baseURL.replace(/\\/+$/, '') + '/' + relativeURL.replace(/^\\/+/, '')\n : baseURL;\n}\n","'use strict';\n\nimport utils from './../utils.js';\nimport platform from '../platform/index.js';\n\nexport default platform.isStandardBrowserEnv ?\n\n// Standard browser envs have full support of the APIs needed to test\n// whether the request URL is of the same origin as current location.\n (function standardBrowserEnv() {\n const msie = /(msie|trident)/i.test(navigator.userAgent);\n const urlParsingNode = document.createElement('a');\n let originURL;\n\n /**\n * Parse a URL to discover it's components\n *\n * @param {String} url The URL to be parsed\n * @returns {Object}\n */\n function resolveURL(url) {\n let href = url;\n\n if (msie) {\n // IE needs attribute set twice to normalize properties\n urlParsingNode.setAttribute('href', href);\n href = urlParsingNode.href;\n }\n\n urlParsingNode.setAttribute('href', href);\n\n // urlParsingNode provides the UrlUtils interface - http://url.spec.whatwg.org/#urlutils\n return {\n href: urlParsingNode.href,\n protocol: urlParsingNode.protocol ? urlParsingNode.protocol.replace(/:$/, '') : '',\n host: urlParsingNode.host,\n search: urlParsingNode.search ? urlParsingNode.search.replace(/^\\?/, '') : '',\n hash: urlParsingNode.hash ? 
urlParsingNode.hash.replace(/^#/, '') : '',\n hostname: urlParsingNode.hostname,\n port: urlParsingNode.port,\n pathname: (urlParsingNode.pathname.charAt(0) === '/') ?\n urlParsingNode.pathname :\n '/' + urlParsingNode.pathname\n };\n }\n\n originURL = resolveURL(window.location.href);\n\n /**\n * Determine if a URL shares the same origin as the current location\n *\n * @param {String} requestURL The URL to test\n * @returns {boolean} True if URL shares the same origin, otherwise false\n */\n return function isURLSameOrigin(requestURL) {\n const parsed = (utils.isString(requestURL)) ? resolveURL(requestURL) : requestURL;\n return (parsed.protocol === originURL.protocol &&\n parsed.host === originURL.host);\n };\n })() :\n\n // Non standard browser envs (web workers, react-native) lack needed support.\n (function nonStandardBrowserEnv() {\n return function isURLSameOrigin() {\n return true;\n };\n })();\n","'use strict';\n\n/**\n * Calculate data maxRate\n * @param {Number} [samplesCount= 10]\n * @param {Number} [min= 1000]\n * @returns {Function}\n */\nfunction speedometer(samplesCount, min) {\n samplesCount = samplesCount || 10;\n const bytes = new Array(samplesCount);\n const timestamps = new Array(samplesCount);\n let head = 0;\n let tail = 0;\n let firstSampleTS;\n\n min = min !== undefined ? min : 1000;\n\n return function push(chunkLength) {\n const now = Date.now();\n\n const startedAt = timestamps[tail];\n\n if (!firstSampleTS) {\n firstSampleTS = now;\n }\n\n bytes[head] = chunkLength;\n timestamps[head] = now;\n\n let i = tail;\n let bytesCount = 0;\n\n while (i !== head) {\n bytesCount += bytes[i++];\n i = i % samplesCount;\n }\n\n head = (head + 1) % samplesCount;\n\n if (head === tail) {\n tail = (tail + 1) % samplesCount;\n }\n\n if (now - firstSampleTS < min) {\n return;\n }\n\n const passed = startedAt && now - startedAt;\n\n return passed ? Math.round(bytesCount * 1000 / passed) : undefined;\n };\n}\n\nexport default speedometer;\n","'use strict';\n\nimport utils from './../utils.js';\nimport settle from './../core/settle.js';\nimport cookies from './../helpers/cookies.js';\nimport buildURL from './../helpers/buildURL.js';\nimport buildFullPath from '../core/buildFullPath.js';\nimport isURLSameOrigin from './../helpers/isURLSameOrigin.js';\nimport transitionalDefaults from '../defaults/transitional.js';\nimport AxiosError from '../core/AxiosError.js';\nimport CanceledError from '../cancel/CanceledError.js';\nimport parseProtocol from '../helpers/parseProtocol.js';\nimport platform from '../platform/index.js';\nimport AxiosHeaders from '../core/AxiosHeaders.js';\nimport speedometer from '../helpers/speedometer.js';\n\nfunction progressEventReducer(listener, isDownloadStream) {\n let bytesNotified = 0;\n const _speedometer = speedometer(50, 250);\n\n return e => {\n const loaded = e.loaded;\n const total = e.lengthComputable ? e.total : undefined;\n const progressBytes = loaded - bytesNotified;\n const rate = _speedometer(progressBytes);\n const inRange = loaded <= total;\n\n bytesNotified = loaded;\n\n const data = {\n loaded,\n total,\n progress: total ? (loaded / total) : undefined,\n bytes: progressBytes,\n rate: rate ? rate : undefined,\n estimated: rate && total && inRange ? (total - loaded) / rate : undefined,\n event: e\n };\n\n data[isDownloadStream ? 
'download' : 'upload'] = true;\n\n listener(data);\n };\n}\n\nconst isXHRAdapterSupported = typeof XMLHttpRequest !== 'undefined';\n\nexport default isXHRAdapterSupported && function (config) {\n return new Promise(function dispatchXhrRequest(resolve, reject) {\n let requestData = config.data;\n const requestHeaders = AxiosHeaders.from(config.headers).normalize();\n const responseType = config.responseType;\n let onCanceled;\n function done() {\n if (config.cancelToken) {\n config.cancelToken.unsubscribe(onCanceled);\n }\n\n if (config.signal) {\n config.signal.removeEventListener('abort', onCanceled);\n }\n }\n\n if (utils.isFormData(requestData)) {\n if (platform.isStandardBrowserEnv || platform.isStandardBrowserWebWorkerEnv) {\n requestHeaders.setContentType(false); // Let the browser set it\n } else {\n requestHeaders.setContentType('multipart/form-data;', false); // mobile/desktop app frameworks\n }\n }\n\n let request = new XMLHttpRequest();\n\n // HTTP basic authentication\n if (config.auth) {\n const username = config.auth.username || '';\n const password = config.auth.password ? unescape(encodeURIComponent(config.auth.password)) : '';\n requestHeaders.set('Authorization', 'Basic ' + btoa(username + ':' + password));\n }\n\n const fullPath = buildFullPath(config.baseURL, config.url);\n\n request.open(config.method.toUpperCase(), buildURL(fullPath, config.params, config.paramsSerializer), true);\n\n // Set the request timeout in MS\n request.timeout = config.timeout;\n\n function onloadend() {\n if (!request) {\n return;\n }\n // Prepare the response\n const responseHeaders = AxiosHeaders.from(\n 'getAllResponseHeaders' in request && request.getAllResponseHeaders()\n );\n const responseData = !responseType || responseType === 'text' || responseType === 'json' ?\n request.responseText : request.response;\n const response = {\n data: responseData,\n status: request.status,\n statusText: request.statusText,\n headers: responseHeaders,\n config,\n request\n };\n\n settle(function _resolve(value) {\n resolve(value);\n done();\n }, function _reject(err) {\n reject(err);\n done();\n }, response);\n\n // Clean up request\n request = null;\n }\n\n if ('onloadend' in request) {\n // Use onloadend if available\n request.onloadend = onloadend;\n } else {\n // Listen for ready state to emulate onloadend\n request.onreadystatechange = function handleLoad() {\n if (!request || request.readyState !== 4) {\n return;\n }\n\n // The request errored out and we didn't get a response, this will be\n // handled by onerror instead\n // With one exception: request that using file: protocol, most browsers\n // will return status as 0 even though it's a successful request\n if (request.status === 0 && !(request.responseURL && request.responseURL.indexOf('file:') === 0)) {\n return;\n }\n // readystate handler is calling before onerror or ontimeout handlers,\n // so we should call onloadend on the next 'tick'\n setTimeout(onloadend);\n };\n }\n\n // Handle browser request cancellation (as opposed to a manual cancellation)\n request.onabort = function handleAbort() {\n if (!request) {\n return;\n }\n\n reject(new AxiosError('Request aborted', AxiosError.ECONNABORTED, config, request));\n\n // Clean up request\n request = null;\n };\n\n // Handle low level network errors\n request.onerror = function handleError() {\n // Real errors are hidden from us by the browser\n // onerror should only fire if it's a network error\n reject(new AxiosError('Network Error', AxiosError.ERR_NETWORK, config, request));\n\n // 
Clean up request\n request = null;\n };\n\n // Handle timeout\n request.ontimeout = function handleTimeout() {\n let timeoutErrorMessage = config.timeout ? 'timeout of ' + config.timeout + 'ms exceeded' : 'timeout exceeded';\n const transitional = config.transitional || transitionalDefaults;\n if (config.timeoutErrorMessage) {\n timeoutErrorMessage = config.timeoutErrorMessage;\n }\n reject(new AxiosError(\n timeoutErrorMessage,\n transitional.clarifyTimeoutError ? AxiosError.ETIMEDOUT : AxiosError.ECONNABORTED,\n config,\n request));\n\n // Clean up request\n request = null;\n };\n\n // Add xsrf header\n // This is only done if running in a standard browser environment.\n // Specifically not if we're in a web worker, or react-native.\n if (platform.isStandardBrowserEnv) {\n // Add xsrf header\n const xsrfValue = (config.withCredentials || isURLSameOrigin(fullPath))\n && config.xsrfCookieName && cookies.read(config.xsrfCookieName);\n\n if (xsrfValue) {\n requestHeaders.set(config.xsrfHeaderName, xsrfValue);\n }\n }\n\n // Remove Content-Type if data is undefined\n requestData === undefined && requestHeaders.setContentType(null);\n\n // Add headers to the request\n if ('setRequestHeader' in request) {\n utils.forEach(requestHeaders.toJSON(), function setRequestHeader(val, key) {\n request.setRequestHeader(key, val);\n });\n }\n\n // Add withCredentials to request if needed\n if (!utils.isUndefined(config.withCredentials)) {\n request.withCredentials = !!config.withCredentials;\n }\n\n // Add responseType to request if needed\n if (responseType && responseType !== 'json') {\n request.responseType = config.responseType;\n }\n\n // Handle progress if needed\n if (typeof config.onDownloadProgress === 'function') {\n request.addEventListener('progress', progressEventReducer(config.onDownloadProgress, true));\n }\n\n // Not all browsers support upload events\n if (typeof config.onUploadProgress === 'function' && request.upload) {\n request.upload.addEventListener('progress', progressEventReducer(config.onUploadProgress));\n }\n\n if (config.cancelToken || config.signal) {\n // Handle cancellation\n // eslint-disable-next-line func-names\n onCanceled = cancel => {\n if (!request) {\n return;\n }\n reject(!cancel || cancel.type ? new CanceledError(null, config, request) : cancel);\n request.abort();\n request = null;\n };\n\n config.cancelToken && config.cancelToken.subscribe(onCanceled);\n if (config.signal) {\n config.signal.aborted ? 
onCanceled() : config.signal.addEventListener('abort', onCanceled);\n }\n }\n\n const protocol = parseProtocol(fullPath);\n\n if (protocol && platform.protocols.indexOf(protocol) === -1) {\n reject(new AxiosError('Unsupported protocol ' + protocol + ':', AxiosError.ERR_BAD_REQUEST, config));\n return;\n }\n\n\n // Send the request\n request.send(requestData || null);\n });\n}\n","'use strict';\n\nimport AxiosError from './AxiosError.js';\n\n/**\n * Resolve or reject a Promise based on response status.\n *\n * @param {Function} resolve A function that resolves the promise.\n * @param {Function} reject A function that rejects the promise.\n * @param {object} response The response.\n *\n * @returns {object} The response.\n */\nexport default function settle(resolve, reject, response) {\n const validateStatus = response.config.validateStatus;\n if (!response.status || !validateStatus || validateStatus(response.status)) {\n resolve(response);\n } else {\n reject(new AxiosError(\n 'Request failed with status code ' + response.status,\n [AxiosError.ERR_BAD_REQUEST, AxiosError.ERR_BAD_RESPONSE][Math.floor(response.status / 100) - 4],\n response.config,\n response.request,\n response\n ));\n }\n}\n","'use strict';\n\nexport default function parseProtocol(url) {\n const match = /^([-+\\w]{1,25})(:?\\/\\/|:)/.exec(url);\n return match && match[1] || '';\n}\n","import utils from '../utils.js';\nimport httpAdapter from './http.js';\nimport xhrAdapter from './xhr.js';\nimport AxiosError from \"../core/AxiosError.js\";\n\nconst knownAdapters = {\n http: httpAdapter,\n xhr: xhrAdapter\n}\n\nutils.forEach(knownAdapters, (fn, value) => {\n if(fn) {\n try {\n Object.defineProperty(fn, 'name', {value});\n } catch (e) {\n // eslint-disable-next-line no-empty\n }\n Object.defineProperty(fn, 'adapterName', {value});\n }\n});\n\nexport default {\n getAdapter: (adapters) => {\n adapters = utils.isArray(adapters) ? adapters : [adapters];\n\n const {length} = adapters;\n let nameOrAdapter;\n let adapter;\n\n for (let i = 0; i < length; i++) {\n nameOrAdapter = adapters[i];\n if((adapter = utils.isString(nameOrAdapter) ? 
knownAdapters[nameOrAdapter.toLowerCase()] : nameOrAdapter)) {\n break;\n }\n }\n\n if (!adapter) {\n if (adapter === false) {\n throw new AxiosError(\n `Adapter ${nameOrAdapter} is not supported by the environment`,\n 'ERR_NOT_SUPPORT'\n );\n }\n\n throw new Error(\n utils.hasOwnProp(knownAdapters, nameOrAdapter) ?\n `Adapter '${nameOrAdapter}' is not available in the build` :\n `Unknown adapter '${nameOrAdapter}'`\n );\n }\n\n if (!utils.isFunction(adapter)) {\n throw new TypeError('adapter is not a function');\n }\n\n return adapter;\n },\n adapters: knownAdapters\n}\n","// eslint-disable-next-line strict\nexport default null;\n","'use strict';\n\nimport transformData from './transformData.js';\nimport isCancel from '../cancel/isCancel.js';\nimport defaults from '../defaults/index.js';\nimport CanceledError from '../cancel/CanceledError.js';\nimport AxiosHeaders from '../core/AxiosHeaders.js';\nimport adapters from \"../adapters/adapters.js\";\n\n/**\n * Throws a `CanceledError` if cancellation has been requested.\n *\n * @param {Object} config The config that is to be used for the request\n *\n * @returns {void}\n */\nfunction throwIfCancellationRequested(config) {\n if (config.cancelToken) {\n config.cancelToken.throwIfRequested();\n }\n\n if (config.signal && config.signal.aborted) {\n throw new CanceledError(null, config);\n }\n}\n\n/**\n * Dispatch a request to the server using the configured adapter.\n *\n * @param {object} config The config that is to be used for the request\n *\n * @returns {Promise} The Promise to be fulfilled\n */\nexport default function dispatchRequest(config) {\n throwIfCancellationRequested(config);\n\n config.headers = AxiosHeaders.from(config.headers);\n\n // Transform request data\n config.data = transformData.call(\n config,\n config.transformRequest\n );\n\n if (['post', 'put', 'patch'].indexOf(config.method) !== -1) {\n config.headers.setContentType('application/x-www-form-urlencoded', false);\n }\n\n const adapter = adapters.getAdapter(config.adapter || defaults.adapter);\n\n return adapter(config).then(function onAdapterResolution(response) {\n throwIfCancellationRequested(config);\n\n // Transform response data\n response.data = transformData.call(\n config,\n config.transformResponse,\n response\n );\n\n response.headers = AxiosHeaders.from(response.headers);\n\n return response;\n }, function onAdapterRejection(reason) {\n if (!isCancel(reason)) {\n throwIfCancellationRequested(config);\n\n // Transform response data\n if (reason && reason.response) {\n reason.response.data = transformData.call(\n config,\n config.transformResponse,\n reason.response\n );\n reason.response.headers = AxiosHeaders.from(reason.response.headers);\n }\n }\n\n return Promise.reject(reason);\n });\n}\n","'use strict';\n\nimport utils from '../utils.js';\nimport AxiosHeaders from \"./AxiosHeaders.js\";\n\nconst headersToObject = (thing) => thing instanceof AxiosHeaders ? 
thing.toJSON() : thing;\n\n/**\n * Config-specific merge-function which creates a new config-object\n * by merging two configuration objects together.\n *\n * @param {Object} config1\n * @param {Object} config2\n *\n * @returns {Object} New object resulting from merging config2 to config1\n */\nexport default function mergeConfig(config1, config2) {\n // eslint-disable-next-line no-param-reassign\n config2 = config2 || {};\n const config = {};\n\n function getMergedValue(target, source, caseless) {\n if (utils.isPlainObject(target) && utils.isPlainObject(source)) {\n return utils.merge.call({caseless}, target, source);\n } else if (utils.isPlainObject(source)) {\n return utils.merge({}, source);\n } else if (utils.isArray(source)) {\n return source.slice();\n }\n return source;\n }\n\n // eslint-disable-next-line consistent-return\n function mergeDeepProperties(a, b, caseless) {\n if (!utils.isUndefined(b)) {\n return getMergedValue(a, b, caseless);\n } else if (!utils.isUndefined(a)) {\n return getMergedValue(undefined, a, caseless);\n }\n }\n\n // eslint-disable-next-line consistent-return\n function valueFromConfig2(a, b) {\n if (!utils.isUndefined(b)) {\n return getMergedValue(undefined, b);\n }\n }\n\n // eslint-disable-next-line consistent-return\n function defaultToConfig2(a, b) {\n if (!utils.isUndefined(b)) {\n return getMergedValue(undefined, b);\n } else if (!utils.isUndefined(a)) {\n return getMergedValue(undefined, a);\n }\n }\n\n // eslint-disable-next-line consistent-return\n function mergeDirectKeys(a, b, prop) {\n if (prop in config2) {\n return getMergedValue(a, b);\n } else if (prop in config1) {\n return getMergedValue(undefined, a);\n }\n }\n\n const mergeMap = {\n url: valueFromConfig2,\n method: valueFromConfig2,\n data: valueFromConfig2,\n baseURL: defaultToConfig2,\n transformRequest: defaultToConfig2,\n transformResponse: defaultToConfig2,\n paramsSerializer: defaultToConfig2,\n timeout: defaultToConfig2,\n timeoutMessage: defaultToConfig2,\n withCredentials: defaultToConfig2,\n adapter: defaultToConfig2,\n responseType: defaultToConfig2,\n xsrfCookieName: defaultToConfig2,\n xsrfHeaderName: defaultToConfig2,\n onUploadProgress: defaultToConfig2,\n onDownloadProgress: defaultToConfig2,\n decompress: defaultToConfig2,\n maxContentLength: defaultToConfig2,\n maxBodyLength: defaultToConfig2,\n beforeRedirect: defaultToConfig2,\n transport: defaultToConfig2,\n httpAgent: defaultToConfig2,\n httpsAgent: defaultToConfig2,\n cancelToken: defaultToConfig2,\n socketPath: defaultToConfig2,\n responseEncoding: defaultToConfig2,\n validateStatus: mergeDirectKeys,\n headers: (a, b) => mergeDeepProperties(headersToObject(a), headersToObject(b), true)\n };\n\n utils.forEach(Object.keys(Object.assign({}, config1, config2)), function computeConfigValue(prop) {\n const merge = mergeMap[prop] || mergeDeepProperties;\n const configValue = merge(config1[prop], config2[prop], prop);\n (utils.isUndefined(configValue) && merge !== mergeDirectKeys) || (config[prop] = configValue);\n });\n\n return config;\n}\n","export const VERSION = \"1.4.0\";","'use strict';\n\nimport {VERSION} from '../env/data.js';\nimport AxiosError from '../core/AxiosError.js';\n\nconst validators = {};\n\n// eslint-disable-next-line func-names\n['object', 'boolean', 'number', 'function', 'string', 'symbol'].forEach((type, i) => {\n validators[type] = function validator(thing) {\n return typeof thing === type || 'a' + (i < 1 ? 
'n ' : ' ') + type;\n };\n});\n\nconst deprecatedWarnings = {};\n\n/**\n * Transitional option validator\n *\n * @param {function|boolean?} validator - set to false if the transitional option has been removed\n * @param {string?} version - deprecated version / removed since version\n * @param {string?} message - some message with additional info\n *\n * @returns {function}\n */\nvalidators.transitional = function transitional(validator, version, message) {\n function formatMessage(opt, desc) {\n return '[Axios v' + VERSION + '] Transitional option \\'' + opt + '\\'' + desc + (message ? '. ' + message : '');\n }\n\n // eslint-disable-next-line func-names\n return (value, opt, opts) => {\n if (validator === false) {\n throw new AxiosError(\n formatMessage(opt, ' has been removed' + (version ? ' in ' + version : '')),\n AxiosError.ERR_DEPRECATED\n );\n }\n\n if (version && !deprecatedWarnings[opt]) {\n deprecatedWarnings[opt] = true;\n // eslint-disable-next-line no-console\n console.warn(\n formatMessage(\n opt,\n ' has been deprecated since v' + version + ' and will be removed in the near future'\n )\n );\n }\n\n return validator ? validator(value, opt, opts) : true;\n };\n};\n\n/**\n * Assert object's properties type\n *\n * @param {object} options\n * @param {object} schema\n * @param {boolean?} allowUnknown\n *\n * @returns {object}\n */\n\nfunction assertOptions(options, schema, allowUnknown) {\n if (typeof options !== 'object') {\n throw new AxiosError('options must be an object', AxiosError.ERR_BAD_OPTION_VALUE);\n }\n const keys = Object.keys(options);\n let i = keys.length;\n while (i-- > 0) {\n const opt = keys[i];\n const validator = schema[opt];\n if (validator) {\n const value = options[opt];\n const result = value === undefined || validator(value, opt, options);\n if (result !== true) {\n throw new AxiosError('option ' + opt + ' must be ' + result, AxiosError.ERR_BAD_OPTION_VALUE);\n }\n continue;\n }\n if (allowUnknown !== true) {\n throw new AxiosError('Unknown option ' + opt, AxiosError.ERR_BAD_OPTION);\n }\n }\n}\n\nexport default {\n assertOptions,\n validators\n};\n","'use strict';\n\nimport utils from './../utils.js';\nimport buildURL from '../helpers/buildURL.js';\nimport InterceptorManager from './InterceptorManager.js';\nimport dispatchRequest from './dispatchRequest.js';\nimport mergeConfig from './mergeConfig.js';\nimport buildFullPath from './buildFullPath.js';\nimport validator from '../helpers/validator.js';\nimport AxiosHeaders from './AxiosHeaders.js';\n\nconst validators = validator.validators;\n\n/**\n * Create a new instance of Axios\n *\n * @param {Object} instanceConfig The default config for the instance\n *\n * @return {Axios} A new instance of Axios\n */\nclass Axios {\n constructor(instanceConfig) {\n this.defaults = instanceConfig;\n this.interceptors = {\n request: new InterceptorManager(),\n response: new InterceptorManager()\n };\n }\n\n /**\n * Dispatch a request\n *\n * @param {String|Object} configOrUrl The config specific for this request (merged with this.defaults)\n * @param {?Object} config\n *\n * @returns {Promise} The Promise to be fulfilled\n */\n request(configOrUrl, config) {\n /*eslint no-param-reassign:0*/\n // Allow for axios('example/url'[, config]) a la fetch API\n if (typeof configOrUrl === 'string') {\n config = config || {};\n config.url = configOrUrl;\n } else {\n config = configOrUrl || {};\n }\n\n config = mergeConfig(this.defaults, config);\n\n const {transitional, paramsSerializer, headers} = config;\n\n if (transitional 
!== undefined) {\n validator.assertOptions(transitional, {\n silentJSONParsing: validators.transitional(validators.boolean),\n forcedJSONParsing: validators.transitional(validators.boolean),\n clarifyTimeoutError: validators.transitional(validators.boolean)\n }, false);\n }\n\n if (paramsSerializer != null) {\n if (utils.isFunction(paramsSerializer)) {\n config.paramsSerializer = {\n serialize: paramsSerializer\n }\n } else {\n validator.assertOptions(paramsSerializer, {\n encode: validators.function,\n serialize: validators.function\n }, true);\n }\n }\n\n // Set config.method\n config.method = (config.method || this.defaults.method || 'get').toLowerCase();\n\n let contextHeaders;\n\n // Flatten headers\n contextHeaders = headers && utils.merge(\n headers.common,\n headers[config.method]\n );\n\n contextHeaders && utils.forEach(\n ['delete', 'get', 'head', 'post', 'put', 'patch', 'common'],\n (method) => {\n delete headers[method];\n }\n );\n\n config.headers = AxiosHeaders.concat(contextHeaders, headers);\n\n // filter out skipped interceptors\n const requestInterceptorChain = [];\n let synchronousRequestInterceptors = true;\n this.interceptors.request.forEach(function unshiftRequestInterceptors(interceptor) {\n if (typeof interceptor.runWhen === 'function' && interceptor.runWhen(config) === false) {\n return;\n }\n\n synchronousRequestInterceptors = synchronousRequestInterceptors && interceptor.synchronous;\n\n requestInterceptorChain.unshift(interceptor.fulfilled, interceptor.rejected);\n });\n\n const responseInterceptorChain = [];\n this.interceptors.response.forEach(function pushResponseInterceptors(interceptor) {\n responseInterceptorChain.push(interceptor.fulfilled, interceptor.rejected);\n });\n\n let promise;\n let i = 0;\n let len;\n\n if (!synchronousRequestInterceptors) {\n const chain = [dispatchRequest.bind(this), undefined];\n chain.unshift.apply(chain, requestInterceptorChain);\n chain.push.apply(chain, responseInterceptorChain);\n len = chain.length;\n\n promise = Promise.resolve(config);\n\n while (i < len) {\n promise = promise.then(chain[i++], chain[i++]);\n }\n\n return promise;\n }\n\n len = requestInterceptorChain.length;\n\n let newConfig = config;\n\n i = 0;\n\n while (i < len) {\n const onFulfilled = requestInterceptorChain[i++];\n const onRejected = requestInterceptorChain[i++];\n try {\n newConfig = onFulfilled(newConfig);\n } catch (error) {\n onRejected.call(this, error);\n break;\n }\n }\n\n try {\n promise = dispatchRequest.call(this, newConfig);\n } catch (error) {\n return Promise.reject(error);\n }\n\n i = 0;\n len = responseInterceptorChain.length;\n\n while (i < len) {\n promise = promise.then(responseInterceptorChain[i++], responseInterceptorChain[i++]);\n }\n\n return promise;\n }\n\n getUri(config) {\n config = mergeConfig(this.defaults, config);\n const fullPath = buildFullPath(config.baseURL, config.url);\n return buildURL(fullPath, config.params, config.paramsSerializer);\n }\n}\n\n// Provide aliases for supported request methods\nutils.forEach(['delete', 'get', 'head', 'options'], function forEachMethodNoData(method) {\n /*eslint func-names:0*/\n Axios.prototype[method] = function(url, config) {\n return this.request(mergeConfig(config || {}, {\n method,\n url,\n data: (config || {}).data\n }));\n };\n});\n\nutils.forEach(['post', 'put', 'patch'], function forEachMethodWithData(method) {\n /*eslint func-names:0*/\n\n function generateHTTPMethod(isForm) {\n return function httpMethod(url, data, config) {\n return this.request(mergeConfig(config 
|| {}, {\n method,\n headers: isForm ? {\n 'Content-Type': 'multipart/form-data'\n } : {},\n url,\n data\n }));\n };\n }\n\n Axios.prototype[method] = generateHTTPMethod();\n\n Axios.prototype[method + 'Form'] = generateHTTPMethod(true);\n});\n\nexport default Axios;\n","'use strict';\n\nimport CanceledError from './CanceledError.js';\n\n/**\n * A `CancelToken` is an object that can be used to request cancellation of an operation.\n *\n * @param {Function} executor The executor function.\n *\n * @returns {CancelToken}\n */\nclass CancelToken {\n constructor(executor) {\n if (typeof executor !== 'function') {\n throw new TypeError('executor must be a function.');\n }\n\n let resolvePromise;\n\n this.promise = new Promise(function promiseExecutor(resolve) {\n resolvePromise = resolve;\n });\n\n const token = this;\n\n // eslint-disable-next-line func-names\n this.promise.then(cancel => {\n if (!token._listeners) return;\n\n let i = token._listeners.length;\n\n while (i-- > 0) {\n token._listeners[i](cancel);\n }\n token._listeners = null;\n });\n\n // eslint-disable-next-line func-names\n this.promise.then = onfulfilled => {\n let _resolve;\n // eslint-disable-next-line func-names\n const promise = new Promise(resolve => {\n token.subscribe(resolve);\n _resolve = resolve;\n }).then(onfulfilled);\n\n promise.cancel = function reject() {\n token.unsubscribe(_resolve);\n };\n\n return promise;\n };\n\n executor(function cancel(message, config, request) {\n if (token.reason) {\n // Cancellation has already been requested\n return;\n }\n\n token.reason = new CanceledError(message, config, request);\n resolvePromise(token.reason);\n });\n }\n\n /**\n * Throws a `CanceledError` if cancellation has been requested.\n */\n throwIfRequested() {\n if (this.reason) {\n throw this.reason;\n }\n }\n\n /**\n * Subscribe to the cancel signal\n */\n\n subscribe(listener) {\n if (this.reason) {\n listener(this.reason);\n return;\n }\n\n if (this._listeners) {\n this._listeners.push(listener);\n } else {\n this._listeners = [listener];\n }\n }\n\n /**\n * Unsubscribe from the cancel signal\n */\n\n unsubscribe(listener) {\n if (!this._listeners) {\n return;\n }\n const index = this._listeners.indexOf(listener);\n if (index !== -1) {\n this._listeners.splice(index, 1);\n }\n }\n\n /**\n * Returns an object that contains a new `CancelToken` and a function that, when called,\n * cancels the `CancelToken`.\n */\n static source() {\n let cancel;\n const token = new CancelToken(function executor(c) {\n cancel = c;\n });\n return {\n token,\n cancel\n };\n }\n}\n\nexport default CancelToken;\n","const HttpStatusCode = {\n Continue: 100,\n SwitchingProtocols: 101,\n Processing: 102,\n EarlyHints: 103,\n Ok: 200,\n Created: 201,\n Accepted: 202,\n NonAuthoritativeInformation: 203,\n NoContent: 204,\n ResetContent: 205,\n PartialContent: 206,\n MultiStatus: 207,\n AlreadyReported: 208,\n ImUsed: 226,\n MultipleChoices: 300,\n MovedPermanently: 301,\n Found: 302,\n SeeOther: 303,\n NotModified: 304,\n UseProxy: 305,\n Unused: 306,\n TemporaryRedirect: 307,\n PermanentRedirect: 308,\n BadRequest: 400,\n Unauthorized: 401,\n PaymentRequired: 402,\n Forbidden: 403,\n NotFound: 404,\n MethodNotAllowed: 405,\n NotAcceptable: 406,\n ProxyAuthenticationRequired: 407,\n RequestTimeout: 408,\n Conflict: 409,\n Gone: 410,\n LengthRequired: 411,\n PreconditionFailed: 412,\n PayloadTooLarge: 413,\n UriTooLong: 414,\n UnsupportedMediaType: 415,\n RangeNotSatisfiable: 416,\n ExpectationFailed: 417,\n ImATeapot: 418,\n 
[Embedded source map of the Neural Insights GUI production bundle (neural_insights/gui build output): `sourcesContent` entries for the bundled axios helpers (`HttpStatusCode.js`, `axios.js`, `spread.js`, `isAxiosError.js`) and the Intel-copyrighted React entry points (`App.js`, which creates the shared `axios.create({ baseURL: '/' })` API client and stores the `?token=` query value in localStorage; `reportWebVitals.js`; `index.js`), followed by the webpack `names` identifier table for the bundled dependencies (cytoscape, elkjs, web-vitals, bootstrap, etc.).]
ache","camel2dash","str","replace","v","toLowerCase","dash2camel","toUpperCase","prependCamel","prefix","substring","capitalize","charAt","number","rgba","rgbaNoBackRefs","hsla","hslaNoBackRefs","ascending","b","extend","j","color2tuple","color","colors","colorname2tuple","hex","r","g","base","parseInt","hex2tuple","rgb","m","RegExp","exec","isPct","channel","parseFloat","Math","floor","atLeastOneIsPct","allArePct","alpha","rgb2tuple","hsl","s","l","hue2rgb","q","t","round","hsl2tuple","transparent","aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkgrey","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkslategrey","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dimgrey","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","gainsboro","ghostwhite","gold","goldenrod","gray","grey","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightgrey","lightpink","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightslategrey","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumspringgreen","mediumturquoise","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","slategrey","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen","setMap","map","Error","getMap","performance","pnow","now","Date","raf","requestAnimationFrame","mozRequestAnimationFrame","webkitRequestAnimationFrame","msRequestAnimationFrame","performanceNow","DEFAULT_HASH_SEED","DEFAULT_HASH_SEED_ALT","hashIterableInts","entry","hash","hashInt","num","hashIntAlt","seed","combineHashesArray","hashes","hashArrays","hashes1","hashes2","hashString","charCodeAt","hashStrings","hashStringsArray","strs","warningsEnabled","warnSupported","console","warn","traceSupported","trace","MAX_INT$1","Number","MAX_SAFE_INTEGER","trueify","falsify","zeroify","noop$1","error","msg","warnings","enabled","log","copy","clone","uuid","random","_staticEmptyObject","staticEmptyObject","defaults$g","_defaults","opts","filledOpts","optVal","removeFromArray","oneCopy","splice","clearArray","getPrefixedProperty","propName","setPrefixedProperty","Map$1","Map","ObjectMap","_obj","ObjectSet","arrayOrObjectSet","create","size","toArray","_this","has","callback","Set$1","Set","Element","restore","group","_p","autoWidth","autoHeight","autoPadding","compoundBoundsClean","listeners","rstyle","styleCxts","styleKeys","selected","selectable","locked","grabbed","grabbable","pannable","active","animation","current","queue","rscratch","traversalCache","backgrounding","bbCache","bbCach
eShift","bodyBounds","overlayBounds","labelBounds","all","main","arrowBounds","renderedPosition","rpos","split","cls","createEmitter","bypass","css","defineSearch","bfs","dfs","roots","directed","root","visit","found","Q","connectedNodes","connectedBy","id2depth","V","_this$byGroup","byGroup","vi","viId","unshift","_loop","shift","pop","vId","depth","prevEdge","prevNode","same","vwEdges","connectedEdges","wId","_ret","connectedEles","path","elesfn$v","breadthFirstSearch","depthFirstSearch","dijkstraDefaults","weight","elesfn$u","dijkstra","_dijkstraDefaults","weightFn","dist","knownDist","unmergeBy","isLoop","getDist","setDist","updateItem","Infinity","distBetween","u","smallestEdge","uvs","edgesTo","edgesWith","intersect","smallestDistance","_weight","smalletsDist","uid","neighbors","neighborhood","vid","vDist","alt","distanceTo","pathTo","S","spawn","elesfn$t","kruskal","numNodes","forest","A","findSetIndex","sort","setUIndex","setVIndex","setU","setV","merge","aStarDefaults","goal","heuristic","elesfn$s","aStar","_aStarDefaults","cMin","cMinId","sid","tid","gScore","fScore","closedSetIds","openSet","openSetIds","cameFrom","cameFromEdge","addToOpenSet","steps","pathNode","pathNodeId","pathEdge","distance","hasElementWithId","wSrc","wTgt","wid","tempScore","floydWarshallDefaults","elesfn$r","floydWarshall","_floydWarshallDefault","N","Nsq","indexOf","atIndex","edgeNext","st","ts","ik","_j","ij","kj","indexOfArgEle","getArgEle","res","to","fromNode","bellmanFordDefaults","elesfn$q","bellmanFord","_bellmanFordDefaults","infoMap","hasNegativeWeightCycle","negativeWeightCycles","numEdges","getInfo","getNodeFromTo","$","info","pred","replacedEdge","checkForEdgeReplacement","node1","node2","info1","info2","srcInfo","tgtInfo","negativeWeightCycleIds","_edge","_src","_tgt","_weight2","srcDist","tgtDist","findNegativeWeightCycles","negativeNodes","numNegativeNodes","start","cycle","smallestId","smallestIndex","c","concat","cycleId","thisStart","_getInfo","sqrt2","sqrt","collapse","edgeIndex","nodeMap","remainingEdges","edgeInfo","sourceIn","targetIn","partition1","partition2","newEdges","contractUntil","metaNodeMap","sizeLimit","elesfn$p","kargerStein","numIter","ceil","pow","LN2","stopSize","edgeIndexes","minCutSize","minCutEdgeIndexes","minCutNodeMap","metaNodeMap2","copyNodesMap","iter","_i4","edgesState","edgesState2","res1","res2","cut","witnessNodePartition","_i5","partitionId","constructComponent","subset","component","contains","components","modelToRenderedPosition","renderedToModelPosition","array2point","getAngleFromDisp","dispX","dispY","atan2","PI","log2","signum","p1","p2","sqdist","dx","dy","inPlaceSumNormalize","total","qbezierAt","p0","qbezierPtAt","bound","min","max","makeBoundingBox","bb","x1","y1","x2","y2","expandBoundingBoxByPoint","expandBoundingBox","expandBoundingBoxSides","top","right","bottom","left","_padding","assignBoundingBox","bb1","bb2","boundingBoxesIntersect","inBoundingBox","boundingBoxInBoundingBox","roundRectangleIntersectLine","nodeX","nodeY","straightLineIntersections","cornerRadius","getRoundRectangleRadius","halfWidth","halfHeight","topStartY","finiteLinesIntersect","rightStartX","bottomStartY","arcIntersections","leftStartX","topLeftCenterX","topLeftCenterY","intersectLineCircle","topRightCenterX","topRightCenterY","bottomRightCenterX","bottomRightCenterY","bottomLeftCenterX","bottomLeftCenterY","inLineVicinity","lx1","ly1","lx2","ly2","tolerance","inBezierVicinity","x3","y3","sqdistToQuadraticBezier","result","discriminant","dum1","term1","r13","acos","cos
","solveCubic","index","abs","curX","curY","distSquared","minDistanceSquared","sqdistToFiniteLine","offset","line","lineSq","hypSq","dotProduct","adjSq","pointInsidePolygonPoints","points","up","pointInsidePolygon","basePoints","centerX","centerY","direction","angle","transformedPoints","atan","sin","expandedLineSet","expandPolygon","joinLines","lineSet","currentLineStartX","currentLineStartY","currentLineEndX","currentLineEndY","nextLineStartX","nextLineStartY","nextLineEndX","nextLineEndY","vertices","intersection","pad","currentPointX","currentPointY","nextPointX","nextPointY","offsetX","offsetY","offsetLength","normalizedOffsetX","normalizedOffsetY","checkInEllipse","radius","f","t1","t2","tMin","tMax","inRangeParams","nearIntersectionX","nearIntersectionY","midOfThree","x4","y4","infiniteLines","dx13","dx21","dx43","dy13","dy21","dy43","ua_t","ub_t","u_b","ua","ub","_min","polygonIntersectLine","currentX","currentY","nextX","nextY","intersections","doTransform","shortenIntersection","amount","disp","lenRatio","generateUnitNgonPointsFitToSquare","sides","rotationRadians","generateUnitNgonPoints","fitPolygonToSquare","minX","minY","maxX","maxY","sx","sy","_i6","increment","startAngle","currentAngle","getRoundPolygonRadius","getBarrelCurveConstants","heightOffset","widthOffset","ctrlPtOffsetPct","pageRankDefaults","dampingFactor","precision","iterations","elesfn$o","pageRank","_pageRankDefaults","numNodesSqd","matrix","columnSum","additionalProb","srcId","tgtId","indexOfId","_n3","previous","eigenvector","temp","_j2","_n4","diff","_i7","delta","rank","defaults$f","elesfn$n","degreeCentralityNormalized","indegrees","outdegrees","maxIndegree","maxOutdegree","_currDegree","degreeCentrality","indegree","outdegree","degrees","maxDegree","currDegree","degree","callingEles","_options","incoming","outgoing","k_in","k_out","s_in","s_out","connEdges","dc","dcn","degreeCentralityNormalised","defaults$e","harmonic","elesfn$m","closenessCentralityNormalized","closenesses","maxCloseness","fw","currCloseness","node_i","closeness","closenessCentrality","_defaults2","totalDistance","cc","ccn","closenessCentralityNormalised","defaults$d","elesfn$l","betweennessCentrality","weighted","_C","C","outgoers","openNeighborhood","P","_vid","empty","_v","vEle","getElementById","edgeWeight","_w","_w2","_v2","betweenness","betweennessNormalized","betweennessNormalised","bc","defaults$c","expandFactor","inflateFactor","multFactor","maxIterations","attributes","getSimilarity$1","normalize","M","sum","col","row","_row","mmult","B","expand","_M","inflate","hasConverged","n2","roundFactor","isDuplicate","c1","c2","markovClustering","setOptions$3","id2position","sim","addLoops","isStillMoving","clusters","cluster","assign$2","removeDuplicates","markovClustering$1","mcl","identity","absDiff","addAbsDiff","addSquaredDiff","maxAbsDiff","currentMax","getDistance","getP","getQ","init","post","dim","distances","euclidean","squaredEuclidean","manhattan","clusteringDistance","method","nodeP","nodeQ","impl","defaults$b","sensitivityThreshold","testMode","testCentroids","setOptions$2","type","centroid","mode","randomCentroids","ndim","centroids","classify","buildCluster","assignment","haveMatricesConverged","v1","v2","seenBefore","medoids","randomMedoids","findCost","potentialNewMedoid","cost","updateCentroids","U","numerator","denominator","_n2","updateMembership","_U","fuzzyCMeans","_c2","_c3","assign$1","degreeOfMembership","kClustering","kMeans","newCentroid","kMedoids","curCost","minCosts","fcm","defaults$a","linkage","threshol
d","addDendrogram","dendrogramDepth","linkageAliases","mergeClosest","dists","mins","minKey","attrs","n1","_dist","merged","key1","key2","getAllChildren","buildDendrogram","leftStr","rightStr","buildClustersFromTree","leaves","hierarchicalClustering","preferredAlias","setOptions$1","retClusters","hierarchicalClustering$1","hca","defaults$9","preference","damping","minIterations","getSimilarity","attr","getPreference","begin","end","includeHoles","off","isFinite","mid","median","mean","assignClusters","exemplars","ei","_ei","affinityPropagation","R","dmp","pref","validPrefs","some","setOptions","old","Rp","se","_i8","max2","maxI","AS","_i9","_j3","_j4","K","_i10","E","_sum","_i11","_j5","exemplarsIndices","indices","findExemplars","clusterIndices","ii","maxSum","_i12","clusterIndex","affinityPropagation$1","ap","hierholzerDefaults","elesfn$k","hierholzer","oddIn","oddOut","startVertex","_hierholzerDefaults","dflag","ind","outd","d1","d2","isEdge","trail","walk","adj","adjTail","adjHead","currentNode","subtour","hopcroftTarjanBiconnected","edgeCount","stack","visitedEdges","biconnectedSearch","low","cutVertex","sourceId","targetId","otherNodeId","edgeId","cutset","nodeId","buildComponent","cutVertices","tarjanStronglyConnected","stronglyConnectedSearch","sourceNodeId","explored","targetNodeId","componentNodes","componentEdges","difference","elesfn$j","htbc","htb","hopcroftTarjanBiconnectedComponents","tsc","tscc","tarjanStronglyConnectedComponents","api","executor","state","fulfillValue","rejectReason","onFulfilled","onRejected","proxy","fulfill","reject","deliver","curr","resolver","execute","execute_handlers","handlers","func","setImmediate","cb","resolve","resolved","ps","resolveAll","rejectAll","vals","doneCount","Promise$1","Promise","Animation","opts2","isCore","isEle","duration","started","playing","hooked","applying","progress","completes","frames","complete","startPosition","startStyle","getAnimationStartStyle","startPan","startZoom","anifn","hook","tAni","addToAnimationPool","play","stopped","pause","rewind","fastforward","time","wasPlaying","completed","swap","_pa","startStyleProp","run","running","define$3","animated","self","styleEnabled","clearQueue","delay","delayAnimation","properties","selfIsArrayLike","isEles","getPropsList","panBy","cyPan","center","centre","centerPan","getCenterPan","fitVp","getFitViewport","boundingBox","vp","getZoomedViewport","zoomed","panned","jumpToEnd","anis","ani_p","notify","define$2","field","bindingEvent","allowBinding","allowSetting","allowGetting","settingEvent","settingTriggersEvent","triggerFnName","immutableKeys","updateStyle","beforeGet","beforeSet","onSet","canSet","change","_ele","removeData","event","triggerEvent","names","i_a","l_a","_i_a","_l_a","_privateFields","_keys","define$1","eventAliasesOn","proto","addListener","listen","unlisten","unbind","removeListener","trigger","emit","pon","promiseOn","events","selector","onArgs","offArgs","elesfn$i","elesfn$h","_classes","changed","classesSet","eleClasses","changedEle","addClass","toggleClass","hasClass","className","toggle","toggleUndefd","il","changedNow","removeClass","flashClass","tokens","metaChar","comparatorOp","boolOp","meta","separator","descendant","child","subject","directedEdge","undirectedEdge","variable","ops","op","Type","stateSelectors","matches","visible","isChildless","isOrphan","isSimple","descending","lookup","selToFn","stateSelectorRegex","cleanMetaChars","$1","replaceLastQuery","examiningQuery","replacementQuery","exprs","regex","populate","_ref","checks","_ref3","_
ref5","_ref7","_ref9","_ref11","_ref12","operator","_ref13","_ref14","_ref15","_ref16","currentSubject","compoundCount","lastQ","edgeQuery","srcTgtQ","_source","_target","nhoodQ","neighbor","parentChildQuery","compound","_child","_parent","_parent2","_child2","pcQChecks","ancChQuery","ancestor","_descendant","_ancestor","_ancestor2","_descendant2","adQChecks","modifier","topChk","topType","regexObj","consumeExpr","remaining","expr","consumed","parse$1","parse","inputText","currentQuery","consumeWhitespace","exprInfo","toStringCache","clean","cleanVal","space","checkToString","check","_operator","_field","_operator2","queryToString","lhs","sub","rhs","reduce","chk","valCmp","fieldVal","fieldStr","valStr","isFieldStr","isFieldNum","isValStr","caseInsensitive","notExpr","isIneqCmp","data$1","matches$1","every","sel","stateSelectorMatches","boolCmp","qA","qB","incomers","ancestors","descendants","matching","selectorFunction","text","Selector","addQuery","invalid","selfn","eq","sameText","otherSel","elesfn$g","allAre","selObj","thisLength","anySame","allAreNeighbors","nhood","allAreNeighbours","equal","equals","fn$5","elesfn$e","arg1","arg2","arg3","arg4","selectorOrEles","tch","ch","cacheHit","elesfn$f","parents","commonAncestors","orphans","stdFilter","nonorphans","eleChildren","siblings","not","forEachCompound","includeSelf","recursiveStep","did","hasCompounds","hasCompoundNodes","addChildren","addParent","addParentAndChildren","forEachDown","forEachUp","forEachUpAndDown","removeScratch","removeRscratch","removeAttr","fn$4","elesfn$c","elesfn$d","defineDegreeFunction","includeLoops","defineDegreeBoundsFunction","degreeFn","minDegree","minIndegree","minOutdegree","totalDegree","beforePositionSet","newPos","silent","oldPos","dirtyBoundingBoxCache","positionDef","validKeys","updateCompoundBounds","dirtyCompoundBoundsCache","silentPosition","positions","_fn","startBatch","_pos","endBatch","silentPositions","silentShift","setting","relativePosition","ppos","_hasParent","_relativeToParent","_origin","_ele2","hasParent","relativeToParent","modelPosition","point","modelPositions","renderedPoint","relativePoint","fn$3","elesfn$b","renderedBoundingBox","emitAndNotify","force","batching","update","includeLabels","pstyle","pfValue","includeOverlays","useCache","leftVal","units","rightVal","topVal","bottomVal","widthBiasDiffs","computeBiasValues","diffLeft","biasDiff","diffRight","biasComplementDiff","heightBiasDiffs","diffTop","diffBottom","paddingObject","relativeTo","computePaddingValues","propDiff","propBias","propBiasComplement","biasTotal","noninf","updateBounds","updateBoundsFromBox","b2","prefixedProperty","updateBoundsFromArrow","bounds","headless","halfArW","arrowWidth","srcX","srcY","tgtX","tgtY","midX","midY","bbs","updateBoundsFromLabel","prefixDash","strValue","labelWidth","labelHeight","labelX","labelY","marginX","marginY","rotation","outlineWidth","halfBorderWidth","lh","lw","lw_2","lh_2","bbPrefix","isAutorotate","isPfValue","theta","xo","yo","rotate","px1y1","px1y2","px2y1","px2y2","bbPrefixRot","bbRot","getKey","tf","incudeNodes","includeEdges","includeMainLabels","includeSourceLabels","includeTargetLabels","getBoundingBoxPosKey","ints","hashIntsArray","cachedBoundingBoxImpl","usingDefOpts","defBbOptsKey","currPosKey","isPosKeySame","bbCachePosKey","isDirty","styleDirty","recalculateRenderedStyle","ex1","ex2","ey1","ey2","manualExpansion","isDisplayed","displayed","overlayPadding","underlayPadding","includeUnderlays","wHalf","includeNodes","halfW","outerWidth","halfH","outerHeight","cur
veStyle","hpts","haystackPts","_temp","pts","bezierPts","linePts","pt","n1pos","n2pos","_temp2","_temp3","gx","gy","bbBody","bbOverlay","bbLabels","boundingBoxImpl","defBbOpts","mainRot","sourceRot","targetRot","filledBbOpts","boundingBoxAt","bbAtOldPos","copyBoundingBox","boundingbox","renderedBoundingbox","fn$2","elesfn$a","defineDimFns","uppercaseName","autoName","labelName","outerName","uppercaseOuterName","paddedHeight","paddedWidth","widthHeight","controlPoints","renderer","getControlPoints","mult","segmentPoints","getSegmentPoints","sourceEndpoint","getSourceEndpoint","targetEndpoint","getTargetEndpoint","midpoint","getEdgeMidpoint","edgePoints","spec","rName","substr","renderedName","getValue","ifEdge","getPoints","ifEdgeRenderedPositions","getPoint","ifEdgeRenderedPosition","dimensions","Event","recycle","returnFalse","returnTrue","isImmediatePropagationStopped","isPropagationStopped","isDefaultPrevented","preventDefault","defaultPrevented","originalEvent","namespace","timeStamp","stopPropagation","stopImmediatePropagation","eventRegex","defaults$8","qualifierCompare","q1","q2","eventMatches","addEventFields","callbackContext","context","beforeEmit","afterEmit","bubble","defaultsKeys","emptyOpts","Emitter","emitting","forEachEvent","handler","qualifier","conf","confOverrides","eventList","evt","makeEventObj","forEachEventObj","listener","removeAllListeners","extraParams","manualCallback","numListenersBeforeEmit","eventObj","_loop2","otherArr","emitterOptions$1","selector1","selector2","once","onceCollection","argSelector$1","elesfn$9","emitter","argSel","elesfn$8","_filter","filterEles","toRemove","absoluteComplement","mutableElements","other","col2","col1Smaller","colS","colL","xor","both","retEles","toAdd","spawnSelf","toAddEle","unmergeAt","unmergedLastEle","lastEleI","lastEle","lastEleId","unmergeOne","unmerge","toRmFn","mapFn","initialValue","valFn","maxEle","minEle","fn$1","union","or","relativeComplement","subtract","and","symmetricDifference","symdiff","fnFilter","filterFn","complement","abscomp","zIndexSort","getDepth","zDepth","depthDiff","getEleDepth","eleDiff","zDiff","poolIndex","elesfn$6","thisSize","first","last","sortFn","sorted","sortByZIndex","srcDepth","tgtDepth","each","typeofUndef","defineSymbolIterator","getLayoutDimensionOptions","elesfn$5","takesUpSpace","bbDim","layoutEles","getMemoizeKey","fnMem","animations","useSpacingFactor","spacingFactor","spacingBb","getFinalPos","spacing","nodesBb","calculateSpacing","ani","easing","fitAni","zoomPanAni","makeLayout","styleCache","cacheStyleFunction","cachePrototypeStyleFunction","selfFn","createLayout","elesfn$4","dirtyStyleCache","dirty","notifyRenderer","batchStyleEles","updatedEles","changedEles","cleanStyle","parsedStyle","property","includeNonDefault","overriddenStyle","getDefaultProperty","numericStyle","numericStyleUnits","renderedStyle","getRenderedStyle","applyBypass","getStylePropertyValue","getRawStyle","removeStyle","removeAllBypasses","removeBypasses","show","hide","effectiveOpacity","parentOpacity","checkCompound","parentOk","defineDerivedStateFunction","specs","ok","edgeOkViaNode","eleTakesUpSpace","eleInteractive","parentInteractive","interactive","noninteractive","eleVisible","edgeVisibleViaNode","hidden","isBundledBezier","renderedCss","removeBypass","removeCss","elesfn$3","defineSwitchFunction","_handler","addlEvents","able","ableField","overrideAble","changedColl","defineSwitchSet","overrideField","autolock","autoungrabify","autounselectify","deselect","unselect","inactive","elesfn$2","defineDagE
xtremity","disqualified","noIncomingEdges","noOutgoingEdges","defineDagOneHop","oEles","defineDagAllHops","sEles","sElesIds","newNext","nid","defineSourceFunction","sources","defineEdgesWithFunction","otherNodes","edgeData","thisToOther","otherToThis","thisIsSrc","thisIsTgt","defineParallelEdgesFunction","codirected","edge1_p","src1","srcid1","tgtid1","srcEdges1","edge2","edge2data","tgtid2","srcid2","oppdirected","clearTraversalCache","successors","predecessors","otherNode","closedNeighborhood","neighbourhood","closedNeighbourhood","openNeighbourhood","targets","parallelEdges","codirectedEdges","visited","unvisited","visitInComponent","cmpt","componentsOf","Collection","unique","createdElements","elesIds","json","_data","_l","element$1","lazyMap","rebuildMap","elesfn$1","$id","_data2","move","newParentValSpecd","checkSwitch","trueFnName","falseFnName","obj_k","jsons","elesArr","addToPool","cy_p","removeFromElements","_data3","fields","fieldsLength","badSourceOrTarget","_data4","parentId","selfAsParent","restored","_ele3","inside","remove","removeFromPool","elesToRemove","elesToRemoveIds","alreadyAdded","addConnectedEdges","removeEdgeRef","removeParallelRef","pllEdge","alteredParents","removeChildRef","pid","ids","_ele4","pllEdges","elesStillInside","removedElements","_ele6","struct","modifyPool","srcExists","tgtExists","batch","_data5","pidToAssign","updated","_data6","corefn$9","elesByGroup","_jsons2","grs","elesArray","jl","generateCubicBezier","mX1","mY1","mX2","mY2","NEWTON_ITERATIONS","SUBDIVISION_PRECISION","SUBDIVISION_MAX_ITERATIONS","kSplineTableSize","kSampleStepSize","float32ArraySupported","Float32Array","mSampleValues","aA1","aA2","calcBezier","aT","getSlope","getTForX","aX","intervalStart","currentSample","lastSample","guessForT","initialSlope","aGuessT","currentSlope","newtonRaphsonIterate","aA","aB","currentT","binarySubdivide","_precomputed","precompute","calcSampleValues","generateSpringRK4","springAccelerationForState","tension","friction","springEvaluateStateWithDerivative","initialState","dt","derivative","dv","springIntegrateState","dxdt","dvdt","springRK4Factory","have_duration","last_state","initState","time_lapsed","percentComplete","cubicBezier","bezier","percent","easings","linear","spring","getEasedValue","easingFn","roundValue","ease","startProp","endProp","propSpec","easedArr","si","step$1","pEasing","startTime","easingImpl","easingVals","startPos","endPos","valid","endPan","animatingPan","endZoom","animatingZoom","minZoom","maxZoom","_name","easedVal","overrideBypass","startAnimation","stepAll","aniEles","doneEles","stepOne","ranAnis","callbacks","_callbacks","step","ranEleAni","handledThisEle","ranCoreAni","corefn$8","stopAnimationLoop","animationsRunning","startAnimationLoop","beforeRender","willDraw","beforeRenderPriorities","headlessStep","emitterOptions","argSelector","elesfn","corefn$7","png","jpg","bg","jpeg","corefn$6","extension","corefn$5","eventName","eventEles","batchNotifications","notificationsEnabled","destroyed","notifications","bool","noNotifications","batchCount","batchData","rendererDefaults","hideEdgesOnViewport","textureOnViewport","motionBlur","motionBlurOpacity","pixelRatio","desktopTapThreshold","touchTapThreshold","wheelSensitivity","debug","showFps","corefn$4","renderTo","pxRatio","forceRender","resize","invalidateSize","initRenderer","RendererProto","rOpts","destroyRenderer","domEle","_cyreg","childNodes","onRender","offRender","invalidateDimensions","corefn$3","styfn$8","TRUE","ie","cxtMeta","getContextMeta","cxtStyle","getContextS
tyle","app","applyContextStyle","appliedInitStyle","updateTransitions","diffProps","updateStyleHints","getPropertiesDiff","oldCxtKey","newCxtKey","propDiffs","dualCxtKey","cachedVal","addedProp","cxt","oldHasCxt","newHasCxt","cxtHasDiffed","cxtHasMappedProps","mappedProperties","laterCxtOverrides","laterCxt","cxtKey","prevKey","styleCxtKey","diffPropNames","cxtStyles","contextStyles","retDiffProps","types","diffPropName","cxtProp","eleProp","deleteBypassed","mapped","mapping","fnValue","prevFnValue","retDiffProp","applyParsedProperty","bypassed","propNames","propertyGroupNames","propGrKeys","propertyGroupKeys","propHash","seedKey","getPropertiesHash","oldStyleKey","styleKey","overriddenStyles","grKey","updateGrKey1","updateGrKey2","updateGrKey","updateGrKeyWStr","strVal","parsedProp","propInfo","_grKey","groupKey","normalizedNumberVal","hashOverride","numberVal","enums","haveNormNum","haveNum","multiple","hash1","hash2","_grKey2","grHash","sk","labelDimsKey","labelDimensions","labelKeys","labelKey","labelStyleKey","commonLabel","sourceLabelKeys","sourceLabelKey","sourceLabelStyleKey","targetLabelKeys","targetLabelKey","targetLabelStyleKey","_p$styleKeys","nodeBody","nodeBorder","backgroundImage","pie","nodeKeys","nodeKey","hasPie","clearStyleHints","flatProp","propIsBypass","origProp","origPropIsBypass","flatPropMapping","getVal","checkTriggers","fromVal","toVal","deleteBypass","printMappingErr","mapData","fieldWidth","fieldMax","fieldMin","r1","valueMin","r2","valueMax","g1","g2","b1","a1","a2","clr","calcValue","_fields","_fieldVal","fnRetVal","cleanElements","keepBypasses","anyPrev","styProp","diffProp","fromProp","toProp","initVal","initDt","transitioning","checkTrigger","fromValue","toValue","getTrigger","onTrigger","triggerCheck","checkZOrderTrigger","triggersZOrder","checkBoundsTrigger","triggersBounds","triggersBoundsOfParallelBeziers","styfn$7","_parsedProp","specifiedProps","_name2","_value","_parsedProp2","_prop","prevProp","isColor","isMulti","mutiple","oldValue","propertyNames","styfn$6","px","containerCss","domElement","getComputedStyle","getPropertyValue","styfn$5","isRenderedVal","subproperty","alias","pointsTo","styleProp","getRenderedValue","getValueStringWithUnits","isArrayValue","aniProps","propsObj","chVal","getNonDefaultPropertiesHash","styfn$4","resetToDefault","appendFromJson","defaultLength","styfn$3","selAndBlockStr","blockRem","propAndValStr","removeSelAndBlockFromRemaining","removePropAndValFromRem","selAndBlock","selectorStr","blockStr","invalidBlock","propAndVal","propStr","appendFromString","styfn$2","mapArg","urlRegexes","implicitUnits","percentages","zeroOneNumber","unitless","zeroOneNumbers","nOneOneNumber","nonNegativeInt","integer","nodeSize","numbers","positiveNumber","strictMin","bidirectionalSize","bidirectionalSizeMaybePercent","allowPercent","bidirectionalSizes","sizeMaybePercent","axisDirection","paddingRelativeTo","bgWH","bgPos","bgRelativeTo","bgRepeat","bgFit","bgCrossOrigin","bgClip","bgContainment","fill","bools","lineStyle","lineCap","borderStyle","fontFamily","fontStyle","fontWeight","textDecoration","textTransform","textWrap","textOverflowWrap","textBackgroundShape","nodeShape","overlayShape","compoundIncludeLabels","arrowShape","arrowFill","display","visibility","zCompoundDepth","zIndexCompare","justification","layoutData","mapLayoutData","mapScratch","url","regexes","singleRegexMatchValue","urls","propList","textRotation","polygonPointList","evenMultiple","edgeDistances","edgeEndpoint","singleEnum","validate","valArr","unitsArr","gradientD
irection","boundsExpansion","zeroNonZero","val1","val2","any","emptyNonEmpty","str1","str2","empty1","empty2","mainLabel","sourceLabel","targetLabel","behavior","overlay","underlay","transition","nodeSizeHashOverride","edgeLine","ghost","pieBackgroundN","edgeArrow","arrowPrefixes","propGroups","propertyGroups","propGroupNames","aliases","pointsToProp","aliasProp","getDefaultProperties","defaultProperties","rawProps","parsedProps","addDefaultStylesheet","styfn$1","propIsFlat","parseImplWarn","argHash","propCache","parseImpl","passedValue","valueIsString","trim","_mapped","pfValArr","hasEnum","deg","checkEnums","unitsRegex","strictMax","getEmSizeInPixels","propsStr","propsSplit","tuple","enumProp","Style","coreStyle","styfn","clear","ele_p","mapVal","cssRule","append","appendToStyle","fromJson","fromString","corefn$2","newStyle","setStyle","generateStyle","corefn$1","selectionType","selType","panningEnabled","userPanningEnabled","zoomingEnabled","userZoomingEnabled","boxSelectionEnabled","arg0","viewportState","bbe","zoomRange","currentPan","currentZoom","bail","level","pan1","zoom1","zoom2","viewport","zoomDefd","panDefd","zoomFailed","panFailed","z","cancelOnFailedZoom","reset","sizeCache","clientWidth","clientHeight","extent","rb","renderedExtent","multiClickDebounceTime","_int","autolockNodes","autoungrabifyNodes","Core","reg","destroy","readies","head","defVal","def","altVal","rendererOptions","extData","loadExtData","thens","initStyle","initEles","onload","ondone","oldEles","layoutOpts","setElesAndLayout","corefn","isReady","isHeadless","mount","unmount","idInJson","updateEles","gr","toMod","_toMod$_i","_json","parentsToRemove","getFreshRef","defaults$7","circle","grid","avoidOverlap","depthSort","deprecatedOptionDefaults","maximal","acyclic","setInfo","BreadthFirstLayout","maximalAdjustments","rootsArray","comp","compRoots","depths","foundByBfs","addToDepth","pNode","orphanNodes","assignDepthsAt","assignDepths","adjustMaximally","shifted","eInfo","maxDepth","incmr","iInfo","newDepth","changeDepth","enqueue","didShift","minDistance","nbb","cachedWeightedPercent","getWeightedPercent","eleDepth","samples","bf","nDepth","orphanDepth","biggestDepthSize","maxDepthSize","_getInfo2","depthSize","distanceX","distanceY","radiusStepSize","defaults$6","sweep","clockwise","CircleLayout","counterclockwise","dTheta","dcos","dsin","rMin","rx","ry","DEBUG","defaults$5","equidistant","minNodeSpacing","concentric","levelWidth","ConcentricLayout","nodeValues","maxNodeSize","levels","currentLevel","minDist","firstLvlHasMulti","rStep","rDeltaMax","_r","rDelta","_level2","_level3","_dTheta","_r2","_val","defaults$4","animationThreshold","refresh","randomize","componentSpacing","nodeRepulsion","nodeOverlap","idealEdgeLength","edgeElasticity","nestingFactor","gravity","initialTemp","coolingFactor","minTemp","CoseLayout","layoutInfo","createLayoutInfo","printLayoutInfo","randomizePositions","refreshPositions","mainLoop","temperature","getScaledPos","getScaleInBoundsFn","loopRet","frame","separateComponents","thread","isCompound","layoutNodes","idToIndex","graphSet","indexToGraph","layoutEdges","edgeSize","id2cmptId","tempNode","isLocked","cmptId","positionX","positionY","padLeft","padRight","padTop","padBottom","tempGraph","p_id","node_id","node_ix","tempEdge","idealLength","elasticity","sourceIx","targetIx","lca","findLCA","lcaGraph","findLCA_aux","count","graphIx","nodeIx","coseBB","lnode","pctX","pctY","_step","calculateNodeForces","calculateEdgeForces","calculateGravityForces","propagateForces","updatePosit
ions","randomDistance","directionX","directionY","overlap","nodesOverlap","forceX","forceY","point1","findClippingPoint","point2","distanceSqr","dX","dY","overlapX","overlapY","X","Y","H","W","dirSlope","nodeSlope","lx","ly","fx","fy","nodeIndex","offX","offY","childNode","tempForce","limitForce","updateAncestryBoundaries","flag","cid","totalA","usedW","rowH","maxRowW","defaults$3","avoidOverlapPadding","condense","rows","cols","GridLayout","cells","splits","small","large","oRows","oCols","columns","sm","lg","_sm","_lg","cellWidth","cellHeight","cellUsed","used","use","moveToNextCell","id2manPos","rcPos","defaults$2","NullLayout","defaults$1","PresetLayout","posIsFn","copyPosition","getPosition","defaults","RandomLayout","NullRenderer","noop","throwImgErr","BRp$f","arrowShapes","bbCollide","translation","edgeWidth","xRotated","yScaled","transformPoints","retPts","pointsToArr","standardGap","defineArrowShape","defn","collide","roughCollide","draw","arrowShapeImpl","gap","controlPoint","ptsTrans","ctrlPt","ctrlPtTrans","pointsTee","triPts","teePts","pointsTr","circleInside","getArrowWidth","baseCrossLinePts","crossLinePts","shiftFactor","BRp$e","clientX","clientY","offsets","findContainerClientCoords","offsetLeft","offsetTop","scale","containerBB","rect","getBoundingClientRect","styleValue","paddingHor","paddingVer","borderHor","unscaledW","unscaledH","interactiveElementsOnly","isTouch","findNearestElements","nearEdge","nearNode","getCachedZSortedEles","near","edgeThreshold","nodeThreshold","labelThreshold","minSqDist","addEle","sqDist","checkNode","hw","hh","nodeShapes","getNodeShape","checkPoint","checkEdge","rs","styleWidth","widthSq","width2","edgeType","allpts","arSize","arrows","arrowStartX","arrowStartY","srcArrowAngle","arrowEndX","arrowEndY","tgtArrowAngle","midsrcArrowAngle","midtgtArrowAngle","ar","shape","preprop","pre","checkLabel","th","ox","oy","box","x1c","x2c","y1c","y2c","boxBb","nodeBb","startX","startY","endX","endY","allInside","BRp$d","isHaystack","isBezier","isMultibezier","isSegments","isSelf","segpts","i1","i2","i3","bp0x","bp0y","bp1x","bp1y","ctrlpts","ic","midDispX","midDispY","getArrowHeight","arrowWidthCache","BRp$c","getPts","findHaystackPoints","haystack","srcPos","tgtPos","srcW","tgtW","srcH","tgtH","halfRadius","storeEdgeProjections","calculateArrowAngles","recalculateEdgeLabelProjections","calculateLabelAngles","findSegmentsPoints","pairInfo","posPts","intersectionPts","vectorNormInverse","segmentWs","segmentDs","segmentsN","w1","w2","midptPts","adjustedMidpt","findLoopPoints","edgeIsUnbundled","dirCounts","ctrlptDists","ctrlptDist","loopDir","loopSwp","stepSize","loopDist","loopAngle","outAngle","inAngle","String","findCompoundLoopPoints","loopaPos","loopbPos","loopPos","compoundStretchA","compoundStretchB","findStraightEdgePoints","findBezierPoints","edgeIsSwapped","ctrlptWs","bezierN","ctrlptWeight","multi","normctrlptDist","manctrlptDist","sign","distanceFromMidpoint","findTaxiPoints","VERTICAL","HORIZONTAL","LEFTWARD","RIGHTWARD","DOWNWARD","UPWARD","dIncludesNodeBody","taxiDir","rawTaxiDir","taxiTurn","turnIsPercent","taxiTurnPfVal","turnIsNegative","minD","dw","dh","pdx","pdy","subDWH","dxy","dwh","isExplicitDir","isVert","pl","sgnL","forcedDir","getIsTooClose","isTooCloseSrc","isTooCloseTgt","lShapeInsideSrc","lShapeInsideTgt","_lShapeInsideSrc","_lShapeInsideTgt","_y","_x","_x2","_x3","_y2","_y3","_y4","_x4","_x5","_x6","_y5","_y6","tryToCorrectInvalidPoints","srcShape","tgtShape","badStart","badAStart","badEnd","badAEnd","minCpADist","arrowShapeWid
th","startACpDist","closeStartACp","endACpDist","closeEndACp","overlapping","cpD","cpL","cpM","cpProj","srcCtrlPtIntn","intersectLine","_cpD","_cpL","_cpM","_radius","_cpProj","tgtCtrlPtIntn","findEndpoints","storeAllpts","checkForInvalidEdgeWarning","loggedErr","findEdgeControlPoints","hashTable","pairId","map2","pairIds","haystackEdges","edgeIsBezier","tableEntry","hasUnbundled","hasBezier","swappedpairInfo","edge1","firstEdge","_curveStyle","_edgeIsUnbundled","calculatedIntersection","srcOutside","srcIntn","tgtOutside","tgtIntn","vector","vectorNorm","passedPairInfo","BRp$b","npos","p1_i","p2_i","tgtArShape","srcArShape","et","lines","segments","hasEndpts","overrideEndpts","srcManEndpt","srcManEndptVal","tgtManEndpt","tgtManEndptVal","cpStart","srcArrowFromPt","manualEndptToPx","trs","lw2","lh2","va","ha","labelIntersect","refPt","intSqdist","labIntSqdist","arrowEnd","edgeEnd","srs","_lw","_lh","_lx","_ly","_lw2","_lh2","_va","_ha","_labelIntersect","_refPt","_intSqdist","_labIntSqdist","_minSqDist","arrowStart","edgeStart","badLine","BRp$a","pushBezierPts","qbezierAt$1","p3","bpts","bezierProjPcts","lpts","recalculateEdgeProjections","BRp$9","content","textX","textY","nodeWidth","nodeHeight","nodePos","textHalign","textValign","applyLabelDimensions","lineAngleFromDelta","lineAngle","setRs","midAngle","createControlPointInfo","startDist","nProjs","addSegment","cp","t0","prevSegment","segment","prevCp","calculateEndProjection","isSrc","cps","totalDist","_cp","_seg","lastSeg","seg","tSegment","segDt","lp0","lp1","bezierAngle","di","d0","_t","vec","vecDist","normVec","lineAt","applyPrefixedLabelDimensions","getLabelText","labelDims","calculateLabelDimensions","lineHeight","numLines","normPerLineHeight","labelLineHeight","pfd","wrapStyle","maxW","overflowAny","wrappedLines","wordsRegex","wordSeparator","lineW","processedLine","words","subline","word","testLine","_maxW","ellipsized","incLastCh","getLabelJustification","cacheKey","labelDimCache","existingVal","fStyle","family","canvas","labelCalcCanvas","c2d","labelCalcCanvasContext","getContext","ds","zIndex","pointerEvents","font","metrics","measureText","calculateLabelAngle","rot","rotStr","labelAutoAngle","labelAngle","sourceLabelAngle","targetLabelAngle","BRp$8","warnedCutRect","makePolygon","BRp$7","elesToUpdate","dirtyStyleCaches","cleanConnected","binder","updateEleCalcs","fns","onUpdateEleCalcsFns","flushRenderedStyleQueue","eleCalcs","isCleanConnected","_rstyle","recalculateNodeLabelProjection","nodeW","nodeH","_p3","_rstyle2","BRp$6","cachedZSortedEles","drag","nondrag","grabTargets","inDragLayer","forceRecalc","updateCachedGrabbedEles","BRp$5","BRp$4","crossOrigin","onLoad","imageCache","image","addEventListener","Image","dataUriPrefix","BRp$3","useCapture","tgtIsDom","supportsPassiveEvents","supportsPassive","capture","passive","bindings","nodeIsDraggable","nodeIsGrabbable","load","isSelected","triggerEvents","isMultSelKeyDown","shiftKey","metaKey","ctrlKey","allowPanningPassthrough","down","downs","allowPassthrough","setInDragLayer","setGrabTarget","isGrabTarget","addToDragList","list","addToList","setGrabbed","addNodesToDrag","innerNodes","addDescendantsToDrag","updateAncestorsInDragLayer","addNodeToDrag","freeDraggedElements","grabbedEles","setFreed","setOutDragLayer","removeGrabTarget","blurActiveDomElement","activeElement","blur","haveMutationsApi","MutationObserver","haveResizeObserverApi","ResizeObserver","removeObserver","mutns","rNodes","removedNodes","observe","childList","registerBinding","onResize","styleObserver","res
izeObserver","invalidateCoords","invalidateContainerClientCoordsCache","clickTimeout","didDoubleClick","prevClickTimeStamp","eventInContainer","containerPageCoords","touches","atLeastOnePosInside","tParent","containerIsTarget","hoverData","which","gpos","projectIntoViewport","select","selection","nears","draggedElements","dragData","possibleDragElements","mdownPos","mdownGPos","cxtStarted","cxtEvt","activate","downTime","getTime","cxtDragged","makeEvent","selectedNodes","redrawHint","bgActivePosistion","redraw","tapholdCancelled","clearTimeout","tapholdTimeout","tapholdDuration","draggingEles","dragging","selecting","findNearestElement","isOverThresholdDrag","dx2","dist2","desktopTapThreshold2","multSelKeyDown","goIntoBoxMode","cxtOver","deltaP","justStartedPan","mdPos","dragged","unactivate","didDrag","justStartedDrag","totalShift","dragDelta","updateDragDelta","cxtTap","getAllInBox","eleWouldBeSelected","downWasGrabbed","f1x1","f1y1","f2x1","f2y1","distance1","distance1Sq","center1","modelCenter1","containerWidth","containerHeight","twoFingersStartInside","wheelHandler","scrollingPage","wheelZooming","wheelTimeout","deltaY","wheelDeltaY","wheelDelta","deltaMode","newZoom","gestureStartZoom","scrollingPageTimeout","hasTouchStarted","touchstartHandler","touchmoveHandler","touchcancelHandler","touchendHandler","didDoubleTouch","touchTimeout","prevTouchTimeStamp","distanceSq","touchData","earlier","singleTouchMoved","touchDragEles","cxtDistThreshold","near1","near2","starts","draggedEles","singleTouchStartTime","pinching","sPos","touch0","startGPosition","startGPos","touchTapThreshold2","f1x2","f1y2","f2x2","f2y2","distance2Sq","factorThreshold","distThreshold","lastThreeTouch","didSelect","de_p","_start","distance2","factor","tx","ty","ctrx","ctry","pan2","swipePanning","ctxTapend","ctxTap","startWasGrabbed","rdist2","TouchEvent","pointers","makeTouch","identifier","pointerId","pageX","pageY","radiusX","radiusY","screenX","screenY","addPointer","touch","makePointer","removePointer","addTouchesToEvent","pointerIsMouse","pointerType","updatePointer","BRp$2","nodeShapeImpl","generateEllipse","ellipseWradius","ellipseHradius","newLength","lenProportion","intersectLineEllipse","generateRoundPolygon","allPoints","sourceIndex","destIndex","xDest","yDest","norm","destUv","sourceUv","py","cosTheta","cp0x","cp0y","cp1x","cp1y","orthx","orthy","lowestIntersection","lowestSquaredDistance","squaredDistance","roundPolygonIntersectLine","cutPolygonPoints","squaredCornerRadius","cx","pointInsideRoundPolygon","generateRoundRectangle","diam","generateCutRectangle","cornerLength","generateCutTrianglePts","cl","xBegin","xEnd","yBegin","yEnd","topLeft","topRight","bottomRight","bottomLeft","cPts","cutTrianglePts","generateBarrel","bPts","generateBarrelBezierPts","approximateBarrelCurvePts","m0","m1","m2","curveConstants","hOffset","wOffset","ctrlPtXOffset","isTop","isBottom","barrelCurvePts","getCurveT","curvePts","x0","y0","xMin","xMax","yMin","yMax","coeff","validRoots","sqrtR","denom","solveQuadratic","curveRegions","cornerPts","bezY","generateBottomRoundrectangle","topIntersections","registerNodeShapes","generatePolygon","diamondPoints","star5Points","outerPoints","innerPoints","innerRadius","tagPoints","BRp$1","redrawTotalTime","redrawCount","averageRedrawTime","lastRedrawTime","lastDrawTime","requestedFrame","renderOptions","cbs","beforeRenderCallbacks","startRenderLoop","renderLoopStarted","renderFn","requestTime","skipFrame","render","endTime","BR","BRp","clientFunctions","ctr","stylesheetId","stylesheet
AlreadyExists","textContent","insertBefore","triggerMode","initialPan","redraws","motionBlurEnabled","forcedPixelRatio","motionBlurTransparency","motionBlurPxRatio","mbPxRBlurry","minMbLowQualFrames","fullQualityMb","clearedForMotionBlur","eleTxrDeq","lyrTxrDeq","lyrTxrSkip","registerArrowShapes","registerCalculationListeners","invalidateCachedZSortedEles","matchCanvasSize","removeEventListener","disconnect","labelCalcDiv","fullFpsTime","defs","dequeueingSetup","queueRedraw","deqRedrawThreshold","frameStartTime","avgRenderTime","renderTime","deqd","getPixelRatio","frameDuration","timeAvailable","deqFastCost","deqCost","deqAvgCost","deqNoDrawCost","thisDeqd","deq","onDeqd","shouldRedraw","ElementTextureCacheLookup","doesEleInvalidateKey","idsByKey","keyForId","cachesByLvl","lvls","getIdsFor","currKey","deleteIdForKey","addIdForKey","keyHasChangedFor","lvl","caches","getCachesAt","getCache","updateKeyMappingFor","hasCache","setCache","deleteCache","deleteKeyMappingFor","entireKeyInvalidated","invalidateKey","getNumberOfIdsForKey","getTxrReasons","dequeue","downscale","highQuality","initDefaults","drawElement","getBoundingBox","getRotationPoint","getRotationOffset","isVisible","allowEdgeTxrCaching","allowParentTxrCaching","ElementTextureCache","initOptions","onDequeues","setupDequeueing","ETCp","reasons","getTextureQueue","txrH","eleImgCaches","getRetiredTextureQueue","rtxtrQs","retired","getElementQueue","eleCacheQueue","reqs","getElementKeyToQueue","eleKeyToCacheQueue","getElement","reason","eleScaledH","eleScaledW","scaledLabelShown","eleTextBiggerThanMin","eleCache","invalidated","texture","invalidatedWidth","txrQ","txr","addNewTxr","recycleTexture","addTexture","usedWidth","higherCache","scalableFrom","otherCache","deqing","highQualityReq","downscaleReq","oneUpCache","drawImage","setTransform","clearRect","queueElement","lowerCache","_l2","translate","eleCaches","checkTextureFullness","invalidateElements","invalidateElement","isInvalid","getForCachedKey","invalidate","_cache","checkTextureUtility","removeFromQueue","retireTexture","fullnessChecks","minW","makeOffscreenCanvas","rtxtrQ","k2q","existingReq","req","dequeued","cacheExists","onDequeue","offDequeue","LayeredTextureCache","layersByLevel","firstGet","lastInvalidationTime","skipping","eleTxrDeqs","scheduleElementRefinement","refineElementTextures","layersQueue","LTCp","layerIdPool","MAX_INT","makeLayer","layer","elesQueue","getLayers","validateLayersElesOrdering","tmpLayers","layersByLvl","layers","levelIsComplete","canUseAsTmpLvl","checkLvls","dir","checkTempLevels","after","getBb","insert","maxElesPerLayer","allowLazyQueueing","imgLayerCaches","existingLayer","queueLayer","drawEleInLayer","getEleLevelForLayerLevel","setImgSmoothing","drawCachedElement","numElesInLayers","invalidateLayer","updateElementsInLayers","haveLayers","replacement","rLyr","replaces","enqueueElementRefinement","elesQ","hasId","eleDeqs","peek","applyLayerReplacement","requestRedraw","layersInLevel","replaced","CRp$a","polygon","lineTo","triangleBackcurve","firstPt","quadraticCurveTo","triangleTee","trianglePoints","teePoints","beginPath","firstTeePt","moveTo","closePath","circleTriangle","arc","firstTrPt","CRp$9","shiftToOriginWithBb","showLabel","showOverlay","showOpacity","drawNode","drawEdge","drawNodeOverlay","drawEdgeOverlay","drawNodeUnderlay","drawEdgeUnderlay","eleTxrCache","getRotation","getOpacity","opacity","smooth","oldGlobalAlpha","rotPt","getImgSmoothing","globalAlpha","getZeroRotation","getLabelRotation","getTextAngle","getSourceLabelRotation"
,"getTargetLabelRotation","getTextOpacity","requestHighQuality","_r$data","lblTxrCache","slbTxrCache","tlbTxrCache","drawElementUnderlay","drawCachedElementPortion","drawElementOverlay","drawElements","drawCachedElements","drawCachedNodes","drawLayeredElements","lyrTxrCache","CRp$8","drawLabel","shouldDrawOverlay","shouldDrawOpacity","lineOpacity","effectiveLineOpacity","effectiveArrowOpacity","drawLine","strokeOpacity","eleStrokeStyle","drawEdgeTrianglePath","lineWidth","drawEdgePath","drawArrows","arrowOpacity","drawArrowheads","lineJoin","ghostOpacity","effectiveGhostOpacity","drawElementText","drawEdgeOverlayUnderlay","overlayOrUnderlay","usePaths","colorStrokeStyle","canvasCxt","pathCacheHit","lineDashPattern","lineDashOffset","pathCacheKey","pathCache","Path2D","setLineDash","stroke","fillStyle","strokeStyle","normal","triangleHead","drawArrowhead","arrowClearFill","edgeOpacity","gco","globalCompositeOperation","colorFillStyle","drawArrowShape","canvasContext","shapeImpl","arrowPathCache","cachedPath","matchEdgeWidth","CRp$7","img","ix","iy","iw","ih","nodeOpacity","styleObj","getIndexedStyle","repeat","paddingX2","nodeTW","nodeTH","shouldClip","imgOpacity","imgW","cachedW","imgH","cachedH","offsetWidth","offsetHeight","posXUnits","posXPfVal","offXUnits","offXPfVal","posYUnits","posYPfVal","offYUnits","offYPfVal","gAlpha","smoothingEnabled","isSmoothingSwitched","save","clip","safeDrawImage","pattern","createPattern","CRp$6","useEleOpacity","label","textAlign","textBaseline","_label","srcLabel","tgtLabel","applyRotation","drawText","getFontCache","fontCaches","setupTextStyle","labelStyle","labelSize","labelFamily","labelWeight","outlineOpacity","outlineColor","pdash","textAngle","orgTextX","orgTextY","textW","textH","backgroundOpacity","borderOpacity","textBorderWidth","backgroundPadding","bgX","bgY","bgW","bgH","textFill","textBackgroundColor","ctx","roundRect","fillRect","textStroke","textLineWidth","textBorderColor","textBorderStyle","strokeRect","whiteWidth","halfTextW","strokeText","fillText","CRp$5","eleOpacity","urlDefined","numImages","bgImgCrossOrigin","getCachedImage","backgroundTimestamp","darkness","borderWidth","bgOpacity","borderColor","setupShapeColor","bgOpy","eleFillStyle","setupBorderColor","bdrOpy","styleShape","shapePts","nodePathCache","drawShape","drawImages","prevBging","totalCompleted","drawInscribedImage","drawPie","redrawShape","pieOpacity","darken","drawBorder","effGhostOpacity","drawNodeOverlayUnderlay","cyStyle","pieSize","lastPercent","angleStart","angleEnd","CRp$4","contexts","backingStore","backingStorePixelRatio","webkitBackingStorePixelRatio","mozBackingStorePixelRatio","msBackingStorePixelRatio","oBackingStorePixelRatio","devicePixelRatio","paintCache","paintCaches","needToCreateCache","createGradientStyleFor","shapeStyleName","gradientStyle","createRadialGradient","_end","createLinearGradient","hasPositions","addColorStop","gradientFillStyle","backgroundFill","backgroundColor","gradientStrokeStyle","lineFill","lineColor","mbPxRatio","bufferCanvases","MOTIONBLUR_BUFFER_NODE","MOTIONBLUR_BUFFER_DRAG","canvasWidth","canvasHeight","canvasContainer","CANVAS_LAYERS","canvases","BUFFER_COUNT","textureMult","TEXTURE_BUFFER","forcedContext","forcedZoom","forcedPan","drawAllLayers","forcedPxRatio","drawOnlyNodeLayer","needDraw","canvasNeedsRedraw","textureDraw","inNodeDragGesture","inBoxSelection","motionBlurFadeEffect","prevPxRatio","motionBlurTimeout","mbFrames","clearingMotionBlur","textureDrawLastFrame","NODE","SELECT_BOX","effectiveZoom","effectivePan","
prevVp","prevViewport","mbclear","setContextTransform","ePan","eZoom","bufferContexts","textureCache","mpan","DRAG","outsideBgColor","outsideBgOpacity","vpManip","hideEdges","needMbClear","useBuffer","drawDebugPoints","timeToRender","fps","cxtNode","txtNode","cxtDrag","txtDrag","drawMotionBlur","txt","needClear","pxr","motionBlurCleared","CRp$3","arcTo","barrelCurveConstants","sin0","cos0","ellipseStepSize","drawEllipsePath","ellipse","xPos","yPos","rw","rh","CRp$2","b64UriToB64","b64uri","output","mimeType","getB64Uri","toDataURL","quality","toBlob","blob","b64","bytes","atob","buff","ArrayBuffer","buffUint8","Uint8Array","Blob","b64ToBlob","createBuffer","buffer","bufferCanvasImage","ctrRect","full","specdMaxDims","maxWidth","maxHeight","maxScaleW","maxScaleH","buffCanvas","buffCxt","zsortedEles","CRp$1","drawPolygonPath","drawRoundPolygonPath","drawRoundRectanglePath","drawCutRectanglePath","drawBottomRoundRectanglePath","drawBarrelPath","CR","CanvasRenderer","CRp","tapHlOffAttr","tapHlOffStyle","containerStyle","overflow","styleMap","userAgent","setAttribute","topCanvas","pathsEnabled","emptyBb","getCenterOffset","getElementBox","getLabelBox","getSourceLabelBox","getTargetLabelBox","isLabelVisibleAtScale","addTextMargin","getRsPt","oldBackgroundTimestamp","onUpdateEleCalcs","refineInLayers","pathsImpld","path2dEnabled","imageSmoothingEnabled","webkitImageSmoothingEnabled","mozImageSmoothingEnabled","msImageSmoothingEnabled","OffscreenCanvas","incExts","extensions","modules","setExtension","registrant","ext","overrideErr","layoutProto","optLayoutFns","fnName","regStop","getCy","emitterOpts","BaseRenderer","getExtension","bProto","RegistrantRenderer","rProto","Renderer","pName","pVal","_pName","setModule","moduleType","moduleName","getModule","Stylesheet","sheetfn","version","code","defineProperties","ELK","_ref$defaultLayoutOpt","defaultLayoutOptions","_ref$algorithms","algorithms","workerFactory","workerUrl","initialized","Worker","worker","postMessage","PromisedWorker","cmd","catch","_ref2","_ref2$layoutOptions","_ref2$logging","logging","_ref2$measureExecutio","measureExecutionTime","terminate","_this2","resolvers","onmessage","answer","receive","convertGwtStyleError","javaException","cause","backingJsObject","$wnd","nb","xb","Fd","$g","yq","Sq","Es","Jw","Vw","VA","dA","MA","PA","PB","bx","vy","Nz","Yz","Ylb","Ymb","xmb","Fmb","Qmb","gcb","ccb","jcb","jtb","otb","qtb","_fb","bpb","kpb","ppb","Gpb","drb","dzb","fzb","fxb","Vxb","Ovb","byb","zyb","Zyb","_yb","hzb","jzb","lzb","nzb","rzb","zzb","Czb","Ezb","Gzb","Izb","Mzb","bBb","NBb","PBb","RBb","iCb","OCb","SCb","GDb","JDb","fEb","xEb","CEb","GEb","yFb","KGb","tIb","vIb","xIb","zIb","OIb","SIb","TJb","VJb","XJb","XKb","fKb","VKb","VLb","jLb","nLb","GLb","KLb","MLb","OLb","RLb","YLb","bMb","gMb","lMb","pMb","wMb","zMb","CMb","FMb","LMb","zNb","PNb","kOb","pOb","tOb","yOb","FOb","GPb","aQb","cQb","eQb","gQb","iQb","CQb","MQb","OQb","ASb","fTb","kTb","STb","fUb","DUb","VUb","YUb","_Ub","_Wb","QWb","XWb","jVb","DVb","VVb","$Vb","dXb","hXb","lXb","gYb","HYb","SYb","VYb","dZb","P$b","T$b","h1b","m1b","q1b","u1b","y1b","C1b","e2b","g2b","m2b","q2b","u2b","S2b","U2b","W2b","_2b","e3b","h3b","p3b","t3b","w3b","y3b","A3b","M3b","Q3b","U3b","Y3b","l4b","q4b","s4b","u4b","w4b","y4b","L4b","N4b","P4b","R4b","T4b","X4b","I5b","Q5b","T5b","Z5b","l6b","o6b","t6b","z6b","L6b","M6b","P6b","X6b","$6b","a7b","c7b","g7b","j7b","m7b","r7b","x7b","D7b","D9b","b9b","h9b","j9b","l9b","w9b","F9b","hac","jac","pac","uac","Iac","Kac","Sac","obc","rbc","vbc","Fb
c","Jbc","Xbc","ccc","fcc","lcc","occ","tcc","ycc","Acc","Ccc","Ecc","Gcc","Zcc","_cc","bdc","fdc","jdc","pdc","sdc","ydc","Adc","Cdc","Edc","Idc","Ndc","Qdc","Sdc","Udc","Wdc","Ydc","aec","hec","jec","lec","nec","uec","wec","yec","Aec","Fec","Jec","Lec","Nec","Rec","Uec","Zec","Zfc","lfc","tfc","xfc","zfc","Ffc","Jfc","Nfc","Pfc","Vfc","_fc","fgc","jgc","lgc","Bgc","ehc","ghc","ihc","khc","mhc","ohc","qhc","yhc","Ahc","Ghc","Ihc","Khc","Mhc","Shc","Uhc","Whc","dic","dlc","blc","flc","hlc","jlc","Glc","Ilc","Klc","Mlc","Mjc","Qjc","Qlc","Ulc","Ylc","Lkc","Nkc","Pkc","Rkc","Xkc","_kc","gmc","kmc","zmc","Fmc","Wmc","$mc","anc","mnc","wnc","Hnc","Jnc","Lnc","Nnc","Pnc","Ync","eoc","Aoc","Coc","Eoc","Joc","Loc","Zoc","_oc","bpc","hpc","kpc","ppc","pFc","Ryc","QCc","PDc","xGc","HGc","JGc","NGc","GIc","iKc","mKc","wKc","yKc","AKc","EKc","KKc","OKc","QKc","SKc","UKc","YKc","aLc","fLc","hLc","nLc","pLc","tLc","vLc","zLc","BLc","DLc","FLc","sMc","JMc","hNc","RNc","ZNc","_Nc","bOc","dOc","fOc","hOc","hRc","jRc","KRc","NRc","NQc","LQc","_Qc","cPc","iPc","kPc","mPc","xPc","zPc","zSc","BSc","GSc","ISc","NSc","TSc","NTc","NVc","oVc","SVc","VVc","XVc","ZVc","bWc","bXc","CXc","FXc","IXc","MXc","UXc","bYc","fYc","oYc","qYc","uYc","pZc","G$c","h0c","N0c","k1c","I1c","Q1c","f2c","i2c","k2c","w2c","O2c","S2c","Z2c","v3c","x3c","R3c","U3c","e4c","w4c","x4c","z4c","B4c","D4c","F4c","H4c","J4c","L4c","N4c","P4c","R4c","T4c","V4c","X4c","Z4c","_4c","_7c","b5c","d5c","f5c","h5c","H5c","Hfd","Zfd","Zed","ged","Jed","Ned","Red","Ved","bbd","mdd","_fd","fgd","kgd","Mgd","Ahd","Ald","Tld","xkd","rmd","knd","Jod","JCd","Bpd","BFd","oFd","bqd","bvd","jvd","yud","Hxd","EBd","aDd","MGd","vHd","RHd","wNd","zNd","CNd","KNd","XNd","$Nd","HPd","lUd","XUd","DWd","GWd","JWd","MWd","PWd","SWd","VWd","YWd","_Wd","xYd","BYd","mZd","EZd","GZd","JZd","MZd","PZd","SZd","VZd","YZd","_Zd","c$d","f$d","i$d","l$d","o$d","r$d","u$d","x$d","A$d","D$d","G$d","J$d","M$d","P$d","S$d","V$d","Y$d","_$d","c_d","f_d","i_d","l_d","o_d","r_d","u_d","x_d","A_d","D_d","G_d","J_d","M_d","P_d","S_d","V_d","Y_d","h5d","U6d","U9d","_8d","fae","hae","kae","nae","qae","tae","wae","zae","Cae","Fae","Iae","Lae","Oae","Rae","Uae","Xae","$ae","ebe","hbe","kbe","nbe","qbe","tbe","wbe","zbe","Cbe","Fbe","Ibe","Lbe","Obe","Rbe","Ube","Xbe","$be","bce","ece","hce","kce","nce","qce","tce","wce","zce","Cce","Fce","Ice","Lce","Oce","Rce","Uce","Xce","ude","Vge","dhe","ol","wb","oPb","nPb","EPb","CPb","gFb","fFb","TRb","SRb","ySb","wSb","PSb","OSb","dTb","bTb","i4b","b4b","D2b","x2b","J6b","D6b","u9b","q9b","$9b","I9b","Umc","Imc","abc","Vac","ZCc","VCc","kCc","hCc","rCc","oCc","Tcc","Occ","xkc","gkc","xDc","rDc","iDc","cDc","kwc","jwc","tJc","jJc","dJc","aJc","Pyc","Nyc","VBc","SBc","CFc","yFc","CUc","wUc","lUc","fUc","sUc","pUc","IUc","GUc","IWc","HWc","_Wc","ZWc","fHc","dHc","f0c","d0c","B0c","A0c","L0c","J0c","LTc","JTc","sTc","rTc","KLc","ILc","wNc","tNc","PYc","OYc","nZc","lZc","q3c","p3c","Z7c","X7c","Z9c","Y9c","_ad","Zad","kdd","idd","$md","Smd","HGd","tGd","hLd","NKd","J6d","Uge","Mvb","uCb","Yb","cj","Dj","df","kf","ah","Ph","Ci","Fk","Ln","zp","Yp","qq","Dq","wr","Ir","sj","sw","xw","Cw","Qw","Rw","Xw","Xv","Sv","eu","Zx","xy","xB","HB","TB","fC","wB","MBb","ZGb","dRb","eRb","__b","njc","mjc","dFc","eFc","nOc","oOc","pOc","qOc","rOc","DVc","FVc","SZc","TZc","c$c","bJd","Cic","Dg","vib","eib","Pib","Vib","$ib","mcb","Mcb","Xcb","Ndb","_db","teb","Qeb","djb","Gjb","Njb","Bjb","lnb","Dnb","anb","Mob","Rob","iob","olb","qub","Tub","Vub","Xub","Zub","tpb","_pb",
"Wqb","nsb","Rxb","Txb","Xxb","bzb","tzb","vzb","xzb","Kzb","Ozb","iAb","kAb","mAb","BAb","hBb","jBb","nBb","TBb","XBb","QCb","WCb","_Cb","dEb","QGb","YGb","tKb","CLb","JMb","RNb","kQb","mQb","FQb","ETb","UTb","dUb","hUb","EZb","j$b","v$b","J0b","M0b","R0b","U0b","i2b","k2b","o2b","s2b","G2b","I2b","K2b","M2b","W3b","$3b","V4b","u5b","A7b","G7b","J7b","M7b","Mbc","Pbc","lac","nac","qcc","Gdc","$dc","cec","_ec","pfc","Bfc","Lfc","ygc","Dgc","shc","uhc","whc","Chc","Ehc","Ohc","Yhc","Tkc","Vkc","Olc","pnc","rnc","dpc","fpc","GCc","KCc","mDc","jEc","HEc","FEc","qoc","bFc","GFc","iGc","kGc","mGc","$Gc","hIc","lIc","pIc","tIc","xIc","zIc","CIc","LIc","CKc","IKc","MKc","$Kc","cLc","jLc","rLc","xLc","OMc","ZOc","ZRc","aSc","I$c","K$c","M$c","O$c","U$c","n1c","z1c","B1c","Q2c","U2c","z3c","med","Xed","_ed","Qfd","Bgd","$gd","lrd","urd","vrd","wrd","xrd","yrd","zrd","Ard","Brd","Crd","Ird","Krd","Lrd","Mrd","Nrd","Prd","Srd","Yrd","Zrd","_rd","asd","bsd","csd","dsd","msd","osd","qsd","ssd","Wsd","Lsd","thd","qtd","yBd","GBd","MBd","SBd","iCd","YMd","GNd","EPd","EQd","NTd","qOd","lVd","VVd","iYd","RYd","ZYd","z0d","O0d","s0d","W5d","cge","xfe","Tfd","jkb","Vjb","Rkb","Ckb","Lqb","Uhb","lEb","kEb","A_b","UQd","FQd","moc","yXb","Ji","vbb","DB","LB","ZB","lC","EC","wC","SB","j5b","g5b","PXc","ad","As","Zl","Ql","jq","sq","Fq","im","Sz","Rz","Qz","be","gz","Xy","xcb","pcb","tcb","Bdb","Vdb","Ydb","Geb","bgb","Apb","Jpb","utb","rQd","MPd","Bb","tTb","mt","fA","rcb","dCb","Tvb","Ekb","GLd","Uhd","d7d","D2d","Odb","Hcb","Pje","Qje","Tdb","Tqb","gyb","Wvb","KFb","PFb","FFb","pGb","MFb","ZQb","Gxb","Pwb","jUb","PTb","sDb","oDb","zDb","tDb","CWb","HXb","nYb","BYb","fLb","vYb","a2b","wZb","TCc","zJc","uMc","wdc","vKc","twb","oHb","$Gb","LXb","L_b","H_b","p0b","s0b","WMc","VMc","bNc","EPc","CPc","JPc","OPc","w1c","s1c","s7c","Psb","apd","ppd","lDd","YCd","NDd","mFd","vFd","GFd","KPd","OJd","hJd","aQd","SSd","FId","rUd","oUd","NYd","cZd","R8d","o9d","i9d","hee","Dd","yd","Hd","ph","Sh","Wc","oi","Ii","Zdd","QSc","D2c","QXc","e_c","d_c","$k","al","Lk","Kk","Mk","vb","hs","gs","Gp","xp","Lo","Ep","Tp","Wn","wx","un","ov","Mv","Br","Ov","Lw","hz","Yy","MB","eC","Ftb","Atb","zwb","LVb","BD","mlb","fad","gad","KVb","ZVb","KC","IP","Uhe","RVb","rXb","amb","OC","GC","bQ","MVb","NVb","lIb","vfd","nmd","Fyd","xMd","gc","ukd","Dyd","gmd","Dfd","Odc","Ajc","xjc","Ucd","zcd","yjc","Tcd","P6c","sjc","Acd","tjc","rjc","Wyc","Y1c","W1c","Ky","Edb","ED","Ksd","dzc","bzc","Xyc","td","Xr","tC","vC","yC","ocb","qcb","ucb","vcb","fGc","LFc","c0d","kzd","bhd","Rgd","dhd","Cdb","Wdb","Zdb","Feb","Heb","Ccb","Gdb","jfb","akb","Hwb","wVb","qVb","Blb","Glb","cgb","cqd","n8d","z2c","mde","pc","qc","Ki","Kz","Jz","jz","iz","KA","JA","OB","NB","jc","zh","Pi","Gb","GD","Qb","She","Bs","Td","Sd","scb","Oeb","Hfb","Ifb","Ufb","Vfb","Xfb","zob","Yob","Inb","Gob","Az","wrb","vrb","entries","Nhe","createObject","hke","getOwnPropertyNames","xrb","zrb","yrb","Mtb","Vtb","eCb","PC","rdb","_Db","$Db","HGb","IGb","JGb","dLb","zVb","KOb","SFc","M3c","L3c","N3c","O3c","P3c","Q3c","b4c","c4c","d4c","z5c","y5c","A5c","B5c","D5c","E5c","F5c","G5c","Vdd","T$c","Qqb","aDb","R$c","RGc","cEc","iHc","t7c","Qsb","aPb","_Ob","Ixd","zud","dCd","ZBd","fCd","p_b","q_b","d7c","aPc","NJd","DId","Oc","gdb","Vd","sn","Gv","hdb","fdb","Fhd","Ehd","mUd","yFd","xFd","yte","JD","ubb","uvd","h0d","ond","Hhd","Ghd","yod","nUd","Phb","Bod","Lnd","Qnd","Knd","Mnd","zod","pnd","cod","dod","Gnd","wtd","_Kd","Xnd","P3","Ile","Dnd","Hnd","Nnd","j5","Ond","PQd","Fnd","Ate","Bte","xI
d","Fi","End","Cte","Xse","_nd","qud","ZKd","Dte","C2","Zse","Vnd","_","Ete","G2","Fte","$je","Gte","D2","dte","Hte","Ite","z2","$se","E2","ete","Jte","Kte","F2","fte","B2","_se","Lte","Mte","Nte","Yse","A2","ate","Ote","Pte","Qte","Rte","Ste","CK","Wnd","Znd","Q3","SI","Rnd","Aod","Tnd","Shb","Cod","LFd","KFd","_ve","qZd","p8d","o8d","A9d","Ewe","Y9d","r8d","q8d","Z9d","rEd","Q9","S9","T9","U9","ZI","SD","wI","xI","BI","yK","FI","JI","MI","UI","eae","cae","fb","qb","D","F","G","J","I","L","T","Z","ab","db","eb","gb","hb","ib","jb","kb","lb","mb","ob","pb","aae","O","Qve","Jwe","bue","Kwe","Lwe","jxe","sbb","bJ","UD","Rwe","VD","WD","cJ","XD","Swe","axe","rbb","Vie","Bnd","Rve","nie","fue","Sve","xwe","Dwe","Fwe","Owe","vwe","Cwe","ywe","Khe","cwe","Eve","Gve","Pwe","Qwe","zwe","Hve","Twe","Ive","Uwe","Vwe","Wwe","Xwe","Jve","Ywe","Zwe","$we","_we","bxe","cxe","dxe","exe","Mhe","uwe","Kve","twe","fxe","gxe","hxe","ixe","wwe","Awe","Bwe","_9d","bae","Rhb","JFd","IFd","dae","mvd","lvd","n4c","Srb","enb","fnb","gnb","jnb","Cnb","Uqb","Mqb","tgb","lgb","BCb","Oje","qfb","Xhe","Icb","Rie","Ohe","QD","kgb","NaN","ugb","Mgb","heb","hhb","Mbb","Nbb","xbb","Yje","Sbb","Ibb","ogb","Ygb","bfb","ngb","Hxb","Qwb","_ub","ye","Sub","sd","Bfb","Cfb","Ffb","Lfb","WAb","Tzb","Wsb","pD","aIc","h2c","j4c","tg","cUd","Zod","uqb","N5c","K5c","L5c","I5c","M5c","J5c","c2c","cCb","WHb","VHb","XHb","jVc","ZUc","L2c","K$","tRc","h$","Q$c","J_","OCd","PCd","QCd","RCd","SCd","TCd","UCd","VCd","WCd","XCd","Rge","ege","Afb","aje","Xee","nde","rde","tvd","sue","Uvb","tue","ode","dge","Uee","Vee","Fhe","Ehe","dde","$ce","Iz","xz","zz","Ybb","Wbb","Yf","zf","$f","Hf","rk","ek","gk","pk","yk","Ak","Fj","dr","zr","So","qp","$q","$r","ne","Wo","bu","qu","qv","Px","mC","Lb","rf","Zv","$u","eB","Zfb","Yfb","Emb","Dmb","Vhb","hnb","Hc","inb","Ic","knb","Qc","Dob","dob","uc","Rqb","fob","jt","Mhb","Lp","Iie","sC","GB","FB","EB","Whb","yAb","Vzb","Gqb","Dqb","Csb","Osb","MAb","Nb","Myb","Gc","qDb","wDb","xDb","yDb","BFb","CFb","DFb","EFb","oGb","Ltb","Ktb","Otb","Utb","Ttb","Wtb","$xb","Zxb","$Yb","ZYb","cPb","bPb","dPb","EAb","DAb","a$b","_Zb","FDb","EDb","xUb","wUb","w2b","iVb","hVb","nVb","mVb","OVb","LWb","KWb","a4b","p9b","qgc","pgc","Hmc","FUc","j3c","i_c","h_c","k_c","s_c","r_c","t_c","R0c","Q0c","T0c","Vyc","Uyc","DCc","vCc","Une","r4c","Xrb","s4c","B3c","kKb","Xe","W8c","We","qJb","wJb","Jkb","pJb","uJb","sJb","UHb","Mpb","bIb","Npb","NC","CHb","Rcd","Scd","rJb","lKb","jKb","E3b","tN","ile","Ikb","D3b","N3b","F1","xqb","_Bb","G3b","rqb","K3b","F3b","yNb","wtc","Csc","J6c","H6c","L3b","J3b","e9b","vNb","tyc","uyc","ryc","I6c","d9b","rcd","ncd","Hsc","pcd","f9b","g3c","DQd","fB","getFullYear","nje","getMonth","getDate","nB","oje","uA","ofb","wA","dB","ZA","getHours","setMonth","YA","setHours","$A","setMinutes","_A","setSeconds","bB","cB","wbb","Abb","Cbb","_ie","Gbb","getDay","getTimezoneOffset","mB","pA","Pgd","Ogd","AFd","zFd","tFd","sFd","EFd","DFd","nRd","mRd","u4d","LRd","KRd","y4d","g5d","f5d","Q6d","P6d","pEd","oEd","tZd","rZd","Xge","Wge","jw","Lv","cd","sib","Lpb","tqb","Dbb","ybb","Crb","Vrb","Nvb","Rub","Kub","wcb","Zy","mxb","Gyb","zx","yx","zlb","Dlb","Alb","Flb","aCb","pjb","kqb","VBb","BEb","sBb","yBb","EBb","KBb","tMb","oOb","SOb","SMb","NEb","VEb","sFb","hHb","OHb","FIb","wLb","YRb","zTb","rUb","oWb","SXb","k0b","z5b","T8b","ibc","Cec","rfc","Rfc","Tfc","bgc","ngc","Qhc","$hc","Z0b","Dfc","dgc","Bmc","cWb","I$b","Unc","Oic","Bjc","Yjc","Gkc","Bpc","Jpc","Tpc","cqc","oqc","yqc","Hqc","Uqc","mrc","zrc","Prc","
Yrc","fsc","nsc","nzc","zzc","Kzc","Xzc","Dtc","lAc","uAc","CAc","LAc","UAc","aBc","uBc","DBc","MBc","sGc","VIc","EIc","qKc","GKc","lLc","mMc","fMc","ZLc","ZMc","IOc","GQc","PQc","RQc","ARc","rSc","TTc","_Tc","RUc","uVc","hWc","rWc","kXc","uXc","AYc","l$c","Z$c","D_c","O_c","c1c","D3c","O5c","a6c","m2c","W2c","f7c","G7c","j8c","iad","rad","Bad","Nad","ibd","tbd","Ibd","Ubd","gcd","scd","Ycd","udd","Jdd","Eed","bfd","dfd","ffd","Kfd","Mfd","Ofd","vgd","qgd","jrd","krd","mrd","nrd","qrd","rrd","srd","trd","Drd","Frd","Hrd","Jrd","Ord","oo","Yqd","iqd","Ohb","jqd","hqd","Wpd","Rrd","Qrd","meb","Hhe","Dhe","cC","ikd","Q8c","hkd","reb","Upd","Mld","gqd","Xte","Fe","Xqd","BIc","FHc","gIc","Arb","CMc","wMc","NNc","FNc","eCc","ZBc","aCc","Dsd","Urd","Wrd","kGd","nGd","bUd","zVd","C1d","MLd","Wud","f7d","_zd","Y1d","c8d","$Id","_Id","hCd","qAd","e7d","R2d","Wd","dd","PD","KD","LD","Lhe","ND","Em","Xb","Kq","Ob","QNc","Pb","hr","Kc","Bbb","Kbb","Jdb","pfb","cg","ag","oVb","DD","Kb","pVb","RD","CCb","Dfb","Efb","Nfb","Pfb","Qfb","Mfb","Esb","Gsb","Fsb","setTime","fvb","bvb","jvb","nvb","Nqb","Ld","wmb","tCb","X6c","f3c","_Pb","QPb","Xod","jtd","ltd","VPb","yJb","xJb","as","Y8b","X8b","W8b","Cv","sl","vl","Nsd","Msd","$zd","Cyd","nNd","s4d","p4d","w4d","qRd","lcb","ww","hfb","Ny","kB","SC","TC","Hdb","NIb","beb","Fbb","G1b","lrb","Frb","swb","E2c","d3c","Nlb","Klb","zsb","$rb","I_b","B_b","g7c","aad","bad","dad","_9c","Jzc","Fzc","Ezc","fcd","bcd","ccd","oid","Lg","Mg","Gfd","Kkd","k3c","y2d","vkd","wkd","_kd","cld","ald","bld","dld","eld","hmd","imd","omd","pmd","iKd","$Jd","_Jd","_ud","Xg","Vg","Hgb","Zgb","rxb","wxb","MGb","Hkb","xf","xtb","tb","Vq","lr","Osd","q0b","HOb","LOb","IOb","lzd","mBc","Lge","wfe","fNc","eNc","Rpb","i0d","GA","FA","Iy","UVd","TVd","Gg","Bp","Okb","Mlb","llb","Eqb","Ntb","De","Deb","Ctb","hDb","dOb","n_b","txb","bUb","ZTb","zP","jBc","hBc","E6c","F6c","R6c","CQd","UA","RA","SA","TA","gB","vA","fromCharCode","hA","AOc","BOc","COc","RRd","a7d","Pp","mmb","Py","VI","JLd","Uxd","b7d","Oj","T6d","hi","F2d","Tje","R6d","Fc","Hwe","S6d","Tg","rl","ak","Gtd","ytd","t2d","Lq","Rb","rr","Sqb","Bc","pr","yfb","zfb","AD","Yrb","jsb","isb","RJc","V_b","OZb","Su","sCb","Bib","uib","SJc","Eub","kke","xHb","REc","aEc","tEc","OJc","Tbb","HAb","JAb","YAb","aKc","Y6c","mud","Ozd","Ajd","Wzd","otd","ytb","hue","Rld","y5d","atd","ul","KI","Rr","Qr","ntd","htd","gtd","ztb","Ivb","Hvb","pMd","BMd","K4d","N4d","X4d","_4d","C4d","k2d","G4d","I4d","c5d","aj","Jj","Rj","she","xl","Qo","Eb","Fb","Cb","ycb","Db","ur","Gr","Hs","Eg","Uk","Ub","ot","I6d","H6d","Bcb","zcb","Acb","Jfb","Wfb","Xnb","Zob","Ry","Qv","au","xu","kfb","lastIndexOf","ifb","xfb","fcb","Etb","Fxb","Jwb","FCb","$H","ECb","pDb","vDb","nGb","Btb","Asb","_rb","GUb","HUb","cxb","CKb","BKb","tle","NFb","OFb","Lkb","zXb","a1b","iBc","gBc","i3c","B2c","hhd","Sb","n5b","jm","Vp","Oyd","$yd","bzd","sgc","Pu","Xj","Jie","Skb","Vz","gfb","wfb","Pkb","ZBb","Zr","Hsb","Isb","Cmd","Gh","Wwb","tib","Ptb","Xtb","Trb","ksb","mkb","lCb","yCb","lsb","Cg","qg","wNb","St","Zc","O6c","Z6c","b7c","a7c","V6c","Dic","xed","u7c","n7c","AXb","xXb","mf","GRd","dSd","hSd","fSd","sId","dId","zXd","nXd","BXd","DXd","FXd","PXd","RXd","XXd","w1d","V1d","p1d","a3d","xid","ypb","$modCount","MUc","LUc","LCd","KCd","KVc","Hp","FUd","C9d","ade","TD","$ie","_ce","cde","G9d","hde","gde","jde","pe","qe","me","oe","ydb","vdb","Mf","Sf","Hg","bAb","vAb","sNb","pNb","dg","Yg","g4c","Wrb","i4c","wic","B6c","eLc","FJc","$Jb","ZJb","pqb","tdd","sdd","fOb","Axb","Ywb","bOb","gOb"
,"qyb","lyb","_Hb","lle","Kld","_hd","bmd","kpd","Cvd","Atd","Avd","Hi","bRd","c7d","C2d","DAd","uVd","sYd","zyd","YOd","fTd","Xp","cq","LA","XMb","Opb","Sfb","ijb","trb","Aib","Vc","Urb","zc","HD","gRb","iTb","_Qb","sec","NIc","LDc","hSc","HRc","aHb","Zwb","lxb","hxb","J_b","Tod","c7c","PYd","gve","mue","gUd","Q4d","U4d","n5d","i5d","p5d","v5d","t5d","A5d","D5d","d6d","Y5d","tk","j6d","Aj","Nh","Kh","h6d","Ih","Fdb","dfb","Dxb","Cwb","Exb","Dwb","HBb","lfb","S_b","ecd","Zbd","_bd","$bd","zAd","Btd","ujb","Awb","czd","Pyd","fBb","rie","Avb","Bvb","bId","mk","Q2d","Rk","P2d","k3d","$j","H2d","M2d","Kj","zj","Ei","j3d","tn","Hv","Kcb","ke","Ldb","dIc","Nrb","$sb","Tkb","bCb","Pc","Xwb","Kwb","iyb","JCb","GCb","ICb","ntb","ltb","mtb","jzd","sGd","$Gd","ZGd","jmb","Mb","r0b","K_b","mKb","BZb","TNb","uOb","S3b","Z6b","n9b","x$b","LGb","iLb","B6b","C6b","Hgc","Fgc","Ggc","bPc","nYc","L1c","T1c","zCd","IVd","kWd","cWd","HXd","oXd","TXd","ZXd","JXd","LXd","NXd","VXd","_Xd","Nq","Wq","Nu","fr","Hx","Jq","Ru","YEc","WA","Yc","ed","pzb","qvb","fAb","$Bb","HVb","v_b","h5b","k5b","Lbb","zbb","Eje","Fje","iD","Rbb","DHc","EHc","DRb","vSb","aBb","C6c","Lsb","Nsb","Msb","rCb","mCb","dWb","Dnc","Cnc","_Gd","PNd","UNd","y1d","T0d","z1d","gFd","OEd","CEd","DEd","dFd","jFd","Sdd","Ydd","$6c","W6c","G6c","Dsb","Aq","Wm","Ed","vvb","Eeb","iYb","hYb","_Gb","YHb","ZHb","zfe","ige","X6d","W6d","ex","oeb","neb","d$c","e$c","B0b","D0b","Ojc","lHd","R7d","odb","ldb","Evd","Ti","e3c","Vte","Qpd","Spd","Yte","dmd","Tpd","kqd","emd","Kqd","Zj","rAb","pAb","oAb","Ie","IC","JC","Ucb","Tcb","Ceb","Beb","Yeb","Xeb","ddb","cdb","yI","IVb","my","ir","vc","fqb","Vqb","Bsb","Rfb","$lb","Bzb","Aeb","Qyb","vic","uNb","kmb","Xi","q$c","sEc","bVc","hVc","Voc","Toc","qed","pFd","KYd","jgd","MOb","Nr","ejc","djc","$ic","ju","bt","Bfe","rge","SEd","Qpb","oCb","Dkb","wCb","qqb","nmb","Oz","Wyb","Ayb","xL","Kie","Nyb","vqb","wqb","Aub","Cub","ike","jke","MC","$Nb","ZNb","BLb","SVb","oEb","uEb","$Jc","Qgd","s9c","Mr","Lr","mHb","nHb","c3c","Dod","JHd","JMd","UTd","eXd","nWd","lXd","MD","OD","ID","u1d","X1d","Hh","h1d","lse","AAd","jGd","fGd","x6","W1d","Qh","Ty","Sr","Yge","Id","Xm","Jd","Rl","fe","Dc","Ec","Ffe","Hge","Ubb","qD","a4d","kD","Obb","lD","Pbb","mD","Dcb","Ecb","Ddb","Kdb","Ad","QAb","TAb","RAb","g1b","z0b","l1b","Rnc","Zsc","Xsc","ltc","xc","Snc","koc","poc","Iub","_vb","n6c","i6c","m6c","tcd","D6c","oOd","kh","oie","gub","ipb","fpb","nHc","lHc","Pzd","Xzd","jUd","nSd","oSd","DSd","ESd","x0d","Lj","Dm","Cfe","Dge","Efe","nfb","Qhb","irb","Rrb","Vgb","amc","xJc","fkb","bkb","gkb","ckb","cv","ylb","Clb","Lyb","zjb","vCb","Goc","K2c","ese","Nkb","A2c","C2c","i2d","mfb","bKd","Hj","d1d","nlb","Kkb","zpb","Plb","oke","avb","Olb","Jhe","ueb","HMb","IMb","iq","Lub","Rzb","uB","tB","vB","ffb","Iwb","Vsb","d3d","gl","hkc","NAb","kkc","jkc","lOc","mOc","it","gKc","aRb","tPc","DPc","A6d","aLd","TKd","oRd","ote","w2d","t3d","lud","pSd","Qk","h3d","v2d","nk","uId","Dtd","Wh","mdb","zdb","jdb","Vi","ci","Uc","qo","Ix","by","qj","Dfe","zge","qCb","HC","__elementTypeId$","FC","bv","Kfb","ovb","ze","dVb","H1b","pRb","HA","gA","lA","nA","mA","sA","v4c","hm","ux","xx","lmb","NFd","MFd","sZd","b5","a5","c5","d5","f5","h5","g5","i5","k5","l5","m5","n5","o5","p5","q5","t5","v5","u5","$J","O4","T4","U4","O9","E9","AI","DK","v8","u8","X4","V4","CZd","AZd","Pnd","yZd","qve","nve","$nd","WKd","dwe","ewe","fwe","gwe","hwe","rve","vZd","iwe","jwe","kwe","kse","lwe","sve","bte","cte","Cnd","uve","mwe","vve","wve","pve","ove","tve","xve","zZd","BZd","PFd
","xZd","bwe","wZd","YFd","QQd","DZd","pTb","NHc","MHc","THc","gCb","Bvd","dYd","f0d","mxd","ct","usb","$c","U6c","Hb","Q_b","LHd","LQd","Qzb","He","Qy","Szb","Uzb","Kpb","Ae","WHc","AYd","$1d","e2d","O6d","L6d","X0d","pdb","Qzd","Yzd","Ms","$o","jrb","umb","fod","vfb","tmb","ve","hpb","Vm","Um","zie","hob","Cc","eob","gob","ec","keb","wtb","kcb","OJb","MJb","QJb","vtb","xkb","Ynd","LPd","Yod","a2d","Gwe","g2d","a1d","b2d","h2d","b1d","Ah","Fg","Xx","Zeb","Zie","Mub","nqd","ctd","Xpd","Aqd","qBb","IBb","OAb","wBb","PAb","CBb","l$b","QIc","rKc","WKc","gPc","Blc","Ggd","pec","Jmc","Bkc","Rmc","Qmc","Omc","Mmc","Pmc","Wwc","_Ac","ZAc","Fqb","wjc","$lc","bmc","cmc","dmc","dle","knc","fnc","uOc","gnc","jnc","inc","uPc","jtc","dnc","ojc","cnc","lnc","enc","c_b","lyc","nyc","xyc","bne","Vje","vmc","smc","tmc","rmc","pmc","omc","Uf","wmc","ymc","xmc","Pid","QEd","KPb","NQd","RQd","JZb","$sc","O2b","P2b","Wod","pQb","prd","ord","JVd","ZVd","cXd","mWd","jXd","Ng","YBd","lBb","Sie","FD","p4b","Gkb","PPb","OPb","NPb","gpb","ODb","LDb","NDb","MDb","nd","POc","vPc","ooc","noc","FSd","U5d","QRc","Vod","Dx","Ex","rz","hashCode","Brb","Oq","mr","vmb","VDb","Ne","Gfb","fVb","pu","hOb","FYb","UZb","K6c","Ay","eod","bod","Gz","$Kd","wjd","Jsd","stackTraceLimit","Xz","Wy","gm","Eob","Fob","grb","hrb","O7d","Cxd","nxd","vyd","$Ec","OQ","kne","KVd","R5d","cIb","THb","kBc","nBc","lid","jh","Sg","hgd","$e","pNd","MKd","Dmd","eue","zUd","vud","qNd","bQc","$Pc","_Pc","FQc","DQc","jle","EQc","kle","YLc","XLc","WLc","aIb","$Cb","ahd","Gf","NGb","UGb","tlb","pCb","vlb","Ksb","Cx","QUb","zUb","Lwb","NJb","zid","sh","hvd","evd","dvd","cvd","g_c","VZc","b$c","setDate","no","Od","Fx","aC","rC","qC","xC","bC","mpd","fmd","Ind","Fod","Ve","Uud","t_b","A4b","BGc","SX","RHc","SHc","BHc","eMc","cMc","vle","dMc","STc","QTc","Yqe","RTc","hK","MHd","ZJd","WId","le","Drb","FLd","Tfb","bq","Wj","uu","vu","RZb","QZb","$_b","F0b","jDb","qXb","aOb","Ake","Bke","Ppb","qs","yG","RBc","qUb","pUb","S8b","J8b","YBc","nCc","_Ic","h8b","EJc","HLc","vMc","eUc","yRc","xRc","qSc","kSc","DOc","W3c","e7c","D1c","E1c","cJd","XMd","lKd","T2d","YId","XOd","eTd","yCc","Aw","UAb","VAb","nr","mie","Jib","xCb","Mkb","Rje","kkb","geb","A0b","l7c","o3d","b3d","wud","p3d","tMc","av","Z1d","S0d","d2d","l1d","U0d","_1d","f2d","q1d","r1d","i1d","e1d","j1d","s1d","m1d","Y0d","hEb","dD","jD","Vbb","rD","Mu","bjc","ajc","iSc","WRc","oRb","qz","dkd","Tdd","cIc","kxd","lxd","$fe","sSd","rSd","uSd","tSd","wSd","vSd","ySd","xSd","ASd","zSd","CSd","BSd","HSd","GSd","JSd","ISd","x$c","b2c","j2d","O1d","R0d","h9d","u3d","f3d","Q8d","I8d","$Tc","ZTc","YTc","Zb","Mc","Ofb","eLd","fLd","Ysb","Jgb","wwb","$i","k_b","Qkb","AQ","jne","l_b","kDb","Ke","iDb","pXb","Nwb","Owb","YXc","gVc","ZXc","o4c","p4c","i6d","s6d","tsb","Ij","y6d","xh","dl","z6d","H2c","h3c","tgc","Zw","Jc","Bh","hc","The","Vhe","Ib","Tb","Jb","eVb","Hke","xpb","$pb","dib","msb","eD","wD","vD","Hje","Ije","aD","hD","oD","tAb","Yzb","orb","Qu","Mie","Oy","m_b","aR","lne","SZb","a5b","e5b","ayc","mwc","F7c","A7c","E7c","G0b","$4b","Wxc","Jsb","Xsb","jxc","j0b","e0b","b5b","i5b","wxc","_4b","d5b","D7c","C7c","Z4b","c5b","f5b","Ypd","Zpd","$pd","_pd","aqd","Ez","sz","yz","Pz","Lz","Cz","Bz","Mz","Hz","Fz","C6d","Wg","Zh","jk","uEc","msc","ksc","gle","lsc","bDc","aDc","z_c","y_c","O_","q_c","p_c","N_","Z0c","Y0c","W_","hPb","gPb","GO","Smc","Tmc","rmb","Qq","ugc","Ko","Xjb","_jb","Wjb","rg","goc","DZc","Nj","Sj","yh","Uqd","ro","dtd","ri","$fb","qEd","wj","sr","nx","xd","E2d","q3d","xud","r3d","JJc","h0b","Nd","Rq
d","Bfd","kt","wu","Yj","zb","ite","kVd","Mj","Ok","TOc","UOc","SOc","NOc","$Oc","YOc","OOc","sOc","tOc","HOc","FOc","pPc","XOc","mic","lic","kic","setFullYear","ee","VKd","Yjb","AQb","lge","lSd","mSd","LVd","nYd","Irb","mRb","uFd","FFd","Ucc","Dtb","Itb","Gx","GAb","SAb","Ee","Hub","zub","nke","REb","QEb","MEb","aN","LEb","KEb","ZEb","YEb","UEb","bN","SEb","TEb","aSb","_Rb","XRb","gP","VRb","WRb","D5b","C5b","y5b","ZR","x5b","w5b","Cqc","Bqc","xqc","JW","vqc","wqc","erc","drc","_qc","MW","$qc","Zqc","eBc","dBc","fX","$Ac","GAc","FAc","BAc","cX","zAc","AAc","Npc","Mpc","Ipc","FW","Gpc","Hpc","rsc","qsc","SW","MOc","LOc","DZ","GOc","ZIc","YIc","UIc","mY","TIc","SIc","bMc","aMc","fZ","jMc","iMc","gZ","KQc","JQc","YZ","dUc","cUc","F$","XUc","WUc","PUc","NUc","OUc","XTc","WTc","E$","lWc","kWc","fWc","X$","dWc","eWc","i1c","h1c","a1c","X_","_0c","$0c","H_c","G_c","C_c","P_","B_c","A_c","zHc","Cfd","fRb","FAb","pv","ko","PYb","MYb","NYb","jYb","wyb","oyb","jyb","kyb","nyb","myb","C3c","zUc","Lzd","Uzd","vtd","yUd","RC","Tc","Iv","$b","tjb","Qgb","ihb","lhb","Rgb","Yac","kZb","tNb","hid","uHb","rHb","vHb","sHb","oxd","qSd","CWd","AWd","_6d","qi","bi","oud","Ou","Iu","km","smb","cfb","Fpb","uwb","zhb","Qbb","Ahb","Thb","krb","Erb","b1b","kRb","Gjc","rd","qd","BQc","SRc","g6c","f6c","Uac","gbc","fbc","PHc","XHc","QHc","Psc","yc","VHc","AHc","gtc","KAb","xHc","OHc","JJd","pvb","QYd","UDc","Mi","Ugb","o5b","lHb","kHb","gHb","pN","dHb","eHb","fHb","SHb","RHb","NHb","sN","LHb","KHb","MHb","JIb","IIb","EIb","zN","DIb","CIb","BIb","Kyb","Jyb","Fyb","Cyb","Dyb","Eyb","DTb","CTb","yTb","oP","wTb","vTb","xTb","WXb","VXb","RXb","hQ","QXb","PXb","OXb","Zpc","Ypc","Rpc","GW","Ppc","Opc","Qpc","gqc","fqc","bqc","HW","$pc","aqc","_pc","Lqc","Kqc","Gqc","KW","Eqc","Dqc","Fqc","Kkc","Jkc","Fkc","vV","Ekc","Dkc","Ckc","jsc","isc","esc","RW","csc","dsc","bsc","asc","_rc","Xrc","QW","Vrc","Wrc","Urc","Dzc","Czc","xzc","ZW","uzc","vzc","wzc","YAc","XAc","TAc","eX","QAc","RAc","SAc","yAc","xAc","tAc","bX","rAc","qAc","sAc","PAc","OAc","KAc","JAc","HAc","IAc","QBc","PBc","LBc","jX","JBc","KBc","IBc","yBc","xBc","tBc","hX","qBc","rBc","sBc","HBc","GBc","CBc","iX","BBc","zBc","ABc","wGc","vGc","rGc","PX","oGc","pGc","qGc","zVc","yVc","tVc","O$","sVc","qVc","rVc","vWc","uWc","pWc","Y$","mWc","nWc","oWc","EYc","DYc","zYc","q_","xYc","yYc","wYc","yXc","xXc","sXc","b_","qXc","rXc","pXc","b_c","a_c","Y$c","V$c","W$c","X$c","mbd","lbd","hbd","z1","fbd","ebd","gbd","vad","uad","qad","u1","nad","oad","Thd","Qhd","S2","FAd","fi","uhb","vhb","LC","KNb","ACb","xke","zke","zCb","exb","dL","VRc","Ut","zGc","S6c","gCc","lUb","n8b","mib","kib","kSd","XKd","Yh","$Hb","Lfe","Kfe","bfe","Nfe","vXb","uXb","tXb","sXb","KZc","MZc","Jjc","Pjc","Byb","rj","v1d","gid","oUc","b3c","vRc","pSc","lSc","dBb","vr","m3c","o3c","w9c","b9c","n3c","yHc","nud","pOd","Itd","KLd","Ek","Bwb","ulb","Llb","bid","Pj","nDb","ixb","jxb","kxb","zxb","yxb","iL","uWb","tWb","lWb","SP","hWb","kWb","iWb","jWb","ALb","zLb","vLb","PN","uLb","rLb","sLb","tLb","WMb","VMb","RMb","jO","OMb","NMb","PMb","QMb","WOb","VOb","ROb","CO","OOb","NOb","POb","QOb","GRc","FRc","uRc","wRc","mbc","lbc","VS","dbc","ebc","cbc","oXc","nXc","iXc","a_","hXc","fXc","gXc","eXc","Fjc","Ejc","mV","zjc","Pzc","Ozc","Izc","$W","Gzc","Hzc","ugd","tgd","pgd","k2","mgd","ngd","lgd","ogd","xbd","wbd","rbd","A1","qbd","nbd","obd","pbd","Fad","Ead","Aad","zad","xad","wad","yad","Ied","Hed","Ded","O1","Ced","zed","Bed","Aed","ydd","xdd","I1","rdd","qdd","pdd","x9d","w9d","z9d","y9d","ane","Zrb","Jbb","Ygc"
,"Tnc","zoc","Rwd","Ai","rFd","$d","dDb","ghd","t9c","MCd","Wxd","NCd","Oh","Rh","QSd","mle","sD","tD","Gje","uD","WDc","VDc","$Dc","TDc","rfb","Aid","bk","Pg","xjd","CD","yjd","iid","bLd","fid","vgc","omb","agd","Ac","INb","HNb","FNb","GNb","JNb","uud","gi","Ftd","Xc","Cj","Bve","LAb","__elementTypeCategory$","dub","fub","sgb","vgb","ghb","vge","EA","Scb","es","DCb","nCb","S9b","Ogb","Ggb","Dhb","Ehb","AWb","xWb","yWb","vWb","wWb","zWb","uqc","tqc","mqc","IW","iqc","hqc","kqc","lqc","jqc","bAc","aAc","Vzc","_W","Uzc","Rzc","Szc","Qzc","Tzc","bkc","akc","Xjc","uV","Tjc","Vjc","Wjc","Ujc","Sjc","Htc","Gtc","Ctc","TW","Btc","xtc","ytc","ztc","Atc","S_c","R_c","N_c","Q_","I_c","J_c","M_c","K_c","L_c","S5c","R5c","e1","p$c","o$c","k$c","y_","g$c","f$c","i$c","h$c","j$c","vUb","uUb","mUb","nUb","oUb","mad","lad","ead","cad","Ybd","Xbd","Tbd","C1","Qbd","Sbd","Obd","Pbd","Rbd","cdd","bdd","_1c","hoc","usc","ioc","Jub","Hie","dC","Emc","ple","sfb","toLocaleLowerCase","idb","VWb","mDb","Zsb","xac","Ssc","t9b","Zwc","iZb","Rc","Bid","YKd","lte","lo","vo","Eie","Fie","kcd","jcd","dcd","D1","acd","tHb","wed","aid","cKd","gZd","Qmd","Umd","li","Xcd","ZEc","q7c","lib","dcb","TLc","Qwd","OQd","ADb","My","Qie","Kv","uo","o0b","n0b","NQ","g0b","i0b","f0b","d0b","n8c","m8c","i8c","h8c","g8c","f8c","d8c","c8c","e8c","K7c","J7c","o1","z7c","B7c","Fpc","Epc","Apc","EW","wpc","ypc","vpc","xpc","zpc","upc","Drc","Crc","yrc","OW","wrc","trc","xrc","vrc","urc","rrc","qrc","prc","lrc","NW","irc","hrc","krc","grc","jrc","frc","tzc","szc","lzc","YW","jzc","hzc","fzc","gzc","kzc","izc","Yqc","Xqc","Sqc","LW","Nqc","Mqc","Pqc","Oqc","Rqc","Qqc","wSc","vSc","t$","nSc","mSc","oSc","ycd","xcd","E1","ocd","mcd","lcd","qcd","Rad","Qad","Mad","Kad","Iad","Lad","Gad","Jad","Had","qQd","AId","izd","xj","he","ftd","God","Hod","LLd","Bk","Xdd","ndb","rDb","Uz","Xie","_bb","tdb","RPb","s$c","wkb","hkb","ikb","ekb","pgb","fhb","f4c","l4c","m4c","C0","ole","Bne","Cne","ztd","JAd","LAd","yId","CId","BId","Bt","Nc","ieb","CUb","XZb","c6d","zte","qeb","peb","uCc","Z7b","w8b","v8b","UCc","qDc","xFc","z8b","p8b","y8b","Tne","RLc","Lkd","kmd","lmd","zpd","UUd","VUd","Apd","pQd","PHd","jKd","Rdd","g3d","_o","Fv","Kp","T6c","_Mb","DUd","jmd","$jb","eub","bub","M5b","Odd","Vxc","O5b","N5b","L5b","R_b","K5b","U_b","Fsc","P5b","Qdd","GOb","jfc","Swc","XGc","ku","UHc","nMc","$B","pRd","Jk","r$c","HSb","FSb","Ime","GSb","Jme","ESb","Kme","DSb","Lme","H9b","$Xc","Tqe","Sqe","cme","$Fc","oHc","ZFc","yb","ktb","Bx","Bnc","gKd","Poc","Soc","p7c","Zzb","$zb","uAb","zZb","AZb","aod","MJd","tJb","IJb","KKd","Mve","LKd","Nve","w0b","u0b","v0b","t0b","x0b","y0b","nfd","mfd","sfd","Udd","Wdd","Pdd","kUc","jUc","FTc","iUc","gUc","mTc","gTc","hUc","zic","CHc","tic","xic","Aud","D9","Fcb","wd","WZc","hKd","xk","KFc","cHc","xle","pAc","oAc","kAc","iAc","dAc","gAc","eAc","fAc","cAc","hAc","jAc","Flc","Elc","Alc","KV","tlc","wlc","slc","zlc","vlc","ulc","ylc","xlc","e6c","d6c","_5c","f1","$5c","T5c","X5c","Z5c","U5c","V5c","W5c","Y5c","Hsd","Gsd","Csd","O3","Bsd","ysd","zsd","xsd","Asd","vsd","usd","wsd","XNb","VNb","WNb","Xyb","hyb","dyb","uyb","eyb","T3c","h4c","jkd","F9c","M9b","_$b","stb","cId","HAd","IAd","Wb","crb","n3b","mxc","Bsc","o3b","m3b","l3b","tAd","sAd","aKb","Idd","Ddd","_Jb","phb","ohb","nhb","Hhb","xhb","dhb","Vb","Kje","ZC","fD","Yyb","Jg","ui","f_b","Si","pQc","zjd","Hkd","gNc","vQc","MRd","Ox","Ewb","xwb","xRb","eSb","uSb","qSb","Nbd","Mbd","Hbd","B1","zbd","ybd","Bbd","Gbd","Fbd","Ebd","Cbd","Dbd","Abd","wFb","vFb","rFb","dN","qFb","lFb",
"mFb","kFb","oFb","pFb","nFb","jFb","iFb","pTc","Zqe","qTc","nTc","$qe","oTc","TQc","PEc","QEc","urb","bZb","o7c","M_b","lBc","Gkd","Txd","ckd","Zqd","drd","lqd","KJd","Be","nD","bdb","__java$exception","lz","btd","gue","Zjb","vkb","yac","mEb","CDb","xyb","fyb","J2c","Etd","ki","Wi","p6c","A6c","Gne","Hne","V1c","URc","dVc","q1c","p1c","QLc","ZEd","pmb","etd","fkd","Cid","jEb","sbd","Zac","Trc","Src","Orc","PW","Frc","Hrc","Irc","Jrc","Krc","Lrc","Nrc","Erc","Grc","Mrc","Ndd","Mdd","J1","Bdd","Add","Edd","Fdd","Hdd","Gdd","Cdd","zdd","vUc","uC","_Ed","Jy","I4b","Uy","Tz","ce","xtd","Vh","xNb","TQb","mSb","VQb","_sd","Pld","lRb","UQb","Mic","Nic","nNb","YMb","aNb","rNd","hj","$C","EDc","Gwb","xVb","sVb","XDb","RDb","AUd","w6c","crd","grd","hrd","Dqd","qmb","WC","YC","QC","jRb","xld","Ykd","qic","nic","w7c","rVb","LCb","PH","j_c","Mre","S0c","ekd","yid","XRc","PZc","nib","Pr","bRc","Ife","dfe","Afe","ue","OYb","Esc","syb","qL","tke","Zmd","icb","Rmd","Qe","q4c","aub","Ss","BG","Gie","whb","nzd","$3","hve","JEd","NEd","oNb","Tt","Nhb","re","Hic","oic","pic","Gic","ai","b4d","c4d","ql","Xdb","jPb","Tle","kPb","iPb","Ule","lPb","Vle","mPb","Wle","Mcc","Ncc","Lcc","Kcc","Icc","Xad","Wad","f9c","Yad","C9c","Sad","s8c","Tad","Y8c","Vad","_8c","Uad","$8c","$Pd","nQd","Ly","Ebb","rwb","yke","j5c","Fkb","Hlb","eDc","HZb","w$c","_Zc","aRc","Rqe","pHb","Re","qHb","Se","Tgb","Bgb","Dgb","Sy","F$c","ss","LUb","JUb","KUb","VQc","Lcd","qtc","wZc","ZZc","$Zc","PVc","jid","And","IEd","PEd","Vk","Sc","mc","j7c","E_b","Ngb","Z1b","A9b","UDb","JDc","cVc","fVc","qKb","AJb","wf","bFd","Kic","Iic","jjc","Tmd","GQd","IQd","q6d","Xk","x6c","Jkd","Vn","Up","qr","Ck","KXb","gic","u4c","qnd","Eid","Jnd","sb","_ic","zId","o1d","vk","wk","kud","Fhb","Lhb","Ghb","c6b","_5b","i6b","Ekd","bkd","eid","jte","Vj","yZb","VOd","QHd","N7d","Gj","Og","Web","cTd","kue","Xh","$hd","lYb","jf","Osc","_Fc","bGc","Rf","loc","zGb","bLb","had","Vcd","Wcd","ej","fj","sud","Zi","vAd","KAd","xAd","ZId","dJd","eJd","Cve","fJd","Dve","k6d","l6d","ved","Efd","RUb","aJd","CUd","EUd","Tee","Uje","a_b","Lwc","owc","Mkd","ttb","CI","hDc","cD","gD","N_b","Gub","yub","lke","mke","O_b","n6d","o6d","aeb","Ev","ln","EZc","QZc","GZc","LFb","Ycc","yOc","t4c","Tj","xFb","GVb","Xkd","Ljc","JOb","t6c","o6c","Q9b","pyc","wyc","N9b","EHb","Htd","Q9c","XGb","MUb","JTb","Vme","qZb","ITb","Iwc","Cwc","aUb","itc","pf","LTb","Byc","zsc","aZb","Y$b","qme","utc","cZb","_Yb","ezc","h0","czc","I2c","Kf","Kgb","Lgb","Zcd","QUc","Dne","b1c","zg","Ug","zvd","Gxd","Di","pvd","lsd","cLb","_Kb","UKb","mKd","nm","wm","W_b","$kd","vyc","zqe","kyc","oyc","syc","zyc","myc","Y9b","U9b","tWd","uWd","vWd","wWd","xWd","yWd","zWd","BWd","FHb","oN","Sje","$Ed","Sxd","Lzc","Cxc","Dxc","pBc","qyc","Ze","wAd","B6d","Zg","Myd","mj","IA","Epb","Hbb","UC","p5b","r5b","t5b","s5b","q5b","m5b","Rwc","Tqc","rHc","yic","fdd","edd","hdd","T9c","gdd","E9c","ddd","r8c","tme","Ree","Ufe","OZc","jhb","Ofe","Zee","$ee","Qsc","gjc","LHc","Zic","cjc","Yic","JHc","fjc","bIc","ijc","hnc","mzd","wCc","vd","BDb","DDb","N9d","Qge","Q9d","R9d","ted","sed","sde","Kge","$l","Vpd","eh","Wje","fh","fvd","vg","uDc","Igb","Zcb","Ue","VOc","aAb","_zb","xM","ROc","QOc","Cjd","Bjd","vjd","EOb","bFb","_Eb","dFb","gNb","Ce","dNb","ELb","ENb","efb","_0d","FMc","gVb","Xgb","mIb","Kjc","Vbc","Wbc","q2c","J8c","OEc","TPb","tyb","ryb","yyb","pyb","xKb","zKb","vKb","AKb","wKb","b0d","xlb","dYb","HFb","umc","r2c","igd","Vqd","k4c","o5c","Ye","UEd","_lb","P9d","wte","fde","ide","Wgb","DN","nJb","mJb","oJb","eJb","dJb","fJb","iJb
","hJb","gJb","lJb","kJb","jJb","bJb","aJb","cJb","$Ib","ZIb","_Ib","XIb","WIb","YIb","vjc","cYb","h4b","Dsc","c4b","gkd","nj","Roc","XEd","O9c","oQc","mse","ise","Y3","jse","L9d","ede","bde","Zyd","BPb","K9c","vPb","G8c","qPb","wPb","zPb","yPb","APb","xPb","$Eb","sPb","rPb","tPb","uPb","NWb","ued","JKb","DKb","a0b","P_b","kid","Qg","zld","Zje","tud","PJb","yle","zle","Ale","eVc","zD","yD","xD","u6d","mh","x6d","v6d","kCb","d4d","HLd","rud","Gk","mi","mYb","Nac","zHb","Bub","KCb","HCb","vZb","Qwc","nZb","Ksc","btc","pZb","Nsc","uZb","tXc","jre","FZc","JFb","IFb","AFb","GFb","uAd","jve","uj","BAd","m3d","wo","GF","b0b","Scc","Rcc","Pcc","Aqe","XDc","Y_b","HVc","BVc","FMd","QTd","UPc","Oqe","WPc","XPc","I0c","z0c","G0c","H0c","x0c","E0c","n0c","F0c","o0c","D0c","y8c","uld","JQd","yde","jIb","kIb","IZc","RZc","P1c","N1c","hNb","Gle","iNb","ZMb","jNb","$Mb","XZc","r6c","k6c","jud","Fwb","Dle","Ele","Fle","Nyd","rJc","otc","eyc","hJc","iJc","oJc","yJc","mJc","sY","Iqe","eme","sJc","nJc","Bxc","Ayc","lJc","pJc","kJc","qJc","ZOb","YOb","Vne","Wqd","rTb","TMc","qmc","Uoc","Qoc","s3d","Yxd","H9d","_I","PDb","Me","QDb","sRb","cRb","a$c","AHb","lNb","mNb","xxc","ikc","Kmc","Nmc","dKd","Fve","eKd","EXb","GXb","d_b","pHc","qHc","hQc","aKd","C0b","Ogc","uoc","voc","u$c","M6d","bD","UId","AGb","MSb","Mme","ISb","Nme","JSb","Ome","KSb","LSb","Pme","NSb","Qme","ric","HHc","KHc","gJc","UWc","XWc","NWc","OWc","PWc","MWc","QWc","e9c","TWc","x9c","KWc","wWc","LWc","xWc","RWc","zWc","SWc","BWc","VWc","CWc","WWc","DWc","YWc","FWc","ni","Z_b","p7b","rMc","yxc","bgd","yf","Vt","_tb","VSb","O8c","B8c","YSb","ZSb","$Sb","XSb","_Sb","aTb","USb","SSb","TSb","WSb","RSb","H6b","bd","Xec","ttc","Lhd","Eod","Nhd","lpd","Phd","mQd","VFd","KHd","OFd","Snd","cGd","ird","p6d","v$c","ere","Lld","Khd","Uod","Ohd","PSd","aGd","qUd","dGd","yHb","BHb","O9b","K9b","L9b","V9b","R9b","LZb","JNc","Yke","eFb","Zke","cFb","$ke","aFb","_ke","cfc","AZc","po","Ooc","Vsc","Wsc","l6c","WTb","m8b","q8b","x8b","a8b","YTb","S7b","b8b","XTb","d8b","E$c","t$c","j0d","l0d","oh","rEc","GHc","MVc","qWc","hg","vld","Fkd","wld","tj","t1d","OKd","DJb","MKb","q6c","h6c","dre","y6c","pud","woc","oEc","nEc","WOc","Rpd","Ute","T_b","ixc","Ikd","Zkd","Ui","Eyd","UZc","bEb","BVb","znc","yzc","NZc","HZc","$sd","XId","w6d","t6d","llc","Vwc","nlc","olc","mlc","KXc","LXc","gLd","SQd","E9d","I9d","KDc","Kd","LNb","ONb","WZb","eLb","H3b","C3b","sic","SEc","Rsc","CX","FX","tKc","rRc","F2c","vTc","sRc","HXc","Qlb","Wlb","Md","F6b","WNc","Xac","Fne","Wac","ctc","_ac","qhb","Jje","oQb","wub","xub","S1b","fxc","Qld","gxc","kjc","ujc","qjc","E3c","hsd","sg","rpc","qf","Qj","iud","oA","H1c","F1c","IVc","JVc","AVc","cub","bYb","P9b","Tsc","Dpb","Bpb","Cpb","fje","uke","hCb","wg","SUb","IWb","pWb","mWb","RVc","CVc","c0c","__c","v0c","$_c","t0c","Y_c","p0c","Z_c","r0c","X_c","W_c","a0c","b0c","T_c","k0c","U_c","m0c","V_c","U7c","T7c","W7c","V7c","D9c","O7c","I8c","Q7c","S7c","L7c","pse","P7c","R7c","M7c","t8c","N7c","w8c","iue","ktd","mtd","Dvd","lue","nue","Oi","m6d","Ymd","gh","MQd","HQd","ufb","dkc","ckc","ekc","fkc","oFc","oY","nY","KIc","IHc","jZb","XQc","Qqe","$Tb","dtc","qRc","sZc","yZc","s6c","qec","rec","Gcb","jfd","Vud","og","fld","Rlb","LRb","mme","MRb","KRb","nme","PRb","ome","QRb","pme","NRb","rme","RRb","ORb","NUb","vdc","mkc","lkc","rkc","tkc","skc","qkc","ukc","vkc","Yi","wkc","Znc","H0b","j3b","k3b","uic","VGc","UGc","WGc","TGc","MSc","kTc","lTc","JZc","LZc","ofd","Ax","Q6c","esd","fsd","gsd","zMc","ljc","Zjc","UQc","BGb","Iod","Z2d","Xxd","ffe","qde","ae","
Z9b","fac","ktc","Wec","Vec","iCc","p5c","pjc","QVc","mte","nte","ORd","QRd","I2d","swe","K6d","N6d","sKc","cRc","eqd","fqd","dqd","rEb","Cxb","Bxb","sEb","FJb","KJb","JJb","KIb","OKb","SKb","RKb","cOb","JM","rQb","TBc","pCc","jOc","eg","fg","rfd","PKd","SKd","ESc","jTc","FSc","kFd","HEd","BEd","zEd","AEd","GEd","mve","eFd","fFd","hFd","VEd","iFd","aFd","lFd","NFc","YQc","a0d","wi","ti","Gac","Hac","GDc","rUc","qUc","Ffd","PRd","Ilb","Jlb","nEb","PM","tEb","qh","LEd","Wk","tfb","YUc","valueOf","RJb","gIb","fIb","hIb","iIb","SJb","Gqd","Hqd","Iqd","Jqd","TMd","Slb","Tlb","Ulb","Xlb","xGb","YNb","wOb","tpc","spc","MJc","MZb","uRb","d4b","qPc","X_b","Gsc","Spc","tDc","_Uc","Tqd","$od","brd","Fqd","Vlb","DGb","EKb","Idb","Bw","TZb","KZb","gne","zRc","LJd","tde","Fue","Gue","m5c","fs","ll","rA","LNc","$Pb","Lc","UPb","SPb","D$c","Jqe","z6c","xOb","hl","Pgb","Cgb","Sgb","Fgb","bhb","Jhb","CVb","u0d","KTb","gyc","KQ","cne","DYb","Hxc","twc","EYb","MTb","lf","CYb","Fxc","Ixc","Gxc","zwc","OTb","NTb","qYb","oQd","NRd","pi","Ri","vz","tz","wz","uz","cEb","m8d","k8d","h8d","i8d","j8d","l8d","If","Bf","IKb","Q1b","$xc","lfd","M1b","swd","bj","Bhb","Chb","Egb","K4b","DMc","qmd","EId","vyb","BOb","COb","MPb","DOb","eNb","$Ob","AOb","Y5b","Qxd","jj","ILd","dj","pkc","zCc","Rxd","rwd","l5c","ug","gvd","Rg","nTb","hbc","Wne","vNd","Nid","bcb","ecb","GGb","O3b","Ysc","a9b","Rs","XOb","Dac","zac","Xoc","Dyc","CTc","BTc","HTc","uTc","ETc","GTc","H9c","xTc","yTc","zTc","wTc","ATc","DTc","ITc","RBd","KYc","Bre","LYc","IYc","Cre","FYc","Dre","GYc","Ere","JYc","Fre","MYc","Gre","HYc","Hre","NYc","Ire","yld","At","Wzc","nqc","lre","mre","nre","yWc","ore","EWc","qre","GWc","rre","AWc","XXb","T6b","rPc","IAb","dme","Ju","kr","Aac","uKc","bfc","tQb","sGb","qTb","mTb","LSc","xSc","ySc","q$","fme","frd","cue","due","Eqd","Pqd","Qqd","aNc","NLc","OLc","PLc","SLc","f1d","g1d","Yfe","UUb","PFc","ync","TDb","TM","Jke","Kke","SDb","Le","ard","$qd","erd","mqd","xqd","YCc","XCc","YGc","FKb","Woc","RYc","VYc","gZc","fZc","jZc","SYc","_Yc","aZc","bZc","$Yc","cZc","hZc","eZc","ZYc","dZc","iZc","WYc","XYc","YYc","P8c","UYc","TYc","kZc","Ab","kNb","LPb","JPb","IQb","e4b","mzc","AQc","r9b","zbc","HNc","aWc","_Vc","aYb","WQb","jSb","pSb","XQb","YQb","fSb","LQb","tRb","SQb","KQb","yoc","PZb","Asc","Noc","xoc","Jyc","roc","soc","toc","KQd","_Fd","eRc","WSc","ml","AXc","gWc","jXc","zXc","ig","BXc","npd","UKd","mo","DWb","Y2b","vsc","mmd","ste","opd","Sld","VId","Ej","IJc","vQb","uQb","xQb","jcc","ftc","XNc","YNc","VNc","_od","u6c","x1d","wXb","LYb","YZc","Y7b","V7b","G8b","r8b","M8b","E8b","i8b","A8b","F8b","e8b","O8b","P7b","I8b","o8b","H8b","Q8b","C8b","Q7b","D8b","R8b","K8b","P8b","N8b","T7b","_7b","t8b","u8b","k8b","f8b","W7b","U7b","l8b","g8b","L8b","R7b","B8b","j8b","s8b","c8b","$7b","X7b","UWb","qWb","nWb","NHd","Ihb","yhb","thb","aGc","groupCollapsed","fCb","Yie","jCb","groupEnd","dGc","cGc","Jsc","mtc","Lmc","a6b","Txc","bcc","fN","jN","oPc","Rac","jQc","See","F5b","H5b","G5b","b_b","j_b","E5b","eec","QFc","N8d","v8d","t8d","u8d","s8d","Z6d","Tbc","Ubc","TFc","Dub","fFc","DNc","zNc","CNc","ENc","ANc","BNc","cQc","gQc","dQc","fQc","eQc","GNc","Qac","rMb","Igc","aQc","Pq","vGb","yGb","rGb","$doc","documentMode","Zd","Xd","suppressed","Yd","mhb","p2d","R1b","RFc","rQc","$wd","Swd","Twd","Uwd","Vwd","Wwd","Ywd","Xwd","Zwd","hxd","_wd","axd","bxd","cxd","dxd","fxd","exd","gxd","Usc","ybc","wbc","wHb","AYb","Xqe","bTc","bJc","atc","sQb","itd","wQb","V6b","Eac","Fac","o2c","L1b","PGb","o9c","p9c","n9c","q9c","l9c","OCc","vre","wre","x
re","B9b","hTc","_Sc","iTc","aTc","D4b","jdd","Rse","ame","wme","yme","_le","QKd","uNd","tNd","eee","uue","Yue","G6b","rAd","e6b","g6b","h6b","htc","k6b","sPc","j6b","f6b","d6b","b6b","jnd","AUc","yUc","BUc","sbc","_D","hjc","hKb","Vgc","hic","Ngc","Xgc","Ugc","oSb","tSb","M9c","bSb","sSb","lSb","kSb","dSb","cSb","iSb","hSb","gSb","nSb","rSb","Qre","Vre","y0c","Rre","w0c","Sre","u0c","Tre","Ure","s0c","Wre","l0c","Xre","Yre","Zre","q0c","Lyc","Tf","p2c","HJb","LJb","QKb","TKb","gJd","x2d","l1","j1","DJ","xK","jK","uK","kL","Bj","YDb","yVb","cNb","qNb","rNb","fTc","cTc","kHc","g_b","N1b","OFc","ywc","uwc","vwc","SGb","gac","dac","eac","Owc","Anc","doc","Gyc","Vf","Iyc","boc","Hyc","coc","Wf","Myc","aoc","oqd","pqd","Zte","$te","rqd","qqd","_te","aue","_qd","CGb","ixd","$ad","Qse","uqe","Fme","Tme","bqe","E4b","ht","lastIndex","nOd","k1d","n1d","B2d","kme","af","cf","K0c","ase","Zpe","q2d","uVb","LP","tVb","ef","ff","JVb","Ijc","hfd","G9c","Mq","Rq","Bg","r9c","ULc","X9b","jg","kg","uGb","FGb","wGb","tGb","EGb","qGb","JQb","_Bc","cyc","LEc","MEc","NEc","JRc","Jh","vh","x6b","YDc","bEc","ZDc","nFc","vDc","dyc","oNc","qNc","rNc","nNc","sNc","pNc","lNc","mNc","fee","kee","nxe","oxe","pxe","qxe","rxe","mxe","wac","tA","Hjc","uh","Zk","Yk","YEd","YFc","axc","UFc","Lf","mHc","eGc","khb","ZJc","yyc","uZc","gRc","dRc","n$","Sqd","wqd","yqd","zqd","sqd","tqd","uqd","vqd","i_b","Msc","etc","X$b","_bc","ysc","BPc","fNb","Y7c","qse","vme","Jre","Eme","Ame","rse","lqe","W$b","VGb","C8c","mgb","Xje","Khb","Wcc","ine","tfd","Neb","Jeb","Keb","Leb","Meb","Tse","qfd","_Mc","qwd","wwd","rRb","iRb","Tge","w5","Axe","nh","W6b","U6b","S6b","WFc","XFc","X2d","Uxc","P1b","iKb","LIb","OGb","Kxc","_Nb","lQc","nQc","mQc","kQc","wQc","zQc","fKd","uk","Sse","_fe","Vfe","lxe","Pxd","gSc","fSc","$Sc","ZSc","XSc","VPc","Te","KMc","LMc","MMc","HQb","$O","e3d","ifd","UVc","eDb","GM","nvd","_Ec","odc","age","YPc","TPc","Bac","Zxd","lj","getMinutes","getSeconds","getMilliseconds","opc","npc","MFc","Fub","RKd","bh","ji","E6b","Oac","Mac","Lsc","Pac","U1b","Yxc","ELd","O1b","Y1b","Mxc","hyc","Jwc","Awc","xwc","Jxc","o8c","sne","$1b","X1b","xsc","V1b","T1b","oTb","xMc","$8b","Z8b","dDc","lde","kde","DPb","Yle","Zle","$le","Zbc","Pmd","c6","v6c","kMc","vZc","kYc","tYc","sYc","J2d","K2d","Xfe","TQd","$$b","uxc","exc","TUb","gf","e0c","Pre","$Mc","$Ic","Of","Pf","Qf","Nf","J4b","Xwc","swc","H4b","rtc","stc","B4b","ptc","bKc","oZb","vxc","Z$b","hZb","mZb","lZb","Mh","uEd","vEd","wEd","sEd","tEd","xEd","yEd","EEd","FEd","lve","xUc","Dbc","Cbc","sTb","UCb","Je","bm","Vvb","em","am","rhb","shb","egb","rgc","Pgc","pwc","Wgc","aEb","rwc","ZDb","Kgc","Lgc","Mgc","mYc","z_","o_","mg","lYc","ZQc","o7b","txc","Dk","zk","WGb","vwd","Gi","eed","fed","bed","aed","ced","Afd","h_b","_z","de","VC","XC","rid","kte","qid","Vcb","dTc","YSc","eTc","VSc","MNc","F8d","H8d","K8d","L8d","M8d","P8d","O8d","w8d","x8d","y8d","z8d","A8d","B8d","C8d","D8d","E8d","G8d","J8d","kKd","Lve","n5c","Jf","jxd","gj","Cyc","EJb","NKb","BUb","oqb","AUb","hf","oBc","wDc","sDc","OUb","Wfe","PUb","AVb","vVb","nc","yUb","sZb","W9b","J9b","T9b","AFc","_sc","Bbc","Abc","rxc","qxc","Ebc","wFc","qY","Ewc","AGc","gg","wPc","fyc","Axc","sxc","Exc","kNc","$wc","hxc","_wc","cxc","Hwc","Dwc","Bwc","cUb","Kyc","_Tb","y$c","KTc","are","bre","zme","Gme","Dme","Hme","Gcd","Bcd","Ocd","Qcd","Hcd","Ccd","Pcd","Ncd","Ecd","Dcd","Mcd","Kcd","Jcd","Vmd","tte","ute","bl","nse","cTb","Rme","Sme","ume","Cme","Zfe","Mwb","pEc","wlb","zQb","zfd","z8c","A9c","Elb","YJc","eYb","_Xb","wsc","_1b","F4b"
,"G4b","j6c","yfd","Ag","y2b","C2b","CZc","xZc","tZc","BZc","zZc","lDb","Lke","Mke","cVb","Isc","IDc","CDc","DDc","ADc","HDc","BDc","FDc","rZb","tZb","ssc","oMc","UMc","pMc","qMc","Cf","vf","rKb","DHb","eKb","GJb","PKb","BJb","pKb","GKb","HKb","CJb","oKb","cKb","KKb","LKb","oc","Ff","dKb","uf","$2b","iyc","kfd","E0b","W1b","V$b","e_b","$Wc","sre","Cy","Df","M8c","ucd","z9c","Jbd","U8c","P9c","X9c","R9c","S9c","V9c","Jcb","yKb","Bmd","dgb","fgb","ggb","hgb","igb","jgb","z2b","w_b","z_b","x_b","y_b","A2b","B2b","xSb","xme","Bme","Fcd","Icd","LOG10E","Gwc","nxc","pje","qje","bje","cje","dje","eje","gje","hje","ije","jje","kje","lje","mje","BA","rje","sje","tje","uje","vje","wje","xje","AA","DA","yA","CA","iQc","Yfd","ded","$dd","Ioc","KJc","Af","Ef","bKb","lbb","_ee","Yee","wxe","afe","uxe","vxe","xxe","c3b","vtc","d3b","gDc","oxc","pxc","fDc","zd","Ctd","Vxd","fYb","$Xb","ZXb","BWb","JWb","SMc","u2c","d9c","N8c","s2c","v2c","_6c","_dd","e7b","DXb","FXb","WJc","xY","wY","VJc","HJc","UJc","TJc","GJc","lMc","Shd","Mhd","Rhd","Jhd","Ihd","Ml","Ol","Tl","Ul","yl","Dl","Kl","El","zl","Jl","Hl","Vl","Wl","Nl","Sl","Pl","efe","wl","tl","Fl","Al","Il","Bl","Gl","Cl","Iue","Hue","pde","Xl","Yl","Zue","bve","_ue","ave","$ue","dm","cm","$bc","NCb","mZc","Kre","xqe","Wmd","_Pd","vte","YXb","Kgd","I9c","jyc","J9c","L9c","N9c","U9c","Bvc","Cvc","Dvc","Evc","byc","yvc","zvc","Avc","awc","Hvc","Kvc","Svc","Eyc","Nvc","Fyc","Pvc","fwc","cwc","Yvc","Wvc","$vc","pvc","qvc","Auc","Ywc","Duc","Lxc","E8c","Twc","Kwc","p8c","Nwc","Xxc","Zxc","Oxc","Qxc","Rxc","Sxc","Pxc","Gvc","avc","$uc","Fvc","Suc","muc","kuc","Xtc","Ytc","buc","Ztc","iuc","cvc","dvc","Luc","uvc","zxc","hvc","Guc","nvc","wuc","yuc","evc","Mtc","Ktc","qwc","Jtc","Juc","bxc","Iuc","dxc","Kuc","Pwc","euc","v9c","v8c","L8c","h9c","Nxc","j9c","Vuc","Xuc","nwc","Itc","Zuc","suc","quc","S8c","kxc","Puc","kvc","Mwc","ouc","_xc","wvc","Uwc","uuc","lxc","Ruc","Fwc","guc","Uuc","huc","Vtc","wwc","Stc","Qtc","Rtc","Utc","Otc","Huc","z$c","vue","wue","xue","BCc","CCc","gee","Mue","Nue","Oue","Pue","Kue","Sue","HVd","BMc","AMc","te","yMc","EMc","IMc","kA","jA","iA","OA","NA","QA","sse","tse","$pe","q8c","x8c","cqe","A8c","ype","F8c","tpe","K8c","g9c","u9c","R8c","mqe","pqe","qqe","vse","T8c","vqe","Xpe","wse","xse","yse","zse","Ase","Bse","Cse","Dse","Ese","Fse","u8c","Lpe","Mpe","Npe","Ope","Ppe","Spe","Qpe","Rpe","Tpe","Upe","Vpe","W9c","wqe","tqe","sqe","i9c","V8c","dqe","X8c","gqe","m9c","hqe","iqe","jqe","kqe","Z8c","c9c","a9c","nqe","D8c","Gse","Hse","rqe","oqe","Ype","B9c","Wpe","y9c","eqe","fqe","_pe","aqe","Ise","H8c","k9c","ntc","Xne","Yne","puc","Zne","$ne","Muc","_ne","aoe","boe","coe","xvc","doe","eoe","foe","goe","nuc","hoe","bvc","ioe","Tuc","joe","koe","loe","moe","noe","_uc","ooe","poe","qoe","luc","roe","soe","toe","uoe","voe","woe","xoe","cuc","yoe","$tc","zoe","vvc","Aoe","Boe","fvc","Coe","ivc","Doe","Eoe","Foe","rvc","Goe","xuc","Hoe","zuc","Ioe","Buc","Joe","Koe","Loe","Moe","Noe","Ooe","Poe","Qoe","Roe","Soe","Toe","Ntc","Uoe","Ltc","Voe","Woe","Xoe","Yoe","bwc","Zoe","$oe","_oe","Tvc","ape","Qvc","bpe","cpe","gwc","dpe","epe","fpe","gpe","hpe","tuc","ipe","ruc","jpe","Wtc","kpe","lpe","Ptc","mpe","Ttc","npe","ope","Ouc","Nuc","Wuc","Yuc","Quc","fuc","juc","duc","_tc","auc","lvc","mvc","gvc","jvc","ovc","tvc","svc","Cuc","Euc","Fuc","vuc","Ivc","Jvc","Lvc","Mvc","Uvc","Vvc","Ovc","Rvc","hwc","iwc","dwc","ewc","Zvc","Xvc","_vc","hfe","ife","ofe","pfe","qfe","tfe","mfe","rfe","ufe","cfe","sfe","nfe","jfe","lfe","kfe","LK","QFd","SFd","RFd"
,"TFd","UFd","WFd","ZFd","$Fd","bGd","eGd","gGd","hGd","XFd","iGd","Oyc","cee","dee","sxe","txe","yue","zue","Aue","Bue","Cue","Due","Eue","Jue","Lue","Que","Rue","Tue","Uue","Vue","Wue","Xue","global","Phe","Qhe","Rhe","Whe","Yhe","Zhe","$he","_he","aie","bie","cie","die","eie","fie","gie","hie","iie","jie","kie","lie","qie","sie","tie","uie","vie","wie","xie","yie","Aie","Bie","Cie","Die","Lie","Nie","Oie","Pie","Tie","Uie","Wie","yje","zje","Aje","Bje","Cje","Dje","Lje","Mje","Nje","_je","ake","bke","cke","dke","eke","fke","gke","pke","qke","rke","ske","vke","wke","Cke","Dke","Eke","Fke","Gke","Ike","Nke","Oke","Pke","Qke","Rke","Ske","Tke","Uke","Vke","Wke","Xke","ale","ble","cle","fle","hle","nle","qle","rle","sle","ule","wle","Ble","Cle","Hle","Jle","Kle","Lle","Mle","Nle","Ole","Ple","Qle","Rle","Sle","Xle","bme","gme","hme","ime","jme","lme","sme","Ume","Wme","Xme","Yme","Zme","$me","_me","dne","ene","fne","hne","mne","nne","pne","qne","rne","tne","une","vne","wne","xne","yne","zne","Ane","Ene","Ine","Jne","Kne","Lne","Mne","Nne","One","Pne","Qne","Rne","Sne","ppe","qpe","rpe","spe","upe","vpe","wpe","xpe","zpe","Ape","Bpe","Cpe","Dpe","Epe","Fpe","Gpe","Hpe","Ipe","Jpe","Kpe","yqe","Bqe","Cqe","Dqe","Eqe","Fqe","Gqe","Hqe","Kqe","Lqe","Mqe","Nqe","Pqe","Uqe","Vqe","Wqe","_qe","cre","fre","gre","hre","ire","kre","tre","ure","yre","zre","Are","Lre","Nre","Ore","$re","_re","bse","cse","dse","fse","gse","hse","ose","Jse","Kse","Lse","Mse","Nse","Ose","Pse","Use","Vse","Wse","gte","hte","pte","qte","rte","xte","Tte","Wte","jue","oue","pue","que","rue","cve","dve","eve","fve","ive","kve","yve","zve","Ave","Ove","Pve","Tve","Uve","Vve","Wve","Xve","Yve","Zve","$ve","awe","nwe","owe","pwe","qwe","rwe","Iwe","Mwe","Nwe","kxe","yxe","zxe","Bxe","Cxe","Dxe","Exe","Fxe","Gxe","Hxe","goog","ac","kc","lc","fc","rc","sc","wc","tc","fd","$w","ax","gd","hd","jd","kd","zy","Qi","ld","md","od","xe","ud","tr","Bd","Cd","Gd","Ku","Lu","Dv","Ym","Jv","pd","so","hp","Pd","jr","Zq","Qd","Gs","Rd","Qs","Ud","lt","Tw","Uw","uy","XAb","nz","message","mz","kz","callee","caller","Wz","XA","ge","je","_B","Lcb","Wcb","Mdb","$db","seb","Peb","dkb","Function","jpb","opb","we","uvb","zvb","Pxb","Qxb","vwb","Oyb","Pyb","Ryb","Syb","Tyb","Vyb","Uyb","eAb","gAb","dAb","qAb","eBb","pBb","vBb","uBb","BBb","Ge","ABb","GBb","$Ab","_Ab","Xzb","VCb","Oe","Pe","WDb","wEb","qEb","pIb","qIb","sf","MIb","zJb","sKb","WKb","gLb","hLb","aLb","FLb","QLb","XLb","aMb","fMb","kMb","sMb","vMb","yMb","BMb","EMb","MNb","NNb","jOb","eOb","yQb","qQb","WPb","XPb","YPb","ZPb","Ig","BQb","EQb","NQb","yRb","CRb","JRb","ERb","uTb","QTb","RTb","WUb","XUb","QVb","UVb","PVb","TP","MWb","PWb","EWb","FWb","GWb","HWb","nf","of","GYb","UYb","DZb","O$b","o_b","Old","b2b","Nld","c2b","d2b","E2b","F2b","Q2b","R2b","Z2b","b3b","g3b","r3b","u3b","s3b","I3b","f4b","k4b","g4b","C4b","l5b","S5b","X5b","n6b","s6b","y6b","I6b","f7b","i7b","TGb","l7b","zS","q7b","s7b","t7b","u7b","v7b","_8b","g9b","s9b","C9b","_9b","aac","bac","tac","Cac","$ac","nbc","qbc","ubc","Gbc","Hbc","Sbc","acc","dcc","kcc","mcc","scc","wcc","vcc","xcc","Qcc","Vcc","ndc","rdc","udc","Kdc","Ldc","Mdc","gec","fec","Eec","Gec","Hec","Tec","dfc","efc","ffc","gfc","hfc","ifc","fBc","iEb","wgc","Zgc","$gc","_gc","ahc","bhc","Qgc","Rgc","chc","Jgc","Sgc","dhc","Tgc","cic","bic","aic","fic","Bic","Jic","Lic","ykc","zkc","Akc","nkc","okc","plc","qlc","rlc","emc","fmc","Dmc","Vmc","nnc","onc","vnc","Hoc","Yoc","Xf","C5c","UBc","Zf","$Bc","jCc","qCc","xCc","ACc","PCc","WCc","lDc","ODc","_Dc","qEc","
mFc","zFc","BFc","FFc","VFc","eHc","YHc","ZHc","$Hc","_Hc","eIc","fIc","MIc","PIc","cJc","wJc","dKc","LJc","NJc","eKc","fKc","PJc","QJc","cKc","XJc","NZb","JLc","NMc","uNc","vNc","INc","KNc","kOc","_Oc","qQc","uQc","sQc","tQc","WQc","iRc","pRc","oRc","fRc","nRc","PSc","SSc","HUc","aVc","iVc","nVc","EVc","GVc","SXc","jYc","cYc","dYc","A$c","B$c","C$c","c_c","j1c","ng","r1c","G1c","O1c","a2c","$1c","pg","d2c","M2c","G2c","Y2c","r3c","s3c","t3c","E0","S3c","k5c","jeb","r7c","h1","led","hed","ied","jed","Yyc","Zyc","$yc","azc","_yc","ufd","wfd","xfd","Sfd","cgd","Lgd","ehd","fhd","shd","xg","yg","zhd","Kg","ck","dk","fk","hk","kk","wh","Uj","Ch","Dh","Eh","Fh","yj","Lh","Und","J4","Uh","Th","Bqd","Cqd","Lqd","Mqd","Nqd","Oqd","isd","jsd","ksd","Isd","Vsd","ptd","$h","_h","Nk","sqb","_3","Li","Ni","xi","yi","zi","Bi","oj","pj","ozd","oAd","pAd","GAd","MAd","vBd","wBd","xBd","I4","vj","TEd","KEd","Did","OHd","_Hd","lk","aId","qk","dLd","SMd","GMd","HMd","IMd","UMd","Hk","FNd","NNd","ONd","SNd","TNd","twd","uwd","TOd","UOd","WOd","Ik","aTd","bTd","dTd","RTd","STd","TTd","BUd","SUd","WUd","TUd","Pk","sVd","tVd","nl","fl","kl","Sk","Tk","bYd","cYd","IYd","JYd","iZd","fZd","Xmd","jZd","kZd","lZd","__d","k0d","t0d","W0d","V0d","Z0d","$0d","c1d","o2d","r2d","N2d","O2d","W2d","_2d","c3d","i3d","l3d","CAd","Y6d","s2d","u2d","A2d","G2d","L2d","V2d","Y2d","$2d","n3d","Z8d","$8d","g9d","qgb","D9d","F9d","J9d","B9d","M9d","O9d","S9d","Ll","gfe","Jfe","fm","Wee","_ge","ahe","bhe","che","ohe","phe","qhe","rhe","Ihe","gwtOnLoad","Xbb","dispatch","svd","Z1c","rvd","t2c","Ghe","qvd","saveDispatch","dispatcher","ovd","ELKNode","_ELK","optionsClone","workerThreadsExist","_Worker","ReferenceError","_possibleConstructorReturn","__proto__","getPrototypeOf","subClass","superClass","setPrototypeOf","_inherits","defaultCmp","heapify","heappop","heappush","heappushpop","heapreplace","insort","nlargest","nsmallest","_siftdown","_siftup","cmp","item","lastelt","returnitem","_ref1","_results","_results1","elem","los","startpos","newitem","parentpos","childpos","endpos","rightpos","pushpop","heap","front","condition","format","argIndex","framesToPop","hashClear","hashDelete","hashGet","hashHas","hashSet","Hash","listCacheClear","listCacheDelete","listCacheGet","listCacheHas","listCacheSet","ListCache","getNative","mapCacheClear","mapCacheDelete","mapCacheGet","mapCacheHas","mapCacheSet","MapCache","iteratee","baseAssignValue","objValue","castPath","toKey","getRawTag","objectToString","symToStringTag","toStringTag","isFunction","isMasked","isObject","toSource","reIsHostCtor","funcProto","objectProto","funcToString","reIsNative","assignValue","isIndex","customizer","nested","newValue","arrayMap","isSymbol","symbolProto","symbolToString","baseToString","trimmedEndIndex","reTrimStart","isKey","stringToPath","coreJsData","freeGlobal","isKeyable","__data__","baseIsNative","nativeObjectToString","isOwn","tag","unmasked","nativeCreate","reIsUint","reIsDeepProp","reIsPlainProp","maskSrcKey","IE_PROTO","assocIndexOf","getMapData","freeSelf","memoizeCapped","rePropName","reEscapeChar","quote","subString","reWhitespace","toNumber","nativeMax","nativeMin","wait","lastArgs","lastThis","maxWait","timerId","lastCallTime","lastInvokeTime","leading","maxing","trailing","invokeFunc","shouldInvoke","timeSinceLastCall","timerExpired","trailingEdge","timeWaiting","remainingWait","debounced","isInvoking","leadingEdge","cancel","flush","baseGet","defaultValue","baseGetTag","isObjectLike","memoized","Cache","baseSet","baseTrim","reIsBadHex","reIsBina
ry","reIsOctal","freeParseInt","isBinary","copyArray","hookCallback","hooks","setHookCallback","input","hasOwnProp","isObjectEmpty","isUndefined","isNumber","isDate","arrLen","createUTC","locale","strict","createLocalOrUTC","defaultParsingFlags","unusedTokens","unusedInput","charsLeftOver","nullInput","invalidEra","invalidMonth","invalidFormat","userInvalidated","iso","parsedDateParts","era","meridiem","rfc2822","weekdayMismatch","getParsingFlags","_pf","isValid","_isValid","flags","parsedParts","isNowValid","invalidWeekday","_strict","bigHour","isFrozen","createInvalid","fun","momentProperties","updateInProgress","copyConfig","momentPropertiesLen","_isAMomentObject","_tzm","_isUTC","_offset","_locale","Moment","config","updateOffset","isMoment","suppressDeprecationWarnings","deprecate","firstTime","deprecationHandler","argLen","deprecations","deprecateSimple","_config","_dayOfMonthOrdinalParseLenient","_dayOfMonthOrdinalParse","_ordinalParse","mergeConfigs","parentConfig","childConfig","Locale","defaultCalendar","sameDay","nextDay","nextWeek","lastDay","lastWeek","sameElse","calendar","mom","_calendar","zeroFill","targetLength","forceSign","absNumber","zerosToFill","formattingTokens","localFormattingTokens","formatFunctions","formatTokenFunctions","addFormatToken","token","padded","ordinal","localeData","removeFormattingTokens","makeFormatFunction","formatMoment","expandFormat","invalidDate","replaceLongDateFormatTokens","longDateFormat","defaultLongDateFormat","LTS","LT","LL","LLL","LLLL","_longDateFormat","formatUpper","tok","defaultInvalidDate","_invalidDate","defaultOrdinal","defaultDayOfMonthOrdinalParse","_ordinal","defaultRelativeTime","future","past","mm","MM","yy","relativeTime","withoutSuffix","isFuture","_relativeTime","pastFuture","addUnitAlias","unit","shorthand","lowerCase","normalizeUnits","normalizeObjectUnits","inputObject","normalizedProp","normalizedInput","priorities","addUnitPriority","getPrioritizedUnits","unitsObj","isLeapYear","year","absFloor","toInt","argumentForCoercion","coercedNumber","makeGetSet","keepTime","set$1","month","date","daysInMonth","stringGet","stringSet","prioritized","prioritizedLen","match1","match2","match3","match4","match6","match1to2","match3to4","match5to6","match1to3","match1to4","match1to6","matchUnsigned","matchSigned","matchOffset","matchShortOffset","matchTimestamp","matchWord","addRegexToken","strictRegex","isStrict","getParseRegexForToken","unescapeFormat","regexEscape","matched","p4","addParseToken","tokenLen","addWeekParseToken","addTimeToArrayFromToken","YEAR","MONTH","DATE","HOUR","MINUTE","SECOND","MILLISECOND","WEEK","WEEKDAY","mod","modMonth","monthsShort","months","monthsShortRegex","monthsRegex","monthsParse","defaultLocaleMonths","defaultLocaleMonthsShort","MONTHS_IN_FORMAT","defaultMonthsShortRegex","defaultMonthsRegex","localeMonths","_months","isFormat","localeMonthsShort","_monthsShort","handleStrictParse","monthName","_monthsParse","_longMonthsParse","_shortMonthsParse","localeMonthsParse","_monthsParseExact","dayOfMonth","getSetMonth","getDaysInMonth","computeMonthsParse","_monthsShortStrictRegex","_monthsShortRegex","_monthsStrictRegex","_monthsRegex","cmpLenRev","shortPieces","longPieces","mixedPieces","daysInYear","parseTwoDigitYear","getSetYear","getIsLeapYear","createDate","ms","createUTCDate","UTC","getUTCFullYear","setUTCFullYear","firstWeekOffset","dow","doy","fwd","getUTCDay","dayOfYearFromWeeks","week","weekday","resYear","resDayOfYear","dayOfYear","weekOfYear","resWeek","weekOffset","weeksInYear","weekOffse
tNext","localeWeek","_week","defaultLocaleWeek","localeFirstDayOfWeek","localeFirstDayOfYear","getSetWeek","getSetISOWeek","parseWeekday","weekdaysParse","parseIsoWeekday","shiftWeekdays","ws","weekdaysMin","weekdaysShort","weekdays","weekdaysMinRegex","weekdaysShortRegex","weekdaysRegex","defaultLocaleWeekdays","defaultLocaleWeekdaysShort","defaultLocaleWeekdaysMin","defaultWeekdaysRegex","defaultWeekdaysShortRegex","defaultWeekdaysMinRegex","localeWeekdays","_weekdays","day","localeWeekdaysShort","_weekdaysShort","localeWeekdaysMin","_weekdaysMin","handleStrictParse$1","weekdayName","_weekdaysParse","_shortWeekdaysParse","_minWeekdaysParse","localeWeekdaysParse","_weekdaysParseExact","_fullWeekdaysParse","getSetDayOfWeek","getSetLocaleDayOfWeek","getSetISODayOfWeek","computeWeekdaysParse","_weekdaysStrictRegex","_weekdaysRegex","_weekdaysShortStrictRegex","_weekdaysShortRegex","_weekdaysMinStrictRegex","_weekdaysMinRegex","minp","shortp","longp","minPieces","hFormat","hours","kFormat","lowercase","minutes","matchMeridiem","_meridiemParse","localeIsPM","seconds","kInput","_isPm","isPM","_meridiem","pos1","pos2","defaultLocaleMeridiemParse","getSetHour","localeMeridiem","isLower","globalLocale","baseConfig","dayOfMonthOrdinalParse","meridiemParse","locales","localeFamilies","commonPrefix","arr1","minl","normalizeLocale","chooseLocale","loadLocale","isLocaleNameSane","oldLocale","_abbr","aliasedRequire","getSetGlobalLocale","values","getLocale","defineLocale","abbr","parentLocale","updateLocale","tmpLocale","listLocales","checkOverflow","_overflowDayOfYear","_overflowWeeks","_overflowWeekday","extendedIsoRegex","basicIsoRegex","tzRegex","isoDates","isoTimes","aspNetJsonRegex","obsOffsets","UT","GMT","EDT","EST","CDT","CST","MDT","MST","PDT","PST","configFromISO","allowTime","dateFormat","timeFormat","tzFormat","isoDatesLen","isoTimesLen","configFromStringAndFormat","extractFromRFC2822Strings","yearStr","monthStr","dayStr","hourStr","minuteStr","secondStr","untruncateYear","preprocessRFC2822","checkWeekday","weekdayStr","parsedInput","calculateOffset","obsOffset","militaryOffset","numOffset","configFromRFC2822","parsedArray","setUTCMinutes","getUTCMinutes","configFromString","createFromInputFallback","currentDateArray","nowValue","_useUTC","getUTCMonth","getUTCDate","configFromArray","currentDate","expectedWeekday","yearToUse","dayOfYearFromWeekInfo","_dayOfYear","_nextDay","weekYear","weekdayOverflow","curWeek","GG","createLocal","ISO_8601","RFC_2822","skipped","stringLength","totalParsedInputLength","meridiemFixWrap","erasConvertYear","hour","isPm","meridiemHour","configFromStringAndArray","tempConfig","bestMoment","scoreToBeat","currentScore","validFormatFound","bestFormatIsValid","configfLen","score","configFromObject","dayOrDate","minute","second","millisecond","createFromConfig","prepareConfig","preparse","configFromInput","isUTC","prototypeMin","prototypeMax","pickBy","moments","ordering","isDurationValid","unitHasDecimal","orderLen","isValid$1","createInvalid$1","createDuration","Duration","years","quarters","quarter","weeks","isoWeek","days","milliseconds","_milliseconds","_days","_bubble","isDuration","absRound","compareArrays","array1","array2","dontConvert","lengthDiff","diffs","utcOffset","offsetFromString","chunkOffset","matcher","parts","cloneWithOffset","model","local","getDateOffset","getSetOffset","keepLocalTime","keepMinutes","localAdjust","_changeInProgress","addSubtract","getSetZone","setOffsetToUTC","setOffsetToLocal","setOffsetToParsedOffset","tZone","hasAlignedHourOffs
et","isDaylightSavingTime","isDaylightSavingTimeShifted","_isDSTShifted","isLocal","isUtcOffset","isUtc","aspNetRegex","isoRegex","diffRes","parseIso","momentsDifference","inp","positiveMomentsDifference","isAfter","isBefore","createAdder","period","tmp","isAdding","isString","isMomentInput","isNumberOrStringArray","isMomentInputObject","objectTest","propertyTest","propertyLen","arrayTest","dataTypeTest","isCalendarSpec","getCalendarFormat","myMoment","calendar$1","formats","sod","startOf","calendarFormat","localInput","endOf","isBetween","inclusivity","localFrom","localTo","isSame","inputMs","isSameOrAfter","isSameOrBefore","asFloat","that","zoneDelta","monthDiff","wholeMonthDiff","anchor","toISOString","keepOffset","toDate","inspect","datetime","suffix","zone","inputString","defaultFormatUtc","defaultFormat","postformat","humanize","fromNow","toNow","newLocaleData","lang","MS_PER_SECOND","MS_PER_MINUTE","MS_PER_HOUR","MS_PER_400_YEARS","mod$1","dividend","divisor","localStartOfDate","utcStartOfDate","startOfDate","isoWeekday","unix","toObject","toJSON","isValid$2","parsingFlags","invalidAt","creationData","localeEras","eras","_eras","since","until","localeErasParse","eraName","narrow","localeErasConvertYear","getEraName","getEraNarrow","getEraAbbr","getEraYear","erasNameRegex","computeErasParse","_erasNameRegex","_erasRegex","erasAbbrRegex","_erasAbbrRegex","erasNarrowRegex","_erasNarrowRegex","matchEraAbbr","matchEraName","matchEraNarrow","matchEraYearOrdinal","_eraYearOrdinalRegex","abbrPieces","namePieces","narrowPieces","addWeekYearFormatToken","getSetWeekYear","getSetWeekYearHelper","getSetISOWeekYear","getISOWeeksInYear","getISOWeeksInISOWeekYear","isoWeekYear","getWeeksInYear","weekInfo","getWeeksInWeekYear","weeksTarget","setWeekAll","dayOfYearData","getSetQuarter","erasParse","eraYearOrdinalParse","getSetDayOfMonth","getSetDayOfYear","getSetMinute","getSetMillisecond","getSetSecond","parseMs","getZoneAbbr","getZoneName","createUnix","createInZone","parseZone","preParsePostFormat","for","eraNarrow","eraAbbr","eraYear","isoWeeks","weeksInWeekYear","isoWeeksInYear","isoWeeksInISOWeekYear","isDST","zoneAbbr","zoneName","dates","isDSTShifted","proto$1","get$1","setter","listMonthsImpl","out","listWeekdaysImpl","localeSorted","listMonths","listMonthsShort","listWeekdays","listWeekdaysShort","listWeekdaysMin","firstDayOfYear","firstDayOfWeek","langData","mathAbs","addSubtract$1","add$1","subtract$1","absCeil","monthsFromDays","monthsToDays","daysToMonths","valueOf$1","makeAs","asMilliseconds","asSeconds","asMinutes","asHours","asDays","asWeeks","asMonths","asQuarters","asYears","clone$1","get$2","makeGetter","thresholds","substituteTimeAgo","relativeTime$1","posNegDuration","getSetRelativeTimeRounding","roundingFunction","getSetRelativeTimeThreshold","limit","argWithSuffix","argThresholds","withSuffix","abs$1","toISOString$1","totalSign","ymSign","daysSign","hmsSign","proto$2","toIsoString","relativeTimeRounding","relativeTimeThreshold","HTML5_FMT","DATETIME_LOCAL","DATETIME_LOCAL_SECONDS","DATETIME_LOCAL_MS","TIME","TIME_SECONDS","TIME_MS","__unused_webpack_module","__unused_webpack_exports","Lib","rules","fullSelector","addStyleRule","Plotly","ReactPropTypesSecret","emptyFunction","emptyFunctionWithReset","resetWarningCache","shim","componentName","location","propFullName","secret","getShim","isRequired","ReactPropTypes","bigint","symbol","arrayOf","elementType","instanceOf","objectOf","oneOf","oneOfType","exact","checkPropTypes","PropTypes","aa","ca","encodeURIComponent","da","ea","f
a","ia","ja","ka","la","ma","acceptsBooleans","attributeName","attributeNamespace","mustUseProperty","propertyName","sanitizeURL","removeEmptyString","ra","sa","ta","pa","qa","oa","removeAttribute","setAttributeNS","xlinkHref","__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED","wa","ya","za","Aa","Ba","Ca","Da","Ea","Fa","Ga","Ha","Ia","Ja","Ka","La","Ma","Na","Oa","prepareStackTrace","Reflect","construct","displayName","Pa","Qa","$$typeof","_context","_payload","_init","Ra","Sa","Ta","nodeName","Va","_valueTracker","getOwnPropertyDescriptor","setValue","stopTracking","Ua","Wa","checked","Xa","Ya","defaultChecked","_wrapperState","initialChecked","Za","controlled","ownerDocument","defaultSelected","disabled","dangerouslySetInnerHTML","namespaceURI","innerHTML","MSApp","execUnsafeLocalFunction","lastChild","nodeType","nodeValue","animationIterationCount","borderImageOutset","borderImageSlice","borderImageWidth","boxFlex","boxFlexGroup","boxOrdinalGroup","columnCount","flex","flexGrow","flexPositive","flexShrink","flexNegative","flexOrder","gridArea","gridRow","gridRowEnd","gridRowSpan","gridRowStart","gridColumn","gridColumnEnd","gridColumnSpan","gridColumnStart","lineClamp","order","tabSize","widows","fillOpacity","floodOpacity","stopOpacity","strokeDasharray","strokeDashoffset","strokeMiterlimit","strokeWidth","setProperty","menuitem","area","br","embed","keygen","link","track","wbr","srcElement","correspondingUseElement","stateNode","onError","alternate","return","memoizedState","dehydrated","sibling","unstable_scheduleCallback","unstable_cancelCallback","unstable_shouldYield","unstable_requestPaint","unstable_now","unstable_getCurrentPriorityLevel","unstable_ImmediatePriority","unstable_UserBlockingPriority","unstable_NormalPriority","unstable_LowPriority","unstable_IdlePriority","clz32","pendingLanes","suspendedLanes","pingedLanes","entangledLanes","entanglements","eventTimes","delete","nativeEvent","blockedOn","domEventName","eventSystemFlags","targetContainers","isDehydrated","containerInfo","dispatchEvent","ReactCurrentBatchConfig","keyCode","charCode","_reactName","_targetInst","currentTarget","returnValue","cancelBubble","persist","isPersistent","eventPhase","bubbles","cancelable","isTrusted","view","detail","altKey","getModifierState","button","buttons","relatedTarget","fromElement","toElement","movementX","movementY","dataTransfer","animationName","elapsedTime","pseudoElement","clipboardData","Spacebar","Left","Right","Down","Del","Win","Menu","Apps","Scroll","MozPrintableKey","Alt","Control","Meta","Shift","pressure","tangentialPressure","tiltX","tiltY","twist","isPrimary","targetTouches","changedTouches","deltaX","wheelDeltaX","deltaZ","email","password","range","search","tel","oninput","detachEvent","attachEvent","nextSibling","compareDocumentPosition","HTMLIFrameElement","contentWindow","href","contentEditable","focusedElem","selectionRange","documentElement","selectionStart","selectionEnd","defaultView","getSelection","rangeCount","anchorNode","anchorOffset","focusNode","focusOffset","createRange","setStart","removeAllRanges","addRange","setEnd","scrollLeft","scrollTop","focus","animationend","animationiteration","animationstart","transitionend","parentWindow","na","xa","$a","ba","char","__html","queueMicrotask","previousSibling","contextTypes","__reactInternalMemoizedUnmaskedChildContext","__reactInternalMemoizedMaskedChildContext","childContextTypes","getChildContext","__reactInternalMemoizedMergedChildContext","deletions","pendingProps","treeContext","retryLane","memoiz
edProps","defaultProps","_currentValue","childLanes","dependencies","firstContext","lanes","memoizedValue","interleaved","updateQueue","baseState","firstBaseUpdate","lastBaseUpdate","shared","pending","effects","eventTime","lane","Component","refs","isMounted","_reactInternals","enqueueSetState","enqueueReplaceState","enqueueForceUpdate","shouldComponentUpdate","isPureReactComponent","contextType","updater","componentWillReceiveProps","UNSAFE_componentWillReceiveProps","getDerivedStateFromProps","getSnapshotBeforeUpdate","UNSAFE_componentWillMount","componentWillMount","componentDidMount","ref","_owner","_stringRef","implementation","tagName","revealOrder","_workInProgressVersionPrimary","ReactCurrentDispatcher","baseQueue","lastRenderedReducer","action","hasEagerState","eagerState","lastRenderedState","getSnapshot","lastEffect","stores","deps","readContext","useCallback","useContext","useEffect","useImperativeHandle","useInsertionEffect","useLayoutEffect","useMemo","useReducer","useRef","useState","useDebugValue","useDeferredValue","useTransition","useMutableSource","useSyncExternalStore","useId","unstable_isNewReconciler","identifierPrefix","digest","WeakMap","getDerivedStateFromError","componentDidCatch","componentStack","pingCache","ReactCurrentOwner","compare","baseLanes","cachePool","transitions","UNSAFE_componentWillUpdate","componentWillUpdate","componentDidUpdate","pendingContext","fallback","dataset","dgst","_reactRetry","subtreeFlags","isBackwards","rendering","renderingStartTime","tail","tailMode","wasMultiple","suppressHydrationWarning","onClick","onclick","createElementNS","autoFocus","createTextNode","WeakSet","_reactRootContainer","onCommitFiberUnmount","componentWillUnmount","isHidden","__reactInternalSnapshotBeforeUpdate","callbackNode","expirationTimes","expiredLanes","callbackPriority","finishedWork","finishedLanes","timeoutHandle","mutableReadLanes","onCommitFiberRoot","onRecoverableError","onPostCommitFiberRoot","isReactComponent","pendingChildren","mutableSourceEagerHydrationData","pendingSuspenseBoundaries","reportError","_internalRoot","unstable_scheduleHydration","querySelectorAll","JSON","stringify","form","usingClientEntryPoint","Events","findFiberByHostInstance","bundleType","rendererPackageName","rendererConfig","overrideHookState","overrideHookStateDeletePath","overrideHookStateRenamePath","overrideProps","overridePropsDeletePath","overridePropsRenamePath","setErrorHandler","setSuspenseHandler","scheduleUpdate","currentDispatcherRef","findHostInstanceByFiber","findHostInstancesForRefresh","scheduleRefresh","scheduleRoot","setRefreshHandler","getCurrentFiber","reconcilerVersion","__REACT_DEVTOOLS_GLOBAL_HOOK__","isDisabled","supportsFiber","inject","createPortal","createRoot","unstable_strictMode","findDOMNode","flushSync","hydrate","hydrateRoot","hydratedSources","_getVersion","unmountComponentAtNode","unstable_batchedUpdates","unstable_renderSubtreeIntoContainer","checkDCE","PlotlyComponent","_Component","_super","_createSuper","resizeHandler","syncWindowResize","_assertThisInitialized","syncEventHandlers","attachUpdateEvents","getRef","handleUpdate","figureCallback","updatePlotly","shouldInvokeResizeHandler","figureCallbackFunction","shouldAttachUpdateEvents","unmounting","react","onInitialized","prevProps","numPrevFrames","numNextFrames","figureChanged","revisionDefined","revision","revisionChanged","onUpdate","onPurge","isBrowser","removeUpdateEvents","purge","_this3","updateEvents","updateEvent","_this4","_this$el","_transitionData","_frames","invoke","_t
his5","useResizeHandler","Plots","_this6","eventNames","hasHandler","Boolean","addEventHandler","removeEventHandler","getPlotlyEventName","_react","divId","propTypes","_propTypes","_interopRequireWildcard","_factory","_interopRequireDefault","_plotly","__self","__source","Fragment","jsx","jsxs","setState","forceUpdate","escape","_status","_result","Children","only","Profiler","PureComponent","StrictMode","Suspense","cloneElement","createContext","_currentValue2","_threadCount","Provider","Consumer","_defaultValue","_globalName","createFactory","createRef","forwardRef","isValidElement","lazy","memo","startTransition","unstable_act","sortIndex","expirationTime","priorityLevel","scheduling","isInputPending","MessageChannel","port2","port1","unstable_Profiling","unstable_continueExecution","unstable_forceFrameRate","unstable_getFirstCallbackNode","unstable_next","unstable_pauseExecution","unstable_runWithPriority","unstable_wrapCallback","warning","loaded","chunkId","promises","miniCssF","globalThis","inProgress","dataWebpackPrefix","script","needAttach","scripts","getElementsByTagName","getAttribute","charset","timeout","onScriptComplete","onerror","doneFns","paths","installedChunks","installedChunkData","errorType","realSrc","request","webpackJsonpCallback","parentChunkLoadingFunction","chunkIds","moreModules","runtime","chunkLoadingGlobal","_toPropertyKey","hint","prim","toPrimitive","_setPrototypeOf","_getPrototypeOf","_isNativeReflectConstruct","sham","Proxy","Derived","hasNativeReflectConstruct","Super","NewTarget","_toConsumableArray","ownKeys","enumerableOnly","getOwnPropertySymbols","symbols","sym","_objectSpread2","getOwnPropertyDescriptors","excluded","sourceKeys","_objectWithoutProperties","sourceSymbolKeys","propertyIsEnumerable","_excluded","useButtonProps","rel","role","_ref2$tabIndex","tabIndex","handleClick","isTrivialHref","onKeyDown","Button","React","asProp","_objectWithoutPropertiesLoose","_useButtonProps2","buttonProps","_jsx","DEFAULT_BREAKPOINTS","ThemeContext","prefixes","breakpoints","minBreakpoint","useBootstrapPrefix","defaultPrefix","useIsRTL","bsPrefix","_ref$variant","variant","_ref$active","_ref$disabled","_objectSpread","Spinner","_ref$animation","_ref$as","bsSpinnerPrefix","nodeHtmlLabel","Graph","setSelectedNode","selectedWorkload","selectedOp","selectedPattern","setWarningText","_useState2","setGraph","_useState4","groupNode","setGroupNode","groupNodeOpList","workload_id","model_path","localStorage","getItem","response","op_name","groups","status","CytoGraph","_React$Component","handleCallback","childData","renderCytoscapeElement","getElements","node_type","newExpandedNodes","_jsxs","GraphButtons","parentCallback","collapseNode","tooltip","_React$Component2","_super2","getLabel","highlight","border_color","customColor","getHash","OpDetails","setHistogramType","setSelectedPattern","opDetails","setOpDetails","Pattern","sequence","framework","colSpan","Weights","dtype","granularity","Activation","_Fragment","Table","striped","bordered","borderless","hover","responsive","decoratedBsPrefix","table","responsiveClass","OpList","setSelectedOp","opList","setOpList","MSE","_error$response","tableContent","opData","scrollIntoView","toExponential","Histogram","histogramType","histogramData","setHistogramData","Plot","getHistogramData","colorPalette","generateColor","histograms","series","orientation","side","fillcolor","hoverinfo","spanmode","yaxis","autorange","showgrid","legend","tracegroupgap","violinmode","_get","receiver","desc","_construct","Parent","Class","_wrapN
ativeSuper","Wrapper","PACKET_TYPES","PACKET_TYPES_REVERSE","TEXT_ENCODER","ERROR_PACKET","withNativeBlob","withNativeArrayBuffer","isView","encodePacket","supportsBinary","encodeBlobAsBase64","fileReader","FileReader","readAsDataURL","byteOffset","byteLength","chars","TEXT_DECODER","decodePacket","encodedPacket","binaryType","mapBinary","decodeBase64Packet","decoded","base64","encoded1","encoded2","encoded3","encoded4","bufferLength","arraybuffer","decode","SEPARATOR","mixin","emitReserved","hasListeners","globalThisShim","pick","NATIVE_SET_TIMEOUT","NATIVE_CLEAR_TIMEOUT","installTimerFunctions","useNativeTimers","setTimeoutFn","clearTimeoutFn","TransportError","_Error","description","Transport","_Emitter","socket","readyState","doOpen","doClose","onClose","packets","write","packet","onPacket","details","onPause","schema","_hostname","_port","_query","hostname","port","secure","encodedQuery","encode","alphabet","encoded","yeast","XMLHttpRequest","hasCORS","XHR","xdomain","hasXHR2","responseType","Polling","_Transport","polling","isSSL","protocol","forceBase64","withCredentials","cookieJar","createCookieJar","poll","doPoll","encodedPayload","encodedPackets","decodedPacket","decodePayload","onOpen","close","encodePayload","doWrite","timestampRequests","timestampParam","createUri","Request","uri","xhrStatus","_this7","onData","pollXhr","_this8","_this9","xhr","open","extraHeaders","setDisableHeaderCheck","setRequestHeader","addCookies","requestTimeout","onreadystatechange","parseCookies","send","requestsCount","requests","cleanup","fromError","abort","responseText","unloadHandler","nextTick","WebSocket","MozWebSocket","isReactNative","product","WS","protocols","headers","addEventListeners","onopen","autoUnref","_socket","unref","onclose","closeEvent","lastPacket","WT","WebTransport","transport","transportOptions","closed","createBidirectionalStream","stream","binaryFlag","reader","readable","getReader","writer","getWriter","read","TextDecoder","isPlainBinary","decodePacketFromBinary","handshake","TextEncoder","arrayBuffer","encodePacketToBinary","shouldIncludeBinaryHeader","transports","websocket","webtransport","host","authority","ipv6uri","pathNames","regx","queryKey","$0","$2","Socket","writeBuffer","prevBufferLen","agent","upgrade","rememberUpgrade","addTrailingSlash","rejectUnauthorized","perMessageDeflate","closeOnBeforeunload","qry","pairs","pair","decodeURIComponent","upgrades","pingInterval","pingTimeout","pingTimeoutTimer","beforeunloadEventListener","offlineEventListener","EIO","priorWebsocketSuccess","createTransport","setTransport","onDrain","failed","onTransportOpen","upgrading","freezeTransport","onTransportClose","onupgrade","probe","onHandshake","resetPingTimeout","sendPacket","filterUpgrades","maxPayload","getWritablePackets","payloadSize","utf8Length","compress","cleanupAndClose","waitForUpgrade","filteredUpgrades","_createForOfIteratorHelper","allowArrayLike","normalCompletion","didErr","_e2","withNativeFile","File","hasBinary","deconstructPacket","buffers","packetData","pack","_deconstructPacket","attachments","placeholder","_placeholder","newData","reconstructPacket","_reconstructPacket","PacketType","RESERVED_EVENTS","Encoder","replacer","EVENT","ACK","encodeAsString","encodeAsBinary","BINARY_EVENT","BINARY_ACK","nsp","deconstruction","Decoder","reviver","reconstructor","isBinaryEvent","decodeString","BinaryReconstructor","takeBinaryData","buf","tryParse","isPayloadValid","finishedReconstruction","CONNECT","DISCONNECT","CONNECT_ERROR","reconPack","binData","freeze","con
nect","connect_error","disconnecting","newListener","io","connected","recovered","receiveBuffer","sendBuffer","_queue","_queueSeq","acks","auth","_opts","_autoConnect","subs","onpacket","subEvents","_readyState","_len2","_key2","retries","fromQueue","volatile","_addToQueue","ack","_registerAckCallback","isTransportWritable","engine","notifyOutgoingListeners","ackTimeout","timer","_len3","_key3","_len4","_key4","withErr","tryCount","_len5","responseArgs","_key5","_drainQueue","_packet","_sendConnectPacket","_pid","_lastOffset","onconnect","onevent","onack","ondisconnect","emitEvent","_anyListeners","_iterator","sent","_len6","_key6","emitBuffered","subDestroy","_anyOutgoingListeners","_step2","_iterator2","Backoff","jitter","attempts","rand","deviation","setMin","setMax","setJitter","Manager","nsps","reconnection","reconnectionAttempts","reconnectionDelay","reconnectionDelayMax","randomizationFactor","backoff","_parser","parser","encoder","decoder","autoConnect","_reconnection","_reconnectionAttempts","_reconnectionDelay","_randomizationFactor","_reconnectionDelayMax","_timeout","_reconnecting","reconnect","Engine","skipReconnect","openSubDestroy","maybeReconnectOnOpen","errorSub","onping","ondata","ondecoded","_nsps","_close","onreconnect","attempt","parsed","sameNamespace","forceNew","multiplex","Workloads","setSelectedWorkload","workloads","setWorkloads","spinner","setSpinner","getWorkloads","changeSelectedWorkload","workloadsList","workload","workload_name","moment","creation_time","_ref$fluid","fluid","_ref$rounded","rounded","_ref$roundedCircle","roundedCircle","_ref$thumbnail","thumbnail","getOverlayDirection","placement","isRTL","bsDirection","getInitialPopperStyles","Tooltip","_ref$placement","arrowProps","hasDoneInitialMeasure","popper","primaryPlacement","computedStyle","strategy","TOOLTIP_OFFSET","useMounted","mounted","useWillUnmount","onUnmount","valueRef","useUpdatedRef","MAX_DELAY_MS","setChainedTimeout","handleRef","timeoutAtMs","delayMs","useTimeout","defaultKey","_toPrimitive","useUncontrolledProp","propValue","wasPropRef","_useState","stateValue","isProp","wasProp","nextProps","prevState","nextState","__reactInternalSnapshotFlag","__reactInternalSnapshot","__suppressDeprecationWarning","toFnRef","refA","refB","mergeRefs","useCallbackRef","find","tar","dequal","foo","bar","ctor","_step3","_iterator3","DataView","getInt8","getBasePlacement","getWindow","isElement","isHTMLElement","isShadowRoot","ShadowRoot","getUAString","uaData","userAgentData","brands","brand","isLayoutViewport","includeScale","isFixedStrategy","clientRect","scaleX","scaleY","visualViewport","addVisualOffsets","getLayoutRect","rootNode","getRootNode","isSameNode","getNodeName","isTableElement","getDocumentElement","getParentNode","assignedSlot","getTrueOffsetParent","offsetParent","getOffsetParent","isFirefox","perspective","contain","willChange","getContainingBlock","getMainAxisFromPlacement","within","mathMax","mathMin","mergePaddingObject","expandToHashMap","hashMap","auto","basePlacements","clippingParents","reference","variationPlacements","placements","modifierPhases","phase","_state$modifiersData$","arrowElement","arrow","popperOffsets","modifiersData","basePlacement","axis","rects","toPaddingObject","arrowRect","minProp","maxProp","endDiff","startDiff","arrowOffsetParent","clientSize","centerToReference","axisProp","centerOffset","effect","_options$element","requires","requiresIfExists","getVariation","unsetSides","mapToStyles","_Object$assign2","popperRect","variation","gpuAcceleration","adaptive
","roundOffsets","isFixed","_offsets$x","_offsets$y","hasX","hasY","sideX","sideY","win","heightProp","widthProp","_Object$assign","commonStyles","_ref4","dpr","roundOffsetsByDPR","_options$gpuAccelerat","_options$adaptive","_options$roundOffsets","styles","_options$scroll","scroll","_options$resize","scrollParents","scrollParent","getOppositePlacement","getOppositeVariationPlacement","getWindowScroll","pageXOffset","pageYOffset","getWindowScrollBarX","isScrollParent","_getComputedStyle","overflowX","overflowY","getScrollParent","listScrollParents","_element$ownerDocumen","isBody","updatedList","rectToClientRect","getClientRectFromMixedType","clippingParent","html","layoutViewport","getViewportRect","clientTop","clientLeft","getInnerBoundingClientRect","winScroll","scrollWidth","scrollHeight","getDocumentRect","getClippingRect","boundary","rootBoundary","mainClippingParents","clipperElement","getClippingParents","firstClippingParent","clippingRect","accRect","computeOffsets","commonX","commonY","mainAxis","detectOverflow","_options$placement","_options$strategy","_options$boundary","_options$rootBoundary","_options$elementConte","elementContext","_options$altBoundary","altBoundary","_options$padding","altContext","clippingClientRect","contextElement","referenceClientRect","popperClientRect","elementClientRect","overflowOffsets","offsetData","multiply","_skip","_options$mainAxis","checkMainAxis","_options$altAxis","altAxis","checkAltAxis","specifiedFallbackPlacements","fallbackPlacements","_options$flipVariatio","flipVariations","allowedAutoPlacements","preferredPlacement","oppositePlacement","getExpandedFallbackPlacements","_options$allowedAutoP","allPlacements","allowedPlacements","overflows","computeAutoPlacement","referenceRect","checksMap","makeFallbackChecks","firstFittingPlacement","_basePlacement","isStartVariation","isVertical","mainVariationSide","altVariationSide","fittingPlacement","getSideOffsets","preventedOffsets","isAnySideFullyClipped","_options$offset","invertDistance","skidding","distanceAndSkiddingToXY","_data$state$placement","_options$tether","tether","_options$tetherOffset","tetherOffset","isBasePlacement","tetherOffsetValue","normalizedTetherOffsetValue","offsetModifierState","_offsetModifierState$","mainSide","altSide","additive","maxLen","arrowPaddingObject","arrowPaddingMin","arrowPaddingMax","arrowLen","minOffset","maxOffset","clientOffset","offsetModifierValue","tetherMax","preventedOffset","_offsetModifierState$2","_mainSide","_altSide","_max","isOriginSide","_offsetModifierValue","_tetherMin","_tetherMax","_preventedOffset","withinMaxClamp","getCompositeRect","elementOrVirtualElement","isOffsetParentAnElement","offsetParentIsScaled","isElementScaled","getNodeScroll","modifiers","dep","depModifier","DEFAULT_OPTIONS","areValidElements","popperGenerator","generatorOptions","_generatorOptions","_generatorOptions$def","defaultModifiers","_generatorOptions$def2","defaultOptions","orderedModifiers","effectCleanupFns","isDestroyed","setOptionsAction","cleanupModifierEffects","orderModifiers","existing","mergeByName","_ref$options","cleanupFn","noopFn","_state$elements","_state$orderedModifie","_state$orderedModifie2","onFirstUpdate","createPopper","preventOverflow","referenceOverflow","popperAltOverflow","referenceClippingOffsets","popperEscapeOffsets","isReferenceHidden","hasPopperEscaped","computeStyles","eventListeners","flip","disabledApplyStylesModifier","ariaDescribedByModifier","_popper$getAttribute","_state$elements2","EMPTY_MODIFIERS","referenceElement","poppe
rElement","_ref$enabled","_ref$strategy","_ref$modifiers","prevModifiers","popperInstanceRef","_popperInstanceRef$cu","_popperInstanceRef$cu2","_useSafeState2","useSafeState","popperState","updateModifier","nextModifiers","optionsSupported","onceSupported","canUseDOM","wrappedHandler","__once","onceHandler","useEventCallback","useCommittedRef","getRefTarget","InitialTriggerEvents","click","mouseup","pointerup","onClickOutside","_ref$clickTrigger","clickTrigger","preventMouseClickOutsideRef","waitingForTrigger","handleMouseCapture","isLeftClickEvent","handleInitialMouse","handleMouse","_ownerWindow$event","_ownerWindow$parent","ownerWindow","currentEvent","removeInitialTriggerListener","removeMouseCaptureListener","removeMouseListener","mobileSafariHackListeners","onRootClose","useClickOutside","handleKeyUp","isEscKey","removeKeyupListener","Context","resolveContainerRef","useWaitForDOMRef","onResolved","resolvedRef","setRef","earlyRef","nextRef","toModifierArray","mergeOptionsWithPopperConfig","_modifiers$eventListe","_modifiers$preventOve","_modifiers$preventOve2","_modifiers$offset","_modifiers$arrow","enableEvents","fixed","containerPadding","_ref$popperConfig","popperConfig","toModifierMap","inProp","in","onExited","mountOnEnter","unmountOnExit","hasEnteredRef","handleExited","combinedRef","useMergedRefs","ImperativeTransition","onEntered","exited","setExited","onTransition","isInitialRef","handleTransition","useIsomorphicEffect","stale","initial","isStale","Overlay","outerRef","_props$popperConfig","Transition","runTransition","_useCallbackRef2","rootElement","attachRef","_useCallbackRef4","attachArrowRef","mergedRef","usePopper","mountOverlay","useRootClose","onHide","rootClose","rootCloseDisabled","rootCloseEvent","onExit","onExiting","onEnter","onEntering","NoopTransition","renderTransition","appear","ReactDOM","baseVal","rHyphen","pascalCase","chr","createWithBsPrefix","_ref$displayName","BsComponent","_ref2$as","Tag","componentProps","resolvedPrefix","Popover","PopoverBody","Header","PopoverHeader","Body","POPPER_OFFSET","UNMOUNTED","EXITED","ENTERING","ENTERED","EXITING","initialStatus","isMounting","enter","appearStatus","nextCallback","_proto","updateStatus","nextStatus","cancelNextCallback","getTimeouts","exit","mounting","nodeRef","forceReflow","performEnter","performExit","appearing","maybeNode","maybeAppearing","timeouts","enterTimeout","safeSetState","onTransitionEnd","setNextCallback","doesNotHaveTimeoutOrListener","addEndListener","maybeNextCallback","_this$props","childProps","TransitionGroupContext","psuedoElement","rUpper","msPattern","hyphenateStyleName","hyphenate","supportedTransforms","transforms","isTransform","removeProperty","cssText","emulateTransitionEnd","called","handle","createEvent","initEvent","transitionEnd","parseDuration","removeEmulate","transitionEndListener","safeFindDOMNode","componentOrElement","childRef","handleEnter","handleEntering","handleEntered","handleExit","handleExiting","handleAddEndListener","innerProps","fadeStyles","_fadeStyles","Fade","_ref$transitionClasse","transitionClasses","isAppearing","triggerBrowserReflow","TransitionWrapper","_ref$transition","_ref$rootClose","_ref$show","outerShow","outerProps","popperRef","firstRenderedState","setFirstRenderedState","_useOverlayOffset","customOffset","overlayRef","popoverClass","tooltipClass","useOverlayOffset","_useOverlayOffset2","actualTransition","handleFirstUpdate","BaseOverlay","overlayProps","_popperObj$state","_popperObj$state$modi","popperObj","aRef","__wrapped","wrapRefs","updat
edPlacement","outOfBoundaries","handleMouseOverOut","relatedNative","related","_ref$trigger","propsShow","_ref$defaultShow","defaultShow","onToggle","propsDelay","_ref$flip","triggerNodeRef","hoverStateRef","_useUncontrolledProp2","setShow","normalizeDelay","onFocus","onBlur","handleShow","handleHide","handleFocus","handleBlur","handleMouseOver","handleMouseOut","triggers","triggerProps","onMouseOver","onMouseOut","WorkloadDetails","tooltipDelete","tooltipCopy","tooltipFullPath","OverlayTrigger","deleteWorkload","clipboard","writeText","ModelSummary","_summary$data","summary","setSummary","replaceAll","Profiling","profilingTable","setProfilingTable","profilingChartData","setProfilingChartData","ProfilingChart","ProfilingTable","_useState6","setChecked","_useState8","sorting","setSorting","showOnChart","chartData","node_name","sortedProfiling","sortedTable","requestSorting","getSortingClass","profiling","accelerator_execution_time","cpu_execution_time","op_defined","op_run","total_execution_time","chartCheck","newProfilingChartData","requestChartCheck","getChartData","profilingData","marker","xaxis","title","showticklabels","Anchor","handleKeyDown","CloseButton","_ref$ariaLabel","ariaLabel","DivStyledAsH4","AlertHeading","AlertLink","Alert","uncontrolledProps","_useUncontrolled","fieldName","_extends2","Utils","propsValue","rest","handlerName","_useUncontrolledProp","_extends","useUncontrolled","_useUncontrolled$show","_useUncontrolled$clos","closeLabel","closeVariant","_useUncontrolled$vari","dismissible","_useUncontrolled$tran","handleClose","alert","Link","Heading","Warning","warningText","Feedback","_ref$type","_ref$tooltip","FormCheckInput","_ref$isValid","_ref$isInvalid","controlId","FormContext","FormCheckLabel","htmlFor","FormCheck","bsSwitchPrefix","_ref$inline","inline","_ref$reverse","_ref$feedbackTooltip","feedbackTooltip","feedback","feedbackType","_ref$title","innerFormContext","hasLabel","hasChildOfType","Input","Label","FormControl","_classes2","htmlSize","plaintext","readOnly","FormGroup","useCol","spans","brkPoint","span","infix","Col","_useCol2","_useCol2$","colProps","_excluded2","_useCol2$2","_useCol2$2$as","FormLabel","_ref$column","column","_ref$visuallyHidden","visuallyHidden","columnClass","FormRange","FormSelect","FormText","muted","Switch","FloatingLabel","validated","Form","Group","Floating","FormFloating","Check","Text","Range","Select","InputGroupText","InputGroup","hasValidation","contextValue","InputGroupContext","Radio","Checkbox","NodeProperties","selectedNode","_selectedNode$attribu","propertyList","attributeList","attribute","_attribute$value","attribute_type","AccuracyResults","accuracy_data","ratio","baseline_accuracy","toPrecision","optimized_accuracy","_useState10","_useState12","kindOf","thing","kindOfTest","typeOfTest","isArrayBuffer","isPlainObject","isFile","isBlob","isFileList","isURLSearchParams","_ref$allOwnKeys","allOwnKeys","findKey","_global","isContextDefined","TypedArray","isTypedArray","isHTMLForm","isRegExp","reduceDescriptors","reducer","descriptors","reducedDescriptors","ALPHA","DIGIT","ALPHABET","ALPHA_DIGIT","isAsyncFn","isBuffer","isFormData","kind","FormData","isArrayBufferView","isBoolean","isStream","pipe","caseless","targetKey","stripBOM","inherits","superConstructor","toFlatObject","sourceObj","destObj","propFilter","endsWith","searchString","forEachEntry","matchAll","regExp","freezeMethods","toObjectSet","arrayOrString","delimiter","toCamelCase","toFiniteNumber","generateString","isSpecCompliantForm","toJSONObject","reducedValu
e","isThenable","AxiosError","captureStackTrace","utils","fileName","lineNumber","columnNumber","customProps","axiosError","isVisitable","removeBrackets","renderKey","dots","predicates","formData","metaTokens","indexes","option","visitor","defaultVisitor","useBlob","convertValue","Buffer","isFlatArray","exposedHelpers","build","charMap","AxiosURLSearchParams","_pairs","toFormData","_encode","buildURL","serializedParams","serializeFn","serialize","hashmarkIndex","InterceptorManager","fulfilled","rejected","synchronous","runWhen","silentJSONParsing","forcedJSONParsing","clarifyTimeoutError","URLSearchParams","isStandardBrowserEnv","isStandardBrowserWebWorkerEnv","WorkerGlobalScope","importScripts","buildPath","isNumericKey","isLast","arrayToObject","parsePropPath","DEFAULT_CONTENT_TYPE","transitional","transitionalDefaults","adapter","transformRequest","contentType","getContentType","hasJSONContentType","isObjectPayload","formDataToJSON","setContentType","platform","helpers","toURLEncodedForm","formSerializer","_FormData","env","rawValue","stringifySafely","transformResponse","JSONRequested","strictJSONParsing","ERR_BAD_RESPONSE","xsrfCookieName","xsrfHeaderName","maxContentLength","maxBodyLength","validateStatus","common","ignoreDuplicateOf","$internals","normalizeHeader","header","normalizeValue","matchHeaderValue","isHeaderNameFilter","AxiosHeaders","_Symbol$iterator","_Symbol$toStringTag","valueOrRewrite","rewrite","setHeader","_header","_rewrite","lHeader","setHeaders","rawHeaders","parseHeaders","tokensRE","parseTokens","deleted","deleteHeader","normalized","formatHeader","_this$constructor","asStrings","computed","accessors","defineAccessor","accessorName","methodName","buildAccessors","accessor","transformData","isCancel","__CANCEL__","CanceledError","ERR_CANCELED","expires","domain","cookie","toGMTString","buildFullPath","baseURL","requestedURL","isAbsoluteURL","relativeURL","combineURLs","originURL","msie","urlParsingNode","resolveURL","pathname","requestURL","samplesCount","firstSampleTS","timestamps","chunkLength","startedAt","bytesCount","passed","progressEventReducer","isDownloadStream","bytesNotified","_speedometer","speedometer","lengthComputable","progressBytes","rate","estimated","onCanceled","requestData","requestHeaders","cancelToken","unsubscribe","signal","username","unescape","btoa","fullPath","onloadend","responseHeaders","getAllResponseHeaders","ERR_BAD_REQUEST","settle","statusText","paramsSerializer","responseURL","onabort","ECONNABORTED","ERR_NETWORK","ontimeout","timeoutErrorMessage","ETIMEDOUT","xsrfValue","isURLSameOrigin","cookies","onDownloadProgress","onUploadProgress","upload","subscribe","aborted","parseProtocol","knownAdapters","http","xhrAdapter","adapters","nameOrAdapter","throwIfCancellationRequested","throwIfRequested","dispatchRequest","headersToObject","mergeConfig","config1","config2","getMergedValue","mergeDeepProperties","valueFromConfig2","defaultToConfig2","mergeDirectKeys","mergeMap","timeoutMessage","decompress","beforeRedirect","httpAgent","httpsAgent","socketPath","responseEncoding","configValue","VERSION","validators","deprecatedWarnings","validator","formatMessage","opt","ERR_DEPRECATED","assertOptions","allowUnknown","ERR_BAD_OPTION_VALUE","ERR_BAD_OPTION","Axios","instanceConfig","interceptors","configOrUrl","contextHeaders","boolean","function","requestInterceptorChain","synchronousRequestInterceptors","interceptor","responseInterceptorChain","chain","newConfig","generateHTTPMethod","isForm","CancelToken","resolvePromise","_listeners","
onfulfilled","_resolve","HttpStatusCode","Continue","SwitchingProtocols","Processing","EarlyHints","Created","Accepted","NonAuthoritativeInformation","NoContent","ResetContent","PartialContent","MultiStatus","AlreadyReported","ImUsed","MultipleChoices","MovedPermanently","Found","SeeOther","NotModified","UseProxy","Unused","TemporaryRedirect","PermanentRedirect","BadRequest","Unauthorized","PaymentRequired","Forbidden","NotFound","MethodNotAllowed","NotAcceptable","ProxyAuthenticationRequired","RequestTimeout","Conflict","Gone","LengthRequired","PreconditionFailed","PayloadTooLarge","UriTooLong","UnsupportedMediaType","RangeNotSatisfiable","ExpectationFailed","ImATeapot","MisdirectedRequest","UnprocessableEntity","Locked","FailedDependency","TooEarly","UpgradeRequired","PreconditionRequired","TooManyRequests","RequestHeaderFieldsTooLarge","UnavailableForLegalReasons","InternalServerError","NotImplemented","BadGateway","ServiceUnavailable","GatewayTimeout","HttpVersionNotSupported","VariantAlsoNegotiates","InsufficientStorage","LoopDetected","NotExtended","NetworkAuthenticationRequired","axios","createInstance","defaultConfig","Cancel","spread","isAxiosError","formToJSON","setItem","Diagnosis","onPerfEntry","getCLS","getFID","getFCP","getLCP","getTTFB","App","reportWebVitals"],"sourceRoot":""} \ No newline at end of file diff --git a/neural_insights/web/app/static/media/IntelClear_Bd.060888be7dccf869db54.ttf b/neural_insights/web/app/static/media/IntelClear_Bd.060888be7dccf869db54.ttf deleted file mode 100644 index 5058d15085c..00000000000 Binary files a/neural_insights/web/app/static/media/IntelClear_Bd.060888be7dccf869db54.ttf and /dev/null differ diff --git a/neural_insights/web/app/static/media/IntelClear_Lt.c5e18e9d5505364da760.ttf b/neural_insights/web/app/static/media/IntelClear_Lt.c5e18e9d5505364da760.ttf deleted file mode 100644 index 151b54c7b7b..00000000000 Binary files a/neural_insights/web/app/static/media/IntelClear_Lt.c5e18e9d5505364da760.ttf and /dev/null differ diff --git a/neural_insights/web/app/static/media/IntelClear_Rg.33af11200cffaf9540ff.ttf b/neural_insights/web/app/static/media/IntelClear_Rg.33af11200cffaf9540ff.ttf deleted file mode 100644 index 0a2d0be299f..00000000000 Binary files a/neural_insights/web/app/static/media/IntelClear_Rg.33af11200cffaf9540ff.ttf and /dev/null differ diff --git a/neural_insights/web/app/static/media/intelone-display-bold.64a6eab04dcda9c570c8.ttf b/neural_insights/web/app/static/media/intelone-display-bold.64a6eab04dcda9c570c8.ttf deleted file mode 100644 index 072a9dc8880..00000000000 Binary files a/neural_insights/web/app/static/media/intelone-display-bold.64a6eab04dcda9c570c8.ttf and /dev/null differ diff --git a/neural_insights/web/app/static/media/intelone-display-light.68a9d0311f7374acb0cf.ttf b/neural_insights/web/app/static/media/intelone-display-light.68a9d0311f7374acb0cf.ttf deleted file mode 100644 index d23ed421816..00000000000 Binary files a/neural_insights/web/app/static/media/intelone-display-light.68a9d0311f7374acb0cf.ttf and /dev/null differ diff --git a/neural_insights/web/app/static/media/intelone-display-regular.0f8c3ef25c545acb6b7c.ttf b/neural_insights/web/app/static/media/intelone-display-regular.0f8c3ef25c545acb6b7c.ttf deleted file mode 100644 index a70bb54e9d0..00000000000 Binary files a/neural_insights/web/app/static/media/intelone-display-regular.0f8c3ef25c545acb6b7c.ttf and /dev/null differ diff --git a/neural_insights/web/app/static/media/intelone-mono-font-family-regular.de914c9a804c00b4f3e5.ttf 
b/neural_insights/web/app/static/media/intelone-mono-font-family-regular.de914c9a804c00b4f3e5.ttf deleted file mode 100644 index 271117969d3..00000000000 Binary files a/neural_insights/web/app/static/media/intelone-mono-font-family-regular.de914c9a804c00b4f3e5.ttf and /dev/null differ diff --git a/neural_insights/web/communication.py b/neural_insights/web/communication.py deleted file mode 100644 index 5d335442080..00000000000 --- a/neural_insights/web/communication.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Objects to communicate between domain logic and outside layers.""" - -from queue import Queue -from typing import Any, Dict, List, Union - - -class Request: - """Domain defined parameters.""" - - def __init__(self, method: str, operation: str, data: dict) -> None: - """Initialize properties.""" - self.method: str = method - self.operation: str = operation - self.data: dict = data - - -class Response: - """Domain defined response.""" - - def __init__(self) -> None: - """Initialize properties.""" - self.data: Union[Dict[str, Any], List[Dict[str, Any]]] = {} - self.command: Dict[str, Any] = {} - - -def create_simple_response( - data: Union[Dict[str, Any], List[Dict[str, Any]]], -) -> Response: - """Create new Response object with only data set.""" - response = Response() - response.data = data - return response - - -class Message: - """Message used to send data to GUI.""" - - def __init__(self, status: str, subject: str, data: Any): - """Initialize message.""" - self._status: str = status - self._subject: str = subject - self._data: Any = data - - @property - def subject(self) -> str: - """Get the subject.""" - return self._subject - - @property - def status(self) -> str: - """Get the status.""" - return self._status - - @property - def data(self) -> Any: - """Get the data.""" - return self._data - - -class MessageQueue: - """Queue for passing messages to GUI.""" - - _queue: Queue = Queue() - - def post_failure(self, subject: str, data: Any) -> None: - """Post failure message.""" - self._queue.put(Message("failure", subject, data)) - - def post_success(self, subject: str, data: Any) -> None: - """Post success message.""" - self._queue.put(Message("success", subject, data)) - - def post_error(self, subject: str, data: Any) -> None: - """Post error message.""" - self._queue.put(Message("error", subject, data)) - - def post_info(self, subject: str, data: Any) -> None: - """Post info message.""" - self._queue.put(Message("info", subject, data)) - - def get(self) -> Message: - """Wait for message and return it.""" - return self._queue.get() diff --git a/neural_insights/web/configuration.py b/neural_insights/web/configuration.py deleted file mode 100644 index 7279dd4ddd3..00000000000 --- a/neural_insights/web/configuration.py +++ /dev/null @@ -1,287 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Configuration module for Neural Insights server.""" - -import argparse -import logging -import os -import secrets -import socket -import sys -from typing import Dict - -from numpy.random import randint - -from neural_compressor.utils.utility import singleton -from neural_insights.utils.consts import WORKDIR_LOCATION -from neural_insights.utils.exceptions import NotFoundException -from neural_insights.utils.logger import log -from neural_insights.utils.utils import determine_ip - - -@singleton -class Configuration: - """Configuration object for Neural Insights server.""" - - PORT_DEFAULT = 5000 - MAX_PORTS_TRIED = 10 - - def __init__(self) -> None: - """Set the variables.""" - self.server_address = "" - self.server_port = 0 - self.url_prefix: str = "" - self.gui_port = 0 - self.log_level = 0 - self.token = "" - self.scheme = "" - self.workdir = "" - self.allow_insecure_connections = False - self.tls_certificate = "" - self.tls_key = "" - self.set_up() - - def set_up(self) -> None: - """Reset variables.""" - self.determine_values_from_environment() - - def determine_values_from_environment(self) -> None: - """Set variables based on environment values.""" - args = self.get_command_line_args() - self.server_address = determine_ip() - self.server_port = self.determine_server_port(args) - self.url_prefix = self.determine_url_prefix(args) - self.gui_port = self.determine_gui_port(args) - self.log_level = self.determine_log_level(args) - self.token = secrets.token_hex(16) - self.allow_insecure_connections = args.get("allow_insecure_connections", False) - self.tls_certificate = args.get("cert", "") - self.tls_key = args.get("key", "") - self.workdir = args.get("workdir_location", WORKDIR_LOCATION) - self.scheme = "http" if self.allow_insecure_connections else "https" - - @property - def global_config_directory(self) -> str: - """Get the directory for global config files.""" - return os.path.join( - os.environ.get("HOME", ""), - ".neural_compressor", - ) - - def get_command_line_args(self) -> Dict: - """Return arguments passed in command line.""" - parser = argparse.ArgumentParser( - description="Run Neural Insights server.", - ) - parser.add_argument( - "-p", - "--port", - type=int, - help="server port number to listen on", - ) - parser.add_argument( - "-P", - "--gui-port", - type=int, - help="port number for GUI", - ) - parser.add_argument( - "-U", - "--url-prefix", - type=str, - default="", - help="URL prefix for Neural Insights instance.", - ) - parser.add_argument( - "--allow-insecure-connections", - action="store_true", - help="run server without encryption", - ) - parser.add_argument( - "--cert", - type=str, - default="", - help="TLS Certificate to use", - ) - parser.add_argument( - "--key", - type=str, - default="", - help="TLS private key to use", - ) - parser.add_argument( - "--workdir-location", - type=str, - default=WORKDIR_LOCATION, - help="Path to work directory.", - ) - parser.add_argument( - "--verbose", - "-v", - action="count", - default=2, - help="verbosity of logging output, use -vv and -vvv for even 
more logs", - ) - return vars(parser.parse_args()) - - def determine_server_port(self, args: Dict) -> int: - """Return port to be used by the server. - - Will raise a NotFoundException if port is already in use. - - When port given in command line, only that port will be tried. - When no port specified will try self.MAX_PORTS_TRIED times, - starting with self.PORT_DEFAULT. - """ - command_line_port = args.get("port") - if command_line_port is not None: - self._ensure_valid_port(command_line_port) - if self.is_port_taken(command_line_port): - raise NotFoundException( - f"Port {command_line_port} already in use, exiting.", - ) - else: - return command_line_port - - ports = [self.PORT_DEFAULT] + randint( - 1025, - 65536, - self.MAX_PORTS_TRIED - 1, - ).tolist() - - for port in ports: - if not self.is_port_taken(port): - return port - - raise NotFoundException( - f"Unable to find a free port in {len(ports)} attempts, exiting.", - ) - - def determine_gui_port(self, args: Dict) -> int: - """Return port to be used by the GUI client. - - Will return self.server_port unless specified in configuration. - """ - command_line_port = args.get("gui_port") - if command_line_port is not None: - self._ensure_valid_port(command_line_port) - return command_line_port - return self.server_port - - def is_port_taken(self, port: int) -> bool: - """Return if given port is already in use.""" - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - s.bind((self.server_address, port)) - except socket.error: - return True - finally: - s.close() - - return False - - def determine_log_level(self, args: Dict) -> int: - """Determine log level based on parameters given.""" - verbosity_mapping = [ - logging.CRITICAL, - logging.WARNING, - logging.INFO, - logging.DEBUG, - ] - verbosity: int = args.get("verbose") # type:ignore - try: - return verbosity_mapping[verbosity] - except IndexError: - return logging.DEBUG - - @staticmethod - def determine_url_prefix(args: dict) -> str: - """Determine url prefix based on parameters given.""" - url_prefix = args.get("url_prefix", "") - if isinstance(url_prefix, str) and not url_prefix.startswith("/"): - url_prefix = f"/{url_prefix}" - return url_prefix - - def get_url(self) -> str: - """Return URL to access application.""" - base_url = f"{self.scheme}://{self.server_address}:{self.gui_port}" - if self.url_prefix != "/": - base_url = f"{base_url}{self.url_prefix}" - return f"{base_url}/?token={self.token}" - - def dump_token_to_file(self) -> None: - """Dump token to file.""" - token_filepath = os.path.join(WORKDIR_LOCATION, "token") - os.makedirs(os.path.dirname(token_filepath), exist_ok=True) - - if sys.platform == "win32": - self.create_secured_token_file_win(token_filepath) - - try: - token_file = os.open(token_filepath, flags=os.O_WRONLY | os.O_CREAT, mode=0o600) - os.write(token_file, self.token.encode()) - except Exception as err: - raise err - finally: - os.close(token_file) - - log.debug(f"Token has been dumped to {token_filepath}.") - - @staticmethod - def create_secured_token_file_win(token_filepath: str): - """Create secured file on Windows OS.""" - import ntsecuritycon as con # pylint: disable=import-error - import win32api # pylint: disable=import-error - import win32file # pylint: disable=import-error - import win32security # pylint: disable=import-error - - username = win32api.GetUserName() - os.makedirs(os.path.dirname(token_filepath), exist_ok=True) - - if os.path.exists(token_filepath): - os.remove(token_filepath) - - security_descriptor = 
win32security.SECURITY_DESCRIPTOR() - user_sid, _, _ = win32security.LookupAccountName("", username) - - access_rights = con.FILE_ALL_ACCESS - - dacl = win32security.ACL() - dacl.AddAccessAllowedAce(win32security.ACL_REVISION, access_rights, user_sid) - - security_descriptor.SetSecurityDescriptorDacl(1, dacl, 0) - - security_attributes = win32security.SECURITY_ATTRIBUTES() - security_attributes.SECURITY_DESCRIPTOR = security_descriptor - - handle = win32file.CreateFile( - token_filepath, - win32file.GENERIC_WRITE, - win32file.FILE_SHARE_READ, - security_attributes, - win32file.CREATE_NEW, - win32file.FILE_ATTRIBUTE_NORMAL, - None, - ) - - win32file.CloseHandle(handle) - - def _ensure_valid_port(self, port: int) -> None: - """Validate if proposed port number is allowed by TCP/IP.""" - if port < 1: - raise ValueError(f"Lowest allowed port number is 1, attempted to use: {port}") - if port > 65535: - raise ValueError(f"Highest allowed port number is 65535, attempted to use: {port}") diff --git a/neural_insights/web/exceptions.py b/neural_insights/web/exceptions.py deleted file mode 100644 index 9d8340527d7..00000000000 --- a/neural_insights/web/exceptions.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Web Exceptions.""" - -from neural_insights.utils.exceptions import NotFoundException - - -class ServiceNotFoundException(NotFoundException): - """Unknown Service requested.""" - - pass diff --git a/neural_insights/web/router.py b/neural_insights/web/router.py deleted file mode 100644 index 84900f885af..00000000000 --- a/neural_insights/web/router.py +++ /dev/null @@ -1,254 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Connector between api.py and components.""" -import json -import os -from threading import Thread -from typing import Any, Callable, Dict, List, Optional - -from werkzeug.wrappers import Response as WebResponse - -from neural_insights.components.diagnosis.diagnosis import Diagnosis -from neural_insights.components.diagnosis.factory import DiagnosisFactory -from neural_insights.components.diagnosis.op_details import OpDetails -from neural_insights.components.graph.graph import Graph -from neural_insights.components.graph.graph_reader import GraphReader -from neural_insights.components.workload_manager.workload_manager import WorkloadManager -from neural_insights.utils.exceptions import ClientErrorException -from neural_insights.utils.json_serializer import JsonSerializer -from neural_insights.web.communication import Request, Response, create_simple_response -from neural_insights.web.exceptions import ServiceNotFoundException -from neural_insights.web.service.request_data_processor import RequestDataProcessor - - -class RoutingDefinition: - """Abstract routing definition.""" - - def __init__(self, callback: Callable[[Dict[str, Any]], Any]) -> None: - """Initialize object.""" - self.callback = callback - - -class RealtimeRoutingDefinition(RoutingDefinition): - """Routing executed in realtime.""" - - pass - - -class DeferredRoutingDefinition(RoutingDefinition): - """Routing executed in separate thread.""" - - pass - - -class Router: - """Connector between api.py and components.""" - - def __init__(self) -> None: - """Initialize object.""" - self.routes: Dict[str, RoutingDefinition] = { - "workloads": RealtimeRoutingDefinition(get_workloads_list), - "workloads/delete": RealtimeRoutingDefinition(delete_workload), - "profiling": RealtimeRoutingDefinition(get_profiling_details), - "model/graph": RealtimeRoutingDefinition(get_model_graph), - "model/summary": RealtimeRoutingDefinition(get_model_summary), - "model/graph/highlight_pattern": RealtimeRoutingDefinition(find_pattern_in_graph), - "diagnosis/op_list": RealtimeRoutingDefinition(get_op_list), - "diagnosis/op_details": RealtimeRoutingDefinition(get_op_details), - "diagnosis/histogram": RealtimeRoutingDefinition(get_histogram), - "profiling/result": RealtimeRoutingDefinition(get_profiling_details), - } - - def handle(self, request: Request) -> Response: - """Run operation on requested component and return result.""" - routing_definition = self.routes.get(request.operation) - if routing_definition is None: - raise ServiceNotFoundException(f"Unable to find {request.operation}") - - data = self._process_routing_definition(routing_definition, request.data) - if isinstance(data, WebResponse): - return data - - serialized_data = JsonSerializer.serialize_item(data) - - return create_simple_response(serialized_data) - - def _process_routing_definition( - self, - routing_definition: RoutingDefinition, - data: dict, - ) -> Any: - """Process routing definition.""" - if isinstance(routing_definition, RealtimeRoutingDefinition): - return routing_definition.callback(data) - if isinstance(routing_definition, DeferredRoutingDefinition): - self._validate_deffered_routing_data(data) - t = Thread(target=routing_definition.callback, args=(data,)) - t.daemon = True - t.start() - return {"exit_code": 102, "message": "processing"} - raise ValueError( - f"Unsupported RoutingDefinition type: {routing_definition.__class__.__name__}", - ) - - @staticmethod - def _validate_deffered_routing_data(data: dict) -> None: - """Validate input data for Deferred Routing and 
raises in case of issues.""" - request_id = str(data.get("request_id", "")) - if not request_id: - raise ClientErrorException("Missing request id.") - - -def get_model_graph(data: Dict[str, Any]) -> Graph: - """Get model graph.""" - graph_reader = GraphReader() - return graph_reader.read( - model_path=RequestDataProcessor.get_string_value(data, "path"), - expanded_groups=data.get("group", []), - ) - - -def get_model_summary(data: Dict[str, Any]) -> Dict: - """Get model graph.""" - workload_id: Optional[str] = data.get("workload_id", None) - workload = WorkloadManager().get_workload(workload_id) - - if workload.model_summary_file is None: - raise Exception("Model summary not found.") - with open(workload.model_summary_file, "r") as summary_file: - model_summary = "\n".join(summary_file.readlines()) - return { - "summary": model_summary, - } - - -def find_pattern_in_graph(data: Dict[str, Any]) -> dict: - """Find OP pattern in graph for diagnosis tab.""" - graph_reader = GraphReader() - - model_path = RequestDataProcessor.get_string_value(data, "path") - op_name = data.get("op_name", None) - pattern = data.get("pattern", None) - if any(param is None for param in [model_path, op_name, pattern]): - raise ClientErrorException( - "Missing parameters. Required parameters are: path, op_name and pattern.", - ) - model_graph, expanded_groups = graph_reader.find_pattern_in_graph( - model_path=model_path, - op_name=op_name, - pattern=pattern, - ) - return {"graph": model_graph.serialize(), "groups": expanded_groups} - - -def get_workloads_list(data: Dict[str, Any]) -> dict: - """Get workloads list.""" - workload_manager = WorkloadManager() - - serialized_workloads = [workload.serialize() for workload in workload_manager.workloads] - return { - "workloads": serialized_workloads, - } - - -def delete_workload(data: Dict[str, Any]) -> dict: - """Remove workload from workloads list.""" - workload_id: Optional[str] = data.get("workload_id", None) - if workload_id is None: - raise ClientErrorException("Could not find workload ID.") - - removed_id = WorkloadManager().remove_workload(workload_id) - - return { - "workload_id": removed_id, - } - - -def get_diagnosis(workload_id: str) -> Diagnosis: - """Get diagnosis object for specified workload.""" - workload = WorkloadManager().get_workload(workload_id) - diagnosis = DiagnosisFactory.get_diagnosis(workload) - return diagnosis - - -def get_op_list(data: Dict[str, Any]) -> List[dict]: - """Get OP list for model.""" - workload_id: Optional[str] = data.get("workload_id", None) - if workload_id is None: - raise ClientErrorException("Could not find workload ID.") - diagnosis = get_diagnosis(workload_id) - - return diagnosis.get_op_list() - - -def get_op_details(data: Dict[str, Any]) -> dict: - """Get OP details for specific OP in model.""" - try: - workload_id: str = str(data.get("workload_id", None)) - op_name: str = str(data.get("op_name", None)) - except ValueError: - raise ClientErrorException("Incorrect parameter values.") - except TypeError: - raise ClientErrorException("Could not find all required parameters.") - - diagnosis = get_diagnosis(workload_id) - - op_details: Optional[OpDetails] = diagnosis.get_op_details(op_name) - if op_details is None: - return {} - return op_details.serialize() - - -def get_histogram(data: Dict[str, Any]) -> list: - """Get histogram of specific tensor in model.""" - try: - workload_id: str = str(data.get("workload_id", None)) - op_name: str = str(data.get("op_name", None)) - histogram_type: str = str(data.get("type", None)) - 
except ValueError: - raise ClientErrorException("Incorrect parameter values.") - except TypeError: - raise ClientErrorException("Could not find all required parameters.") - - diagnosis = get_diagnosis(workload_id) - - histogram_type_map = { - "weights": "weight", - "activation": "activation", - } - - parsed_histogram_type: Optional[str] = histogram_type_map.get(histogram_type, None) - if parsed_histogram_type is None: - raise ClientErrorException( - f"Histogram type not supported. " f"Use one of following: {histogram_type_map.keys()}", - ) - - histogram_data = diagnosis.get_histogram_data(op_name, parsed_histogram_type) - return histogram_data - - -def get_profiling_details(data: Dict[str, Any]) -> List[dict]: - """Get profiling result.""" - workload_id: Optional[str] = data.get("workload_id", None) - if workload_id is None: - raise ClientErrorException("Could not find workload ID.") - workload = WorkloadManager().get_workload(workload_id) - profiling_data_path = os.path.join( - workload.workload_location, - "profiling_data.json", - ) - with open(profiling_data_path, "r") as json_file: - profiling_data = json.load(json_file) - return profiling_data diff --git a/neural_insights/web/server.py b/neural_insights/web/server.py deleted file mode 100644 index 8d3cb1f708f..00000000000 --- a/neural_insights/web/server.py +++ /dev/null @@ -1,253 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
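The router deleted above was the single dispatch point between the web endpoint and the diagnosis components: every HTTP call is wrapped in a Request, and Router.handle() looks the operation name up in its routing table and invokes the matching callback. A minimal sketch of that flow, assuming the Request/Response helpers in neural_insights.web.communication behave as they are used in this handler and that a workload directory has already been configured; this mirrors the removed code rather than any supported API:

```python
# Sketch only: dispatch through the deleted neural_insights router.
from neural_insights.web.communication import Request
from neural_insights.web.router import Router

router = Router()

# "workloads" is one of the realtime routes registered in Router.__init__.
request = Request("GET", "workloads", {})
response = router.handle(request)   # Response wrapping {"workloads": [...]}
print(response.data)
```

An operation name that is not in the routing table raises ServiceNotFoundException, which ResponseGenerator later maps to HTTP 404.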
-"""Main endpoint for GUI.""" -import os -import time -from functools import wraps -from threading import Thread -from typing import Any, Callable - -from flask import Blueprint, Flask -from flask import Request as WebRequest -from flask import jsonify, render_template, request -from flask_cors import CORS -from flask_socketio import SocketIO -from werkzeug.serving import make_ssl_devcert -from werkzeug.wrappers import Response as WebResponse - -from neural_insights.components.workload_manager.workload_manager import WorkloadManager -from neural_insights.utils.consts import CONFIG_REFRESH_PERIOD -from neural_insights.utils.exceptions import InternalException -from neural_insights.utils.logger import log -from neural_insights.web.communication import MessageQueue, Request -from neural_insights.web.configuration import Configuration -from neural_insights.web.router import Router -from neural_insights.web.service.response_generator import ResponseGenerator - -templates_dir = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - "app", - ), -) - -app = Flask( - __name__, - static_folder=templates_dir, - static_url_path="/", - template_folder=templates_dir, -) -app_blueprint = Blueprint("Neural Insights", __name__) -socketio = SocketIO() - -router = Router() - -METHODS = ["GET", "POST"] - -# Suppress TensorFlow messages -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -url_prefix: str = "" -workload_manager = None - - -def run_server(configuration: Configuration) -> None: - """Run webserver on specified scheme, address and port.""" - addr = configuration.server_address - server_port = configuration.server_port - gui_port = configuration.gui_port - token = configuration.token - - global url_prefix - url_prefix = configuration.url_prefix - - cors_allowed_origins = f"{configuration.scheme}://{addr}:{gui_port}" - - app.config["JSON_SORT_KEYS"] = False - app.register_blueprint(app_blueprint, url_prefix=url_prefix) - app.secret_key = token - CORS(app, origins=cors_allowed_origins) - socketio.init_app( - app, - cors_allowed_origins=cors_allowed_origins, - max_http_buffer_size=2000, - ) - tls_args = get_tls_args(configuration) - - global workload_manager - workload_manager = WorkloadManager(workdir_location=configuration.workdir) - - socketio.run(app, host=addr, port=server_port, **tls_args) - - -def get_tls_args(configuration: Configuration) -> dict: - """Get TLS configuration.""" - if configuration.allow_insecure_connections: - return {} - - if configuration.tls_certificate and configuration.tls_key: - certfile = configuration.tls_certificate - keyfile = configuration.tls_key - else: - os.makedirs(configuration.global_config_directory, mode=511, exist_ok=True) - base_path = os.path.join(configuration.global_config_directory, "certificate") - certfile = f"{base_path}.crt" - keyfile = f"{base_path}.key" - if not os.path.isfile(certfile) or not os.path.isfile(keyfile): - certfile, keyfile = make_ssl_devcert(base_path) - - return { - "certfile": certfile, - "keyfile": keyfile, - "do_handshake_on_connect": False, - } - - -@app_blueprint.after_request -def block_iframe(response: WebResponse) -> WebResponse: - """Block iframe and set others CSP.""" - response.headers["X-Frame-Options"] = "DENY" - response.headers["Content-Security-Policy"] = ( - "frame-ancestors 'none'; font-src 'self'; img-src 'self'; script-src 'self'" - ) - response.headers["Access-Control-Max-Age"] = "-1" - return response - - -@app_blueprint.after_request -def block_sniffing(response: WebResponse) -> WebResponse: - """Block MIME 
sniffing.""" - response.headers["X-Content-Type-Options"] = "nosniff" - return response - - -def require_api_token(func: Callable) -> Any: - """Validate authorization token.""" - - @wraps(func) - def check_token(*args: str, **kwargs: str) -> Any: - """Validate that correct token was provided.""" - provided_token = request.headers.get( - "Authorization", - request.args.to_dict().get("token", None), - ) - - if not app.secret_key == provided_token: - return ( - "Invalid token, please use the URL displayed by the server on startup", - 403, - ) - - return func(*args, **kwargs) - - return check_token - - -@app_blueprint.route("/", methods=METHODS) -def root() -> Any: - """Serve JS application index.""" - return render_template("index.html", url_prefix=url_prefix) - - -@app_blueprint.route("/api/", methods=METHODS) -@require_api_token -def handle_api_call(subpath: str) -> Any: - """Handle API access.""" - try: - parameters = build_parameters(subpath, request) - response = router.handle(parameters) - if isinstance(response, WebResponse): - return response - return jsonify(response.data) - except Exception as err: - if isinstance(err, InternalException): - log.critical(err) - return ResponseGenerator.from_exception(err) - - -@app_blueprint.route("/api/", methods=["OPTIONS"]) -def allow_api_call(subpath: str) -> Any: - """Allow for API access.""" - return "OK" - - -@app.errorhandler(404) -def page_not_found(e: Any) -> Any: - """Serve JS application index when no static file found.""" - return render_template( - "index.html", - url_prefix=url_prefix, - ) - - -@app_blueprint.after_request -def disable_cache(response: WebResponse) -> WebResponse: - """Disable cache on all requests.""" - response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" - response.headers["Pragma"] = "no-cache" - response.headers["Expires"] = "0" - response.headers["Cache-Control"] = "public, max-age=0" - return response - - -def build_parameters(endpoint: str, request: WebRequest) -> Request: - """Build domain object from flask request.""" - data = request.get_json() if request.is_json else request.args.to_dict(flat=False) - return Request(request.method, endpoint, data) - - -def web_socket_publisher(web_socket: SocketIO) -> None: - """Send messages from queue via web-socket to GUI.""" - queue = MessageQueue() - while True: - message = queue.get() - web_socket.emit( - message.subject, - {"status": message.status, "data": message.data}, - ) - socketio.sleep(0) - - -publisher = Thread( - target=web_socket_publisher, - args=(socketio,), -) -publisher.daemon = True -publisher.start() - - -def config_watcher(period_in_s: int) -> None: - """Observe config for changes and send notification when changed.""" - queue = MessageQueue() - config_modification_time = None - if workload_manager is not None: - config_modification_time = os.stat(workload_manager.config_path).st_mtime - while True: - time.sleep(period_in_s) - if workload_manager is None: - continue - config_path = workload_manager.config_path - config_mod_time_new = os.stat(config_path).st_mtime - if config_modification_time != config_mod_time_new: - config_modification_time = config_mod_time_new - queue.post_info("Config update", "Workload config has been modified.") - - -conf_watcher = Thread( - target=config_watcher, - args=(CONFIG_REFRESH_PERIOD,), -) -conf_watcher.daemon = True -conf_watcher.start() diff --git a/neural_insights/web/service/__init__.py b/neural_insights/web/service/__init__.py deleted file mode 100644 index 51720d2b907..00000000000 --- 
a/neural_insights/web/service/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Web services.""" diff --git a/neural_insights/web/service/request_data_processor.py b/neural_insights/web/service/request_data_processor.py deleted file mode 100644 index e6cc24f9529..00000000000 --- a/neural_insights/web/service/request_data_processor.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Request data processor.""" - -from typing import Any, Dict - -from neural_insights.utils.exceptions import ClientErrorException - - -class RequestDataProcessor: - """Request data processor.""" - - @staticmethod - def get_string_value(data: Dict[str, Any], name: str) -> str: - """Get string value from request.""" - try: - return data[name][0] - except KeyError: - raise ClientErrorException(f"Missing {name} parameter") diff --git a/neural_insights/web/service/response_generator.py b/neural_insights/web/service/response_generator.py deleted file mode 100644 index f782c91824b..00000000000 --- a/neural_insights/web/service/response_generator.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
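RequestDataProcessor above encodes one small convention: query parameters arrive as lists (Flask's `to_dict(flat=False)`), so `get_string_value()` returns the first element and converts a missing key into a ClientErrorException. A short sketch of that contract, with placeholder values:

```python
# Sketch of the deleted helper's contract; the model path is a placeholder.
from neural_insights.utils.exceptions import ClientErrorException
from neural_insights.web.service.request_data_processor import RequestDataProcessor

data = {"path": ["/tmp/model.onnx"]}          # list-valued, as Flask delivers query args
assert RequestDataProcessor.get_string_value(data, "path") == "/tmp/model.onnx"

try:
    RequestDataProcessor.get_string_value(data, "workload_id")
except ClientErrorException as err:
    print(err)  # "Missing workload_id parameter"
```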
-"""Response generator.""" - -from werkzeug.wrappers import Response - -from neural_insights.utils.exceptions import ( - AccessDeniedException, - ClientErrorException, - InternalException, - NotFoundException, -) - - -class ResponseGenerator: - """Response generator class.""" - - @staticmethod - def add_refresh(response: Response, refresh_time: int) -> Response: - """Add Refresh header to response.""" - response.headers["refresh"] = refresh_time - return response - - @staticmethod - def from_exception(exception: Exception) -> Response: - """Create Response from Exception.""" - return Response( - response=str(exception), - status=ResponseGenerator.get_status_code_for_exception(exception), - ) - - @staticmethod - def get_status_code_for_exception(exception: Exception) -> int: - """Get HTTP status code for Exception.""" - if isinstance(exception, ClientErrorException): - return 400 - if isinstance(exception, AccessDeniedException): - return 403 - if isinstance(exception, NotFoundException): - return 404 - if isinstance(exception, InternalException): - return 500 - return 500 diff --git a/neural_solution/README.md b/neural_solution/README.md deleted file mode 100644 index bb88539e4af..00000000000 --- a/neural_solution/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# What's Neural Solution? - -Neural Solution is a flexible and easy to use tool that brings the capabilities of Intel® Neural Compressor as a service. With Neural Solution, Users can effortlessly submit optimization tasks through the RESTful/gRPC APIs. Neural Solution automatically dispatches these tasks to one or multiple nodes, streamlining the entire process. - -# Why Neural Solution? - -- Task Parallelism: Neural Solution automatically schedules the optimization task queue by coordinating available resources and allows execution of multiple optimization tasks simultaneously. -- Tuning Parallelism: Neural Solution accelerates the optimization process by seamlessly parallelizing the tuning across multiple nodes. -- APIs Support: Neural Solution supports both RESTful and gRPC APIs, enabling users to conveniently submit optimization tasks. -- Code Less: When working with Hugging Face models, Neural Solution seamlessly integrates the functionality of the [Neural Coder](https://github.com/intel/neural-compressor/tree/master/neural_coder), eliminating the need for any code modifications during the optimization process. - -# How does Neural Solution Work? -![NS-OaaS-Intro](./docs/source/imgs/NS-OaaS-Intro.png) - -# Get Started -## Installation -### Prerequisites - -- Install [Anaconda](https://docs.anaconda.com/free/anaconda/install/) -- Install [Open MPI](https://www.open-mpi.org/faq/?category=building#easy-build) -- Python 3.8 or later - - -There are two ways to install the neural solution: -### Method 1. Using pip: -``` -pip install neural-solution -``` - -### Method 2. 
Building from source: -```shell -# get source code -git clone https://github.com/intel/neural-compressor -cd neural-compressor - -# install neural compressor -pip install -r requirements.txt -python setup.py install - -# install neural solution -pip install -r neural_solution/requirements.txt -python setup.py neural_solution install -``` - -## End-to-end examples -- [Quantizing a Hugging Face model](./examples/hf_models/README.md) -- [Quantizing a custom model](./examples/custom_models_optimized/tf_example1/README.md) -## Learn More - - -- The Architecture documents -- [APIs Reference](./docs/source/description_api.md) - -# Contact - -Please contact us at [inc.maintainers@intel.com](mailto:inc.maintainers@intel.com) for any Neural Solution related question. diff --git a/neural_solution/__init__.py b/neural_solution/__init__.py deleted file mode 100644 index 67e0ac52f38..00000000000 --- a/neural_solution/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution.""" -from neural_solution.utils import logger diff --git a/neural_solution/backend/__init__.py b/neural_solution/backend/__init__.py deleted file mode 100644 index 0fea45c0091..00000000000 --- a/neural_solution/backend/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution backend.""" -from neural_solution.backend.cluster import Cluster -from neural_solution.backend.result_monitor import ResultMonitor -from neural_solution.backend.scheduler import Scheduler -from neural_solution.backend.task_db import TaskDB -from neural_solution.backend.task_monitor import TaskMonitor diff --git a/neural_solution/backend/cluster.py b/neural_solution/backend/cluster.py deleted file mode 100644 index f742bde0726..00000000000 --- a/neural_solution/backend/cluster.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution cluster.""" -import sqlite3 -import threading -from collections import Counter -from typing import List - -from neural_solution.backend.utils.utility import create_dir, synchronized -from neural_solution.utils import logger - - -class Cluster: - """Cluster resource management based on sockets.""" - - def __init__(self, node_lst=[], db_path=None): - """Init Cluster. - - Args: - node_lst: node list. Defaults to []. - db_path: cluster db path. Defaults to None. - """ - self.lock = threading.Lock() - self.node_lst = node_lst - self.socket_queue = [] - self.db_path = db_path - create_dir(db_path) - self.conn = sqlite3.connect(f"{db_path}", check_same_thread=False) - self.initial_cluster_from_node_lst(node_lst) - self.lock = threading.Lock() - - def reserve_resource(self, task): - """Reserve the resource and return the requested list of resources.""" - reserved_resource_lst = [] - workers = task.workers - logger.info(f"task {task.task_id} needs {workers}") - reserved_resource_lst = self.get_free_socket(workers) - if reserved_resource_lst: - allocated_resources = {} - counts = Counter(int(item.split()[0]) for item in reserved_resource_lst) - - for node_id, count in counts.items(): - allocated_resources[node_id] = count - for node_id in allocated_resources: - sql = """ - UPDATE cluster - SET busy_sockets = busy_sockets + ?, - free_sockets = total_sockets - busy_sockets - ? - WHERE id = ? - """ - self.cursor.execute(sql, (allocated_resources[node_id], allocated_resources[node_id], node_id)) - self.conn.commit() - logger.info(f"[Cluster] Assign {reserved_resource_lst} to task {task.task_id}") - return reserved_resource_lst - - @synchronized - def free_resource(self, reserved_resource_lst): - """Free the resource by adding the previous occupied resources to the socket queue.""" - self.socket_queue += reserved_resource_lst - counts = Counter(int(item.split()[0]) for item in reserved_resource_lst) - free_resources = {} - for node_id, count in counts.items(): - free_resources[node_id] = count - for node_id, count in counts.items(): - free_resources[node_id] = count - for node_id in free_resources: - sql = """ - UPDATE cluster - SET free_sockets = free_sockets + ?, - busy_sockets = total_sockets - free_sockets - ? - WHERE id = ? 
- """ - self.cursor.execute(sql, (free_resources[node_id], free_resources[node_id], node_id)) - self.conn.commit() - # delete nodes with status of remove, some version without RETURNING syntax - self.cursor.execute("SELECT id FROM cluster WHERE status='remove' AND busy_sockets=0") - deleted_ids = self.cursor.fetchall() - deleted_ids = [str(id_tuple[0]) for id_tuple in deleted_ids] - self.cursor.execute("DELETE FROM cluster WHERE status='remove' AND busy_sockets=0") - self.conn.commit() - - # remove deleted nodes from socket queue - socket_queue_delete_ids = [socket for socket in self.socket_queue if socket.split()[0] in deleted_ids] - if len(socket_queue_delete_ids) > 0: - logger.info(f"[Cluster] remove node-list {socket_queue_delete_ids} from socket_queue: {self.socket_queue}") - self.socket_queue = [socket for socket in self.socket_queue if socket.split()[0] not in deleted_ids] - logger.info(f"[Cluster] free resource {reserved_resource_lst}, now have free resource {self.socket_queue}") - - @synchronized - def get_free_socket(self, num_sockets: int) -> List[str]: - """Get the free sockets list.""" - booked_socket_lst = [] - - # detect and append new resource - self.cursor.execute("SELECT id, name, total_sockets FROM cluster where status = 'join'") - new_node_lst = self.cursor.fetchall() - for index, name, total_sockets in new_node_lst: - sql = """ - UPDATE cluster - SET status = ? - WHERE id = ? - """ - self.cursor.execute(sql, ("alive", index)) - self.conn.commit() - self.socket_queue += [str(index) + " " + name] * total_sockets - logger.info(f"[Cluster] add new node-id {index} to socket_queue: {self.socket_queue}") - - # do not assign nodes with status of remove - # remove to-delete nodes from socket queue - self.cursor.execute("SELECT id FROM cluster WHERE status='remove'") - deleted_ids = self.cursor.fetchall() - deleted_ids = [str(id_tuple[0]) for id_tuple in deleted_ids] - - socket_queue_delete_ids = [socket for socket in self.socket_queue if socket.split()[0] in deleted_ids] - if len(socket_queue_delete_ids) > 0: - logger.info(f"[Cluster] remove node-list {socket_queue_delete_ids} from socket_queue: {self.socket_queue}") - self.socket_queue = [socket for socket in self.socket_queue if socket.split()[0] not in deleted_ids] - - # delete nodes with status of remove - self.cursor.execute("DELETE FROM cluster WHERE status='remove' AND busy_sockets=0") - self.conn.commit() - - if len(self.socket_queue) < num_sockets: - logger.info(f"Can not allocate {num_sockets} sockets, due to only {len(self.socket_queue)} left.") - return 0 - else: - booked_socket_lst = self.socket_queue[:num_sockets] - self.socket_queue = self.socket_queue[num_sockets:] - return booked_socket_lst - - @synchronized - def initial_cluster_from_node_lst(self, node_lst): - """Initialize cluster according to the node list. - - Args: - node_lst (List): the node list. 
- """ - # sqlite should set this check_same_thread to False - self.conn = sqlite3.connect(f"{self.db_path}", check_same_thread=False) - self.cursor = self.conn.cursor() - self.cursor.execute("drop table if exists cluster ") - self.cursor.execute( - r"create table cluster(id INTEGER PRIMARY KEY AUTOINCREMENT," - + "name varchar(100)," - + "node_info varchar(500)," - + "status varchar(100)," - + "free_sockets int," - + "busy_sockets int," - + "total_sockets int)" - ) - self.node_lst = node_lst - for index, node in enumerate(self.node_lst): - self.socket_queue += [str(index + 1) + " " + node.name] * node.num_sockets - self.cursor.execute( - r"insert into cluster(name, node_info, status, free_sockets, busy_sockets, total_sockets)" - + "values ('{}', '{}', '{}', {}, {}, {})".format( - node.name, - repr(node).replace("Node", f"Node{index+1}"), - "alive", - node.num_sockets, - 0, - node.num_sockets, - ) - ) - - self.conn.commit() - logger.info(f"socket_queue: {self.socket_queue}") - - -class Node: - """Node definition.""" - - name: str = "unknown_node" - ip: str = "unknown_ip" - num_sockets: int = 0 - num_cores_per_socket: int = 0 - num_gpus: int = 0 # For future use - - def __init__( - self, name: str, ip: str = "unknown_ip", num_sockets: int = 0, num_cores_per_socket: int = 0, num_gpus: int = 0 - ) -> None: - """Init node. - - hostfile template: - host1 2 20 # host1 has 2 sockets, each socket has 20 cores - host2 2 20 # host2 has 2 sockets, each socket has 20 cores - - Args: - name: node name - ip: ip address. Defaults to "unknown_ip". - num_sockets: the number of sockets. Defaults to 0. - num_cores_per_socket: the number of core(s) per socket. Defaults to 0. - num_gpus: the number of gpus. Defaults to 0. - """ - self.name = name - self.ip = ip - self.num_sockets = num_sockets - self.num_cores_per_socket = num_cores_per_socket - self.num_gpus = num_gpus - - def __repr__(self) -> str: - """Return node info. - - Returns: - str: node info. - """ - return ( - f"Node: {self.name}(ip: {self.ip}) has {self.num_sockets} socket(s) " - f"and each socket has {self.num_cores_per_socket} cores." - ) diff --git a/neural_solution/backend/result_monitor.py b/neural_solution/backend/result_monitor.py deleted file mode 100644 index fb99a34f409..00000000000 --- a/neural_solution/backend/result_monitor.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution result monitor.""" - -import socket - -from neural_solution.backend.task_db import TaskDB -from neural_solution.backend.utils.utility import deserialize, serialize -from neural_solution.utils import logger - - -class ResultMonitor: - """ResultMonitor is a thread that monitors the coming task results and update the task collection in the TaskDb. - - Attributes: - port: The port that ResultMonitor listens to - task_db: the TaskDb object that manages the tasks - """ - - def __init__(self, port, task_db: TaskDB): - """Init ResultMonitor. 
- - Args: - port (int): the port for monitoring task results. - task_db (TaskDB): the object of TaskDB. - """ - self.s = socket.socket() - self.port = port - self.task_db = task_db - - def wait_result(self): - """Monitor the task results and update them in the task db and send back to studio.""" - self.s.bind(("localhost", self.port)) # open a port as the serving port for results - self.s.listen(10) - while True: - logger.info("[ResultMonitor] waiting for results...") - c, addr = self.s.accept() - result = c.recv(2048) - result = deserialize(result) - if "ping" in result: - logger.info("[ResultMonitor] Client query status.") - c.send(b"ok") - c.close() - continue - logger.info("[ResultMonitor] getting result: {}".format(result)) - logger.info("[ResultMonitor] getting q_model path: {}".format(result["q_model_path"])) - self.task_db.update_q_model_path_and_result(result["task_id"], result["q_model_path"], result["result"]) - c.close() - # TODO send back the result to the studio - # or let studio manually fresh the page and call the query_task_status to get the result? - - def query_task_status(self, task_id): - """Synchronize query on the task status.""" - # TODO send back the result to the studio? RPC for query? - logger.info(self.task_db.lookup_task_status(task_id)) diff --git a/neural_solution/backend/runner.py b/neural_solution/backend/runner.py deleted file mode 100644 index ebf013137c8..00000000000 --- a/neural_solution/backend/runner.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Main backend runner.""" -import argparse -import threading - -from neural_solution.backend import ResultMonitor, Scheduler, TaskDB, TaskMonitor -from neural_solution.backend.utils.utility import build_cluster, get_db_path -from neural_solution.config import config -from neural_solution.utils import logger - - -def parse_args(args=None): - """Parse the command line options. - - Args: - args (Any, optional): the command line options. Defaults to None. - - Returns: - argparse.Namespace: arguments. - """ - parser = argparse.ArgumentParser( - description="Neural Solution runner automatically schedules multiple inc tasks and\ - executes multi-node distributed tuning." - ) - - parser.add_argument( - "-H", "--hostfile", type=str, default=None, help="Path to the host file which contains all available nodes." 
- ) - parser.add_argument("-TMP", "--task_monitor_port", type=int, default=2222, help="Port to monitor task.") - parser.add_argument("-RMP", "--result_monitor_port", type=int, default=3333, help="Port to monitor result.") - parser.add_argument("-WS", "--workspace", type=str, default="./", help="Work space.") - parser.add_argument( - "-CEN", "--conda_env_name", type=str, default="inc", help="Conda environment for task execution" - ) - parser.add_argument("-UP", "--upload_path", type=str, default="./examples", help="Custom example path.") - - return parser.parse_args(args=args) - - -def main(args=None): - """Implement the main entry of backend. - - create the task db. - start the result monitor. - start the task scheduler. - start the task monitor. - """ - args = parse_args(args) - - logger.info(f"Current workspace {args.workspace}") - db_path = get_db_path(args.workspace) - - # Initialize cluster from the host file. If there is no host file, build one local cluster. - cluster, num_threads_per_process = build_cluster(args.hostfile, db_path) - - # initialize the task db - task_db = TaskDB(db_path) - - # start three threads - rm = ResultMonitor(args.result_monitor_port, task_db) - t_rm = threading.Thread(target=rm.wait_result) - config.workspace = args.workspace - - ts = Scheduler( - cluster, - task_db, - args.result_monitor_port, - conda_env_name=args.conda_env_name, - upload_path=args.upload_path, - config=config, - num_threads_per_process=num_threads_per_process, - ) - t_ts = threading.Thread(target=ts.schedule_tasks) - - tm = TaskMonitor(args.task_monitor_port, task_db) - t_tm = threading.Thread(target=tm.wait_new_task) - - t_rm.start() - t_ts.start() - t_tm.start() - logger.info( - "task monitor port {} and result monitor port {}".format(args.task_monitor_port, args.result_monitor_port) - ) - logger.info("server start...") - - t_rm.join() - t_ts.join() - t_tm.join() - - -if __name__ == "__main__": - main() diff --git a/neural_solution/backend/scheduler.py b/neural_solution/backend/scheduler.py deleted file mode 100644 index 14ffa3afb1b..00000000000 --- a/neural_solution/backend/scheduler.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
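runner.py above is the entry point that wires the three backend threads (result monitor, scheduler, task monitor) together. A hedged sketch of starting it in-process with the same flags `parse_args()` defines; the ports and workspace path are placeholders, omitting `--hostfile` falls back to the local three-node cluster, and the call blocks while the monitor threads run:

```python
# Sketch: launch the deleted Neural Solution backend runner in-process.
from neural_solution.backend.runner import main

main([
    "--task_monitor_port", "2222",     # port the TaskMonitor listens on
    "--result_monitor_port", "3333",   # port the ResultMonitor listens on
    "--workspace", "./ns_workspace",   # placeholder workspace directory
    "--conda_env_name", "inc",         # base environment cloned per task when needed
])
```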
-"""Neural Solution scheduler.""" -import glob -import json -import os -import re -import shutil -import socket -import subprocess -import threading -import time - -from neural_solution.backend.cluster import Cluster -from neural_solution.backend.task import Task -from neural_solution.backend.task_db import TaskDB -from neural_solution.backend.utils.utility import ( - build_workspace, - dump_elapsed_time, - get_current_time, - get_q_model_path, - get_task_log_path, - is_remote_url, - serialize, -) -from neural_solution.utils import logger -from neural_solution.utils.utility import get_task_log_workspace, get_task_workspace - -# TODO update it according to the platform -cmd = "echo $(conda info --base)/etc/profile.d/conda.sh" -CONDA_SOURCE_PATH = subprocess.getoutput(cmd) - - -class Scheduler: - """Scheduler dispatches the task with the available resources, calls the mpi command and report results.""" - - def __init__( - self, - cluster: Cluster, - task_db: TaskDB, - result_monitor_port, - conda_env_name=None, - upload_path="./examples", - config=None, - num_threads_per_process=5, - ): - """Scheduler dispatches the task with the available resources, calls the mpi command and report results. - - Attributes: - cluster: the Cluster object that manages the server resources - task_db: the TaskDb object that manages the tasks - result_monitor_port: The result monitor port to report the accuracy and performance result - conda_env_name: The basic environment for task execution - upload_path: Custom example path. - """ - self.cluster = cluster - self.task_db = task_db - self.result_monitor_port = result_monitor_port - self.conda_env_name = conda_env_name - self.upload_path = upload_path - self.config = config - self.num_threads_per_process = num_threads_per_process - - def prepare_env(self, task: Task): - """Check and create a conda environment. - - If the required packages are not installed in the conda environment, - create a new conda environment and install the required packages. - - Args: - task (Task): task - """ - # Define the prefix of the conda environment name - env_prefix = self.conda_env_name - requirement = task.requirement.split(" ") - # Skip check when requirement is empty. 
- if requirement == [""]: - return env_prefix - # Construct the command to list all the conda environments - cmd = "conda env list" - output = subprocess.getoutput(cmd) - # Parse the output to get a list of conda environment names - env_list = [line.strip().split()[0] for line in output.splitlines()[2:]] - conda_env = None - for env_name in env_list: - # Only check the conda environments that start with the specified prefix - if env_name.startswith(env_prefix): - conda_bash_cmd = f"source {CONDA_SOURCE_PATH}" - cmd = f"{conda_bash_cmd} && conda activate {env_name} && conda list" - output = subprocess.getoutput(cmd) - # Parse the output to get a list of installed package names - installed_packages = [line.split()[0] for line in output.splitlines()[2:]] - installed_packages_version = [ - line.split()[0] + "=" + line.split()[1] for line in output.splitlines()[2:] - ] - missing_packages = set(requirement) - set(installed_packages) - set(installed_packages_version) - if not missing_packages: - conda_env = env_name - break - if conda_env is None: - # Construct the command to create a new conda environment and install the required packages - from datetime import datetime - - now = datetime.now() - suffix = now.strftime("%Y%m%d-%H%M%S") - conda_env = f"{env_prefix}_{suffix}" - # Construct the name of the new conda environment - cmd = ( - f"source {CONDA_SOURCE_PATH} && conda create -n {conda_env} --clone {env_prefix}" - f" && conda activate {conda_env} && pip install {task.requirement.replace('=','==')}" - ) - p = subprocess.Popen(cmd, shell=True) # nosec - logger.info(f"[Scheduler] Creating new environment {conda_env} start.") - p.wait() - logger.info(f"[Scheduler] Creating new environment {conda_env} end.") - return conda_env - - def prepare_task(self, task: Task): - """Prepare workspace and download run_task.py for task. 
- - Args: - task (Task): task - """ - self.task_path = build_workspace(path=get_task_workspace(self.config.workspace), task_id=task.task_id) - logger.info(f"****TASK PATH: {self.task_path}") - if is_remote_url(task.script_url): - task_url = task.script_url.replace("github.com", "raw.githubusercontent.com").replace("blob", "") - try: - subprocess.check_call(["wget", "-P", self.task_path, task_url]) - except subprocess.CalledProcessError as e: - logger.info("Failed: {}".format(e.cmd)) - else: - # Assuming the file is uploaded in directory examples - example_path = os.path.abspath(os.path.join(self.upload_path, task.script_url)) - # only one python file - script_path = glob.glob(os.path.join(example_path, "*.py"))[0] - # script_path = glob.glob(os.path.join(example_path, f'*{extension}'))[0] - self.script_name = script_path.split("/")[-1] - shutil.copy(script_path, os.path.abspath(self.task_path)) - task.arguments = task.arguments.replace("=dataset", "=" + os.path.join(example_path, "dataset")).replace( - "=model", "=" + os.path.join(example_path, "model") - ) - if not task.optimized: - # Generate quantization code with Neural Coder API - neural_coder_cmd = ["python -m neural_coder --enable --approach"] - # for users to define approach: "static", "static_ipex", "dynamic", "auto" - approach = task.approach - neural_coder_cmd.append(approach) - if is_remote_url(task.script_url): - self.script_name = task.script_url.split("/")[-1] - neural_coder_cmd.append(self.script_name) - neural_coder_cmd = " ".join(neural_coder_cmd) - full_cmd = """cd {}\n{}""".format(self.task_path, neural_coder_cmd) - p = subprocess.Popen(full_cmd, shell=True) # nosec - logger.info("[Neural Coder] Generating optimized code start.") - p.wait() - logger.info("[Neural Coder] Generating optimized code end.") - - def check_task_status(self, log_path): - """Check status for the task from log path. - - Args: - log_path (str): the log path for task. - - Returns: - str: status "done" or "failed" - """ - for line in reversed(open(log_path).readlines()): - res_pattern = r"[INFO] Save deploy yaml to" - # res_matches = re.findall(res_pattern, line) - if res_pattern in line: - return "done" - return "failed" - - def _parse_cmd(self, task: Task, resource): - # mpirun -np 3 -mca btl_tcp_if_include 192.168.20.0/24 -x OMP_NUM_THREADS=80 - # --host mlt-skx091,mlt-skx050,mlt-skx053 bash run_distributed_tuning.sh - self.prepare_task(task) - conda_env = self.prepare_env(task) - host_str = ",".join([item.split(" ")[1] for item in resource]) - logger.info(f"[TaskScheduler] host resource: {host_str}") - - # Activate environment - conda_bash_cmd = f"source {CONDA_SOURCE_PATH}" - conda_env_cmd = f"conda activate {conda_env}" - mpi_cmd = [ - "mpirun", - "-np", - "{}".format(task.workers), - "-host", - "{}".format(host_str), - "-map-by", - "socket:pe={}".format(self.num_threads_per_process), - "-mca", - "btl_tcp_if_include", - "192.168.20.0/24", # TODO replace it according to the node - "-x", - "OMP_NUM_THREADS={}".format(self.num_threads_per_process), - "--report-bindings", - ] - mpi_cmd = " ".join(mpi_cmd) - - # Initial Task command - task_cmd = ["python"] - task_cmd.append(self.script_name) - task_cmd.append(self.sanitize_arguments(task.arguments)) - task_cmd = " ".join(task_cmd) - - # use optimized code by Neural Coder - if not task.optimized: - task_cmd = task_cmd.replace(".py", "_optimized.py") - - # build a bash script to run task. 
- bash_script_name = "distributed_run.sh" if task.workers > 1 else "run.sh" - bash_script = """{}\n{}\ncd {}\n{}""".format(conda_bash_cmd, conda_env_cmd, self.task_path, task_cmd) - bash_script_path = os.path.join(self.task_path, bash_script_name) - with open(bash_script_path, "w", encoding="utf-8") as f: - f.write(bash_script) - full_cmd = """cd {}\n{} bash {}""".format(self.task_path, mpi_cmd, bash_script_name) - - return full_cmd - - def report_result(self, task_id, log_path, task_runtime): - """Report the result to the result monitor.""" - s = socket.socket() - s.connect(("localhost", self.result_monitor_port)) - results = {"optimization time (seconds)": "{:.2f}".format(task_runtime)} - for line in reversed(open(log_path).readlines()): - res_pattern = r"Tune (\d+) result is:\s.*?\(int8\|fp32\):\s+(\d+\.\d+).*?\(int8\|fp32\):\s+(\d+\.\d+).*?" - res_matches = re.findall(res_pattern, line) - if res_matches: - # results["Tuning count"] = res_matches[0][0] - results["Accuracy"] = res_matches[0][1] - results["Duration (seconds)"] = res_matches[0][2] - # break when the last result is matched - break - - results = json.dumps(results) - - s.send(serialize({"task_id": task_id, "result": results, "q_model_path": self.q_model_path})) - s.close() - - @dump_elapsed_time("Task execution") - def launch_task(self, task: Task, resource): - """Generate the mpi command and execute the task. - - Redirect the log to ./TASK_LOG_PATH/task_/txt - """ - full_cmd = self._parse_cmd(task, resource) - logger.info(f"[TaskScheduler] Parsed the command from task: {full_cmd}") - log_path = get_task_log_path(log_path=get_task_log_workspace(self.config.workspace), task_id=task.task_id) - p = subprocess.Popen(full_cmd, stdout=open(log_path, "w+"), stderr=subprocess.STDOUT, shell=True) # nosec - logger.info(f"[TaskScheduler] Start run task {task.task_id}, dump log into {log_path}") - start_time = time.time() - p.wait() - self.cluster.free_resource(resource) - task_runtime = time.time() - start_time - logger.info( - f"[TaskScheduler] Finished task {task.task_id}, and free resource {resource}, dump log into {log_path}" - ) - task_status = self.check_task_status(log_path) - self.task_db.update_task_status(task.task_id, task_status) - self.q_model_path = get_q_model_path(log_path=log_path, task_id=task.task_id) if task_status == "done" else None - self.report_result(task.task_id, log_path, task_runtime) - - def dispatch_task(self, task, resource): - """Dispatch the task in a thread.""" - t = threading.Thread(target=self.launch_task, args=(task, resource)) - t.start() - - def schedule_tasks(self): - """After each 5 seconds, check the task queue and try to schedule a task.""" - while True: - time.sleep(5) - logger.info(f"[TaskScheduler {get_current_time()}] try to dispatch a task...") - if self.task_db.get_pending_task_num() > 0: - logger.info( - f"[TaskScheduler {get_current_time()}], " - + f"there are {self.task_db.get_pending_task_num()} task pending." 
- ) - task_id = self.task_db.task_queue[0] - task = self.task_db.get_task_by_id(task_id) - resource = self.cluster.reserve_resource(task) - if resource: - self.task_db.task_queue.popleft() - self.task_db.update_task_status(task.task_id, "running") - self.dispatch_task(task, resource) - else: - logger.info("[TaskScheduler] no enough node resources!") - else: - logger.info("[TaskScheduler] no requests in the deque!") - - def sanitize_arguments(self, arguments: str): - """Replace space encoding with space.""" - return arguments.replace("\xa0", " ") diff --git a/neural_solution/backend/task.py b/neural_solution/backend/task.py deleted file mode 100644 index 669f1e4a99b..00000000000 --- a/neural_solution/backend/task.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution task.""" - - -class Task: - """A Task is an abstraction of a user tuning request that is handled in neural solution service. - - Attributes: - task_id: The task id - arguments: The task command - workers: The requested resource unit number - status: The status of the task: pending/running/done - result: The result of the task, which is only value-assigned when the task is done - """ - - def __init__( - self, - task_id, - arguments, - workers, - status, - script_url, - optimized, - approach, - requirement, - result="", - q_model_path="", - ): - """Init task. - - Args: - task_id (str): the id of task. - arguments (str): the running arguments for task. - workers (int): the resources. - status (str): "pending", "running", "done", "failed" - script_url (str): the python script address - optimized (bool): the running script has been optimized - approach (str): the quantization method - requirement (str): python packages - result (str, optional): the result of task. Defaults to "". - q_model_path (str, optional): the quantized model path. Defaults to "". - """ - self.task_id = task_id - self.arguments = arguments - self.workers = workers - self.status = status - self.script_url = script_url - self.optimized = optimized - self.approach = approach - self.requirement = requirement - self.result = result - self.q_model_path = q_model_path diff --git a/neural_solution/backend/task_db.py b/neural_solution/backend/task_db.py deleted file mode 100644 index 08636c12b8c..00000000000 --- a/neural_solution/backend/task_db.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
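Task above is the unit of work that the scheduler and TaskDB pass around. A sketch of a pending task as the deleted backend would represent it, with all field values purely illustrative: `optimized=True` makes `Scheduler.prepare_task()` skip the Neural Coder rewrite, and an empty `requirement` keeps the base conda environment in `prepare_env()`.

```python
# Sketch: construct a pending task; every value below is a placeholder.
from neural_solution.backend.task import Task

task = Task(
    task_id="3079e2aecbd44ae89d5e3906fe0c4b24",                       # placeholder id
    arguments="--model_name_or_path bert-base-uncased --task_name mrpc",
    workers=2,                          # requested resource units (sockets)
    status="pending",
    script_url="custom_models_optimized/tf_example1",                 # local example dir
    optimized=True,                     # script already contains quantization code
    approach="static",
    requirement="",                     # no extra packages -> reuse base conda env
)
print(task.task_id, task.status)
```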
-"""Neural Solution task database.""" -import sqlite3 -import threading -from collections import deque - -from neural_solution.backend.task import Task -from neural_solution.backend.utils.utility import create_dir - - -class TaskDB: - """TaskDb manages all the tasks. - - TaskDb provides atomic operations on managing the task queue and task details. - - Attributes: - task_queue: a FIFO queue that only holds pending task ids - task_collections: a growing-only list of all task objects and their details (no garbage collection currently) - lock: the lock on the data structures to provide atomic operations - """ - - def __init__(self, db_path): - """Init TaskDB. - - Args: - db_path (str): the database path. - """ - self.task_queue = deque() - create_dir(db_path) - # sqlite should set this check_same_thread to False - self.conn = sqlite3.connect(f"{db_path}", check_same_thread=False) - self.cursor = self.conn.cursor() - self.cursor.execute( - "create table if not exists task(id TEXT PRIMARY KEY, arguments varchar(100), " - + "workers int, status varchar(20), script_url varchar(500), optimized integer, " - + "approach varchar(20), requirements varchar(500), result varchar(500), q_model_path varchar(200))" - ) - self.conn.commit() - # self.task_collections = [] - self.lock = threading.Lock() - - def append_task(self, task): - """Append the task to the task queue.""" - with self.lock: - self.task_queue.append(task.task_id) - - def get_pending_task_num(self): - """Get the number of the pending tasks.""" - with self.lock: - return len(self.task_queue) - - def get_all_pending_tasks(self): - """Get all the pending task objects.""" - self.cursor.execute(r"select * from task where status=='pending'") - task_lst = self.cursor.fetchall() - res_lst = [] - for task_tuple in task_lst: - res_lst.append(Task(*task_tuple)) - return res_lst - - def update_task_status(self, task_id, status): - """Update the task status with the task id and the status.""" - if status not in ["pending", "running", "done", "failed"]: - raise Exception("status invalid, should be one of pending/running/done") - self.cursor.execute(r"update task set status='{}' where id=?".format(status), (task_id,)) - self.conn.commit() - - def update_result(self, task_id, result_str): - """Update the task result with the result string.""" - self.cursor.execute(r"update task set result='{}' where id={}".format(result_str, task_id)) - self.conn.commit() - - def update_q_model_path_and_result(self, task_id, q_model_path, result_str): - """Update the task result with the result string.""" - self.cursor.execute( - r"update task set q_model_path='{}', result='{}' where id=?".format(q_model_path, result_str), (task_id,) - ) - self.conn.commit() - - def lookup_task_status(self, task_id): - """Look up the current task status and result.""" - self.cursor.execute(r"select status, result from task where id=?", (task_id,)) - status, result = self.cursor.fetchone() - return {"status": status, "result": result} - - def get_task_by_id(self, task_id): - """Get the task object by task id.""" - self.cursor.execute(r"select * from task where id=?", (task_id,)) - attr_tuple = self.cursor.fetchone() - return Task(*attr_tuple) - - def remove_task(self, task_id): # currently no garbage collection - """Remove task.""" - pass diff --git a/neural_solution/backend/task_monitor.py b/neural_solution/backend/task_monitor.py deleted file mode 100644 index 02da9124369..00000000000 --- a/neural_solution/backend/task_monitor.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2023 Intel 
Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution task monitor.""" -import socket - -from neural_solution.backend.utils.utility import deserialize, serialize -from neural_solution.utils import logger - - -class TaskMonitor: - """TaskMonitor is a thread that monitors the coming tasks and appends them to the task queue. - - Attributes: - port: the port that the task monitor listens to - task_db: the TaskDb object that manages the tasks - """ - - def __init__(self, port, task_db): - """Init TaskMonitor.""" - self.s = socket.socket() - self.port = port - self.task_db = task_db - - def _start_listening(self, host, port, max_parallelism): - self.s.bind(("localhost", port)) # open a port as the serving port for tasks - self.s.listen(max_parallelism) - - def _receive_task(self): - c, addr = self.s.accept() - task = c.recv(4096) - task_dict = deserialize(task) - if "ping" in task_dict: - logger.info("[TaskMonitor] Client query status.") - c.send(b"ok") - return False - task_id = task_dict["task_id"] - - logger.info("[TaskMonitor] getting task: {}".format(task_id)) - return self.task_db.get_task_by_id(task_id) - # return Task(task_id=task["task_id"], arguments=task["arguments"], - # workers=task["workers"], status="pending", script_url=task['script_url']) - - def _append_task(self, task): - self.task_db.append_task(task) - logger.info("[TaskMonitor] append task {} done.".format(task.task_id)) - - def wait_new_task(self): - """Monitor the coming tasks and append it to the task db.""" - self._start_listening("localhost", self.port, 10) - while True: - logger.info("[TaskMonitor] waiting for new tasks...") - task = self._receive_task() - if not task: - continue - self._append_task(task) diff --git a/neural_solution/backend/utils/__init__.py b/neural_solution/backend/utils/__init__.py deleted file mode 100644 index 108ea605857..00000000000 --- a/neural_solution/backend/utils/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution backend utils.""" diff --git a/neural_solution/backend/utils/utility.py b/neural_solution/backend/utils/utility.py deleted file mode 100644 index f097402dc91..00000000000 --- a/neural_solution/backend/utils/utility.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution backend utils.""" -import json -import os -from urllib.parse import urlparse - -from neural_solution.utils import logger - - -def serialize(request: dict) -> bytes: - """Serialize a dict object to bytes for inter-process communication.""" - return json.dumps(request).encode() - - -def deserialize(request: bytes) -> dict: - """Deserialize the received bytes to a dict object.""" - return json.loads(request) - - -def dump_elapsed_time(customized_msg=""): - """Get the elapsed time for decorated functions. - - Args: - customized_msg (string, optional): The parameter passed to decorator. Defaults to None. - """ - import time - - def f(func): - def fi(*args, **kwargs): - start = time.time() - res = func(*args, **kwargs) - end = time.time() - logger.info( - "%s elapsed time: %s ms" - % (customized_msg if customized_msg else func.__qualname__, round((end - start) * 1000, 2)) - ) - return res - - return fi - - return f - - -def get_task_log_path(log_path, task_id): - """Get the path of task log according id. - - Args: - log_path (str): the log path of task - task_id (str): the task id - - Returns: - str: the path of task log file - """ - if not os.path.exists(log_path): - os.makedirs(log_path) - log_file_path = "{}/task_{}.txt".format(log_path, task_id) - return log_file_path - - -def get_db_path(workspace="./"): - """Get the database path. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: the path of database - """ - return os.path.join(workspace, "db", "task.db") - - -def get_task_workspace(workspace="./"): - """Get the workspace of task. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: the workspace of task - """ - return os.path.join(workspace, "task_workspace") - - -def get_task_log_workspace(workspace="./"): - """Get the log workspace for task. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: the log workspace for task - """ - return os.path.join(workspace, "task_log") - - -def get_serve_log_workspace(workspace="./"): - """Get log workspace for service. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: log workspace for service - """ - return os.path.join(workspace, "serve_log") - - -def build_local_cluster(db_path): - """Build a local cluster. - - Args: - db_path (str): database path - - Returns: - (Cluster, int): cluster and num threads per process - """ - from neural_solution.backend.cluster import Cluster, Node - - hostname = "localhost" - node1 = Node(name=hostname, num_sockets=2, num_cores_per_socket=5) - node2 = Node(name=hostname, num_sockets=2, num_cores_per_socket=5) - node3 = Node(name=hostname, num_sockets=2, num_cores_per_socket=5) - - node_lst = [node1, node2, node3] - cluster = Cluster(node_lst=node_lst, db_path=db_path) - return cluster, 5 - - -def build_cluster(file_path, db_path): - """Build cluster according to the host file. - - Args: - file_path : the path of host file. 
- - Returns: - Cluster: return cluster object. - """ - from neural_solution.backend.cluster import Cluster, Node - - # If no file is specified, build a local cluster - if file_path == "None" or file_path is None: - return build_local_cluster(db_path) - - if not os.path.exists(file_path): - raise Exception(f"Please check the path of host file: {file_path}.") - - node_lst = [] - num_threads_per_process = 5 - with open(file_path, "r") as f: - for line in f: - hostname, num_sockets, num_cores_per_socket = line.strip().split(" ") - num_sockets, num_cores_per_socket = int(num_sockets), int(num_cores_per_socket) - node = Node(name=hostname, num_sockets=num_sockets, num_cores_per_socket=num_cores_per_socket) - node_lst.append(node) - num_threads_per_process = num_cores_per_socket - cluster = Cluster(node_lst=node_lst, db_path=db_path) - return cluster, num_threads_per_process - - -def get_current_time(): - """Get current time. - - Returns: - str: the current time in hours, minutes, and seconds. - """ - from datetime import datetime - - return datetime.now().strftime("%H:%M:%S") - - -def synchronized(func): - """Locking for synchronization. - - Args: - func (function): decorative function - """ - - def wrapper(self, *args, **kwargs): - with self.lock: - return func(self, *args, **kwargs) - - return wrapper - - -def build_workspace(path, task_id=""): - """Build workspace of running tasks. - - Args: - path: master work directory for all tasks. - task_id: the id of task - """ - task_path = "{}/{}".format(path, task_id) - if not os.path.exists(task_path): - os.makedirs(task_path) - return os.path.abspath(task_path) - - -def is_remote_url(url_or_filename): - """Check if input is a URL. - - Args: - url_or_filename (str): url_or_filename - - Returns: - bool: True or False - """ - parsed = urlparse(url_or_filename) - return parsed.scheme in ("http", "https") - - -def create_dir(path): - """Create the (nested) path if not exist.""" - if not os.path.exists(os.path.dirname(path)): - os.makedirs(os.path.dirname(path)) - - -def get_q_model_path(log_path, task_id): - """Get the quantized model path from task log. - - Args: - log_path (str): log path for task - task_id: the id of task - - Returns: - str: quantized model path - """ - import re - - for line in reversed(open(log_path).readlines()): - match = re.search(r"(Save quantized model to|Save config file and weights of quantized model to) (.+?)\.", line) - if match: - q_model_path = match.group(2) - match_task_id = re.search(r"(.+/task_workspace/{}/[^/]+)".format(task_id), q_model_path) - if match_task_id: - q_model_path = match_task_id.group() - return q_model_path - return "quantized model path not found" diff --git a/neural_solution/bin/__init__.py b/neural_solution/bin/__init__.py deleted file mode 100644 index 7f80f09b1f0..00000000000 --- a/neural_solution/bin/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Neural Solution.""" diff --git a/neural_solution/bin/neural_solution.py b/neural_solution/bin/neural_solution.py deleted file mode 100644 index a562313febe..00000000000 --- a/neural_solution/bin/neural_solution.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution entry point.""" - - -def exec(): - """Execute Neural Solution launch.""" - from neural_solution.launcher import main - - main() - - -if __name__ == "__main__": - exec() diff --git a/neural_solution/config.py b/neural_solution/config.py deleted file mode 100644 index 8ab52da571f..00000000000 --- a/neural_solution/config.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Config for both frontend and backend.""" - -INTERVAL_TIME_BETWEEN_DISPATCH_TASK = 3 - - -class Config: - """Config for services.""" - - workspace: str = "./ns_workspace" - task_monitor_port: int = 2222 - result_monitor_port: int = 3333 - service_address: str = "localhost" - grpc_api_port: int = 4444 - # TODO add set and get methods for each attribute - - -config = Config() diff --git a/neural_solution/docs/source/README.md b/neural_solution/docs/source/README.md deleted file mode 100644 index cf5e2cc87f7..00000000000 --- a/neural_solution/docs/source/README.md +++ /dev/null @@ -1,170 +0,0 @@ -# Get started - -- [Get started](#get-started) - - [Install Neural Solution](#install-neural-solution) - - [Prerequisites](#prerequisites) - - [Method 1. Using pip](#method-1-using-pip) - - [Method 2. Building from source](#method-2-building-from-source) - - [Start service](#start-service) - - [Submit task](#submit-task) - - [Query task status](#query-task-status) - - [Stop service](#stop-service) - - [Inspect logs](#inspect-logs) - - [Manage resource](#manage-resource) - - [Node States](#node-states) - - [Query cluster](#query-cluster) - - [Add node](#add-node) - - [Remove node](#remove-node) - -## Install Neural Solution -### Prerequisites -- Install [Anaconda](https://docs.anaconda.com/free/anaconda/install/) -- Install [Open MPI](https://www.open-mpi.org/faq/?category=building#easy-build) -- Python 3.8 or later - -There are two ways to install the neural solution: -### Method 1. Using pip -``` -pip install neural-solution -``` -### Method 2. 
Building from source - -```shell -# get source code -git clone https://github.com/intel/neural-compressor -cd neural-compressor - -# install neural compressor -pip install -r requirements.txt -python setup.py install - -# install neural solution -pip install -r neural_solution/requirements.txt -python setup.py neural_solution install -``` - -## Start service - -```shell -# Start neural solution service with custom configuration -neural_solution start --task_monitor_port=22222 --result_monitor_port=33333 --restful_api_port=8001 - -# Help Manual -neural_solution -h -# Help output - -usage: neural_solution {start,stop} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] - [--result_monitor_port RESULT_MONITOR_PORT] [--task_monitor_port TASK_MONITOR_PORT] [--api_type API_TYPE] - [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] - -Neural Solution - -positional arguments: - {start,stop} start/stop service - -optional arguments: - -h, --help show this help message and exit - --hostfile HOSTFILE start backend serve host file which contains all available nodes - --restful_api_port RESTFUL_API_PORT - start restful serve with {restful_api_port}, default 8000 - --grpc_api_port GRPC_API_PORT - start gRPC with {restful_api_port}, default 8000 - --result_monitor_port RESULT_MONITOR_PORT - start serve for result monitor at {result_monitor_port}, default 3333 - --task_monitor_port TASK_MONITOR_PORT - start serve for task monitor at {task_monitor_port}, default 2222 - --api_type API_TYPE start web serve with all/grpc/restful, default all - --workspace WORKSPACE - neural solution workspace, default "./ns_workspace" - --conda_env CONDA_ENV - specify the running environment for the task - --upload_path UPLOAD_PATH - specify the file path for the tasks - -``` - -## Submit task - -- For RESTful API: `[user@server hf_model]$ curl -H "Content-Type: application/json" --data @./task.json http://localhost:8000/task/submit/` -- For gRPC API: `python -m neural_solution.frontend.gRPC.client submit --request="test.json"` - -> For more details, please reference the [API description](./description_api.md) and [examples](../../examples/README.md). - -## Query task status - -Query the task status and result according to the `task_id`. - -- For RESTful API: `[user@server hf_model]$ curl -X GET http://localhost:8000/task/status/{task_id}` -- For gRPC API: `python -m neural_solution.frontend.gRPC.client query --task_id={task_id}` - -> For more details, please reference the [API description](./description_api.md) and [examples](../../examples/README.md). - -## Stop service - -```shell -# Stop neural solution service with default configuration -neural_solution stop -``` - -## Inspect logs - -The default logs locate in `./ns_workspace/`. Users can specify a custom workspace by using `neural_solution ---workspace=/path/to/custom/workspace`. - -There are several logs under workspace: - -```shell -(ns) [username@servers ns_workspace]$ tree -. -├── db -│ └── task.db # database to save the task-related information -├── serve_log # service running log -│ ├── backend.log # backend log -│ ├── frontend_grpc.log # grpc frontend log -│ └── frontend.log # HTTP/RESTful frontend log -├── task_log # overall log for each task -│ ├── task_bdf0bd1b2cc14bc19bce12d4f9b333c7.txt # task log -│ └── ... -└── task_workspace # the log for each task - ... - ├── bdf0bd1b2cc14bc19bce12d4f9b333c7 # task_id - ... 
- -``` - -## Manage resource -Neural Solution supports cluster management for service maintainers, providing several command-line tools for efficient resource management. - -### Node States - -Each node in the cluster can have three different states: - -- Alive: Represents a node that is functioning properly and available to handle requests. -- Join: Indicates that a node is in the process of being added to the cluster but has not fully joined yet. -- Remove: Indicates that a node is scheduled to be removed from the cluster. - -Below are some commonly used commands and their usage: - -### Query cluster -This command is used to query the current status of the cluster. No additional parameters are required, simply enter the following command: -```shell -neural_solution cluster --query -``` -### Add node -This command is used to add nodes to the cluster. You can either specify a host file or provide a list of nodes separated by ";". The node format consists of three parts: hostname, number_of_sockets, and cores_per_socket. Here's a breakdown of each part: - -- hostname: This refers to the name or IP address of the node that you want to add to the cluster. It identifies the specific machine or server that will be part of the cluster. - -- number_of_sockets: This indicates the number of physical CPU sockets available on the node. A socket is a physical component that houses one or more CPU cores. It represents a physical processor unit. - -- cores_per_socket: This specifies the number of CPU cores present in each socket. A core is an individual processing unit within a CPU. - -For example: -```shell -neural_solution cluster --join "host1 2 20; host2 4 20" -``` -### Remove node -This command is used to remove nodes from the cluster based on the IDs obtained from the query. The IDs can be passed as a parameter to the command. For example: -```shell -neural_solution cluster --remove -``` -Please note that the above commands are just examples and may require additional parameters or configurations based on your specific setup. diff --git a/neural_solution/docs/source/description_api.md b/neural_solution/docs/source/description_api.md deleted file mode 100644 index 7940a6c2768..00000000000 --- a/neural_solution/docs/source/description_api.md +++ /dev/null @@ -1,181 +0,0 @@ -# Neural Solution API - -Welcome to Neural Solution OaaS API documentation. This API documentation provides a detailed description of all the endpoints available in Neural Solution OaaS API. - -## Base URL - -The base URL for Neural Solution OaaS API is `{host_ip}:port` - -## Endpoints - -### GET / - -#### Description - -This is the welcome interface for Neural Solution OaaS. - -#### Usage -```bash -curl -X GET {host_ip}:port/description -``` - -#### Responses - -| Status Code | Description | -| ----------- | ------------------------------------------------- | -| 200 | Welcome to Neural Solution OaaS! | - - -### POST /task/submit - -#### Description - -Submit a new task to Neural Solution OaaS. - -#### Parameters - -Refer to [task_request_description.md](./template/task_request_description.md). - -#### Usage -```bash -curl -X POST -H "Content-Type: application/json" -d @task_request.json {host_ip}:port/task/submit -``` -#### Responses - -| Status Code | Description | Content | -| ----------- | -----------------------| ---------------------| -| 200 | Submitted successfully.| `status`: "Successfully.", `task_id`: Hashed key, `msg` : "Task submitted successfully"| -| 500 | Submitted failed. | `status`: "Failed." 
| - -### GET /task/status/{task_id} - -#### Description - -Get the status of a submitted task. - -#### Parameters - -- `task_id` - The hashed key of the submitted task. - -#### Usage -```bash -curl -X GET {host_ip}:port/task/status/{task_id} -``` - -#### Responses - -| Status Code | Description | Content | -| ----------- | ---------------- | ------------- | -| 200 | The status of task . | `status`: "running"/"done"/"pending"/"failed"
`tuning_info`: tuning information<br>
`optimization_result`: optimization time, Accuracy, Duration, result_path| - -### GET /task/log/{task_id} - -#### Description - -Get the log of a submitted task. - -#### Usage -```bash -curl -X GET {host_ip}:port/task/log/{task_id} -``` - -#### Parameters - -- `task_id` - The hashed key of the submitted task. - -#### Responses - -| Status Code | Description | Content | -| ----------- | ----------- | ------------ | -| 200 | Task log. | Task log. | - -### WebSocket /task/screen/{task_id} - -#### Description - -Get real-time log of a submitted task. - -#### Parameters - -- `task_id` - The hashed key of the submitted task. - -#### Responses - -| Status Code | Description | Content | -| ----------- | ---------------------------------------- | ---------------------| -| 101 | Get real-time task log. | Real-time task log. | -| 1000 | Normal Closure. | Connection was closed successfully.| -| 404 | Task not found. | `status`: "Failed." | - -### GET /ping - -#### Description - -Check the health status of Neural Solution. - -#### Usage -```bash -curl -X GET {host_ip}:port/task/log/{task_id} -``` - -#### Responses - -| Status Code | Description | Content | -| ----------- | ------------ | -------------------------------------- | -| 200 | The health status. | `status`: "Healthy", `msg`: "Neural Solution is running." | -| 500 | Ping fail! & error message. | `status`: "Failed.", `msg`: Error message. | - - -### GET /cluster - -#### Description - -Get the running status of Neural Solution cluster. - -#### Usage -```bash -curl -X GET {host_ip}:port/cluster -``` - -#### Responses - -| Status Code | Description | Content | -| ----------- | ------------| ------------------------------------------ | -| 200 | Cluster information. | `msg`: "Cluster information." | - -### GET /download/{task_id} - -#### Description - -Download optimized result locally. - -#### Usage -```bash -curl -X GET {host_ip}:port/download/{task_id} --output quantized_model.zip -``` - -#### Responses - -| Status Code | Description | Content | -| ----------- | ----------- | ---------------- | -| 200 | Download optimized model. | zip file | -| 400 | No quantized model when task failed. | `msg`: "Please check URL." | -| 404 | Download optimized model. | `msg`: "Task failed, file not found" | - - -### GET /description - -#### Description - -Get user-facing API. - -#### Usage -```bash -curl -X GET {host_ip}:port/description -``` - -#### Responses - -| Status Code | Description | Content | -| ----------- | ----------- | ---------------- | -| 200 | User-facing API. | `msg`: The user-facing API. 
| diff --git a/neural_solution/docs/source/imgs/NS-OaaS-Intro.png b/neural_solution/docs/source/imgs/NS-OaaS-Intro.png deleted file mode 100644 index 42845031702..00000000000 Binary files a/neural_solution/docs/source/imgs/NS-OaaS-Intro.png and /dev/null differ diff --git a/neural_solution/docs/source/ns_design_doc.md b/neural_solution/docs/source/ns_design_doc.md deleted file mode 100644 index 3a3872b7b48..00000000000 --- a/neural_solution/docs/source/ns_design_doc.md +++ /dev/null @@ -1,123 +0,0 @@ -## Design Doc for Optimization as a Service [WIP] - - - -### Contents - -- [Design Doc for Optimization as a Service \[WIP\]](#design-doc-for-optimization-as-a-service-wip) - - [Contents](#contents) - - [Overview](#overview) - - [Workflow of OaaS](#workflow-of-oaas) - - [Class definition diagram](#class-definition-diagram) - - [Extensibility](#extensibility) - -### Overview - -Optimization as a service(OaaS) is a platform that enables users to submit quantization tasks for their models and automatically dispatches these tasks to one or multiple nodes for accuracy-aware tuning. OaaS is designed to parallelize the tuning process in two levels: tuning and model. At the tuning level, OaaS execute the tuning process across multiple nodes for one model. At the model level, OaaS allocate free nodes to incoming requests automatically. - - -### Workflow of OaaS - -```mermaid -sequenceDiagram - participant Studio - participant TaskMonitor - participant Scheduler - participant Cluster - participant TaskLauncher - participant ResultMonitor - Par receive task - Studio ->> TaskMonitor: P1-1. Post quantization Request - TaskMonitor ->> TaskMonitor: P1-2. Add task to task DB - TaskMonitor ->> Studio: P1-3. Task received notification - and Schedule task - loop - Scheduler ->> Scheduler: P2-1. Pop task from task DB - Scheduler ->> Cluster: P2-2. Apply for resources - Note over Scheduler, Cluster: the number of Nodes - Cluster ->> Cluster: P2-3. Check the status of nodes in cluster - Cluster ->> Scheduler: P2-4. Resources info - Note over Scheduler, Cluster: host:socket list - Scheduler ->> TaskLauncher: P2-5. Dispatch task - end - and Run task - TaskLauncher ->> TaskLauncher: P3-1. Run task - Note over TaskLauncher, TaskLauncher: mpirun -np 4 -hostfile hostfile python main.py - TaskLauncher ->> TaskLauncher: P3-2. Wait task to finish... - TaskLauncher ->> Cluster: P3-3. Free resource - TaskLauncher ->> ResultMonitor: P3-4. Report the Acc and Perf - ResultMonitor ->> Studio: P3-5. Post result to Studio - and Query task status - Studio ->> ResultMonitor: P4-1. Query the status of the submitted task - ResultMonitor ->> Studio: P4-2. Post the status of queried task - End - -``` - -The optimization process is divided into four parts, each executed in separate threads. - -- Part 1. Posting new quantization task. (P1-1 -> P1-2 -> P1-3) - -- Part 2. Resource allocation and scheduling. (P2-1 -> P2-2 -> P2-3 -> P2-4 -> P2-5) - -- Part 3. Task execution and reporting. (P3-1 -> P3-2 -> P3-3 -> P3-4 -> P3-5) - -- Part 4. Updating the status. 
(P4-1 -> P4-2) - -### Class definition diagram - - - -```mermaid -classDiagram - - - -TaskDB "1" --> "*" Task -TaskMonitor --> TaskDB -ResultMonitor --> TaskDB -Scheduler --> TaskDB -Scheduler --> Cluster - - -class Task{ - + status - + get_status() - + update_status() -} - -class TaskDB{ - - task_collections - + append_task() - + get_all_pending_tasks() - + update_task_status() -} -class TaskMonitor{ - - task_db - + wait_new_task() -} -class Scheduler{ - - task_db - - cluster - + schedule_tasks() - + dispatch_task() - + launch_task() -} - -class ResultMonitor{ - - task_db - + query_task_status() -} -class Cluster{ - - node_list - + free() - + reserve_resource() - + get_node_status() -} - -``` - - -### Extensibility - -- The service can be deployed on various resource pool, including a set of worker nodes, such as a local cluster or cloud cluster (AWS and GCP). diff --git a/neural_solution/docs/source/template/task_request_description.md b/neural_solution/docs/source/template/task_request_description.md deleted file mode 100644 index 137c66129b3..00000000000 --- a/neural_solution/docs/source/template/task_request_description.md +++ /dev/null @@ -1,25 +0,0 @@ -### Task request description - -- `script_url` (str): The URL to download the model archive. -- `optimized` (bool): If `True`, the model script has already be optimized by `Neural Coder`. -- `arguments` (List[Union[int, str]], optional): Arguments that are needed for running the model. -- `approach` (str, optional): The optimization approach supported by `Neural Coder`. -- `requirements` (List[str], optional): The environment requirements. -- `priority`(int, optional): The importance of the task, the optional value is `1`, `2`, and `3`, `1` is the highest priority. - - -An example: - -```json -{ - "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - "optimized": "False", - "arguments": [ - "--model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result" - ], - "approach": "static", - "requirements": [ - ], - "priority": 1 -} -``` diff --git a/neural_solution/examples/README.md b/neural_solution/examples/README.md deleted file mode 100644 index 85fe48fd54f..00000000000 --- a/neural_solution/examples/README.md +++ /dev/null @@ -1,27 +0,0 @@ -### Examples List - - - - - - - - - - - - - - - - - - - - - - - - - -
| Model | Description | Example |
| --- | --- | --- |
| custom model | quantize a custom model | tf_example1 |
| huggingface model | quantize a huggingface model by specifying a URL | hf_models |
| huggingface model | quantize a huggingface model by specifying a URL with gRPC API | hf_models_grpc |
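For illustration only (not part of the deleted sources): a minimal Python sketch of submitting a request with the fields described in the task request description above to the documented `POST /task/submit/` endpoint. It assumes a Neural Solution service running on the documented default RESTful port 8000 and the third-party `requests` package; values shown are example placeholders.

```python
# Illustrative sketch only -- assumes a running Neural Solution service on
# localhost:8000 (the documented default) and the `requests` package.
import requests

# Fields follow the task request description above; values are examples.
task_request = {
    "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py",
    "optimized": "False",
    "arguments": [
        "--model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result"
    ],
    "approach": "static",
    "requirements": [],
    "priority": 1,
}

# Submit the task and print the documented response fields: status, task_id, msg.
response = requests.post("http://localhost:8000/task/submit/", json=task_request, timeout=30)
response.raise_for_status()
print(response.json())
```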
diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/README.md b/neural_solution/examples/custom_models_optimized/tf_example1/README.md deleted file mode 100644 index 19aa66c9f7d..00000000000 --- a/neural_solution/examples/custom_models_optimized/tf_example1/README.md +++ /dev/null @@ -1,178 +0,0 @@ - -## An end-to-end example: quantize a custom model with Neural Solution - -In this example, we show how to quantize a [custom model](https://github.com/intel/neural-compressor/tree/master/examples/helloworld/tf_example1) with Neural Solution. - -### Objective -- Demonstrate how to prepare requirements. -- Demonstrate how to start the Neural Solution Service. -- Demonstrate how to prepare an optimization task request and submit it to Neural Solution Service. -- Demonstrate how to query the status of the task and fetch the optimization result. -- Demonstrate how to query and manage the resource of the cluster. - -### Requirements -Customizing the model requires preparing the following folders and files. -1. dataset/, place dataset -2. model/, place model weight and configuration files -3. run.py, the running python script - -The folder structure is as follows: -```shell -├── dataset -│ └── train-00173-of-01024 -├── model -│ └── mobilenet_v1_1.0_224_frozen.pb -├── README.md -├── task_request_distributed.json -├── task_request.json -└── test.py -``` - -### Start the Neural Solution Service - -```shell -# Activate your environment -conda activate ENV - -# Start neural solution service with default configuration, log will be saved in the "serve_log" folder. -neural_solution start - -# Start neural solution service with custom configuration -neural_solution start --task_monitor_port=22222 --result_monitor_port=33333 --restful_api_port=8001 - -# Stop neural solution service with default configuration -neural_solution stop - -# Help Manual -neural_solution -h -# Help output - -usage: neural_solution {start,stop} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] - [--result_monitor_port RESULT_MONITOR_PORT] [--task_monitor_port TASK_MONITOR_PORT] [--api_type API_TYPE] - [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] [--query] [--join JOIN] [--remove REMOVE] - -Neural Solution - -positional arguments: - {start,stop,cluster} start/stop/management service - -optional arguments: - -h, --help show this help message and exit - --hostfile HOSTFILE start backend serve host file which contains all available nodes - --restful_api_port RESTFUL_API_PORT - start restful serve with {restful_api_port}, default 8000 - --grpc_api_port GRPC_API_PORT - start gRPC with {restful_api_port}, default 8000 - --result_monitor_port RESULT_MONITOR_PORT - start serve for result monitor at {result_monitor_port}, default 3333 - --task_monitor_port TASK_MONITOR_PORT - start serve for task monitor at {task_monitor_port}, default 2222 - --api_type API_TYPE start web serve with all/grpc/restful, default all - --workspace WORKSPACE - neural solution workspace, default "./ns_workspace" - --conda_env CONDA_ENV - specify the running environment for the task - --upload_path UPLOAD_PATH - specify the file path for the tasks - --query [cluster parameter] query cluster information - --join JOIN [cluster parameter] add new node into cluster - --remove REMOVE [cluster parameter] remove from cluster -``` - - -### Submit optimization task - -- Step 1: Prepare the json file includes request content. 
In this example, we have created request that quantize a [custom model](https://github.com/intel/neural-compressor/tree/master/examples/helloworld/tf_example1). - -```shell -[user@server tf_example1]$ cd path/to/neural_solution/neural_solution/examples/custom_models_optimized/tf_example1 -[user@server tf_example1]$ cat task_request.json -{ - "script_url": "tf_example1", - "optimized": "True", - "arguments": [ - "--dataset_location=dataset", "--model_path=model" - ], - "approach": "static", - "requirements": [ - ], - "workers": 1 -} -``` -When using distributed quantization, the `workers` needs to be set to greater than 1 when submitting a request. -```shell -[user@server tf_example1]$ cat task_request_distributed.json -{ - "script_url": "tf_example1", - "optimized": "True", - "arguments": [ - "--dataset_location=dataset", "--model_path=model" - ], - "approach": "static", - "requirements": [ - ], - "workers": 3 -} -``` - - -- Step 2: Submit the task request to service, and it will return the submit status and task id for future use. - -```shell -[user@server tf_example1]$ curl -H "Content-Type: application/json" --data @./task.json http://localhost:8000/task/submit/ - -# response if submit successfully -{ - "status": "successfully", - "task_id": "7602cd63d4c849e7a686a8165a77f69d", - "msg": "Task submitted successfully" -} -``` - - - -### Query optimization result - -- Query the task status and result according to the `task_id`. - -``` shell -[user@server tf_example1]$ curl -X GET http://localhost:8000/task/status/{task_id} -# return the task status -{ - "status": "done", - "tuning_info": {}, - "optimization_result": { - "optimization time (seconds)": "151.16", - "Accuracy": "0.8617", - "Duration (seconds)": "17.8213", - "result_path": "http://localhost:8000/download/7602cd63d4c849e7a686a8165a77f69d" - } -} - -``` -### Download optimized model - -- Download the optimized model according to the `task_id`. 
- -``` shell -[user@server tf_example1]$ curl -X GET http://localhost:8000/download/{task_id} --output quantized_model.zip -# download quantized_model.zip -``` - -### Manage resource -```shell -# query cluster information -neural_solution cluster --query - -# add new node into cluster -# parameter: " ; " -neural_solution cluster --join "host1 2 20; host2 5 20" - -# remove node from cluster according to id -neural_solution cluster --remove -``` - -### Stop the service -```shell -neural_solution stop -``` diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/task_request.json b/neural_solution/examples/custom_models_optimized/tf_example1/task_request.json deleted file mode 100644 index 7cce23ec4d4..00000000000 --- a/neural_solution/examples/custom_models_optimized/tf_example1/task_request.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "script_url": "custom_models_optimized/tf_example1", - "optimized": "True", - "arguments": [ - "--dataset_location=dataset", "--model_path=model" - ], - "approach": "static", - "requirements": ["tensorflow" - ], - "workers": 1 -} diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/task_request_distributed.json b/neural_solution/examples/custom_models_optimized/tf_example1/task_request_distributed.json deleted file mode 100644 index d2d42585171..00000000000 --- a/neural_solution/examples/custom_models_optimized/tf_example1/task_request_distributed.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "script_url": "custom_models_optimized/tf_example1", - "optimized": "True", - "arguments": [ - "--dataset_location=dataset", "--model_path=model" - ], - "approach": "static", - "requirements": ["tensorflow" - ], - "workers": 3 -} diff --git a/neural_solution/examples/custom_models_optimized/tf_example1/test.py b/neural_solution/examples/custom_models_optimized/tf_example1/test.py deleted file mode 100644 index 2c5ccfd537c..00000000000 --- a/neural_solution/examples/custom_models_optimized/tf_example1/test.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Running script.""" -import tensorflow as tf - -from neural_compressor import Metric -from neural_compressor.config import PostTrainingQuantConfig, TuningCriterion -from neural_compressor.data import BilinearImagenetTransform, ComposeTransform, DefaultDataLoader, TensorflowImageRecord -from neural_compressor.quantization import fit - -flags = tf.compat.v1.flags -FLAGS = flags.FLAGS - -flags.DEFINE_string("dataset_location", None, "location of calibration dataset and evaluate dataset") - -flags.DEFINE_string("model_path", None, "location of model") - -calib_dataset = TensorflowImageRecord( - root=FLAGS.dataset_location, - transform=ComposeTransform(transform_list=[BilinearImagenetTransform(height=224, width=224)]), -) -calib_dataloader = DefaultDataLoader(dataset=calib_dataset, batch_size=10) - -eval_dataset = TensorflowImageRecord( - root=FLAGS.dataset_location, - transform=ComposeTransform(transform_list=[BilinearImagenetTransform(height=224, width=224)]), -) -eval_dataloader = DefaultDataLoader(dataset=eval_dataset, batch_size=1) - - -def main(): - """Implement running function.""" - top1 = Metric(name="topk", k=1) - tuning_criterion = TuningCriterion(strategy="basic") - config = PostTrainingQuantConfig(calibration_sampling_size=[20], quant_level=1, tuning_criterion=tuning_criterion) - model_path = FLAGS.model_path + "/mobilenet_v1_1.0_224_frozen.pb" - q_model = fit( - model=model_path, - conf=config, - calib_dataloader=calib_dataloader, - eval_dataloader=eval_dataloader, - eval_metric=top1, - ) - q_model.save("./q_model_path/q_model") - - -if __name__ == "__main__": - main() diff --git a/neural_solution/examples/hf_models/README.md b/neural_solution/examples/hf_models/README.md deleted file mode 100644 index 4b72b5d88c7..00000000000 --- a/neural_solution/examples/hf_models/README.md +++ /dev/null @@ -1,142 +0,0 @@ -## An end-to-end example: quantize a Hugging Face model with Neural Solution - -In this example, we show how to quantize a Hugging Face model with Neural Solution. - -### Objective -- Demonstrate how to start the Neural Solution Service. -- Demonstrate how to prepare an optimization task request and submit it to Neural Solution Service. -- Demonstrate how to query the status of the task and fetch the optimization result. -- Demonstrate how to query and manage the resource of the cluster. - - -### Start the Neural Solution Service - -```shell -# Activate your environment -conda activate ENV - -# Start neural solution service with default configuration, log will be saved in the "serve_log" folder. 
-neural_solution start - -# Start neural solution service with custom configuration -neural_solution start --task_monitor_port=22222 --result_monitor_port=33333 --restful_api_port=8001 - -# Stop neural solution service with default configuration -neural_solution stop - -# Help Manual -neural_solution -h -# Help output - -usage: neural_solution {start,stop,cluster} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] - [--result_monitor_port RESULT_MONITOR_PORT] [--task_monitor_port TASK_MONITOR_PORT] [--api_type API_TYPE] - [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] [--query] [--join JOIN] [--remove REMOVE] - -Neural Solution - -positional arguments: - {start,stop,cluster} start/stop/management service - -optional arguments: - -h, --help show this help message and exit - --hostfile HOSTFILE start backend serve host file which contains all available nodes - --restful_api_port RESTFUL_API_PORT - start restful serve with {restful_api_port}, default 8000 - --grpc_api_port GRPC_API_PORT - start gRPC with {restful_api_port}, default 8000 - --result_monitor_port RESULT_MONITOR_PORT - start serve for result monitor at {result_monitor_port}, default 3333 - --task_monitor_port TASK_MONITOR_PORT - start serve for task monitor at {task_monitor_port}, default 2222 - --api_type API_TYPE start web serve with all/grpc/restful, default all - --workspace WORKSPACE - neural solution workspace, default "./ns_workspace" - --conda_env CONDA_ENV - specify the running environment for the task - --upload_path UPLOAD_PATH - specify the file path for the tasks - --query [cluster parameter] query cluster information - --join JOIN [cluster parameter] add new node into cluster - --remove REMOVE [cluster parameter] remove from cluster -``` - - -### Submit optimization task - -- Step 1: Prepare the json file includes request content. In this example, we have created request that quantize a [Text classification model](https://github.com/huggingface/transformers/tree/v4.21-release/examples/pytorch/text-classification) from Hugging Face. - -```shell -[user@server hf_model]$ cd path/to/neural_solution/examples/hf_model -[user@server hf_model]$ cat task_request.json -{ - "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - "optimized": "False", - "arguments": [ - "--model_name_or_path=bert-base-cased", "--task_name=mrpc", "--do_eval", "--output_dir=result" - ], - "approach": "static", - "requirements": [], - "workers": 1 -} -``` - - -- Step 2: Submit the task request to service, and it will return the submit status and task id for future use. - -```shell -[user@server hf_model]$ curl -H "Content-Type: application/json" --data @./task.json http://localhost:8000/task/submit/ - -# response if submit successfully -{ - "status": "successfully", - "task_id": "cdf419910f9b4d2a8320d0e420ac1d0a", - "msg": "Task submitted successfully" -} -``` - - - -### Query optimization result - -- Query the task status and result according to the `task_id`. 
- -``` shell -[user@server hf_model]$ curl -X GET http://localhost:8000/task/status/{task_id} - -# return the task status -{ - "status": "done", - "optimized_result": { - "optimization time (seconds)": "58.15", - "accuracy": "0.3162", - "duration (seconds)": "4.6488" - }, - "result_path": "http://localhost:8000/download/7602cd63d4c849e7a686a8165a77f69d" - } -} -``` -### Download optimized model - -- Download the optimized model according to the `task_id`. - -``` shell -[user@server tf_example1]$ curl -X GET http://localhost:8000/download/{task_id} --output quantized_model.zip -# download quantized_model.zip -``` -### Manage resource -```shell -# query cluster information -neural_solution cluster --query - -# add new node into cluster -# parameter: " ; " -neural_solution cluster --join "host1 2 20; host2 5 20" - -# remove node from cluster according to id -neural_solution cluster --remove - -``` -### Stop the service -```shell -neural_solution stop -``` diff --git a/neural_solution/examples/hf_models/task_request.json b/neural_solution/examples/hf_models/task_request.json deleted file mode 100644 index 0566be32f3d..00000000000 --- a/neural_solution/examples/hf_models/task_request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - "optimized": "False", - "arguments": [ - "--model_name_or_path=bert-base-cased", "--task_name=mrpc", "--do_eval", "--output_dir=result" - ], - "approach": "static", - "requirements": ["datasets", "transformers=4.21.0", "torch"], - "workers": 1 -} diff --git a/neural_solution/examples/hf_models_grpc/README.md b/neural_solution/examples/hf_models_grpc/README.md deleted file mode 100644 index 4712c1b6aeb..00000000000 --- a/neural_solution/examples/hf_models_grpc/README.md +++ /dev/null @@ -1,107 +0,0 @@ -## An end-to-end example: quantize a Hugging Face model with Neural Solution gRPC API - -In this example, we show how to quantize a Hugging Face model with Neural Solution gRPC API. - -### Objective -- Demonstrate how to start the Neural Solution Service. -- Demonstrate how to prepare an optimization task request and submit it to Neural Solution Service. -- Demonstrate how to query the status of the task and fetch the optimization result. - - -### Start the Neural Solution Service - -```shell -# Activate your environment -conda activate ENV - -# Start neural solution service with default configuration, log will be saved in the "serve_log" folder. 
-neural_solution start - -# Start neural solution service with custom configuration -neural_solution start --task_monitor_port=22222 --result_monitor_port=33333 --grpc_api_port=8001 --api_type=grpc - -# Stop neural solution service with default configuration -neural_solution stop - -# Help Manual -neural_solution -h -# Help output - -usage: neural_solution {start,stop} [-h] [--hostfile HOSTFILE] [--restful_api_port RESTFUL_API_PORT] [--grpc_api_port GRPC_API_PORT] - [--result_monitor_port RESULT_MONITOR_PORT] [--task_monitor_port TASK_MONITOR_PORT] [--api_type API_TYPE] - [--workspace WORKSPACE] [--conda_env CONDA_ENV] [--upload_path UPLOAD_PATH] - -Neural Solution - -positional arguments: - {start,stop} start/stop service - -optional arguments: - -h, --help show this help message and exit - --hostfile HOSTFILE start backend serve host file which contains all available nodes - --restful_api_port RESTFUL_API_PORT - start restful serve with {restful_api_port}, default 8000 - --grpc_api_port GRPC_API_PORT - start gRPC with {restful_api_port}, default 8000 - --result_monitor_port RESULT_MONITOR_PORT - start serve for result monitor at {result_monitor_port}, default 3333 - --task_monitor_port TASK_MONITOR_PORT - start serve for task monitor at {task_monitor_port}, default 2222 - --api_type API_TYPE start web serve with all/grpc/restful, default all - --workspace WORKSPACE - neural solution workspace, default "./ns_workspace" - --conda_env CONDA_ENV - specify the running environment for the task - --upload_path UPLOAD_PATH - specify the file path for the tasks -``` - - -### Submit optimization task - -- Step 1: Prepare the json file includes request content. In this example, we have created request that quantize a [Text classification model](https://github.com/huggingface/transformers/tree/v4.21-release/examples/pytorch/text-classification) from Hugging Face. - -```shell -[user@server hf_models_grpc]$ cd path/to/neural_solution/examples/hf_models_grpc -[user@server hf_models_grpc]$ cat task_request.json -{ - "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - "optimized": "False", - "arguments": [ - "--model_name_or_path=bert-base-cased", "--task_name=mrpc", "--do_eval", "--output_dir=result" - ], - "approach": "static", - "requirements": [], - "workers": 1 -} -``` - - -- Step 2: Submit the task request to service, and it will return the submit status and task id for future use. - -```shell -[user@server hf_models_grpc]$ cd path/to/neural-compressor/neural_solution/frontend/gRPC -[user@server hf_models_grpc]$ python client.py submit --request="../../examples/hf_models/task_request.json" - -# response if submit successfully -2023-06-20 14:34:55 [INFO] Neural Solution is running. -2023-06-20 14:34:55 [INFO] successfully -2023-06-20 14:34:55 [INFO] d3e10a49326449fb9d0d62f2bfc1cb43 -2023-06-20 14:34:55 [INFO] Task submitted successfully -``` - - - -### Query optimization result - -- Query the task status and result according to the `task_id`. 
- -``` shell -[user@server hf_models_grpc]$ python client.py --task_monitor_port=22222 --result_monitor_port=33333 --grpc_api_port=8001 query --task_id="d3e10a49326449fb9d0d62f2bfc1cb43" - - -``` -### Stop the service -```shell -neural_solution stop -``` diff --git a/neural_solution/examples/hf_models_grpc/task_request.json b/neural_solution/examples/hf_models_grpc/task_request.json deleted file mode 100644 index 0566be32f3d..00000000000 --- a/neural_solution/examples/hf_models_grpc/task_request.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - "optimized": "False", - "arguments": [ - "--model_name_or_path=bert-base-cased", "--task_name=mrpc", "--do_eval", "--output_dir=result" - ], - "approach": "static", - "requirements": ["datasets", "transformers=4.21.0", "torch"], - "workers": 1 -} diff --git a/neural_solution/frontend/README.md b/neural_solution/frontend/README.md deleted file mode 100644 index bc69917a642..00000000000 --- a/neural_solution/frontend/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Client - -- [x] fastapi -- [x] gRPC diff --git a/neural_solution/frontend/__init__.py b/neural_solution/frontend/__init__.py deleted file mode 100644 index 728fe75076e..00000000000 --- a/neural_solution/frontend/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution frontend.""" diff --git a/neural_solution/frontend/fastapi/__init__.py b/neural_solution/frontend/fastapi/__init__.py deleted file mode 100644 index c18be1eab5e..00000000000 --- a/neural_solution/frontend/fastapi/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""FastAPI frontend.""" diff --git a/neural_solution/frontend/fastapi/main_server.py b/neural_solution/frontend/fastapi/main_server.py deleted file mode 100644 index 7e01b355e59..00000000000 --- a/neural_solution/frontend/fastapi/main_server.py +++ /dev/null @@ -1,474 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Fast api server.""" -import asyncio -import json -import os -import socket -import sqlite3 -import uuid -import zipfile - -import uvicorn -from fastapi import FastAPI, HTTPException, Request, WebSocket, WebSocketDisconnect -from fastapi.responses import FileResponse, HTMLResponse, StreamingResponse -from starlette.background import BackgroundTask -from watchdog.events import FileSystemEventHandler -from watchdog.observers import Observer - -from neural_solution.config import config -from neural_solution.frontend.task_submitter import Task, task_submitter -from neural_solution.frontend.utility import ( - check_log_exists, - deserialize, - get_baseline_during_tuning, - get_cluster_info, - get_cluster_table, - get_res_during_tuning, - is_valid_task, - list_to_string, - serialize, -) -from neural_solution.utils.utility import get_db_path, get_task_log_workspace, get_task_workspace - -# Get config from Launcher.sh -task_monitor_port = None -result_monitor_port = None -db_path = None - -app = FastAPI() - - -import argparse - -args = None - - -def parse_arguments(): - """Parse the command line options.""" - parser = argparse.ArgumentParser(description="Frontend with RESTful API") - parser.add_argument("-H", "--host", type=str, default="0.0.0.0", help="The address to submit task.") - parser.add_argument("-FP", "--fastapi_port", type=int, default=8000, help="Port to submit task by user.") - parser.add_argument("-TMP", "--task_monitor_port", type=int, default=2222, help="Port to monitor task.") - parser.add_argument("-RMP", "--result_monitor_port", type=int, default=3333, help="Port to monitor result.") - parser.add_argument("-WS", "--workspace", type=str, default="./", help="Work space.") - args = parser.parse_args() - return args - - -@app.get("/") -def read_root(): - """Root route.""" - return {"message": "Welcome to Neural Solution!"} - - -@app.get("/ping") -def ping(): - """Test status of services. - - Returns: - json: the status of services and message - """ - count = 0 - msg = "Neural Solution is running." - for port in [config.task_monitor_port, config.result_monitor_port]: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - sock.connect((config.service_address, port)) - sock.send(serialize({"ping": "test"})) - sock.settimeout(5) - response = sock.recv(1024) - if response == b"ok": - count += 1 - sock.close() - continue - except ConnectionRefusedError: - msg = "Ping fail! Make sure Neural Solution runner is running!" - break - except Exception as e: - msg = "Ping fail! {}".format(e) - break - sock.close() - return {"status": "Healthy", "msg": msg} if count == 2 else {"status": "Failed", "msg": msg} - - -@app.get("/cluster") -def get_cluster(): - """Get the cluster info. - - Returns: - json: the cluster info. - """ - db_path = get_db_path(config.workspace) - return get_cluster_info(db_path=db_path) - - -@app.get("/clusters") -def get_clusters(): - """Get the cluster info. 
- - Returns: - HTMLResponse: html table of the cluster info - """ - db_path = get_db_path(config.workspace) - return HTMLResponse(content=get_cluster_table(db_path=db_path)) - - -@app.get("/description") -async def get_description(): - """Get user oriented API descriptions. - - Returns: - json: API descriptions - """ - current_dir = os.path.dirname(os.path.abspath(__file__)) - with open(os.path.join(current_dir, "..", "user_facing_api.json")) as f: - data = json.load(f) - return data - - -@app.post("/task/submit/") -async def submit_task(task: Task): - """Submit task. - - Args: - task (Task): _description_ - Fields: - task_id: The task id - arguments: The task command - workers: The requested resource unit number - status: The status of the task: pending/running/done - result: The result of the task, which is only value-assigned when the task is done - - Returns: - json: status , id of task and messages. - """ - if not is_valid_task(task.dict()): - raise HTTPException(status_code=422, detail="Invalid task") - - msg = "Task submitted successfully" - status = "successfully" - # search the current - db_path = get_db_path(config.workspace) - - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - task_id = str(uuid.uuid4()).replace("-", "") - sql = ( - r"insert into task(id, script_url, optimized, arguments, approach, requirements, workers, status)" - + r" values ('{}', '{}', {}, '{}', '{}', '{}', {}, 'pending')".format( - task_id, - task.script_url, - task.optimized, - list_to_string(task.arguments), - task.approach, - list_to_string(task.requirements), - task.workers, - ) - ) - cursor.execute(sql) - conn.commit() - try: - task_submitter.submit_task(task_id) - except ConnectionRefusedError: - msg = "Task Submitted fail! Make sure Neural Solution runner is running!" - status = "failed" - except Exception as e: - msg = "Task Submitted fail! {}".format(e) - status = "failed" - conn.close() - else: - msg = "Task Submitted fail! db not found!" - return {"msg": msg} # TODO to align with return message when submit task successfully - return {"status": status, "task_id": task_id, "msg": msg} - - -@app.get("/task/{task_id}") -def get_task_by_id(task_id: str): - """Get task status, result, quantized model path according to id. - - Args: - task_id (str): the id of task. - - Returns: - json: task status, result, quantized model path - """ - res = None - db_path = get_db_path(config.workspace) - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(r"select status, result, q_model_path from task where id=?", (task_id,)) - res = cursor.fetchone() - cursor.close() - conn.close() - return {"status": res[0], "optimized_result": deserialize(res[1]) if res[1] else res[1], "result_path": res[2]} - - -@app.get("/task/") -def get_all_tasks(): - """Get task table. - - Returns: - json: task table - """ - res = None - db_path = get_db_path(config.workspace) - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(r"select * from task") - res = cursor.fetchall() - cursor.close() - conn.close() - return {"message": res} - - -@app.get("/task/status/{task_id}") -def get_task_status_by_id(request: Request, task_id: str): - """Get task status and information according to id. - - Args: - task_id (str): the id of task. 
- - Returns: - json: task status and information - """ - status = "unknown" - tuning_info = {} - optimization_result = {} - - res = None - db_path = get_db_path(config.workspace) - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(r"select status, result, q_model_path from task where id=?", (task_id,)) - res = cursor.fetchone() - cursor.close() - conn.close() - if not res: - status = "Please check url." - elif res[0] == "done": - status = res[0] - optimization_result = deserialize(res[1]) if res[1] else res[1] - download_url = str(request.base_url) + "download/" + task_id - optimization_result["result_path"] = download_url - elif res[0] == "pending": - status = "pending" - else: - baseline = get_baseline_during_tuning(task_id, get_task_log_workspace(config.workspace)) - tuning_result = get_res_during_tuning(task_id, get_task_log_workspace(config.workspace)) - status = res[0] - tuning_info = {"baseline": baseline, "message": tuning_result} - result = {"status": status, "tuning_info": tuning_info, "optimization_result": optimization_result} - return result - - -@app.get("/task/log/{task_id}") -async def read_logs(task_id: str): - """Get the log of task according to id. - - Args: - task_id (str): the id of task. - - Returns: - StreamingResponse: text stream - - Yields: - str: log lines - """ - log_path = "{}/task_{}.txt".format(get_task_log_workspace(config.workspace), task_id) - if not os.path.exists(log_path): - return {"error": "Logfile not found."} - - def stream_logs(): - with open(log_path) as f: - while True: - line = f.readline() - if not line: - break - yield line.encode() - - return StreamingResponse(stream_logs(), media_type="text/plain") - - -# Real time output log -class LogEventHandler(FileSystemEventHandler): - """Responsible for monitoring log changes and sending logs to clients. - - Args: - FileSystemEventHandler (FileSystemEventHandler): Base file system event handler that overriding methods from. - """ - - def __init__(self, websocket: WebSocket, task_id, last_position): - """Init. - - Args: - websocket (WebSocket): websocket connection - task_id (str): the id of task - last_position (int): The last line position of the existing log. - """ - super().__init__() - self.websocket = websocket - self.task_id = task_id - self.loop = asyncio.get_event_loop() - self.last_position = last_position # record last line - self.queue = asyncio.Queue() - self.timer = self.loop.create_task(self.send_messages()) - - async def send_messages(self): - """Send messages to the client.""" - while True: - try: - messages = [] - while True: - message = await asyncio.wait_for(self.queue.get(), timeout=0.1) - messages.append(message) - except asyncio.TimeoutError: - pass - - if messages: - await self.websocket.send_text("\n".join(messages)) - - def on_modified(self, event): - """File modification event.""" - log_path = "{}/task_{}.txt".format(get_task_log_workspace(config.workspace), self.task_id) - with open(log_path, "r") as f: - # Move the file pointer to the last position - f.seek(self.last_position) - lines = f.readlines() - if lines: - # Record the current position of file pointer - self.last_position = f.tell() - for line in lines: - self.queue.put_nowait(line.strip()) - - -# start log watcher -def start_log_watcher(websocket, task_id, last_position): - """Start log watcher. - - Args: - websocket (WebSocket): websocket connection - task_id (str): the id of task. - last_position (int): The last line position of the existing log. 
- - Returns: - Observer : monitor log file changes - """ - observer = Observer() - # watch log/task_{}.txt - log_path = "{}/task_{}.txt".format(get_task_log_workspace(config.workspace), task_id) - observer.schedule(LogEventHandler(websocket, task_id, last_position), log_path, recursive=False) - observer.start() - return observer - - -@app.websocket("/task/screen/{task_id}") -async def websocket_endpoint(websocket: WebSocket, task_id: str): - """Real time log output. - - Args: - websocket (WebSocket): websocket connection - task_id (str): the id of task. - - Raises: - HTTPException: exception - """ - if not check_log_exists(task_id=task_id, task_log_path=get_task_log_workspace(config.workspace)): - raise HTTPException(status_code=404, detail="Task not found") - await websocket.accept() - - # send the log that has been written - log_path = "{}/task_{}.txt".format(get_task_log_workspace(config.workspace), task_id) - last_position = 0 - previous_log = [] - if os.path.exists(log_path): - with open(log_path, "r") as f: - previous_log = f.readlines() - last_position = f.tell() - # send previous_log - if previous_log: - await websocket.send_text("\n".join([message.strip() for message in previous_log])) - - # start log watcher - observer = start_log_watcher(websocket, task_id, last_position) - try: - while True: - await asyncio.sleep(1) - # await websocket.receive_text() - except WebSocketDisconnect: - observer.stop() - await observer.join() - - -@app.get("/download/{task_id}") -async def download_file(task_id: str): - """Download quantized model. - - Args: - task_id (str): the task id - - Raises: - HTTPException: 400, Please check URL - HTTPException: 404, Task failed, file not found - - Returns: - FileResponse: quantized model of zip file format - """ - db_path = get_db_path(config.workspace) - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(r"select status, result, q_model_path from task where id=?", (task_id,)) - res = cursor.fetchone() - cursor.close() - conn.close() - if res is None: - raise HTTPException(status_code=400, detail="Please check URL") - if res[0] != "done": - raise HTTPException(status_code=404, detail="Task failed, file not found") - path = res[2] - zip_filename = "quantized_model.zip" - zip_filepath = os.path.abspath(os.path.join(get_task_workspace(config.workspace), task_id, zip_filename)) - # create zipfile and add file - with zipfile.ZipFile(zip_filepath, "w", zipfile.ZIP_DEFLATED) as zip_file: - for root, dirs, files in os.walk(path): - for file in files: - file_path = os.path.join(root, file) - zip_file.write(file_path, os.path.basename(file_path)) - - return FileResponse( - zip_filepath, - media_type="application/octet-stream", - filename=zip_filename, - background=BackgroundTask(os.remove, zip_filepath), - ) - - -if __name__ == "__main__": - # parse the args and modified the config accordingly - args = parse_arguments() - config.workspace = args.workspace - db_path = get_db_path(config.workspace) - config.task_monitor_port = args.task_monitor_port - config.result_monitor_port = args.result_monitor_port - # initialize the task submitter - task_submitter.task_monitor_port = config.task_monitor_port - task_submitter.result_monitor_port = config.result_monitor_port - config.service_address = task_submitter.service_address - # start the app - uvicorn.run(app, host=args.host, port=args.fastapi_port) diff --git a/neural_solution/frontend/gRPC/__init__.py b/neural_solution/frontend/gRPC/__init__.py deleted file mode 100644 
index ab0ac85de2a..00000000000 --- a/neural_solution/frontend/gRPC/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""GRPC frontend.""" diff --git a/neural_solution/frontend/gRPC/client.py b/neural_solution/frontend/gRPC/client.py deleted file mode 100644 index 7ae3c3d643f..00000000000 --- a/neural_solution/frontend/gRPC/client.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Client of gRPC frontend.""" - -import argparse -import json -import os - -import grpc - -from neural_solution.config import config -from neural_solution.frontend.gRPC.proto import neural_solution_pb2, neural_solution_pb2_grpc -from neural_solution.utils import logger - - -def _parse_task_from_json(request_path): - file_path = os.path.abspath(request_path) - with open(file_path) as fp: - task = json.load(fp) - return task - - -def submit_task(args): - """Implement main entry point for the client of gRPC frontend.""" - task = _parse_task_from_json(args.request) - logger.info("Parsed task:") - logger.info(task) - - # Create a gRPC channel - port = str(config.grpc_api_port) - channel = grpc.insecure_channel("localhost:" + port) - - # Create a stub (client) - stub = neural_solution_pb2_grpc.TaskServiceStub(channel) - - # Ping serve - request = neural_solution_pb2.EmptyRequest() # pylint: disable=no-member - response = stub.Ping(request) - logger.info(response.status) - logger.info(response.msg) - - # Create a task request with the desired fields - request = neural_solution_pb2.Task( # pylint: disable=no-member - script_url=task["script_url"], - optimized=task["optimized"] == "True", - arguments=task["arguments"], - approach=task["approach"], - requirements=task["requirements"], - workers=task["workers"], - ) - - # Call the SubmitTask RPC on the server - response = stub.SubmitTask(request) - - # Process the response - logger.info(response.status) - logger.info(response.task_id) - logger.info(response.msg) - - -def run_query_task_result(args): - """Query task result according to id. 
- - Args: - args: args includes task_id - """ - task_id = args.task_id - # Create a gRPC channel - port = str(config.grpc_api_port) - channel = grpc.insecure_channel("localhost:" + port) - - # Create a stub (client) - stub = neural_solution_pb2_grpc.TaskServiceStub(channel) - - request = neural_solution_pb2.TaskId(task_id=task_id) # pylint: disable=no-member - response = stub.QueryTaskResult(request) - logger.info(response.status) - logger.info(response.tuning_information) - logger.info(response.optimization_result) - - -def run_query_task_status(args): - """Query task status according to id. - - Args: - args: args includes task_id - """ - task_id = args.task_id - # Create a gRPC channel - port = str(config.grpc_api_port) - channel = grpc.insecure_channel("localhost:" + port) - - # Create a stub (client) - stub = neural_solution_pb2_grpc.TaskServiceStub(channel) - - request = neural_solution_pb2.TaskId(task_id=task_id) # pylint: disable=no-member - response = stub.GetTaskById(request) - logger.info(response.status) - logger.info(response.optimized_result) - logger.info(response.result_path) - - -if __name__ == "__main__": - logger.info("Try to start gRPC server.") - """Parse the command line options.""" - parser = argparse.ArgumentParser(description="gRPC Client") - subparsers = parser.add_subparsers(help="Action", dest="action") - - submit_action_parser = subparsers.add_parser("submit", help="Submit help") - - submit_action_parser.set_defaults(func=submit_task) - submit_action_parser.add_argument("--request", type=str, default=None, help="Request json file path.") - - query_action_parser = subparsers.add_parser("query", help="Query help") - query_action_parser.set_defaults(func=run_query_task_result) - query_action_parser.add_argument("--task_id", type=str, default=None, help="Query task by task id.") - - parser.add_argument("--grpc_api_port", type=str, default="8001", help="grpc server port.") - parser.add_argument("--result_monitor_port", type=str, default="2222", help="result monitor port.") - parser.add_argument("--task_monitor_port", type=str, default="3333", help="task monitor port.") - - args = parser.parse_args() - config.grpc_api_port = args.grpc_api_port - config.result_monitor_port = args.result_monitor_port - config.task_monitor_port = args.task_monitor_port - args.func(args) - -# for test: -# python client.py query --task_id="d3e10a49326449fb9d0d62f2bfc1cb43" -# python client.py submit --request="test_task_request.json" diff --git a/neural_solution/frontend/gRPC/proto/__init__.py b/neural_solution/frontend/gRPC/proto/__init__.py deleted file mode 100644 index 06941016b4f..00000000000 --- a/neural_solution/frontend/gRPC/proto/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""GRPC proto.""" diff --git a/neural_solution/frontend/gRPC/proto/generate_cmd.sh b/neural_solution/frontend/gRPC/proto/generate_cmd.sh deleted file mode 100644 index 5e94c28798e..00000000000 --- a/neural_solution/frontend/gRPC/proto/generate_cmd.sh +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -python -m grpc_tools.protoc -I./ --python_out=. --grpc_python_out=. ./neural_solution.proto \ No newline at end of file diff --git a/neural_solution/frontend/gRPC/proto/neural_solution.proto b/neural_solution/frontend/gRPC/proto/neural_solution.proto deleted file mode 100644 index 669a9b5276e..00000000000 --- a/neural_solution/frontend/gRPC/proto/neural_solution.proto +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2023 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -import "google/protobuf/empty.proto"; - -package neural_solution; - - -// Interface exported by the server -service TaskService { - rpc Ping(google.protobuf.Empty) returns (ResponsePingMessage) {} - rpc SubmitTask(Task) returns (TaskResponse) {} - rpc GetTaskById(TaskId) returns (TaskStatus) {} - rpc QueryTaskResult(TaskId) returns (ResponseTaskResult) {} -} - - -// [/task/submit/] -// task request description -message Task { - string script_url = 1; - bool optimized = 2; - repeated string arguments = 3; - string approach = 4; - repeated string requirements = 5; - int32 workers = 6; - } -// task response -message TaskResponse { - string status = 1; - string task_id = 2; - string msg = 3; -} - - -// [/task/{task_id}] -// query task status by task id -message TaskId { - string task_id = 1; -} - -// response -message TaskStatus { - string status = 1; - string optimized_result = 2; - string result_path = 3; -} - -// empty message -message EmptyRequest {} - -// / -message WelcomeMessage { - // repsonce welcome message - string message = 1; -} - -// /ping -message ResponsePingMessage { - // repsonce message for ping - string status = 1; - string msg = 2; -} - -// // optimization result message -// message OptimizatonResult{ -// string optimization_time = 1; -// string accuracy = 2; -// string duration = 3; -// string result_path = 4; -// } - -// // tuning information - -// message TuningInformation{ -// string bseline = 1; -// string message = 2; -// } - -// /task/status/{task_id} -message ResponseTaskResult { - string status = 1; - string tuning_information = 2; - string optimization_result = 3; -} - - - diff --git a/neural_solution/frontend/gRPC/proto/neural_solution_pb2.py b/neural_solution/frontend/gRPC/proto/neural_solution_pb2.py deleted file mode 100644 index 65e8ff7ca34..00000000000 --- a/neural_solution/frontend/gRPC/proto/neural_solution_pb2.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: neural_solution.proto -# pylint: disable=all -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n\x15neural_solution.proto\x12\x0fneural_solution\x1a\x1bgoogle/protobuf/empty.proto"y\n\x04Task\x12\x12\n\nscript_url\x18\x01 \x01(\t\x12\x11\n\toptimized\x18\x02 \x01(\x08\x12\x11\n\targuments\x18\x03 \x03(\t\x12\x10\n\x08\x61pproach\x18\x04 \x01(\t\x12\x14\n\x0crequirements\x18\x05 \x03(\t\x12\x0f\n\x07workers\x18\x06 \x01(\x05"<\n\x0cTaskResponse\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0b\n\x03msg\x18\x03 \x01(\t"\x19\n\x06TaskId\x12\x0f\n\x07task_id\x18\x01 \x01(\t"K\n\nTaskStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x18\n\x10optimized_result\x18\x02 \x01(\t\x12\x13\n\x0bresult_path\x18\x03 \x01(\t"\x0e\n\x0c\x45mptyRequest"!\n\x0eWelcomeMessage\x12\x0f\n\x07message\x18\x01 \x01(\t"2\n\x13ResponsePingMessage\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03msg\x18\x02 \x01(\t"]\n\x12ResponseTaskResult\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x1a\n\x12tuning_information\x18\x02 \x01(\t\x12\x1b\n\x13optimization_result\x18\x03 \x01(\t2\xb5\x02\n\x0bTaskService\x12\x46\n\x04Ping\x12\x16.google.protobuf.Empty\x1a$.neural_solution.ResponsePingMessage"\x00\x12\x44\n\nSubmitTask\x12\x15.neural_solution.Task\x1a\x1d.neural_solution.TaskResponse"\x00\x12\x45\n\x0bGetTaskById\x12\x17.neural_solution.TaskId\x1a\x1b.neural_solution.TaskStatus"\x00\x12Q\n\x0fQueryTaskResult\x12\x17.neural_solution.TaskId\x1a#.neural_solution.ResponseTaskResult"\x00\x62\x06proto3' -) - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "neural_solution_pb2", globals()) -if _descriptor._USE_C_DESCRIPTORS == False: - DESCRIPTOR._options = None - _TASK._serialized_start = 71 - _TASK._serialized_end = 192 - _TASKRESPONSE._serialized_start = 194 - _TASKRESPONSE._serialized_end = 254 - _TASKID._serialized_start = 256 - _TASKID._serialized_end = 281 - _TASKSTATUS._serialized_start = 283 - _TASKSTATUS._serialized_end = 358 - _EMPTYREQUEST._serialized_start = 360 - _EMPTYREQUEST._serialized_end = 374 - _WELCOMEMESSAGE._serialized_start = 376 - _WELCOMEMESSAGE._serialized_end = 409 - _RESPONSEPINGMESSAGE._serialized_start = 411 - _RESPONSEPINGMESSAGE._serialized_end = 461 - _RESPONSETASKRESULT._serialized_start = 463 - _RESPONSETASKRESULT._serialized_end = 556 - _TASKSERVICE._serialized_start = 559 - _TASKSERVICE._serialized_end = 868 -# @@protoc_insertion_point(module_scope) diff --git a/neural_solution/frontend/gRPC/proto/neural_solution_pb2_grpc.py b/neural_solution/frontend/gRPC/proto/neural_solution_pb2_grpc.py deleted file mode 100644 index 8c5f1c25831..00000000000 --- a/neural_solution/frontend/gRPC/proto/neural_solution_pb2_grpc.py +++ /dev/null @@ -1,390 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -# pylint: disable=all -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - -import neural_solution.frontend.gRPC.proto.neural_solution_pb2 as neural__solution__pb2 - - -class TaskServiceStub(object): - """Interface exported by the server.""" - - def __init__(self, channel): - """Init constructor. - - Args: - channel: A grpc.Channel. - """ - self.Ping = channel.unary_unary( - "/neural_solution.TaskService/Ping", - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=neural__solution__pb2.ResponsePingMessage.FromString, - ) - self.SubmitTask = channel.unary_unary( - "/neural_solution.TaskService/SubmitTask", - request_serializer=neural__solution__pb2.Task.SerializeToString, - response_deserializer=neural__solution__pb2.TaskResponse.FromString, - ) - self.GetTaskById = channel.unary_unary( - "/neural_solution.TaskService/GetTaskById", - request_serializer=neural__solution__pb2.TaskId.SerializeToString, - response_deserializer=neural__solution__pb2.TaskStatus.FromString, - ) - self.QueryTaskResult = channel.unary_unary( - "/neural_solution.TaskService/QueryTaskResult", - request_serializer=neural__solution__pb2.TaskId.SerializeToString, - response_deserializer=neural__solution__pb2.ResponseTaskResult.FromString, - ) - - -class TaskServiceServicer(object): - """Interface exported by the server.""" - - def Ping(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SubmitTask(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetTaskById(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def QueryTaskResult(self, request, context): - """Missing associated documentation comment in .proto file.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_TaskServiceServicer_to_server(servicer, server): - """Add the TaskServiceServicer to gRpc server. 
- - Args: - servicer (TaskSubmitterServicer): task servicer - server (grpc._server._Server): server - """ - rpc_method_handlers = { - "Ping": grpc.unary_unary_rpc_method_handler( - servicer.Ping, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=neural__solution__pb2.ResponsePingMessage.SerializeToString, - ), - "SubmitTask": grpc.unary_unary_rpc_method_handler( - servicer.SubmitTask, - request_deserializer=neural__solution__pb2.Task.FromString, - response_serializer=neural__solution__pb2.TaskResponse.SerializeToString, - ), - "GetTaskById": grpc.unary_unary_rpc_method_handler( - servicer.GetTaskById, - request_deserializer=neural__solution__pb2.TaskId.FromString, - response_serializer=neural__solution__pb2.TaskStatus.SerializeToString, - ), - "QueryTaskResult": grpc.unary_unary_rpc_method_handler( - servicer.QueryTaskResult, - request_deserializer=neural__solution__pb2.TaskId.FromString, - response_serializer=neural__solution__pb2.ResponseTaskResult.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler("neural_solution.TaskService", rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class TaskService(object): - """Interface exported by the server.""" - - @staticmethod - def Ping( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - """Tets server status. - - Args: - request: An iterator that yields request values for the RPC. - target: The server address. - method: The name of the RPC method. - request_serializer: Optional :term:`serializer` for serializing the request - message. Request goes unserialized in case None is passed. - response_deserializer: Optional :term:`deserializer` for deserializing the response - message. Response goes undeserialized in case None is passed. - options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core - runtime) to configure the channel. - channel_credentials: A credential applied to the whole channel, e.g. the - return value of grpc.ssl_channel_credentials() or - grpc.insecure_channel_credentials(). - insecure: If True, specifies channel_credentials as - :term:`grpc.insecure_channel_credentials()`. This option is mutually - exclusive with the `channel_credentials` option. - call_credentials: A call credential applied to each call individually, - e.g. the output of grpc.metadata_call_credentials() or - grpc.access_token_call_credentials(). - compression: An optional value indicating the compression method to be - used over the lifetime of the channel, e.g. grpc.Compression.Gzip. - wait_for_ready: An optional flag indicating whether the RPC should fail - immediately if the connection is not ready at the time the RPC is - invoked, or if it should wait until the connection to the server - becomes ready. When using this option, the user will likely also want - to set a timeout. Defaults to True. - timeout: An optional duration of time in seconds to allow for the RPC, - after which an exception will be raised. If timeout is unspecified, - defaults to a timeout controlled by the - GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is - unset, defaults to 60 seconds. Supply a value of None to indicate that - no timeout should be enforced. - metadata: Optional metadata to send to the server. - - Returns: - The response to the RPC. 
- """ - return grpc.experimental.unary_unary( - request, - target, - "/neural_solution.TaskService/Ping", - google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - neural__solution__pb2.ResponsePingMessage.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SubmitTask( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - """Submit task. - - Args: - request: An iterator that yields request values for the RPC. - target: The server address. - method: The name of the RPC method. - request_serializer: Optional :term:`serializer` for serializing the request - message. Request goes unserialized in case None is passed. - response_deserializer: Optional :term:`deserializer` for deserializing the response - message. Response goes undeserialized in case None is passed. - options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core - runtime) to configure the channel. - channel_credentials: A credential applied to the whole channel, e.g. the - return value of grpc.ssl_channel_credentials() or - grpc.insecure_channel_credentials(). - insecure: If True, specifies channel_credentials as - :term:`grpc.insecure_channel_credentials()`. This option is mutually - exclusive with the `channel_credentials` option. - call_credentials: A call credential applied to each call individually, - e.g. the output of grpc.metadata_call_credentials() or - grpc.access_token_call_credentials(). - compression: An optional value indicating the compression method to be - used over the lifetime of the channel, e.g. grpc.Compression.Gzip. - wait_for_ready: An optional flag indicating whether the RPC should fail - immediately if the connection is not ready at the time the RPC is - invoked, or if it should wait until the connection to the server - becomes ready. When using this option, the user will likely also want - to set a timeout. Defaults to True. - timeout: An optional duration of time in seconds to allow for the RPC, - after which an exception will be raised. If timeout is unspecified, - defaults to a timeout controlled by the - GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is - unset, defaults to 60 seconds. Supply a value of None to indicate that - no timeout should be enforced. - metadata: Optional metadata to send to the server. - - Returns: - The response to the RPC. - """ - return grpc.experimental.unary_unary( - request, - target, - "/neural_solution.TaskService/SubmitTask", - neural__solution__pb2.Task.SerializeToString, - neural__solution__pb2.TaskResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetTaskById( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - """Get task status according to id. - - Args: - request: An iterator that yields request values for the RPC. - target: The server address. - method: The name of the RPC method. - request_serializer: Optional :term:`serializer` for serializing the request - message. Request goes unserialized in case None is passed. - response_deserializer: Optional :term:`deserializer` for deserializing the response - message. 
Response goes undeserialized in case None is passed. - options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core - runtime) to configure the channel. - channel_credentials: A credential applied to the whole channel, e.g. the - return value of grpc.ssl_channel_credentials() or - grpc.insecure_channel_credentials(). - insecure: If True, specifies channel_credentials as - :term:`grpc.insecure_channel_credentials()`. This option is mutually - exclusive with the `channel_credentials` option. - call_credentials: A call credential applied to each call individually, - e.g. the output of grpc.metadata_call_credentials() or - grpc.access_token_call_credentials(). - compression: An optional value indicating the compression method to be - used over the lifetime of the channel, e.g. grpc.Compression.Gzip. - wait_for_ready: An optional flag indicating whether the RPC should fail - immediately if the connection is not ready at the time the RPC is - invoked, or if it should wait until the connection to the server - becomes ready. When using this option, the user will likely also want - to set a timeout. Defaults to True. - timeout: An optional duration of time in seconds to allow for the RPC, - after which an exception will be raised. If timeout is unspecified, - defaults to a timeout controlled by the - GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is - unset, defaults to 60 seconds. Supply a value of None to indicate that - no timeout should be enforced. - metadata: Optional metadata to send to the server. - - Returns: - The response to the RPC. - """ - return grpc.experimental.unary_unary( - request, - target, - "/neural_solution.TaskService/GetTaskById", - neural__solution__pb2.TaskId.SerializeToString, - neural__solution__pb2.TaskStatus.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def QueryTaskResult( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - """Get task result according to id. - - Args: - request: An iterator that yields request values for the RPC. - target: The server address. - method: The name of the RPC method. - request_serializer: Optional :term:`serializer` for serializing the request - message. Request goes unserialized in case None is passed. - response_deserializer: Optional :term:`deserializer` for deserializing the response - message. Response goes undeserialized in case None is passed. - options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core - runtime) to configure the channel. - channel_credentials: A credential applied to the whole channel, e.g. the - return value of grpc.ssl_channel_credentials() or - grpc.insecure_channel_credentials(). - insecure: If True, specifies channel_credentials as - :term:`grpc.insecure_channel_credentials()`. This option is mutually - exclusive with the `channel_credentials` option. - call_credentials: A call credential applied to each call individually, - e.g. the output of grpc.metadata_call_credentials() or - grpc.access_token_call_credentials(). - compression: An optional value indicating the compression method to be - used over the lifetime of the channel, e.g. grpc.Compression.Gzip. 
- wait_for_ready: An optional flag indicating whether the RPC should fail - immediately if the connection is not ready at the time the RPC is - invoked, or if it should wait until the connection to the server - becomes ready. When using this option, the user will likely also want - to set a timeout. Defaults to True. - timeout: An optional duration of time in seconds to allow for the RPC, - after which an exception will be raised. If timeout is unspecified, - defaults to a timeout controlled by the - GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is - unset, defaults to 60 seconds. Supply a value of None to indicate that - no timeout should be enforced. - metadata: Optional metadata to send to the server. - - Returns: - The response to the RPC. - """ - return grpc.experimental.unary_unary( - request, - target, - "/neural_solution.TaskService/QueryTaskResult", - neural__solution__pb2.TaskId.SerializeToString, - neural__solution__pb2.ResponseTaskResult.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/neural_solution/frontend/gRPC/server.py b/neural_solution/frontend/gRPC/server.py deleted file mode 100644 index 28b582ee99d..00000000000 --- a/neural_solution/frontend/gRPC/server.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Server of gRPC frontend.""" - -import argparse -import logging -from concurrent import futures - -import grpc - -from neural_solution.config import config -from neural_solution.frontend.gRPC.proto import neural_solution_pb2, neural_solution_pb2_grpc -from neural_solution.frontend.task_submitter import task_submitter -from neural_solution.frontend.utility import ( - check_service_status, - query_task_result, - query_task_status, - submit_task_to_db, -) -from neural_solution.utils import logger -from neural_solution.utils.utility import dict_to_str, get_db_path - - -class TaskSubmitterServicer(neural_solution_pb2_grpc.TaskServiceServicer): - """Deliver services. - - Args: - neural_solution_pb2_grpc (): task servicer - """ - - def __init__(self) -> None: - """Init.""" - pass - - def Ping(self, empty_msg, context): - """Check service status. - - Args: - empty_msg (str): empty message - context (str): context - - Returns: - Response: service status - """ - logger.info("Ping grpc serve.") - port_lst = [config.result_monitor_port] - result = check_service_status(port_lst, service_address=config.service_address) - response = neural_solution_pb2.ResponsePingMessage(**result) # pylint: disable=no-member - return response - - def SubmitTask(self, task, context): - """Submit task. 
- - Args: - task (Task): task object - Fields: - task_id: The task id - arguments: The task command - workers: The requested resource unit number - status: The status of the task: pending/running/done - result: The result of the task, which is only value-assigned when the task is done - - Returns: - json: status , id of task and messages. - """ - # Process the task - logger.info("Submit task to task db") - db_path = get_db_path(config.workspace) - logger.info(db_path) - result = submit_task_to_db(task=task, task_submitter=task_submitter, db_path=get_db_path(config.workspace)) - # Return a response - response = neural_solution_pb2.TaskResponse(**result) # pylint: disable=no-member - return response - - def GetTaskById(self, task_id, context): - """Get task status, result, quantized model path according to id. - - Args: - task_id (str): the id of task. - - Returns: - json: task status, result, quantized model path - """ - db_path = get_db_path(config.workspace) - result = query_task_status(task_id.task_id, db_path) - logger.info("query result : result") - response = neural_solution_pb2.TaskStatus(**result) # pylint: disable=no-member - return response - - def QueryTaskResult(self, task_id, context): - """Get task status and information according to id. - - Args: - task_id (str): the id of task. - - Returns: - json: task status and information - """ - db_path = get_db_path(config.workspace) - result = query_task_result(task_id.task_id, db_path, config.workspace) - result["tuning_information"] = dict_to_str(result["tuning_information"]) - result["optimization_result"] = dict_to_str(result["optimization_result"]) - response = neural_solution_pb2.ResponseTaskResult(**result) # pylint: disable=no-member - return response - - -def serve(): - """Service entrance.""" - port = str(config.grpc_api_port) - server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) - neural_solution_pb2_grpc.add_TaskServiceServicer_to_server(TaskSubmitterServicer(), server) - server.add_insecure_port("[::]:" + port) - server.start() - logger.info("Server started, listening on " + port) - server.wait_for_termination() - - -def parse_arguments(): - """Parse the command line options.""" - parser = argparse.ArgumentParser(description="Frontend with gRPC API") - parser.add_argument("-H", "--host", type=str, default="0.0.0.0", help="The address to submit task.") - parser.add_argument("-FP", "--grpc_api_port", type=int, default=8001, help="Port to submit task by user.") - parser.add_argument("-TMP", "--task_monitor_port", type=int, default=2222, help="Port to monitor task.") - parser.add_argument("-RMP", "--result_monitor_port", type=int, default=3333, help="Port to monitor result.") - parser.add_argument("-WS", "--workspace", type=str, default="./ns_workspace", help="Work space.") - args = parser.parse_args() - return args - - -if __name__ == "__main__": - logger.info("Try to start gRPC server.") - logging.basicConfig() - args = parse_arguments() - logger.info(args.workspace) - config.workspace = args.workspace - config.grpc_api_port = args.grpc_api_port - config.result_monitor_port = args.result_monitor_port - config.task_monitor_port = args.task_monitor_port - # initialize the task submitter - task_submitter.task_monitor_port = config.task_monitor_port - task_submitter.result_monitor_port = config.result_monitor_port - serve() diff --git a/neural_solution/frontend/task_submitter.py b/neural_solution/frontend/task_submitter.py deleted file mode 100644 index ea862a1a25d..00000000000 --- 
a/neural_solution/frontend/task_submitter.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution task submitter.""" - -import json -import socket - -from pydantic import BaseModel # pylint: disable=no-name-in-module - -from neural_solution.config import config - - -class Task(BaseModel): - """Task definition for submitting requests. - - Args: - BaseModel (ModelMetaclass): meta class - """ - - script_url: str - optimized: bool - arguments: list - approach: str - requirements: list - workers: int - - -class TaskSubmitter: - """Responsible for submitting tasks.""" - - def __init__(self, task_monitor_port=2222, result_monitor_port=3333, service_address="localhost"): - """Init. - - Args: - task_monitor_port (int, optional): the port for monitoring tasks. Defaults to 2222. - result_monitor_port (int, optional): the port for monitoring results. Defaults to 3333. - service_address (str, optional): the service address. Defaults to "localhost". - """ - self.task_monitor_port = task_monitor_port - self.result_monitor_port = result_monitor_port - self.service_address = service_address - - def serialize(self, tid: str) -> bytes: - """Serialize a str object.""" - d = {"task_id": tid} - return json.dumps(d).encode() - - def submit_task(self, tid): - """Submit task by sending id. - - Args: - tid (str): the id of task - """ - s = socket.socket() - s.connect((self.service_address, self.task_monitor_port)) - s.send(self.serialize(tid)) - s.close() - - -task_submitter = TaskSubmitter( - task_monitor_port=config.task_monitor_port, result_monitor_port=config.result_monitor_port -) diff --git a/neural_solution/frontend/user_facing_api.json b/neural_solution/frontend/user_facing_api.json deleted file mode 100644 index d355db7fd9d..00000000000 --- a/neural_solution/frontend/user_facing_api.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "root": { - "get": { - "description": "Welcome interface", - "responses": { - "200": { - "message": "Welcome to Neural Solution!" - } - } - }, - "/task": { - "/submit": { - "post": { - "description": "Submit new task.", - "parameters": [ - "script_url", - "..." - ], - "responses": { - "200": { - "description": "Submitted successfully.", - "content": { - "status": "Successfully." - }, - "task_id": { - "type": "string", - "description": "Hashed key." - } - }, - "500":{ - "description": "Submitted failed.", - "content": { - "status": "Failed." 
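TaskSubmitter.submit_task above pushes the task id to the backend over a plain TCP socket as a JSON payload. The backend runner itself is outside this hunk, so the following throwaway listener only illustrates the wire format, assuming the default task_monitor_port 2222:

import json
import socket

# Accept one connection from TaskSubmitter.submit_task and decode its payload.
srv = socket.socket()
srv.bind(("localhost", 2222))   # 2222 is the default task_monitor_port
srv.listen(1)
conn, _ = srv.accept()
payload = json.loads(conn.recv(1024).decode())   # e.g. {"task_id": "<hex id>"}
print(payload["task_id"])
conn.close()
srv.close()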
- } - } - } - } - }, - "/status/{task_id}": { - "get": { - "description": "Get the task status.", - "responses":{ - "200": { - "description": "Task is running.", - "content": { - "status": "running" - } - } - } - } - }, - "/log/{task_id}": { - "get": { - "description": "Get the task log.", - "responses":{ - "200": { - "description": "Get the task log.", - "content": "task log" - } - } - } - }, - "/screen/{task_id}": { - "WebSocket": { - "description": "Get real-time task log.", - "responses":{ - "101": { - "description": "Get real-time task log.", - "content": "real-time task log" - }, - "1000":{ - "description": "Normal Closure", - "content": "Connection was closed successfully." - }, - "404": { - "description": "Task not found.", - "content": { - "status": "Failed." - } - } - } - } - } - }, - "/ping": { - "get":{ - "description": "Check the health status of Neural Solution", - "responses":{ - "200": { - "description": "The health status", - "content1": { - "status": "Healthy", - "msg": "Neural Solution is running." - }, - "content2": { - "status": "Failed.", - "msg": "Ping fail! & error message" - } - } - } - } - }, - "/cluster": { - "get":{ - "description": "Get cluster running status", - "responses":{ - "200": { - "description": "The cluster running status, including node information, node usage and availability, and total resources", - "content1": { - "status": "Healthy", - "msg": "Cluster information." - } - } - } - } - }, - "/download": { - "get":{ - "description": "Download optimized result.", - "responses":{ - "200": { - "description": "Download quantized model zip file locally.", - "content": "quantized model zip file." - }, - "400": { - "description": "User input error.", - "content1": { - "msg": "Please check URL." - } - }, - "404": { - "description": "No quantized model when task failed", - "content1": { - "msg": "Task failed, file not found" - } - } - } - }, - "/description":{ - "get":{ - "description": "Get user facing api.", - "responses":{ - "200": { - "description": "Get user facing api.", - "content1": { - "msg": "The user facing api." - } - } - } - } - } - } - } -} \ No newline at end of file diff --git a/neural_solution/frontend/utility.py b/neural_solution/frontend/utility.py deleted file mode 100644 index a3303abc5e4..00000000000 --- a/neural_solution/frontend/utility.py +++ /dev/null @@ -1,355 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Common utilities for all frontend components.""" - -import json -import os -import re -import socket -import sqlite3 -import uuid - -import pandas as pd - -from neural_solution.utils import logger -from neural_solution.utils.utility import dict_to_str, get_task_log_workspace - - -def query_task_status(task_id, db_path): - """Query task status according to id. - - Args: - task_id (str): the id of task - db_path (str): the path of database - - Returns: - dict: the task status and information. 
- """ - res = None - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(r"select status, result, q_model_path from task where id=?", (task_id,)) - res = cursor.fetchone() - cursor.close() - conn.close() - return { - "status": res[0], - "optimized_result": dict_to_str(deserialize(res[1]) if res[1] else res[1]), - "result_path": res[2], - } - - -def query_task_result(task_id, db_path, workspace): - """Query the task result according id. - - Args: - task_id (str): the id of task - db_path (str): the path of database - workspace (str): the workspace for Neural Solution - - Returns: - dict: task result - """ - status = "unknown" - tuning_info = {} - optimization_result = {} - - res = None - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(r"select status, result, q_model_path from task where id=?", (task_id,)) - res = cursor.fetchone() - cursor.close() - conn.close() - logger.info("in query") - if not res: - status = "Please check url." - elif res[0] == "done": - status = res[0] - optimization_result = deserialize(res[1]) if res[1] else res[1] - optimization_result["result_path"] = res[2] - elif res[0] == "pending": - status = "pending" - else: - baseline = get_baseline_during_tuning(task_id, get_task_log_workspace(workspace)) - tuning_result = get_res_during_tuning(task_id, get_task_log_workspace(workspace)) - status = res[0] - tuning_info = {"baseline": baseline, "message": tuning_result} - result = {"status": status, "tuning_information": tuning_info, "optimization_result": optimization_result} - return result - - -def check_service_status(port_lst, service_address): - """Check server status. - - Args: - port_lst (List): ports list - service_address (str): service ip - - Returns: - dict: server status and messages - """ - count = 0 - msg = "Neural Solution is running." - for port in port_lst: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - sock.connect((service_address, port)) - sock.send(serialize({"ping": "test"})) - sock.settimeout(5) - response = sock.recv(1024) - if response == b"ok": - count += 1 - sock.close() - continue - except ConnectionRefusedError: - msg = "Ping fail! Make sure Neural Solution runner is running!" - break - except Exception as e: - msg = "Ping fail! {}".format(e) - break - sock.close() - return {"status": "Healthy", "msg": msg} if count == 1 else {"status": "Failed", "msg": msg} - - -def submit_task_to_db(task, task_submitter, db_path): - """Submit the task to db. - - Args: - task (Task): the object of Task - task_submitter (TaskSubmitter): the object of TaskSubmitter - db_path (str): the path of database - - Returns: - str: task id and information - """ - msg = "Task submitted failed" - status = "failed" - task_id = "-1" - result = {"status": status, "task_id": task_id, "msg": msg} - if os.path.isfile(db_path): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - task_id = str(uuid.uuid4()).replace("-", "") - sql = ( - r"insert into task(id, script_url, optimized, arguments, approach, requirements, workers, status)" - + r" values ('{}', '{}', {}, '{}', '{}', '{}', {}, 'pending')".format( - task_id, - task.script_url, - task.optimized, - list_to_string(task.arguments), - task.approach, - list_to_string(task.requirements), - task.workers, - ) - ) - cursor.execute(sql) - conn.commit() - try: - task_submitter.submit_task(task_id) - except ConnectionRefusedError: - msg = "Task Submitted fail! 
Make sure neural solution runner is running!" - except Exception as e: - msg = "Task Submitted fail! {}".format(e) - conn.close() - status = "successfully" - msg = "Task submitted successfully" - else: - msg = "Task Submitted fail! db not found!" - result["status"] = status - result["task_id"] = task_id - result["msg"] = msg - return result - - -def serialize(request: dict) -> bytes: - """Serialize a dict object to bytes for inter-process communication.""" - return json.dumps(request).encode() - - -def deserialize(request: bytes) -> dict: - """Deserialize the received bytes to a dict object.""" - return json.loads(request) - - -def get_cluster_info(db_path: str): - """Get cluster information from database. - - Returns: - json: cluster information includes the number of nodes and node information. - """ - conn = sqlite3.connect(f"{db_path}") - cursor = conn.cursor() - cursor.execute(r"select * from cluster") - conn.commit() - rows = cursor.fetchall() - conn.close() - return {"Cluster info": rows} - - -def get_cluster_table(db_path: str): - """Get cluster table from database. - - Returns: - html: table of cluster information. - """ - conn = sqlite3.connect(f"{db_path}") - cursor = conn.cursor() - cursor.execute(r"select * from cluster") - conn.commit() - rows = cursor.fetchall() - df = pd.DataFrame(rows, columns=["Node", "Node info", "status", "free workers", "busy workers", "total workers"]) - html_table = df.to_html( - index=False, - ) - conn.close() - return html_table - - -def get_res_during_tuning(task_id: str, task_log_path): - """Get result during tuning. - - Args: - task_id (string): used to generate log path. - - Returns: - dict: the result of {"Tuning count":, "Accuracy":, "Duration (seconds)"}. - """ - results = {} - log_path = "{}/task_{}.txt".format(task_log_path, task_id) - for line in reversed(open(log_path).readlines()): - res_pattern = r"Tune (\d+) result is: " - res_pattern = r"Tune (\d+) result is:\s.*?\(int8\|fp32\):\s+(\d+\.\d+).*?\(int8\|fp32\):\s+(\d+\.\d+).*?" - res_matches = re.findall(res_pattern, line) - if res_matches: - results["Tuning count"] = res_matches[0][0] - results["Accuracy"] = res_matches[0][1] - results["Duration (seconds)"] = res_matches[0][2] - # break when the last result is matched - break - - logger.info("Query results: {}".format(results)) - return results if results else "Tune 1 running..." - - -def get_baseline_during_tuning(task_id: str, task_log_path): - """Get result during tuning. - - Args: - task_id (string): used to generate log path. - - Returns: - dict: the baseline of {"Accuracy":,"Duration (seconds)":}. - """ - results = {} - log_path = "{}/task_{}.txt".format(task_log_path, task_id) - for line in reversed(open(log_path).readlines()): - res_pattern = "FP32 baseline is:\s+.*?(\d+\.\d+).*?(\d+\.\d+).*?" - res_matches = re.findall(res_pattern, line) - if res_matches: - results["Accuracy"] = res_matches[0][0] - results["Duration (seconds)"] = res_matches[0][1] - # break when the last result is matched - break - - logger.info("FP32 baseline: {}".format(results)) - return results if results else "Getting FP32 baseline..." - - -def check_log_exists(task_id: str, task_log_path): - """Check whether the log file exists. - - Args: - task_id (str): task id. - - Returns: - bool: Does the log file exist. - """ - log_path = "{}/task_{}.txt".format(task_log_path, task_id) - if os.path.exists(log_path): - return True - else: - return False - - -def list_to_string(lst: list): - """Convert the list to a space concatenated string. 
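get_baseline_during_tuning and get_res_during_tuning above scrape task_{id}.txt with regular expressions, so the log lines must follow a specific shape. The lines below are made up to fit those patterns and only illustrate what the capture groups extract:

import re

# Made-up log lines shaped the way the two helpers above expect.
baseline_line = "FP32 baseline is: [Accuracy: 0.7592, Duration (seconds): 325.10]"
tune_line = "Tune 1 result is: [Accuracy (int8|fp32): 0.7581|0.7592, Duration (seconds) (int8|fp32): 298.33|325.10]"

baseline = re.findall(r"FP32 baseline is:\s+.*?(\d+\.\d+).*?(\d+\.\d+).*?", baseline_line)
result = re.findall(
    r"Tune (\d+) result is:\s.*?\(int8\|fp32\):\s+(\d+\.\d+).*?\(int8\|fp32\):\s+(\d+\.\d+).*?", tune_line
)
print(baseline)   # [('0.7592', '325.10')]  -> accuracy, duration
print(result)     # [('1', '0.7581', '298.33')]  -> tuning count, accuracy, duration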
- - Args: - lst (list): strings - - Returns: - str: string - """ - return " ".join(str(i) for i in lst) - - -def is_invalid_str(to_test_str: str): - """Verify whether the to_test_str is valid. - - Args: - to_test_str (str): string to be tested. - - Returns: - bool: valid or invalid - """ - return any(char in to_test_str for char in [" ", '"', "'", "&", "|", ";", "`", ">"]) - - -def is_valid_task(task: dict) -> bool: - """Verify whether the task is valid. - - Args: - task (dict): task request - - Returns: - bool: valid or invalid - """ - required_fields = ["script_url", "optimized", "arguments", "approach", "requirements", "workers"] - - for field in required_fields: - if field not in task: - return False - - if not isinstance(task["script_url"], str) or is_invalid_str(task["script_url"]): - return False - - if (isinstance(task["optimized"], str) and task["optimized"] not in ["True", "False"]) or ( - not isinstance(task["optimized"], str) and not isinstance(task["optimized"], bool) - ): - return False - - if not isinstance(task["arguments"], list): - return False - else: - for argument in task["arguments"]: - if is_invalid_str(argument): - return False - - if not isinstance(task["approach"], str) or task["approach"] not in ["static", "static_ipex", "dynamic", "auto"]: - return False - - if not isinstance(task["requirements"], list): - return False - else: - for requirement in task["requirements"]: - if is_invalid_str(requirement): - return False - - if not isinstance(task["workers"], int) or task["workers"] < 1: - return False - - return True diff --git a/neural_solution/launcher.py b/neural_solution/launcher.py deleted file mode 100644 index 8e6d9c1a677..00000000000 --- a/neural_solution/launcher.py +++ /dev/null @@ -1,437 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""The entry of Neural Solution.""" -import argparse -import os -import shlex -import socket -import sqlite3 -import subprocess -import sys -import time -from datetime import datetime - -import psutil -from prettytable import PrettyTable - -from neural_solution.utils import logger -from neural_solution.utils.utility import get_db_path - - -def check_ports(args): - """Check parameters ending in '_port'. - - Args: - args (argparse.Namespace): parameters. - """ - for arg in vars(args): - if "_port" in arg: - check_port(getattr(args, arg)) - - -def check_port(port): - """Check if the given port is standardized. - - Args: - port (int): port number. - """ - if not str(port).isdigit() or int(port) < 0 or int(port) > 65535: - logger.info(f"Error: Invalid port number: {port}") - sys.exit(1) - - -def get_local_service_ip(port): - """Get the local IP address of the machine running the service. - - Args: - port (int): The port number of the service. - - Returns: - str: The IP address of the machine running the service. 
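is_valid_task above is the gate that keeps shell metacharacters out of the SQL insert and the task command; a brief illustration with made-up payloads (the import refers to the module removed in this diff):

from neural_solution.frontend.utility import is_valid_task

ok_task = {
    "script_url": "example_scripts/tf_example1",   # placeholder path
    "optimized": "True",
    "arguments": ["--dataset_location=dataset", "--model_path=model"],
    "approach": "static",
    "requirements": [],
    "workers": 1,
}
print(is_valid_task(ok_task))                                   # True

unsafe = dict(ok_task, arguments=["--model_path=model;rm -rf ~"])
print(is_valid_task(unsafe))                                    # False: ';' and ' ' fail is_invalid_str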
- """ - with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: - s.connect(("8.8.8.8", port)) - return s.getsockname()[0] - - -def stop_service(): - """Stop service.""" - # Get all running processes - for proc in psutil.process_iter(): - try: - # Get the process details - pinfo = proc.as_dict(attrs=["pid", "name", "cmdline"]) - # Check if the process is the target process - if "neural_solution.backend.runner" in pinfo["cmdline"]: - # Terminate the process using Process.kill() method - process = psutil.Process(pinfo["pid"]) - process.kill() - elif "neural_solution.frontend.fastapi.main_server" in pinfo["cmdline"]: - process = psutil.Process(pinfo["pid"]) - process.kill() - elif "neural_solution.frontend.gRPC.server" in pinfo["cmdline"]: - process = psutil.Process(pinfo["pid"]) - process.kill() - except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): - pass - # Service End - logger.info("Neural Solution Service Stopped!") - - -def check_port_free(port): - """Check if the port is free. - - Args: - port (int): port number. - - Returns: - bool : the free state of the port. - """ - for conn in psutil.net_connections(): - if conn.status == "LISTEN" and conn.laddr.port == port: - return False - return True - - -def start_service(args): - """Start service. - - Args: - args (argparse.Namespace): parameters. - """ - # Check ports - ports_flag = 0 - for port in [args.restful_api_port, args.task_monitor_port, args.result_monitor_port]: - # Check if the port is occupied - if not check_port_free(port): - logger.info(f"Port {port} is in use!") - ports_flag += 1 - if ports_flag > 0: - logger.info("Please replace the occupied port!") - sys.exit(1) - # Check completed - - # Check conda environment - if not args.conda_env: - conda_env = os.environ.get("CONDA_DEFAULT_ENV") - if not conda_env: - logger.info("No environment specified or conda environment activated !!!") - sys.exit(1) - else: - logger.info( - "No environment specified, use environment activated:" - + f" ({conda_env}) as the task runtime environment." 
- ) - conda_env_name = conda_env - else: - conda_env_name = args.conda_env - # Check completed - - serve_log_dir = f"{args.workspace}/serve_log" - if not os.path.exists(serve_log_dir): - os.makedirs(serve_log_dir) - date_time = datetime.now() - date_suffix = "_" + date_time.strftime("%Y%m%d-%H%M%S") - date_suffix = "" - with open(f"{serve_log_dir}/backend{date_suffix}.log", "w") as f: - subprocess.Popen( - [ - "python", - "-m", - "neural_solution.backend.runner", - "--hostfile", - shlex.quote(str(args.hostfile)), - "--task_monitor_port", - shlex.quote(str(args.task_monitor_port)), - "--result_monitor_port", - shlex.quote(str(args.result_monitor_port)), - "--workspace", - shlex.quote(str(args.workspace)), - "--conda_env_name", - shlex.quote(str(conda_env_name)), - "--upload_path", - shlex.quote(str(args.upload_path)), - ], - stdout=os.dup(f.fileno()), - stderr=subprocess.STDOUT, - ) - if args.api_type in ["all", "restful"]: - with open(f"{serve_log_dir}/frontend{date_suffix}.log", "w") as f: - subprocess.Popen( - [ - "python", - "-m", - "neural_solution.frontend.fastapi.main_server", - "--host", - "0.0.0.0", - "--fastapi_port", - shlex.quote(str(args.restful_api_port)), - "--task_monitor_port", - shlex.quote(str(args.task_monitor_port)), - "--result_monitor_port", - shlex.quote(str(args.result_monitor_port)), - "--workspace", - shlex.quote(str(args.workspace)), - ], - stdout=os.dup(f.fileno()), - stderr=subprocess.STDOUT, - ) - if args.api_type in ["all", "grpc"]: - with open(f"{serve_log_dir}/frontend_grpc.log", "w") as f: - subprocess.Popen( - [ - "python", - "-m", - "neural_solution.frontend.gRPC.server", - "--grpc_api_port", - shlex.quote(str(args.grpc_api_port)), - "--task_monitor_port", - shlex.quote(str(args.task_monitor_port)), - "--result_monitor_port", - shlex.quote(str(args.result_monitor_port)), - "--workspace", - shlex.quote(str(args.workspace)), - ], - stdout=os.dup(f.fileno()), - stderr=subprocess.STDOUT, - ) - ip_address = get_local_service_ip(80) - - # Check if the service is started - # Set the maximum waiting time to 3 seconds - timeout = 3 - # Start time - start_time = time.time() - while True: - # Check if the ports are in use - if ( - check_port_free(args.task_monitor_port) - or check_port_free(args.result_monitor_port) - or check_port_free(args.restful_api_port) - ): - # If the ports are not in use, wait for a second and check again - time.sleep(0.5) - # Check if timed out - current_time = time.time() - elapsed_time = current_time - start_time - if elapsed_time >= timeout: - # If timed out, break the loop - logger.info("Timeout!") - break - - # Continue to wait for all ports to be in use - else: - break - ports_flag = 0 - fail_msg = "Neural Solution START FAIL!" - for port in [args.task_monitor_port, args.result_monitor_port]: - if not check_port_free(port): - ports_flag += 1 - - # Check if the serve port is occupied - if not check_port_free(args.restful_api_port): - ports_flag += 1 - else: - fail_msg = f"{fail_msg}\nPlease check frontend serve log!" - - if ports_flag < 2: - fail_msg = f"{fail_msg}\nPlease check backend serve log!" - - if ports_flag < 3: - logger.info(fail_msg) - sys.exit(1) - # Check completed - - logger.info("Neural Solution Service Started!") - logger.info(f'Service log saving path is in "{os.path.abspath(serve_log_dir)}"') - logger.info(f"To submit task at: {ip_address}:{args.restful_api_port}/task/submit/") - logger.info("[For information] neural_solution -h") - - -def query_cluster(db_path: str): - """Query cluster information from database. 
- - Args: - db_path (str): the database path - """ - conn = sqlite3.connect(f"{db_path}") - cursor = conn.cursor() - cursor.execute(r"select * from cluster") - conn.commit() - results = cursor.fetchall() - - table = PrettyTable() - table.field_names = [i[0] for i in cursor.description] - - for row in results: - table.add_row(row) - - table.title = "Neural Solution Cluster Management System" - logger.info(table) - cursor.close() - conn.close() - - -def create_node(line: str): - """Parse line to create node. - - Args: - line (str): node information, e.g. "localhost 2 20" - - Returns: - Node: node object - """ - from neural_solution.backend.cluster import Node - - hostname, num_sockets, num_cores_per_socket = line.strip().split(" ") - num_sockets, num_cores_per_socket = int(num_sockets), int(num_cores_per_socket) - node = Node(name=hostname, num_sockets=num_sockets, num_cores_per_socket=num_cores_per_socket) - return node - - -def join_node_to_cluster(db_path: str, args): - """Append new node into cluster. - - Args: - db_path (str): the database path - """ - is_file = os.path.isfile(args.join) - node_lst = [] - if is_file: - num_threads_per_process = 5 - with open(args.join, "r") as f: - for line in f: - node_lst.append(create_node(line)) - else: - for line in args.join.split(";"): - node_lst.append(create_node(line)) - - # Insert node into cluster table. - for count, node in enumerate(node_lst): - logger.info(node) - conn = sqlite3.connect(f"{db_path}") - cursor = conn.cursor() - if count == 0: - cursor.execute("SELECT id FROM cluster ORDER BY id DESC LIMIT 1") - result = cursor.fetchone() - index = result[0] if result else 0 - - cursor.execute( - r"insert into cluster(name, node_info, status, free_sockets, busy_sockets, total_sockets)" - + "values ('{}', '{}', '{}', {}, {}, {})".format( - node.name, repr(node).replace("Node", f"Node{index+1}"), "join", node.num_sockets, 0, node.num_sockets - ) - ) - conn.commit() - index += 1 - logger.info(f"Insert node-id: {index} successfully!") - - cursor.close() - conn.close() - - -def remove_node_from_cluster(db_path: str, node_id: int): - """Remove one node from cluster table. In the future, it will be deleted in the Cluster class. - - Args: - db_path (str): the database path - node_id (int): the node id - """ - conn = sqlite3.connect(f"{db_path}") - cursor = conn.cursor() - - cursor.execute(f"SELECT status, busy_sockets FROM cluster where id = {node_id}") - results = cursor.fetchone() - - if results is None: - logger.info(f"No node-id {node_id} in cluster table.") - return - elif results[1] == 0: - sql = f"UPDATE cluster SET status = 'remove' WHERE id = {node_id}" - cursor.execute(sql) - logger.info(f"Remove node-id {node_id} successfully.") - else: - sql = f"UPDATE cluster SET status = 'remove' WHERE id = {node_id}" - cursor.execute(sql) - logger.info("Resource occupied, will be removed after resource release") - conn.commit() - - cursor.close() - conn.close() - - -def manage_cluster(args): - """Neural Solution resource management. query/join/remove node. 
- - Args: - args (argparse.Namespace): configuration - """ - db_path = get_db_path(args.workspace) - if args.query: - query_cluster(db_path) - if args.join: - join_node_to_cluster(db_path, args) - if args.remove: - remove_node_from_cluster(db_path, node_id=args.remove) - - -def main(): - """Implement the main function.""" - parser = argparse.ArgumentParser(description="Neural Solution") - parser.add_argument("action", choices=["start", "stop", "cluster"], help="start/stop/management service") - parser.add_argument( - "--hostfile", default=None, help="start backend serve host file which contains all available nodes" - ) - parser.add_argument( - "--restful_api_port", type=int, default=8000, help="start restful serve with {restful_api_port}, default 8000" - ) - parser.add_argument( - "--grpc_api_port", type=int, default=8001, help="start gRPC with {restful_api_port}, default 8001" - ) - parser.add_argument( - "--result_monitor_port", - type=int, - default=3333, - help="start serve for result monitor at {result_monitor_port}, default 3333", - ) - parser.add_argument( - "--task_monitor_port", - type=int, - default=2222, - help="start serve for task monitor at {task_monitor_port}, default 2222", - ) - parser.add_argument("--api_type", default="all", help="start web serve with all/grpc/restful, default all") - parser.add_argument( - "--workspace", default="./ns_workspace", help='neural solution workspace, default "./ns_workspace"' - ) - parser.add_argument("--conda_env", default=None, help="specify the running environment for the task") - parser.add_argument("--upload_path", default="examples", help="specify the file path for the tasks") - parser.add_argument("--query", action="store_true", help="[cluster parameter] query cluster information") - parser.add_argument("--join", help="[cluster parameter] add new node into cluster") - parser.add_argument("--remove", help="[cluster parameter] remove from cluster") - args = parser.parse_args() - - # Check parameters ending in '_port' - check_ports(args) - - if args.action == "start": - start_service(args) - elif args.action == "stop": - stop_service() - elif args.action == "cluster": - manage_cluster(args) - - -if __name__ == "__main__": - main() diff --git a/neural_solution/requirements.txt b/neural_solution/requirements.txt deleted file mode 100644 index 975b78686e1..00000000000 --- a/neural_solution/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -fastapi -grpcio -mpi4py -neural_compressor>=2.2 -protobuf -pydantic -uvicorn[standard] -watchdog diff --git a/neural_solution/scripts/prepare_deps.py b/neural_solution/scripts/prepare_deps.py deleted file mode 100644 index da7a716d4a6..00000000000 --- a/neural_solution/scripts/prepare_deps.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
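Note: the cluster-management helpers removed above (`join_node_to_cluster`, `remove_node_from_cluster`) build their SQL statements through Python string formatting. Purely as an illustration, and not as code from this repository, the same INSERT could be expressed with sqlite3 parameter binding; the helper name `insert_node` below is made up for this sketch.

```python
import sqlite3


def insert_node(db_path, name, node_info, num_sockets):
    """Hypothetical sketch: add one node row using parameter binding."""
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    # "?" placeholders let sqlite3 quote the values instead of f-strings.
    cursor.execute(
        "INSERT INTO cluster (name, node_info, status, free_sockets, busy_sockets, total_sockets) "
        "VALUES (?, ?, ?, ?, ?, ?)",
        (name, node_info, "join", num_sockets, 0, num_sockets),
    )
    conn.commit()
    conn.close()
```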
-"""Prepare all dependencies.""" - -""" -CHECK LIST: - - MPI - - CONDA - - other packages, such as, mpi4py -""" diff --git a/neural_solution/test/backend/test_cluster.py b/neural_solution/test/backend/test_cluster.py deleted file mode 100644 index e6cc63f8abf..00000000000 --- a/neural_solution/test/backend/test_cluster.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Tests for cluster.""" - -import importlib -import os -import shutil -import unittest - -from neural_solution.backend.cluster import Cluster, Node -from neural_solution.backend.task import Task -from neural_solution.utils.utility import get_db_path, get_task_log_workspace, get_task_workspace - -NEURAL_SOLUTION_WORKSPACE = os.path.join(os.getcwd(), "ns_workspace") -db_path = get_db_path(NEURAL_SOLUTION_WORKSPACE) - - -class TestCluster(unittest.TestCase): - @classmethod - def setUp(self): - node_lst = [Node("node1", "localhost", 2, 4), Node("node2", "localhost", 2, 4)] - self.cluster = Cluster(node_lst, db_path=db_path) - - self.task = Task( - task_id="1", - arguments=["arg1", "arg2"], - workers=2, - status="pending", - script_url="https://example.com", - optimized=True, - approach="static", - requirement=["req1", "req2"], - result="", - q_model_path="q_model_path", - ) - - @classmethod - def tearDownClass(self): - shutil.rmtree("ns_workspace") - - def test_reserve_resource(self): - task = self.task - reserved_resource_lst = self.cluster.reserve_resource(task) - self.assertEqual(len(reserved_resource_lst), 2) - self.assertEqual(self.cluster.socket_queue, ["2 node2", "2 node2"]) - - def test_free_resource(self): - task = self.task - reserved_resource_lst = self.cluster.reserve_resource(task) - self.cluster.free_resource(reserved_resource_lst) - self.assertEqual(self.cluster.socket_queue, ["2 node2", "2 node2", "1 node1", "1 node1"]) - - def test_get_free_socket(self): - free_socket_lst = self.cluster.get_free_socket(4) - self.assertEqual(len(free_socket_lst), 4) - self.assertEqual(free_socket_lst, ["1 node1", "1 node1", "2 node2", "2 node2"]) - self.assertEqual(self.cluster.socket_queue, []) - - # Attempting to over allocate resources - free_socket_lst = self.cluster.get_free_socket(10) - self.assertEqual(free_socket_lst, 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/test_result_monitor.py b/neural_solution/test/backend/test_result_monitor.py deleted file mode 100644 index 1691826729f..00000000000 --- a/neural_solution/test/backend/test_result_monitor.py +++ /dev/null @@ -1,50 +0,0 @@ -import json -import threading -import unittest -from unittest.mock import MagicMock, patch - -from neural_solution.backend.result_monitor import ResultMonitor - - -class TestResultMonitor(unittest.TestCase): - @patch("socket.socket") - def test_wait_result(self, mock_socket): - # Mock data for testing - task_db = MagicMock() - task_db.lookup_task_status.return_value = "COMPLETED" - result = {"task_id": 1, "q_model_path": "path/to/q_model", "result": 0.8} - serialized_result = json.dumps(result) - - mock_c = MagicMock() - mock_c.recv.return_value = serialized_result - - mock_socket.return_value.accept.return_value = (mock_c, MagicMock()) - mock_socket.return_value.recv.return_value = serialized_result - mock_socket.return_value.__enter__.return_value = mock_socket.return_value - - # Create a ResultMonitor object and call the wait_result method - result_monitor = ResultMonitor(8080, task_db) - with patch("neural_solution.backend.result_monitor.deserialize", return_value={"ping": "test"}): - adding_abort = 
threading.Thread( - target=result_monitor.wait_result, - args=(), - daemon=True, - ) - adding_abort.start() - adding_abort.join(timeout=0.1) - - def test_query_task_status(self): - # Mock data for testing - task_db = MagicMock() - task_db.lookup_task_status.return_value = "done" - - # Create a ResultMonitor object and call the query_task_status method - result_monitor = ResultMonitor(8080, task_db) - result_monitor.query_task_status(1) - - # Assert that the task_db.lookup_task_status method was called with the correct argument - task_db.lookup_task_status.assert_called_once_with(1) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/test_runner.py b/neural_solution/test/backend/test_runner.py deleted file mode 100644 index c400f00c427..00000000000 --- a/neural_solution/test/backend/test_runner.py +++ /dev/null @@ -1,47 +0,0 @@ -import argparse -import os -import shutil -import threading -import unittest -from unittest.mock import patch - -from neural_solution.backend.runner import main, parse_args - - -class TestMain(unittest.TestCase): - @classmethod - def tearDownClass(cls) -> None: - os.remove("test.txt") - shutil.rmtree("ns_workspace", ignore_errors=True) - - def test_parse_args(self): - args = ["-H", "path/to/hostfile", "-TMP", "2222", "-RMP", "3333", "-CEN", "inc"] - with patch( - "argparse.ArgumentParser.parse_args", - return_value=argparse.Namespace( - hostfile="path/to/hostfile", task_monitor_port=2222, result_monitor_port=3333, conda_env_name="inc" - ), - ): - self.assertEqual( - parse_args(args), - argparse.Namespace( - hostfile="path/to/hostfile", task_monitor_port=2222, result_monitor_port=3333, conda_env_name="inc" - ), - ) - - def test_main(self): - """Test blocking flag in abort_job method.""" - path = "test.txt" - with open(path, "w") as f: - f.write("hostname1 2 20\nhostname2 2 20") - adding_abort = threading.Thread( - target=main, - kwargs={"args": ["-H", "test.txt", "-TMP", "2222", "-RMP", "3333", "-CEN", "inc_conda_env"]}, - daemon=True, - ) - adding_abort.start() - adding_abort.join(timeout=2) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/test_scheduler.py b/neural_solution/test/backend/test_scheduler.py deleted file mode 100644 index a84689d4658..00000000000 --- a/neural_solution/test/backend/test_scheduler.py +++ /dev/null @@ -1,345 +0,0 @@ -import os -import shutil -import threading -import unittest -from subprocess import CalledProcessError -from unittest.mock import MagicMock, Mock, mock_open, patch - -from neural_solution.backend.cluster import Cluster -from neural_solution.backend.scheduler import Scheduler -from neural_solution.backend.task import Task -from neural_solution.backend.task_db import TaskDB -from neural_solution.backend.utils.utility import dump_elapsed_time, get_task_log_path -from neural_solution.config import config -from neural_solution.utils.utility import get_db_path - -NEURAL_SOLUTION_WORKSPACE = os.path.join(os.getcwd(), "ns_workspace") -db_path = get_db_path(NEURAL_SOLUTION_WORKSPACE) -config.workspace = NEURAL_SOLUTION_WORKSPACE - - -class TestScheduler(unittest.TestCase): - def setUp(self): - self.cluster = Cluster(db_path=db_path) - self.task_db = TaskDB(db_path=db_path) - self.result_monitor_port = 1234 - self.scheduler = Scheduler( - self.cluster, self.task_db, self.result_monitor_port, conda_env_name="for_ns_test", config=config - ) - - def tearDown(self) -> None: - shutil.rmtree("ns_workspace", ignore_errors=True) - - @classmethod - def 
tearDownClass(cls) -> None: - shutil.rmtree("examples") - - def test_prepare_env(self): - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_script_url", - "test_optimized", - "test_approach", - "pip", - "test_result", - "test_q_model_path", - ) - result = self.scheduler.prepare_env(task) - self.assertTrue(result.startswith(self.scheduler.conda_env_name)) - - # Test requirement in {conda_env} case - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_script_url", - "test_optimized", - "test_approach", - "pip", - "test_result", - "test_q_model_path", - ) - scheduler_test = Scheduler( - self.cluster, self.task_db, self.result_monitor_port, conda_env_name="base", config=config - ) - result = scheduler_test.prepare_env(task) - self.assertTrue(result.startswith("base")) - - # Test requirement is '' case - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_script_url", - "test_optimized", - "test_approach", - "", - "test_result", - "test_q_model_path", - ) - result = self.scheduler.prepare_env(task) - self.assertEqual(result, self.scheduler.conda_env_name) - - def test_prepare_task(self): - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_example", - "test_optimized", - "static", - "test_requirement", - "test_result", - "test_q_model_path", - ) - test_path = "examples/test_example" - if not os.path.exists(test_path): - os.makedirs(test_path) - with open(os.path.join(test_path, "test.py"), "w") as f: - f.write("# For Test") - - self.scheduler.prepare_task(task) - - # url case - with patch("neural_solution.backend.scheduler.is_remote_url", return_value=True): - self.scheduler.prepare_task(task) - - # optimized is False case - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_example", - False, - "static", - "test_requirement", - "test_result", - "test_q_model_path", - ) - self.scheduler.prepare_task(task) - - with patch("neural_solution.backend.scheduler.is_remote_url", return_value=True): - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_example/test.py", - False, - "static", - "test_requirement", - "test_result", - "test_q_model_path", - ) - self.scheduler.prepare_task(task) - - def test_check_task_status(self): - log_path = "test_log_path" - # done case - with patch("builtins.open", mock_open(read_data="[INFO] Save deploy yaml to\n")) as mock_file: - result = self.scheduler.check_task_status(log_path) - self.assertEqual(result, "done") - - # failed case - with patch("builtins.open", mock_open(read_data="[INFO] Deploying...\n")) as mock_file: - result = self.scheduler.check_task_status(log_path) - self.assertEqual(result, "failed") - - def test_sanitize_arguments(self): - arguments = "test_arguments\xa0" - sanitized_arguments = self.scheduler.sanitize_arguments(arguments) - self.assertEqual(sanitized_arguments, "test_arguments ") - - def test_dispatch_task(self): - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_script_url", - "test_optimized", - "test_approach", - "test_requirement", - "test_result", - "test_q_model_path", - ) - resource = [("node1", "8"), ("node2", "8")] - with patch("neural_solution.backend.scheduler.Scheduler.launch_task") as mock_launch_task: - self.scheduler.dispatch_task(task, resource) - self.assertTrue(mock_launch_task.called) - - @patch("socket.socket") - @patch("builtins.open") - def 
test_report_result(self, mock_open, mock_socket): - task_id = "test_task" - log_path = "test_log_path" - task_runtime = 10 - self.scheduler.q_model_path = None - mock_socket.return_value.connect.return_value = None - mock_open.return_value.readlines.return_value = ["Tune 1 result is: (int8|fp32): 0.8 (int8|fp32): 0.9"] - expected_result = {"optimization time (seconds)": "10.00", "Accuracy": "0.8", "Duration (seconds)": "0.9"} - - self.scheduler.report_result(task_id, log_path, task_runtime) - - mock_open.assert_called_once_with(log_path) - mock_socket.assert_called_once_with() - mock_socket.return_value.connect.assert_called_once_with(("localhost", 1234)) - mock_socket.return_value.send.assert_called_once() - - @patch("neural_solution.backend.scheduler.Scheduler.prepare_task") - @patch("neural_solution.backend.scheduler.Scheduler.prepare_env") - @patch("neural_solution.backend.scheduler.Scheduler._parse_cmd") - @patch("subprocess.Popen") - @patch("neural_solution.backend.scheduler.Scheduler.check_task_status") - @patch("neural_solution.backend.scheduler.Scheduler.report_result") - def test_launch_task( - self, - mock_report_result, - mock_check_task_status, - mock_popen, - mock_parse_cmd, - mock_prepare_env, - mock_prepare_task, - ): - task = Task( - "test_task", - "test_arguments", - "test_workers", - "test_status", - "test_script_url", - "test_optimized", - "test_approach", - "test_requirement", - "test_result", - "test_q_model_path", - ) - resource = ["1 node1", "2 node2"] - mock_parse_cmd.return_value = "test_cmd" - mock_check_task_status.return_value = "done" - mock_popen.return_value.wait.return_value = None - mock_prepare_env.return_value = "test_env" - mock_prepare_task.return_value = None - mock_report_result.return_value = None - - self.scheduler.launch_task(task, resource) - - @patch("neural_solution.backend.scheduler.Scheduler.launch_task") - @patch("neural_solution.backend.cluster.Cluster.reserve_resource") - def test_schedule_tasks(self, mock_reserve_resource, mock_launch_task): - task1 = Task( - "1", - "test_arguments", - "test_workers", - "test_status", - "test_script_url", - "test_optimized", - "test_approach", - "test_requirement", - "test_result", - "test_q_model_path", - ) - self.task_db.cursor.execute( - "insert into task values ('1', 'test_arguments', 'test_workers', \ - 'test_status', 'test_script_url', \ - 'test_optimized', 'test_approach', 'test_requirement', 'test_result', 'test_q_model_path')" - ) - - # no pending task case - adding_abort = threading.Thread( - target=self.scheduler.schedule_tasks, - args=(), - daemon=True, - ) - adding_abort.start() - adding_abort.join(timeout=10) - - # task case - self.task_db.append_task(task1) - mock_reserve_resource.return_value = [("node1", 8)] - mock_launch_task.return_value = None - - adding_abort = threading.Thread( - target=self.scheduler.schedule_tasks, - args=(), - daemon=True, - ) - adding_abort.start() - adding_abort.join(timeout=10) - - # no resource case - self.task_db.append_task(task1) - mock_reserve_resource.return_value = False - adding_abort = threading.Thread( - target=self.scheduler.schedule_tasks, - args=(), - daemon=True, - ) - adding_abort.start() - adding_abort.join(timeout=10) - - -class TestParseCmd(unittest.TestCase): - def setUp(self): - self.cluster = Cluster(db_path=db_path) - self.task_db = TaskDB(db_path=db_path) - self.result_monitor_port = 1234 - self.task_scheduler = Scheduler( - self.cluster, self.task_db, self.result_monitor_port, conda_env_name="for_ns_test", config=config - ) - self.task = 
MagicMock() - self.resource = ["1 node1", "2 node2", "3 node3"] - self.task.workers = 3 - self.task_name = "test_task" - self.script_name = "test_script.py" - self.task_path = "/path/to/task" - self.arguments = "arg1 arg2" - self.task.arguments = self.arguments - self.task.name = self.task_name - self.task.optimized = True - self.task.script = self.script_name - self.task.task_path = self.task_path - self.task_scheduler.script_name = self.script_name - self.task_scheduler.task_path = self.task_path - - def test__parse_cmd(self): - expected_cmd = ( - "cd /path/to/task\nmpirun -np 3 -host node1,node2,node3 -map-by socket:pe=5" - + " -mca btl_tcp_if_include 192.168.20.0/24 -x OMP_NUM_THREADS=5 --report-bindings bash distributed_run.sh" - ) - with patch("neural_solution.backend.scheduler.Scheduler.prepare_task") as mock_prepare_task, patch( - "neural_solution.backend.scheduler.Scheduler.prepare_env" - ) as mock_prepare_env, patch("neural_solution.backend.scheduler.logger.info") as mock_logger_info, patch( - "builtins.open", create=True - ) as mock_open, patch( - "neural_solution.backend.scheduler.os.path.join" - ) as mock_os_path_join: - mock_prepare_task.return_value = None - mock_prepare_env.return_value = "test_env" - mock_logger_info.return_value = None - mock_open.return_value.__enter__ = lambda x: x - mock_open.return_value.__exit__ = MagicMock() - mock_os_path_join.return_value = "/path/to/task/distributed_run.sh" - - result = self.task_scheduler._parse_cmd(self.task, self.resource) - self.assertEqual(result, expected_cmd) - - mock_prepare_task.assert_called_once_with(self.task) - mock_prepare_env.assert_called_once_with(self.task) - mock_logger_info.assert_called_once_with("[TaskScheduler] host resource: node1,node2,node3") - mock_open.assert_called_once_with("/path/to/task/distributed_run.sh", "w", encoding="utf-8") - mock_os_path_join.assert_called_once_with("/path/to/task", "distributed_run.sh") - - self.task.optimized = False - result = self.task_scheduler._parse_cmd(self.task, self.resource) - self.assertEqual(result, expected_cmd) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/test_task.py b/neural_solution/test/backend/test_task.py deleted file mode 100644 index 43d3c649ec5..00000000000 --- a/neural_solution/test/backend/test_task.py +++ /dev/null @@ -1,26 +0,0 @@ -import unittest - -from neural_solution.backend.task import Task - - -class TestTask(unittest.TestCase): - def setUp(self): - self.task = Task( - "123", "python script.py", 4, "pending", "http://example.com/script.py", True, "approach", "requirement" - ) - - def test_task_attributes(self): - self.assertEqual(self.task.task_id, "123") - self.assertEqual(self.task.arguments, "python script.py") - self.assertEqual(self.task.workers, 4) - self.assertEqual(self.task.status, "pending") - self.assertEqual(self.task.script_url, "http://example.com/script.py") - self.assertEqual(self.task.optimized, True) - self.assertEqual(self.task.approach, "approach") - self.assertEqual(self.task.requirement, "requirement") - self.assertEqual(self.task.result, "") - self.assertEqual(self.task.q_model_path, "") - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/test_task_db.py b/neural_solution/test/backend/test_task_db.py deleted file mode 100644 index 38e9690a8e7..00000000000 --- a/neural_solution/test/backend/test_task_db.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import shutil -import unittest -from unittest.mock import MagicMock, patch - -from 
neural_solution.backend.task_db import Task, TaskDB -from neural_solution.utils.utility import get_db_path - -NEURAL_SOLUTION_WORKSPACE = os.path.join(os.getcwd(), "ns_workspace") -db_path = get_db_path(NEURAL_SOLUTION_WORKSPACE) - - -class TestTaskDB(unittest.TestCase): - def setUp(self): - self.taskdb = TaskDB(db_path=db_path) - self.task = Task( - "1", "arguments", 1, "pending", "script_url", 0, "approach", "requirement", "result", "q_model_path" - ) - - @classmethod - def tearDownClass(cls) -> None: - shutil.rmtree("ns_workspace") - - def test_append_task(self): - self.taskdb.append_task(self.task) - self.assertEqual(len(self.taskdb.task_queue), 1) - self.assertEqual(self.taskdb.task_queue[0], "1") - - def test_get_pending_task_num(self): - self.taskdb.append_task(self.task) - self.assertEqual(self.taskdb.get_pending_task_num(), 1) - - def test_get_all_pending_tasks(self): - self.taskdb.cursor.execute( - "insert into task values ('2', 'arguments', 1, \ - 'pending', 'script_url', 0, 'approach', 'requirement', 'result', 'q_model_path')" - ) - self.taskdb.conn.commit() - pending_tasks = self.taskdb.get_all_pending_tasks() - self.assertEqual(len(pending_tasks), 1) - self.assertEqual(pending_tasks[0].task_id, "2") - - def test_update_task_status(self): - self.taskdb.cursor.execute( - "insert into task values ('3', 'arguments', 1, \ - 'pending', 'script_url', 0, 'approach', 'requirement', 'result', 'q_model_path')" - ) - self.taskdb.conn.commit() - self.taskdb.update_task_status("3", "running") - self.taskdb.cursor.execute("select status from task where id='3'") - status = self.taskdb.cursor.fetchone()[0] - self.assertEqual(status, "running") - with self.assertRaises(Exception): - self.taskdb.update_task_status("3", "invalid_status") - - def test_update_result(self): - self.taskdb.cursor.execute( - "insert into task values ('4', 'arguments', 1, \ - 'pending', 'script_url', 0, 'approach', 'requirement', 'result', 'q_model_path')" - ) - self.taskdb.conn.commit() - self.taskdb.update_result("4", "new_result") - self.taskdb.cursor.execute("select result from task where id='4'") - result = self.taskdb.cursor.fetchone()[0] - self.assertEqual(result, "new_result") - - def test_update_q_model_path_and_result(self): - self.taskdb.cursor.execute( - "insert into task values ('5', 'arguments', 1, \ - 'pending', 'script_url', 0, 'approach', 'requirement', 'result', 'q_model_path')" - ) - self.taskdb.conn.commit() - self.taskdb.update_q_model_path_and_result("5", "new_q_model_path", "new_result") - self.taskdb.cursor.execute("select q_model_path, result from task where id='5'") - q_model_path, result = self.taskdb.cursor.fetchone() - self.assertEqual(q_model_path, "new_q_model_path") - self.assertEqual(result, "new_result") - - def test_lookup_task_status(self): - self.taskdb.cursor.execute( - "insert into task values ('6', 'arguments', 1, \ - 'pending', 'script_url', 0, 'approach', 'requirement', 'result', 'q_model_path')" - ) - self.taskdb.conn.commit() - status_dict = self.taskdb.lookup_task_status("6") - self.assertEqual(status_dict["status"], "pending") - self.assertEqual(status_dict["result"], "result") - - def test_get_task_by_id(self): - self.taskdb.cursor.execute( - "insert into task values ('7', 'arguments', 1, \ - 'pending', 'script_url', 0, 'approach', 'requirement', 'result', 'q_model_path')" - ) - self.taskdb.conn.commit() - task = self.taskdb.get_task_by_id("7") - self.assertEqual(task.task_id, "7") - self.assertEqual(task.arguments, "arguments") - self.assertEqual(task.workers, 1) - 
self.assertEqual(task.status, "pending") - self.assertEqual(task.result, "result") - - def test_remove_task(self): - self.taskdb.remove_task("1") - # currently no garbage collection, so this function does nothing - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/test_task_monitor.py b/neural_solution/test/backend/test_task_monitor.py deleted file mode 100644 index 1e4bb069726..00000000000 --- a/neural_solution/test/backend/test_task_monitor.py +++ /dev/null @@ -1,114 +0,0 @@ -import threading -import unittest -from unittest.mock import MagicMock, Mock, patch - -from neural_solution.backend.task import Task -from neural_solution.backend.task_monitor import TaskMonitor - - -class TestTaskMonitor(unittest.TestCase): - def setUp(self): - self.mock_task_db = Mock() - self.mock_socket = Mock() - self.task_monitor = TaskMonitor(8888, self.mock_task_db) - self.task_monitor.s = self.mock_socket - - def test__start_listening(self): - mock_bind = MagicMock() - mock_listen = MagicMock() - with patch("socket.socket") as mock_socket: - mock_socket.return_value.bind = mock_bind - mock_socket.return_value.listen = mock_listen - self.task_monitor._start_listening("localhost", 8888, 10) - - def test_receive_task(self): - self.mock_socket.accept.return_value = ( - Mock(), - b'{"task_id": 123, "arguments": {}, "workers": 1, \ - "status": "pending", "script_url": "http://example.com", "optimized": True, \ - "approach": "static", "requirement": "neural_solution", "result": "", "q_model_path": ""}', - ) - self.mock_task_db.get_task_by_id.return_value = Task( - task_id=123, - arguments={}, - workers=1, - status="pending", - script_url="http://example.com", - optimized=True, - approach="static", - requirement="neural_solution", - result="", - q_model_path="", - ) - - # Test normal task case - with patch( - "neural_solution.backend.task_monitor.deserialize", - return_value={ - "task_id": 123, - "arguments": {}, - "workers": 1, - "status": "pending", - "script_url": "http://example.com", - "optimized": True, - "approach": "static", - "requirement": "neural_solution", - "result": "", - "q_model_path": "", - }, - ): - task = self.task_monitor._receive_task() - self.assertEqual(task.task_id, 123) - self.mock_task_db.get_task_by_id.assert_called_once_with(123) - - # Test ping case - with patch("neural_solution.backend.task_monitor.deserialize", return_value={"ping": "test"}): - response = self.task_monitor._receive_task() - self.assertEqual(response, False) - self.mock_task_db.get_task_by_id.assert_called_once_with(123) - - def test_append_task(self): - task = Task( - task_id=123, - arguments={}, - workers=1, - status="pending", - script_url="http://example.com", - optimized=True, - approach="static", - requirement="neural_solution", - result="", - q_model_path="", - ) - self.task_monitor._append_task(task) - self.mock_task_db.append_task.assert_called_once_with(task) - - def test_wait_new_task(self): - # Set up mock objects - mock_logger = MagicMock() - mock_task = MagicMock() - mock_receive_task = MagicMock(return_value=mock_task) - mock_append_task = MagicMock() - self.task_monitor._receive_task = mock_receive_task - self.task_monitor._append_task = mock_append_task - self.task_monitor._start_listening = MagicMock() - - # Call the function to be tested - adding_abort = threading.Thread( - target=self.task_monitor.wait_new_task, - args=(), - daemon=True, - ) - adding_abort.start() - adding_abort.join(timeout=1) - - # Test task is False - mock_receive_task = 
MagicMock(return_value=False) - mock_append_task = MagicMock() - self.task_monitor._receive_task = mock_receive_task - - adding_abort.join(timeout=1) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/backend/utils/test_utility.py b/neural_solution/test/backend/utils/test_utility.py deleted file mode 100644 index dd02878ee93..00000000000 --- a/neural_solution/test/backend/utils/test_utility.py +++ /dev/null @@ -1,158 +0,0 @@ -import os -import shutil -import unittest -from unittest.mock import MagicMock, mock_open, patch - -from neural_solution.backend.utils.utility import ( - build_cluster, - build_local_cluster, - build_workspace, - create_dir, - deserialize, - dump_elapsed_time, - get_current_time, - get_q_model_path, - get_task_log_path, - is_remote_url, - serialize, - synchronized, -) -from neural_solution.config import config -from neural_solution.utils.utility import get_db_path, get_task_log_workspace, get_task_workspace - -NEURAL_SOLUTION_WORKSPACE = os.path.join(os.getcwd(), "ns_workspace") -DB_PATH = NEURAL_SOLUTION_WORKSPACE + "/db" -TASK_WORKSPACE = NEURAL_SOLUTION_WORKSPACE + "/task_workspace" -TASK_LOG_path = NEURAL_SOLUTION_WORKSPACE + "/task_log" -SERVE_LOG_PATH = NEURAL_SOLUTION_WORKSPACE + "/serve_log" - -config.workspace = NEURAL_SOLUTION_WORKSPACE -db_path = get_db_path(config.workspace) - - -class TestUtils(unittest.TestCase): - @classmethod - def tearDown(self) -> None: - if os.path.exists("ns_workspace"): - shutil.rmtree("ns_workspace") - - def test_serialize(self): - input_dict = {"key1": "value1", "key2": "value2"} - expected_output = b'{"key1": "value1", "key2": "value2"}' - self.assertEqual(serialize(input_dict), expected_output) - - def test_deserialize(self): - input_bytes = b'{"key1": "value1", "key2": "value2"}' - expected_output = {"key1": "value1", "key2": "value2"} - self.assertEqual(deserialize(input_bytes), expected_output) - - def test_dump_elapsed_time(self): - @dump_elapsed_time("test function") - def test_function(): - return True - - with patch("neural_solution.utils.logger") as mock_logger: - test_function() - - def test_get_task_log_path(self): - task_id = 123 - expected_output = f"{TASK_LOG_path}/task_{task_id}.txt" - self.assertEqual( - get_task_log_path(log_path=get_task_log_workspace(config.workspace), task_id=task_id), expected_output - ) - - def test_build_local_cluster(self): - with patch("neural_solution.backend.cluster.Node") as mock_node, patch( - "neural_solution.backend.cluster.Cluster" - ) as mock_cluster: - mock_node_obj = MagicMock() - mock_node.return_value = mock_node_obj - mock_node_obj.name = "localhost" - mock_node_obj.num_sockets = 2 - mock_node_obj.num_cores_per_socket = 5 - build_local_cluster(db_path=db_path) - mock_node.assert_called_with(name="localhost", num_sockets=2, num_cores_per_socket=5) - mock_cluster.assert_called_once() - - def test_build_cluster(self): - # Test 2 hostname - path = "test.txt" - with open(path, "w") as f: - f.write("hostname1 2 20\nhostname2 2 20") - cluster, _ = build_cluster(path, db_path=db_path) - self.assertIsNotNone(cluster) - - os.remove("test.txt") - - file_path = "test_host_file" - with patch("neural_solution.backend.cluster.Node") as mock_node, patch( - "neural_solution.backend.cluster.Cluster" - ) as mock_cluster, patch("builtins.open") as mock_open, patch("os.path.exists") as mock_exists: - # Test None - cluster, _ = build_cluster(file_path=None, db_path=db_path) - mock_cluster.assert_called() - - mock_exists.return_value = True - 
build_cluster(file_path, db_path=db_path) - - # test_build_cluster_file_not_exist - file_path = "test_file" - with patch("neural_solution.backend.cluster.Node"), patch("neural_solution.backend.cluster.Cluster"), patch( - "builtins.open" - ), patch("os.path.exists") as mock_exists, patch("neural_solution.utils.logger") as mock_logger: - mock_exists.return_value = False - self.assertRaises(Exception, build_cluster, file_path) - mock_logger.reset_mock() - - def test_get_current_time(self): - self.assertIsNotNone(get_current_time()) - - def test_synchronized(self): - class TestClass: - def __init__(self): - self.lock = MagicMock() - - @synchronized - def test_function(self): - return True - - test_class = TestClass() - with patch.object(test_class, "lock"): - test_class.test_function() - - def test_build_workspace(self): - task_id = 123 - expected_output = os.path.abspath(f"{TASK_WORKSPACE}/{task_id}") - self.assertEqual(build_workspace(path=get_task_workspace(config.workspace), task_id=task_id), expected_output) - - def test_is_remote_url(self): - self.assertTrue(is_remote_url("http://test.com")) - self.assertTrue(is_remote_url("https://test.com")) - self.assertFalse(is_remote_url("test.txt")) - - def test_create_dir(self): - path = "test/path/test.txt" - create_dir(path) - self.assertTrue(os.path.exists(os.path.dirname(path))) - - @patch("builtins.open", mock_open(read_data="Save quantized model to /path/to/model.")) - def test_get_q_model_path_success(self): - log_path = "fake_log_path" - q_model_path = get_q_model_path(log_path, "task_id") - self.assertEqual(q_model_path, "/path/to/model") - - @patch("builtins.open", mock_open(read_data="Save quantized model to /path/to/task_workspace/task_id/model/1.pb.")) - def test_get_q_model_path_success_task_id(self): - log_path = "fake_log_path" - q_model_path = get_q_model_path(log_path, "task_id") - self.assertEqual(q_model_path, "/path/to/task_workspace/task_id/model") - - @patch("builtins.open", mock_open(read_data="No quantized model saved.")) - def test_get_q_model_path_failure(self): - log_path = "fake_log_path" - q_model_path = get_q_model_path(log_path, "task_id") - self.assertEqual(q_model_path, "quantized model path not found") - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/frontend/fastapi/test_main_server.py b/neural_solution/test/frontend/fastapi/test_main_server.py deleted file mode 100644 index 11ab1179847..00000000000 --- a/neural_solution/test/frontend/fastapi/test_main_server.py +++ /dev/null @@ -1,310 +0,0 @@ -import asyncio -import os -import shutil -import sqlite3 -import unittest -from unittest.mock import MagicMock, Mock, patch - -from fastapi import WebSocket -from fastapi.testclient import TestClient - -from neural_solution.config import config -from neural_solution.frontend.fastapi.main_server import LogEventHandler, Observer, app, start_log_watcher - -NEURAL_SOLUTION_WORKSPACE = os.path.join(os.getcwd(), "ns_workspace") -DB_PATH = NEURAL_SOLUTION_WORKSPACE + "/db" -TASK_WORKSPACE = NEURAL_SOLUTION_WORKSPACE + "/task_workspace" -TASK_LOG_path = NEURAL_SOLUTION_WORKSPACE + "/task_log" -SERVE_LOG_PATH = NEURAL_SOLUTION_WORKSPACE + "/serve_log" - -client = TestClient(app) - - -def build_db(): - if not os.path.exists(DB_PATH): - os.makedirs(DB_PATH) - conn = sqlite3.connect( - f"{DB_PATH}/task.db", check_same_thread=False - ) # sqlite should set this check_same_thread to False - cursor = conn.cursor() - cursor.execute( - "create table if not exists task(id TEXT PRIMARY KEY, arguments 
varchar(100), " - + "workers int, status varchar(20), script_url varchar(500), optimized integer, " - + "approach varchar(20), requirements varchar(500), result varchar(500), q_model_path varchar(200))" - ) - cursor.execute("drop table if exists cluster ") - cursor.execute( - r"create table cluster(id INTEGER PRIMARY KEY AUTOINCREMENT," - + "node_info varchar(500)," - + "status varchar(100)," - + "free_sockets int," - + "busy_sockets int," - + "total_sockets int)" - ) - - conn.commit() - conn.close - - -def delete_db(): - if os.path.exists(DB_PATH): - shutil.rmtree(DB_PATH) - - -def use_db(): - def f(func): - def fi(*args, **kwargs): - build_db() - res = func(*args, **kwargs) - delete_db() - - return fi - - return f - - -class TestMain(unittest.TestCase): - @classmethod - def setUpClass(self): - if not os.path.exists(TASK_LOG_path): - os.makedirs(TASK_LOG_path) - - @classmethod - def tearDownClass(self): - shutil.rmtree(NEURAL_SOLUTION_WORKSPACE, ignore_errors=True) - delete_db() - - def test_read_root(self): - response = client.get("/") - assert response.status_code == 200 - self.assertEqual(response.json(), {"message": "Welcome to Neural Solution!"}) - - @patch("neural_solution.frontend.fastapi.main_server.socket") - def test_ping(self, mock_socket): - response = client.get("/ping") - self.assertEqual(response.status_code, 200) - self.assertIn("status", response.json()) - self.assertIn("msg", response.json()) - - @use_db() - def test_get_cluster(self): - response = client.get("/cluster") - assert response.status_code == 200 - assert "Cluster info" in response.json() - - @use_db() - def test_get_clusters(self): - response = client.get("/clusters") - assert response.status_code == 200 - assert "table" in response.text - - def test_get_description(self): - data = { - "description": "", - } - path = "../../doc" - if not os.path.exists(path): - os.makedirs(path) - with open(os.path.join(path, "user_facing_api.json"), "w") as f: - import json - - json.dump(data, f) - response = client.get("/description") - assert response.status_code == 200 - assert "description" in response.text - shutil.rmtree(path) - - @patch("neural_solution.frontend.fastapi.main_server.task_submitter.submit_task") - def test_submit_task(self, mock_submit_task): - task = { - "script_url": "http://example.com/script.py", - "optimized": "True", - "arguments": ["arg1", "arg2"], - "approach": "static", - "requirements": ["req1", "req2"], - "workers": 3, - } - - # test invalid task - task_invalid = { - "script_url": "http://example.com/script.py", - "optimized": "True", - "arguments": "invalid str, should be list", - "approach": "static", - "requirements": ["req1", "req2"], - "workers": 3, - } - response = client.post("/task/submit/", json=task_invalid) - print(response) - self.assertEqual(response.status_code, 422) - self.assertIn("arguments", response.text) - - # test no db case - delete_db() - response = client.post("/task/submit/", json=task) - self.assertEqual(response.status_code, 200) - self.assertIn("msg", response.json()) - self.assertIn("Task Submitted fail! 
db not found!", response.json()["msg"]) - mock_submit_task.assert_not_called() - - # test successfully - build_db() - response = client.post("/task/submit/", json=task) - self.assertEqual(response.status_code, 200) - self.assertIn("status", response.json()) - self.assertIn("task_id", response.json()) - self.assertIn("msg", response.json()) - self.assertIn("successfully", response.json()["status"]) - mock_submit_task.assert_called_once() - - # test ConnectionRefusedError case - mock_submit_task.side_effect = ConnectionRefusedError - response = client.post("/task/submit/", json=task) - self.assertEqual(response.status_code, 200) - self.assertIn("status", response.json()) - self.assertIn("task_id", response.json()) - self.assertIn("msg", response.json()) - self.assertEqual(response.json()["status"], "failed") - self.assertIn("Task Submitted fail! Make sure Neural Solution runner is running!", response.json()["msg"]) - mock_submit_task.assert_called() - - # test generic Exception case - mock_submit_task.side_effect = Exception("Something went wrong") - response = client.post("/task/submit/", json=task) - self.assertEqual(response.status_code, 200) - self.assertIn("status", response.json()) - self.assertIn("task_id", response.json()) - self.assertIn("msg", response.json()) - self.assertEqual(response.json()["status"], "failed") - self.assertIn("Something went wrong", response.json()["msg"]) - mock_submit_task.assert_called() - - delete_db() - - @use_db() - @patch("neural_solution.frontend.fastapi.main_server.task_submitter.submit_task") - def test_get_task_by_id(self, mock_submit_task): - task = { - "script_url": "http://example.com/script.py", - "optimized": True, - "arguments": ["arg1", "arg2"], - "approach": "static", - "requirements": ["req1", "req2"], - "workers": 3, - } - response = client.post("/task/submit/", json=task) - task_id = response.json()["task_id"] - response = client.get(f"/task/{task_id}") - assert response.status_code == 200 - assert response.json()["status"] == "pending" - - @use_db() - def test_get_all_tasks(self): - response = client.get("/task/") - assert response.status_code == 200 - delete_db() - response = client.get("/task/") - assert response.status_code == 200 - assert response.json()["message"] is None - - @use_db() - @patch("neural_solution.frontend.fastapi.main_server.task_submitter.submit_task") - def test_get_task_status_by_id(self, mock_submit_task): - task = { - "script_url": "http://example.com/script.py", - "optimized": True, - "arguments": ["arg1", "arg2"], - "approach": "static", - "requirements": ["req1", "req2"], - "workers": 3, - } - response = client.post("/task/submit/", json=task) - task_id = response.json()["task_id"] - response = client.get(f"/task/status/{task_id}") - assert response.status_code == 200 - self.assertIn("pending", response.text) - - response = client.get("/task/status/error_id") - assert response.status_code == 200 - self.assertIn("Please check url", response.text) - - def test_read_logs(self): - task_id = "12345" - log_path = f"{TASK_LOG_path}/task_{task_id}.txt" - with open(log_path, "w") as f: - f.write(f"I am {task_id}.") - response = client.get(f"/task/log/{task_id}") - assert response.status_code == 200 - self.assertIn(task_id, response.text) - os.remove(log_path) - - -class TestLogEventHandler(unittest.TestCase): - def setUp(self): - self.loop = asyncio.get_event_loop() - - def test_init(self): - mock_websocket = MagicMock() - mock_websocket.send_text = MagicMock() - handler = LogEventHandler(mock_websocket, 
"test_task_id", 0) - self.assertEqual(handler.websocket, mock_websocket) - self.assertEqual(handler.task_id, "test_task_id") - self.assertEqual(handler.last_position, 0) - self.assertIsInstance(handler.queue, asyncio.Queue) - self.assertIsInstance(handler.timer, asyncio.Task) - - def test_on_modified(self): - config.workspace = NEURAL_SOLUTION_WORKSPACE - mock_websocket = MagicMock() - mock_websocket.send_text = MagicMock() - task_id = "1234" - log_path = f"{TASK_LOG_path}/task_{task_id}.txt" - if not os.path.exists(TASK_LOG_path): - os.makedirs(TASK_LOG_path) - - with open(log_path, "w") as f: - f.write(f"I am {task_id}.") - - handler = LogEventHandler(mock_websocket, "1234", 0) - - handler.queue.put_nowait("test message") - event = MagicMock() - task_id = "1234" - log_path = f"{TASK_LOG_path}/task_{task_id}.txt" - event.src_path = log_path - with patch("builtins.open", MagicMock()) as mock_file: - mock_file.return_value.__enter__.return_value.seek.return_value = None - mock_file.return_value.__enter__.return_value.readlines.return_value = ["test line"] - handler.on_modified(event) - mock_file.assert_called_once_with(log_path, "r") - mock_file.return_value.__enter__.return_value.seek.assert_called_once_with(0) - mock_file.return_value.__enter__.return_value.readlines.assert_called_once() - # handler.queue.put_nowait.assert_called_once_with("test line") - os.remove(log_path) - - -class TestStartLogWatcher(unittest.TestCase): - def setUp(self): - self.loop = asyncio.get_event_loop() - - def test_start_log_watcher(self): - mock_observer = MagicMock() - mock_observer.schedule = MagicMock() - with patch("neural_solution.frontend.fastapi.main_server.Observer", MagicMock(return_value=mock_observer)): - observer = start_log_watcher("test_websocket", "1234", 0) - self.assertIsInstance(observer, type(mock_observer)) - - -class TestWebsocketEndpoint(unittest.TestCase): - def setUp(self): - self.loop = asyncio.get_event_loop() - self.client = TestClient(app) - - def test_websocket_endpoint(self): - pass - # with self.assertRaises(HTTPException): - # asyncio.run(websocket_endpoint(WebSocket, "nonexistent_task")) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/frontend/fastapi/test_task_submitter.py b/neural_solution/test/frontend/fastapi/test_task_submitter.py deleted file mode 100644 index c08c2cd605e..00000000000 --- a/neural_solution/test/frontend/fastapi/test_task_submitter.py +++ /dev/null @@ -1,46 +0,0 @@ -import socket -import unittest -from unittest.mock import patch - -from neural_solution.frontend.task_submitter import Task, TaskSubmitter - - -class TestTask(unittest.TestCase): - def test_task_creation(self): - script_url = "https://example.com" - optimized = True - arguments = ["arg1", "arg2"] - approach = "approach" - requirements = ["req1", "req2"] - workers = 2 - - task = Task( - script_url=script_url, - optimized=optimized, - arguments=arguments, - approach=approach, - requirements=requirements, - workers=workers, - ) - - self.assertEqual(task.script_url, script_url) - self.assertEqual(task.optimized, optimized) - self.assertEqual(task.arguments, arguments) - self.assertEqual(task.approach, approach) - self.assertEqual(task.requirements, requirements) - self.assertEqual(task.workers, workers) - - -class TestTaskSubmitter(unittest.TestCase): - @patch("socket.socket") - def test_submit_task(self, mock_socket): - task_submitter = TaskSubmitter() - task_id = "1234" - task_submitter.submit_task(task_id) - 
mock_socket.return_value.connect.assert_called_once_with(("localhost", 2222)) - mock_socket.return_value.send.assert_called_once_with(b'{"task_id": "1234"}') - mock_socket.return_value.close.assert_called_once() - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/frontend/fastapi/test_utils.py b/neural_solution/test/frontend/fastapi/test_utils.py deleted file mode 100644 index 42a5f42568f..00000000000 --- a/neural_solution/test/frontend/fastapi/test_utils.py +++ /dev/null @@ -1,212 +0,0 @@ -import os -import shutil -import unittest -from unittest.mock import mock_open, patch - -from neural_solution.frontend.utility import ( - check_log_exists, - deserialize, - get_baseline_during_tuning, - get_cluster_info, - get_cluster_table, - get_res_during_tuning, - is_valid_task, - list_to_string, - serialize, -) - -NEURAL_SOLUTION_WORKSPACE = os.path.join(os.getcwd(), "ns_workspace") -DB_PATH = NEURAL_SOLUTION_WORKSPACE + "/db/task.db" -TASK_WORKSPACE = NEURAL_SOLUTION_WORKSPACE + "/task_workspace" -TASK_LOG_path = NEURAL_SOLUTION_WORKSPACE + "/task_log" -SERVE_LOG_PATH = NEURAL_SOLUTION_WORKSPACE + "/serve_log" - - -class TestMyModule(unittest.TestCase): - @classmethod - def setUpClass(self): - if not os.path.exists(TASK_LOG_path): - os.makedirs(TASK_LOG_path) - - @classmethod - def tearDownClass(self): - shutil.rmtree(NEURAL_SOLUTION_WORKSPACE, ignore_errors=True) - - def test_serialize(self): - request = {"key": "value"} - expected_result = b'{"key": "value"}' - self.assertEqual(serialize(request), expected_result) - - def test_deserialize(self): - request = b'{"key": "value"}' - expected_result = {"key": "value"} - self.assertEqual(deserialize(request), expected_result) - - @patch("sqlite3.connect") - def test_get_cluster_info(self, mock_connect): - mock_cursor = mock_connect().cursor.return_value - mock_cursor.fetchall.return_value = [(1, "node info", "status", 1, 2, 3)] - expected_result = {"Cluster info": [(1, "node info", "status", 1, 2, 3)]} - self.assertEqual(get_cluster_info(TASK_LOG_path), expected_result) - - @patch("sqlite3.connect") - def test_get_cluster_table(self, mock_connect): - mock_cursor = mock_connect().cursor.return_value - mock_cursor.fetchall.return_value = [(1, "node info", "status", 1, 2, 3)] - expected_result = ( - '\n' - " \n" - ' \n' - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - " \n" - "
<th>Node</th> <th>Node info</th> <th>status</th> <th>free workers</th> <th>busy workers</th> <th>total workers</th>
<td>1</td> <td>node info</td> <td>status</td> <td>1</td> <td>2</td> <td>3</td>
" - ) - self.assertEqual(get_cluster_table(TASK_LOG_path), expected_result) - - def test_get_res_during_tuning(self): - task_id = "12345" - log_path = f"{TASK_LOG_path}/task_{task_id}.txt" - with open(log_path, "w") as f: - f.write("Tune 1 result is: (int8|fp32): 0.123 (int8|fp32): 0.456") - expected_result = {"Tuning count": "1", "Accuracy": "0.123", "Duration (seconds)": "0.456"} - self.assertEqual(get_res_during_tuning(task_id, TASK_LOG_path), expected_result) - os.remove(log_path) - - def test_get_baseline_during_tuning(self): - task_id = "12345" - log_path = f"{TASK_LOG_path}/task_{task_id}.txt" - with open(log_path, "w") as f: - f.write("FP32 baseline is: 0.123 0.456") - expected_result = {"Accuracy": "0.123", "Duration (seconds)": "0.456"} - self.assertEqual(get_baseline_during_tuning(task_id, TASK_LOG_path), expected_result) - os.remove(log_path) - - def test_check_log_exists(self): - task_id = "12345" - log_path = f"{TASK_LOG_path}/task_{task_id}.txt" - with patch("os.path.exists") as mock_exists: - mock_exists.return_value = True - self.assertTrue(check_log_exists(task_id, TASK_LOG_path)) - mock_exists.return_value = False - self.assertFalse(check_log_exists(task_id, TASK_LOG_path)) - - def test_list_to_string(self): - lst = ["Hello", "Neural", "Solution"] - expected_result = "Hello Neural Solution" - self.assertEqual(list_to_string(lst), expected_result) - - def test_is_valid_task(self): - task_sql_injection = { - "script_url": "https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py", - "optimized": "False", - "arguments": [], - "approach": "5', '6', 7, 'pending'), ('1b9ff5c2fd2143d58522bd71d18845a3', '2', 3, '4', '5', '6', 7, 'pending') ON CONFLICT (id) DO UPDATE SET id = '1b9ff5c2fd2143d58522bd71d18845a3', q_model_path = '/home/victim/.ssh' --", - "requirements": [], - "workers": 1, - } - self.assertFalse(is_valid_task(task_sql_injection)) - - task_cmd_injection = { - "script_url": 'https://github.com/huggingface/transformers/blob/v4.21-release/examples/pytorch/text-classification/run_glue.py & eval "$(echo ZWNobyAiRG9tYWluIGV4cGFuc2lvbiIgPiB+L2F0dGFjay5weSI= | base64 --decode)"', - "optimized": "False", - "arguments": ["--model_name_or_path bert-base-cased --task_name mrpc --do_eval --output_dir result"], - "approach": "static", - "requirements": [], - "workers": 1, - } - self.assertFalse(is_valid_task(task_cmd_injection)) - - task_lack_field = { - "optimized": "True", - } - self.assertFalse(is_valid_task(task_lack_field)) - - task_script_url_not_str = { - "script_url": ["custom_models_optimized/tf_example1"], - "optimized": "True", - "arguments": ["--dataset_location=dataset --model_path=model"], - "approach": "static", - "requirements": ["tensorflow"], - "workers": 1, - } - self.assertFalse(is_valid_task(task_script_url_not_str)) - - task_optimized_not_bool_str = { - "script_url": ["custom_models_optimized/tf_example1"], - "optimized": "True or False", - "arguments": ["--dataset_location=dataset", "--model_path=model"], - "approach": "static", - "requirements": ["tensorflow"], - "workers": 1, - } - self.assertFalse(is_valid_task(task_optimized_not_bool_str)) - - task_arguments_not_list = { - "script_url": ["custom_models_optimized/tf_example1"], - "optimized": "True", - "arguments": 123, - "approach": "static", - "requirements": ["tensorflow"], - "workers": 1, - } - self.assertFalse(is_valid_task(task_arguments_not_list)) - - task_arguments_invalid = { - "script_url": ["custom_models_optimized/tf_example1"], - 
"optimized": "True", - "arguments": ["--dataset_location=dataset --model_path=model"], - "approach": "static", - "requirements": ["tensorflow"], - "workers": 1, - } - self.assertFalse(is_valid_task(task_arguments_not_list)) - - task_approach_is_invalid = { - "script_url": ["custom_models_optimized/tf_example1"], - "optimized": "True", - "arguments": [], - "approach": "static or dynamic", - "requirements": ["tensorflow"], - "workers": 1, - } - self.assertFalse(is_valid_task(task_approach_is_invalid)) - - task_requirements_not_list = { - "script_url": ["custom_models_optimized/tf_example1"], - "optimized": "True", - "arguments": [], - "approach": "static", - "requirements": "tensorflow", - "workers": 1, - } - self.assertFalse(is_valid_task(task_requirements_not_list)) - - task_normal = { - "script_url": "custom_models_optimized/tf_example1", - "optimized": "True", - "arguments": ["--dataset_location=dataset", "--model_path=model"], - "approach": "static", - "requirements": ["tensorflow"], - "workers": 1, - } - self.assertTrue(is_valid_task(task_normal)) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/test/requirements.txt b/neural_solution/test/requirements.txt deleted file mode 100644 index f7621d172eb..00000000000 --- a/neural_solution/test/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -httpx diff --git a/neural_solution/test/test_logger.py b/neural_solution/test/test_logger.py deleted file mode 100644 index fd0453069f4..00000000000 --- a/neural_solution/test/test_logger.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Tests for logging utilities.""" - -import unittest - -from neural_solution.utils import logger - - -class TestLogger(unittest.TestCase): - def test_logger(self): - logger.log(0, "call logger log function.") - logger.log(1, {"msg": "call logger log function."}) - logger.debug("call logger debug function.") - logger.debug({"msg": "call logger debug function."}) - logger.error("call logger error function.") - logger.error({"msg": "call logger error function."}) - logger.fatal("call logger fatal function") - logger.fatal({"msg": "call logger fatal function"}) - logger.info("call logger info function") - logger.info({"msg": "call logger info function."}) - logger.warn("call logger warn function") - logger.warn({"msg": "call logger warn function"}) - logger.warning("call logger warning function") - logger.warning({"msg": "call logger warning function"}) - logger.warning(["call logger warning function", "done"]) - logger.warning(("call logger warning function", "done")) - logger.warning({"msg": {("bert", "embedding"): {"weight": {"dtype": ["unint8", "int8"]}}}}) - logger.warning({"msg": {("bert", "embedding"): {"op": ("a", "b")}}}) - # the following log will not be prettified - logger.warning([{"msg": "call logger warning function"}, {"msg2": "done"}]) - logger.warning(({"msg": "call logger warning function"}, {"msg2": "done"})) - logger.warning(({"msg": [{"sub_msg": "call logger"}, {"sub_msg2": "call warning function"}]}, {"msg2": "done"})) - - -if __name__ == "__main__": - unittest.main() diff --git a/neural_solution/utils/__init__.py b/neural_solution/utils/__init__.py deleted file mode 100644 index 1e1ab4fbdf4..00000000000 --- a/neural_solution/utils/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""All common functions for both backend and frontend.""" diff --git a/neural_solution/utils/logger.py b/neural_solution/utils/logger.py deleted file mode 100644 index 819403d1398..00000000000 --- a/neural_solution/utils/logger.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Logger: handles logging functionalities.""" - -import logging -import os - - -class Logger(object): - """Logger class.""" - - __instance = None - - def __new__(cls): - """Create a singleton Logger instance.""" - if Logger.__instance is None: - Logger.__instance = object.__new__(cls) - Logger.__instance._log() - return Logger.__instance - - def _log(self): - """Set up the logger format and handler.""" - LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper() - self._logger = logging.getLogger("neural_compressor") - self._logger.handlers.clear() - self._logger.setLevel(LOGLEVEL) - formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S") - streamHandler = logging.StreamHandler() - streamHandler.setFormatter(formatter) - self._logger.addHandler(streamHandler) - self._logger.propagate = False - - def get_logger(self): - """Get the logger.""" - return self._logger - - -def _pretty_dict(value, indent=0): - """Make the logger dict pretty.""" - prefix = "\n" + " " * (indent + 4) - if isinstance(value, dict): - items = [prefix + repr(key) + ": " + _pretty_dict(value[key], indent + 4) for key in value] - return "{%s}" % (",".join(items) + "\n" + " " * indent) - elif isinstance(value, list): - items = [prefix + _pretty_dict(item, indent + 4) for item in value] - return "[%s]" % (",".join(items) + "\n" + " " * indent) - elif isinstance(value, tuple): - items = [prefix + _pretty_dict(item, indent + 4) for item in value] - return "(%s)" % (",".join(items) + "\n" + " " * indent) - else: - return repr(value) - - -level = Logger().get_logger().level -DEBUG = logging.DEBUG - - -def log(level, msg, *args, **kwargs): - """Output log with the level as a parameter.""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().log(level, line, *args, **kwargs) - else: - Logger().get_logger().log(level, msg, *args, **kwargs) - - -def debug(msg, *args, **kwargs): - """Output log with the debug level.""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().debug(line, *args, **kwargs) - else: - Logger().get_logger().debug(msg, *args, **kwargs) - - -def error(msg, *args, 
**kwargs): - """Output log with the error level.""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().error(line, *args, **kwargs) - else: - Logger().get_logger().error(msg, *args, **kwargs) - - -def fatal(msg, *args, **kwargs): - """Output log with the fatal level.""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().fatal(line, *args, **kwargs) - else: - Logger().get_logger().fatal(msg, *args, **kwargs) - - -def info(msg, *args, **kwargs): - """Output log with the info level.""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().info(line, *args, **kwargs) - else: - Logger().get_logger().info(msg, *args, **kwargs) - - -def warn(msg, *args, **kwargs): - """Output log with the warning level.""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().warning(line, *args, **kwargs) - else: - Logger().get_logger().warning(msg, *args, **kwargs) - - -def warning(msg, *args, **kwargs): - """Output log with the warning level (Alias of the method warn).""" - if isinstance(msg, dict): - for _, line in enumerate(_pretty_dict(msg).split("\n")): - Logger().get_logger().warning(line, *args, **kwargs) - else: - Logger().get_logger().warning(msg, *args, **kwargs) diff --git a/neural_solution/utils/utility.py b/neural_solution/utils/utility.py deleted file mode 100644 index 79cef4565d0..00000000000 --- a/neural_solution/utils/utility.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution utility.""" - -import json -import os - - -def get_db_path(workspace="./"): - """Get the database path. - - Args: - workspace (str, optional): . Defaults to "./". - - Returns: - str: the path of database - """ - db_path = os.path.join(workspace, "db", "task.db") - return os.path.abspath(db_path) - - -def get_task_workspace(workspace="./"): - """Get the workspace of task. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: the workspace of task - """ - return os.path.join(workspace, "task_workspace") - - -def get_task_log_workspace(workspace="./"): - """Get the log workspace for task. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: the workspace of task. - """ - return os.path.join(workspace, "task_log") - - -def get_serve_log_workspace(workspace="./"): - """Get log workspace for service. - - Args: - workspace (str, optional): the workspace for Neural Solution. Defaults to "./". - - Returns: - str: the log workspace for service - """ - return os.path.join(workspace, "serve_log") - - -def dict_to_str(d): - """Convert a dict object to a string object. 
- - Args: - d (dict): a dict object - - Returns: - str: string - """ - result = json.dumps(d) - return result diff --git a/neural_solution/version.py b/neural_solution/version.py deleted file mode 100644 index 106f241e994..00000000000 --- a/neural_solution/version.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Neural Solution.""" - -from neural_compressor.version import __version__ diff --git a/pyproject.toml b/pyproject.toml index f2c53b1c100..f17c3af16b2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.isort] profile = "black" line_length = 120 -known_first_party = ["neural_compressor", "neural_insights", "neural_solution"] +known_first_party = ["neural_compressor"] extend_skip_glob = ["**/__init__.py"] diff --git a/requirements_ort.txt b/requirements_ort.txt deleted file mode 100644 index a4ac1127d71..00000000000 --- a/requirements_ort.txt +++ /dev/null @@ -1,8 +0,0 @@ -numpy -onnx -onnxruntime -onnxruntime-extensions -psutil -py-cpuinfo -pydantic -transformers diff --git a/setup.py b/setup.py index 80a51ee10be..343047d4547 100644 --- a/setup.py +++ b/setup.py @@ -43,44 +43,23 @@ def get_build_version(): assert False, "Error: Could not open '%s' due %s\n" % (filepath, error) PKG_INSTALL_CFG = { - # overall install config for build from source, python setup.py install + # overall installation config, pip install neural-compressor "neural_compressor": { "project_name": "neural_compressor", "include_packages": find_packages( - include=["neural_compressor", "neural_compressor.*", "neural_coder", "neural_coder.*"], + include=["neural_compressor", "neural_compressor.*"], exclude=[ "neural_compressor.template", ], ), "package_data": {"": ["*.yaml"]}, "install_requires": fetch_requirements("requirements.txt"), - }, - # 2.x binary build config, pip install neural-compressor - "neural_compressor_2x": { - "project_name": "neural_compressor", - "include_packages": find_packages( - include=["neural_compressor", "neural_compressor.*", "neural_coder", "neural_coder.*"], - exclude=[ - "neural_compressor.template", - "neural_compressor.common", - "neural_compressor.common.*", - "neural_compressor.torch", - "neural_compressor.torch.*", - "neural_compressor.tensorflow", - "neural_compressor.tensorflow.*", - "neural_compressor.onnxrt", - "neural_compressor.onnxrt.*", - ], - ), - "package_data": {"": ["*.yaml"]}, - "install_requires": fetch_requirements("requirements.txt"), "extras_require": { - "pt": [f"neural_compressor_3x_pt=={__version__}"], - "tf": [f"neural_compressor_3x_tf=={__version__}"], - "ort": [f"neural_compressor_3x_ort=={__version__}"], + "pt": fetch_requirements("requirements_pt.txt"), + "tf": fetch_requirements("requirements_tf.txt"), }, }, - # 3.x pt binary build config, pip install neural-compressor[pt], install 2.x API + 3.x PyTorch API. + # 3.x pt binary build config, pip install neural-compressor-pt, install 3.x PyTorch API. 
"neural_compressor_3x_pt": { "project_name": "neural_compressor_3x_pt", "include_packages": find_packages( @@ -93,7 +72,7 @@ def get_build_version(): ), "install_requires": fetch_requirements("requirements_pt.txt"), }, - # 3.x tf binary build config, pip install neural-compressor[tf], install 2.x API + 3.x TensorFlow API. + # 3.x tf binary build config, pip install neural-compressor-tf, install 3.x TensorFlow API. "neural_compressor_3x_tf": { "project_name": "neural_compressor_3x_tf", "include_packages": find_packages( @@ -107,48 +86,6 @@ def get_build_version(): "package_data": {"": ["*.yaml"]}, "install_requires": fetch_requirements("requirements_tf.txt"), }, - # 3.x ort binary build config, pip install neural-compressor[ort], install 2.x API + 3.x ONNXRT API. - "neural_compressor_3x_ort": { - "project_name": "neural_compressor_3x_ort", - "include_packages": find_packages( - include=[ - "neural_compressor.common", - "neural_compressor.common.*", - "neural_compressor.onnxrt", - "neural_compressor.onnxrt.*", - ], - ), - "install_requires": fetch_requirements("requirements_ort.txt"), - }, - "neural_insights": { - "project_name": "neural_insights", - "include_packages": find_packages(include=["neural_insights", "neural_insights.*"], exclude=["test.*", "test"]), - "package_data": { - "neural_insights": [ - "bin/*", - "*.yaml", - "web/app/*.*", - "web/app/static/css/*", - "web/app/static/js/*", - "web/app/static/media/*", - "web/app/icons/*", - ] - }, - "install_requires": fetch_requirements("neural_insights/requirements.txt"), - "entry_points": {"console_scripts": ["neural_insights = neural_insights.bin.neural_insights:execute"]}, - }, - "neural_solution": { - "project_name": "neural_solution", - "include_packages": find_packages(include=["neural_solution", "neural_solution.*"]), - "package_data": { - "neural_solution": [ - "scripts/*.*", - "frontend/*.json", - ] - }, - "install_requires": fetch_requirements("neural_solution/requirements.txt"), - "entry_points": {"console_scripts": ["neural_solution = neural_solution.bin.neural_solution:exec"]}, - }, } @@ -161,18 +98,6 @@ def get_build_version(): ext_modules = [] cmdclass = {} - if "neural_insights" in sys.argv: - sys.argv.remove("neural_insights") - cfg_key = "neural_insights" - - if "neural_solution" in sys.argv: - sys.argv.remove("neural_solution") - cfg_key = "neural_solution" - - if "2x" in sys.argv: - sys.argv.remove("2x") - cfg_key = "neural_compressor_2x" - if "pt" in sys.argv: sys.argv.remove("pt") cfg_key = "neural_compressor_3x_pt" @@ -181,16 +106,16 @@ def get_build_version(): sys.argv.remove("tf") cfg_key = "neural_compressor_3x_tf" - if "ort" in sys.argv: - sys.argv.remove("ort") - cfg_key = "neural_compressor_3x_ort" - project_name = PKG_INSTALL_CFG[cfg_key].get("project_name") include_packages = PKG_INSTALL_CFG[cfg_key].get("include_packages") or {} package_data = PKG_INSTALL_CFG[cfg_key].get("package_data") or {} install_requires = PKG_INSTALL_CFG[cfg_key].get("install_requires") or [] - entry_points = PKG_INSTALL_CFG[cfg_key].get("entry_points") or {} extras_require = PKG_INSTALL_CFG[cfg_key].get("extras_require") or {} + entry_points = { + "console_scripts": [ + "incbench = neural_compressor.common.benchmark:benchmark", + ] + } setup( name=project_name, diff --git a/test/3x/common/test_benchmark.py b/test/3x/common/test_benchmark.py new file mode 100644 index 00000000000..78e94f70ace --- /dev/null +++ b/test/3x/common/test_benchmark.py @@ -0,0 +1,171 @@ +import os +import re +import shutil +import subprocess + +from 
neural_compressor.common.utils import DEFAULT_WORKSPACE + +# build files during test process to test benchmark +tmp_file_dict = {} +tmp = """ +print("test benchmark") +""" +tmp_file_dict["./tmp/tmp.py"] = tmp + +tmp = """ +print("test benchmark") +print("Throughput: 1 samples/sec") +print("Latency: 1000 ms") +""" +tmp_file_dict["./tmp/throughput_latency.py"] = tmp + +tmp = """ +print("test benchmark") +print("Throughput: 2 tokens/sec") +""" +tmp_file_dict["./tmp/throughput.py"] = tmp + +tmp = """ +print("test benchmark") +print("Latency: 10 ms") +""" +tmp_file_dict["./tmp/latency.py"] = tmp + + +def build_tmp_file(): + os.makedirs("./tmp") + for tmp_path, tmp in tmp_file_dict.items(): + f = open(tmp_path, "w") + f.write(tmp) + f.close() + + +def trigger_process(cmd): + # trigger subprocess + p = subprocess.Popen( + cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True + ) # nosec + return p + + +def check_main_process(message): + num_i_pattern = r"(.*) (\d+) instance(.*) triggered" + num_c_pattern = r"(.*) (\d+) core(.*) in use" + log_file_pattern = r"(.*) The log of instance 1 is saved to (.*)" + num_i = re.search(num_i_pattern, message, flags=re.DOTALL).group(2) + all_c = re.search(num_c_pattern, message).group(2) + log_file_path = re.search(log_file_pattern, message).group(2) + return int(num_i), int(all_c), log_file_path + + +def check_log_file(log_file_path): + output_pattern = r"(.*)test benchmark(.*)" + with open(log_file_path, "r") as f: + output = f.read() + f.close() + return re.match(output_pattern, output, flags=re.DOTALL) + + +class TestBenchmark: + def setup_class(self): + build_tmp_file() + + def teardown_class(self): + shutil.rmtree("./tmp") + shutil.rmtree("nc_workspace") + + def test_default(self): + cmd = "incbench tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 1, "the number of instance should be 1." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_only_num_i(self): + cmd = "incbench --num_i 2 tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 2, "the number of instance should be 2." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_only_num_c(self): + cmd = "incbench --num_c 1 tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == all_c, "the number of instance should equal the number of available cores." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_only_cores(self): + cmd = "incbench -C 0-1 tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 1, "the number of instance should be 1." + assert all_c == 2, "the number of available cores should be 2." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_num_i_num_c(self): + cmd = "incbench --num_i 2 --num_c 2 tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 2, "the number of instance should be 2." + assert all_c == 4, "the number of available cores should be 4." 
+ assert check_log_file(log_file_path), "instance output is not correct." + + def test_num_i_cores(self): + cmd = "incbench --num_i 2 -C 0-2,5,8 tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 2, "the number of instance should be 2." + assert all_c == 5, "the number of available cores should be 5." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_num_c_cores(self): + cmd = "incbench --num_c 2 -C 0-6 tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 3, "the number of instance should be all_c//num_c=3." + assert all_c == 6, "the number of available cores should be (all_c//num_c)*num_c=6." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_cross_memory(self): + cmd = "incbench --num_c 1 -C 0 --cross_memory tmp/tmp.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 1, "the number of instance should be all_c//num_c=1." + assert all_c == 1, "the number of available cores should be 1." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_throughput_latency(self): + cmd = "incbench --num_i 2 --num_c 2 -C 0-7 tmp/throughput_latency.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 2, "the number of instance should be 2." + assert all_c == 4, "the number of available cores should be num_i*num_c=4." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_throughput(self): + cmd = "incbench --num_i 2 --num_c 2 -C 0-7 tmp/throughput.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 2, "the number of instance should be 2." + assert all_c == 4, "the number of available cores should be num_i*num_c=4." + assert check_log_file(log_file_path), "instance output is not correct." + + def test_latency(self): + cmd = "incbench --num_i 2 --num_c 2 -C 0-7 tmp/latency.py" + p = trigger_process(cmd) + stdout, _ = p.communicate() + num_i, all_c, log_file_path = check_main_process(stdout.decode()) + assert num_i == 2, "the number of instance should be 2." + assert all_c == 4, "the number of available cores should be num_i*num_c=4." + assert check_log_file(log_file_path), "instance output is not correct." 
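[Editor's note] The new test/3x/common/test_benchmark.py above exercises the `incbench` console script that this patch registers as an entry point in setup.py. As a minimal sketch derived only from the commands and assertions in that test file (the target script path is illustrative, and only the flags appearing in the tests are shown):

    # one instance on all detected cores
    incbench tmp/tmp.py
    # two benchmark instances of the same script
    incbench --num_i 2 tmp/tmp.py
    # bind to cores 0-6 (7 cores) with 2 cores per instance -> 7//2 = 3 instances using 6 cores
    incbench --num_c 2 -C 0-6 tmp/tmp.py

Per the tests, each run reports how many instances were triggered, how many cores are in use, and where each instance's log is saved; the tests parse that summary and then check the per-instance log for the script's stdout (including "Throughput"/"Latency" lines when the script prints them). The `--cross_memory` flag is also covered by a test, but its semantics are not described in this patch.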
diff --git a/test/3x/common/test_common.py b/test/3x/common/test_common.py index 4af0e1a276d..90a5db3c315 100644 --- a/test/3x/common/test_common.py +++ b/test/3x/common/test_common.py @@ -277,6 +277,11 @@ def test_config_expand_complex_tunable_type(self): for i in range(len(configs_list)): self.assertEqual(configs_list[i].target_op_type_list, target_op_type_list_options[i]) + def test_config_expand_with_empty_options(self): + configs = FakeAlgoConfig(weight_dtype=["int", "float32"], weight_bits=[]) + configs_list = configs.expand() + self.assertEqual(len(configs_list), 2) + def test_mixed_two_algos(self): model = FakeModel() OP1_NAME = "OP1_NAME" diff --git a/test/3x/common/test_utility.py b/test/3x/common/test_utility.py index 00a3be79514..fd349ce1706 100644 --- a/test/3x/common/test_utility.py +++ b/test/3x/common/test_utility.py @@ -11,6 +11,9 @@ import unittest from unittest.mock import MagicMock, patch +import pytest + +import neural_compressor.common.utils.utility as inc_utils from neural_compressor.common import options from neural_compressor.common.utils import ( CpuInfo, @@ -18,6 +21,7 @@ Mode, default_tuning_logger, dump_elapsed_time, + get_workspace, log_process, set_random_seed, set_resume_from, @@ -39,9 +43,11 @@ def test_set_random_seed(self): set_random_seed(seed) def test_set_workspace(self): - workspace = "/path/to/workspace" + workspace = "/tmp/inc_workspace" set_workspace(workspace) self.assertEqual(options.workspace, workspace) + returned_workspace = get_workspace() + self.assertEqual(returned_workspace, workspace) # non String type workspace = 12345 @@ -72,9 +78,11 @@ def test_set_tensorboard(self): class TestCPUInfo(unittest.TestCase): def test_cpu_info(self): cpu_info = CpuInfo() - assert cpu_info.cores_per_socket > 0, "CPU count should be greater than 0" assert isinstance(cpu_info.bf16, bool), "bf16 should be a boolean" assert isinstance(cpu_info.vnni, bool), "avx512 should be a boolean" + assert cpu_info.cores >= 1 + assert cpu_info.sockets >= 1 + assert cpu_info.cores_per_socket >= 1 class TestLazyImport(unittest.TestCase): @@ -110,6 +118,11 @@ def test_lazy_import_access_attr(self): self.assertIsNotNone(lazy_import.module) + def test_call_method_module_not_found(self): + with self.assertRaises(ImportError): + lazy_import = LazyImport("non_existent_module") + lazy_import(3, 4) + class TestUtils(unittest.TestCase): def test_dump_elapsed_time(self): @@ -166,5 +179,60 @@ def __init__(self): assert instance2.value == 1, "Singleton should return the same instance" -if __name__ == "__main__": - unittest.main() +class TestCallCounter(unittest.TestCase): + def test_call_counter(self): + # empty dict + inc_utils.FUNC_CALL_COUNTS.clear() + + @inc_utils.call_counter + def add(a, b): + return a + b + + # Initial count should be 0 + self.assertEqual(inc_utils.FUNC_CALL_COUNTS["add"], 0) + + # Call the function multiple times + add(1, 2) + add(3, 4) + add(5, 6) + + # Count should be incremented accordingly + self.assertEqual(inc_utils.FUNC_CALL_COUNTS["add"], 3) + + +class TestAutoDetectProcessorType: + @pytest.fixture + def force_client(self, monkeypatch): + monkeypatch.setattr(inc_utils.cpu_info, "sockets", 1) + monkeypatch.setattr(inc_utils.cpu_info, "brand_raw", "") + + # force the ram size detected by psutil <= 64GB + class MockMemory: + def __init__(self, total): + self.total = total + + # Patch the psutil.virtual_memory() method + monkeypatch.setattr(inc_utils.psutil, "virtual_memory", lambda: MockMemory(16 * 1024**3)) + + def test_auto_detect_processor_type(self, 
force_client): + p_type = inc_utils.detect_processor_type_based_on_hw() + assert ( + p_type == inc_utils.ProcessorType.Client + ), f"Expect processor type to be {inc_utils.ProcessorType.Client}, got {p_type}" + + def test_detect_processor_type_based_on_hw(self): + # Test when the brand name includes a server keyword + inc_utils.cpu_info.brand_raw = "Intel Xeon Server" + assert inc_utils.detect_processor_type_based_on_hw() == inc_utils.ProcessorType.Server + + # Test when the memory size is greater than 32GB + with patch("psutil.virtual_memory") as mock_virtual_memory: + mock_virtual_memory.return_value.total = 64 * 1024**3 + assert inc_utils.detect_processor_type_based_on_hw() == inc_utils.ProcessorType.Server + + # Test when none of the conditions are met + inc_utils.cpu_info.sockets = 1 + inc_utils.cpu_info.brand_raw = "Intel Core i7" + with patch("psutil.virtual_memory") as mock_virtual_memory: + mock_virtual_memory.return_value.total = 16 * 1024**3 + assert inc_utils.detect_processor_type_based_on_hw() == inc_utils.ProcessorType.Client diff --git a/test/3x/onnxrt/quantization/layer_wise/test_layer_wise.py b/test/3x/onnxrt/quantization/layer_wise/test_layer_wise.py deleted file mode 100644 index c8e7584ee7f..00000000000 --- a/test/3x/onnxrt/quantization/layer_wise/test_layer_wise.py +++ /dev/null @@ -1,155 +0,0 @@ -import os -import shutil -import unittest -from copy import deepcopy - -import onnx -import onnxruntime as ort -import onnxruntime.tools.symbolic_shape_infer as symbolic_shape_infer -import torch -from optimum.exporters.onnx import main_export -from transformers import AutoTokenizer - -from neural_compressor.common import Logger -from neural_compressor.onnxrt.quantization.calibrate import CalibrationDataReader - -logger = Logger().get_logger() - - -def find_onnx_file(folder_path): - # return first .onnx file path in folder_path - for root, dirs, files in os.walk(folder_path): - for file in files: - if file.endswith(".onnx"): - return os.path.join(root, file) - return None - - -class DummyNLPDataloader(CalibrationDataReader): - def __init__(self, model_name): - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.sequence_a = "intel-extension-for-transformers is based in SH" - self.sequence_b = "Where is intel-extension-for-transformers based? 
NYC or SH" - - self.encoded_list = [] - encoded_input = dict(self.tokenizer(self.sequence_a, self.sequence_b, return_tensors="pt")) - input_shape = encoded_input["input_ids"].shape - encoded_input["position_ids"] = ( - torch.arange(0, input_shape[-1], dtype=torch.long).unsqueeze(0).view(-1, input_shape[-1]) - ) - - # convert torch tensor to numpy - for input_name, input_value in encoded_input.items(): - if isinstance(input_value, torch.Tensor): - encoded_input[input_name] = input_value.numpy() - - self.encoded_list.append(encoded_input) - self.iter_next = iter(self.encoded_list) - - def get_next(self): - return next(self.iter_next, None) - - def rewind(self): - self.iter_next = iter(self.encoded_list) - - -class TestLayerWiseQuant(unittest.TestCase): - @classmethod - def setUpClass(self): - # onnx model exported with transformers>=4.38.0 is different with low version - # which will cause layer-wise quant ut to fail - # limit transformers to 4.37.2 - # TODO: remove transformers version limitation - llama_id = "yujiepan/llama-2-tiny-3layers-random" - main_export(llama_id, output="llama-2-tiny-3layers-random", task="text-generation") - model_path = find_onnx_file("llama-2-tiny-3layers-random") - - model = onnx.load(model_path) - model = symbolic_shape_infer.SymbolicShapeInference.infer_shapes(model, auto_merge=True) - infer_shape_model_path = "llama-2-tiny-3layers-random/model-infer-shape.onnx" - onnx.save(model, infer_shape_model_path) - - sess_options = ort.SessionOptions() - sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED - sess_options.optimized_model_filepath = "llama-2-tiny-3layers-random/optimized_model.onnx" - ort.InferenceSession(infer_shape_model_path, sess_options) - - self.llama = "llama-2-tiny-3layers-random/optimized_model.onnx" - self.calibration_data_reader = DummyNLPDataloader(llama_id) - - @classmethod - def tearDownClass(self): - shutil.rmtree("llama-2-tiny-3layers-random", ignore_errors=True) - - def setUp(self): - # print the test name - logger.info(f"Running ONNXRT TestLayerWiseQuant test: {self.id()}") - - def _check_model_is_quantized(self, model): - node_optypes = [node.op_type for node in model.graph.node] - return "MatMulNBits" in node_optypes or "MatMulFpQ4" in node_optypes - - def _get_quantized_matmul_weight(self, model, matmul_name): - weight_init_name = None - for node in model.graph.node: - if node.name == matmul_name: - weight_init_name = node.input[1] - if weight_init_name is None: - return None - - weight_init = None - for init in model.graph.initializer: - if init.name == weight_init_name: - weight_init = onnx.numpy_helper.to_array(init) - return weight_init - - def _apply_quantize(self, quant_config, data_reader=None): - from neural_compressor.onnxrt.quantization.quantize import _quantize - - fp32_model = self.llama - if data_reader is None: - qmodel = _quantize(fp32_model, quant_config) - else: - qmodel = _quantize(fp32_model, quant_config, data_reader) - self.assertIsNotNone(qmodel) - return qmodel - - def test_rtn_layer_wise(self): - from neural_compressor.onnxrt.quantization import RTNConfig - - rtn_config = RTNConfig(layer_wise_quant=True) - qmodel_lwq = self._apply_quantize(rtn_config) - self.assertTrue(self._check_model_is_quantized(qmodel_lwq)) - - rtn_config = RTNConfig(layer_wise_quant=False) - qmodel = self._apply_quantize(rtn_config) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - lwq_quantized_weight = self._get_quantized_matmul_weight(qmodel_lwq, "/lm_head/MatMul_Q4") - 
self.assertIsNotNone(lwq_quantized_weight) - quantized_weight = self._get_quantized_matmul_weight(qmodel, "/lm_head/MatMul_Q4") - self.assertIsNotNone(quantized_weight) - self.assertTrue((lwq_quantized_weight == quantized_weight).all()) - - def test_gptq_layer_wise(self): - from neural_compressor.onnxrt.quantization import GPTQConfig - - self.calibration_data_reader.rewind() - gptq_config = GPTQConfig(layer_wise_quant=True) - qmodel_lwq = self._apply_quantize(gptq_config, self.calibration_data_reader) - self.assertTrue(self._check_model_is_quantized(qmodel_lwq)) - - self.calibration_data_reader.rewind() - gptq_config = GPTQConfig(layer_wise_quant=False) - qmodel = self._apply_quantize(gptq_config, self.calibration_data_reader) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - lwq_quantized_weight = self._get_quantized_matmul_weight(qmodel_lwq, "/lm_head/MatMul_Q4") - self.assertIsNotNone(lwq_quantized_weight) - quantized_weight = self._get_quantized_matmul_weight(qmodel, "/lm_head/MatMul_Q4") - self.assertIsNotNone(quantized_weight) - self.assertTrue((lwq_quantized_weight == quantized_weight).all()) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/onnxrt/quantization/weight_only/test_awq.py b/test/3x/onnxrt/quantization/weight_only/test_awq.py deleted file mode 100644 index 1587399f9ca..00000000000 --- a/test/3x/onnxrt/quantization/weight_only/test_awq.py +++ /dev/null @@ -1,219 +0,0 @@ -import os -import shutil -import unittest - -import torch -from optimum.exporters.onnx import main_export -from transformers import AutoTokenizer - -from neural_compressor.common import Logger -from neural_compressor.onnxrt.quantization.calibrate import CalibrationDataReader - -logger = Logger().get_logger() - - -def find_onnx_file(folder_path): - # return first .onnx file path in folder_path - for root, dirs, files in os.walk(folder_path): - for file in files: - if file.endswith(".onnx"): - return os.path.join(root, file) - return None - - -class DummyNLPDataloader(CalibrationDataReader): - def __init__(self, model_name): - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.sequence_a = "intel-extension-for-transformers is based in SH" - self.sequence_b = "Where is intel-extension-for-transformers based? 
NYC or SH" - - self.encoded_list = [] - encoded_input = dict(self.tokenizer(self.sequence_a, self.sequence_b, return_tensors="pt")) - input_shape = encoded_input["input_ids"].shape - encoded_input["position_ids"] = ( - torch.arange(0, input_shape[-1], dtype=torch.long).unsqueeze(0).view(-1, input_shape[-1]) - ) - - # convert torch tensor to numpy - for input_name, input_value in encoded_input.items(): - if isinstance(input_value, torch.Tensor): - encoded_input[input_name] = input_value.numpy() - - self.encoded_list.append(encoded_input) - self.iter_next = iter(self.encoded_list) - - def get_next(self): - return next(self.iter_next, None) - - def rewind(self): - self.iter_next = iter(self.encoded_list) - - -class TestAWQQuant(unittest.TestCase): - @classmethod - def setUpClass(self): - main_export( - "hf-internal-testing/tiny-random-gptj", - output="gptj", - ) - self.gptj = find_onnx_file("./gptj") - self.calibration_data_reader = DummyNLPDataloader("hf-internal-testing/tiny-random-gptj") - - @classmethod - def tearDownClass(self): - shutil.rmtree("gptj", ignore_errors=True) - - def setUp(self): - # print the test name - logger.info(f"Running ONNXRT TestAWQQuant test: {self.id()}") - - def _count_woq_matmul(self, q_model, bits=4, group_size=32): - op_names = [ - i.name - for i in q_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(bits, group_size)) - ] - return len(op_names) - - def _check_model_is_quantized(self, model): - node_optypes = [node.op_type for node in model.graph.node] - return "MatMulNBits" in node_optypes or "MatMulFpQ4" in node_optypes - - def _check_node_is_quantized(self, model, node_name): - for node in model.graph.node: - if (node.name == node_name or node.name == node_name + "_Q4") and node.op_type in [ - "MatMulNBits", - "MatMulFpQ4", - ]: - return True - return False - - def _apply_awq(self, quant_config): - logger.info(f"Test AWQ with config {quant_config}") - from neural_compressor.onnxrt.quantization.quantize import _quantize - - fp32_model = self.gptj - qmodel = _quantize(fp32_model, quant_config, calibration_data_reader=self.calibration_data_reader) - self.assertIsNotNone(qmodel) - return qmodel - - def test_awq_params_combination(self): - from neural_compressor.onnxrt import AWQConfig - - # some tests were skipped to accelerate the CI - # TODO: check params combination. - # TODO: Add number check for group_size. 
- awq_options = { - "weight_dtype": ["int"], - "weight_bits": [4, 3, 8], - "weight_group_size": [32], - "weight_sym": [True, False], - "act_dtype": ["fp32"], - "accuracy_level": [0], - "enable_auto_scale": [True, False], - "enable_mse_search": [True, False], - } - from itertools import product - - keys = AWQConfig.params_list - for value in product(*awq_options.values()): - d = dict(zip(keys, value)) - print(d) - quant_config = AWQConfig(**d) - qmodel = self._apply_awq(quant_config) - self.assertEqual(self._count_woq_matmul(qmodel, bits=value[1], group_size=value[2]), 30) - - def test_awq_config(self): - from neural_compressor.onnxrt.quantization import AWQConfig - - awq_config1 = AWQConfig(weight_bits=4) - quant_config_dict = { - "awq": {"weight_bits": 4}, - } - awq_config2 = AWQConfig.from_dict(quant_config_dict["awq"]) - self.assertEqual(awq_config1.to_dict(), awq_config2.to_dict()) - - def test_quantize_awq_from_dict_default(self): - from neural_compressor.onnxrt import get_default_awq_config - - qmodel = self._apply_awq(quant_config=get_default_awq_config()) - self.assertIsNotNone(qmodel) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - def test_quantize_awq_from_dict_beginner(self): - quant_config = { - "awq": { - "weight_bits": 4, - "weight_group_size": 32, - }, - } - qmodel = self._apply_awq(quant_config) - self.assertIsNotNone(qmodel) - self.assertIsNotNone(qmodel) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - def test_quantize_awq_from_class_beginner(self): - from neural_compressor.onnxrt import AWQConfig - - quant_config = AWQConfig(weight_bits=4, weight_group_size=32) - qmodel = self._apply_awq(quant_config) - self.assertIsNotNone(qmodel) - - def test_quantize_awq_fallback_from_class_beginner(self): - from neural_compressor.onnxrt import AWQConfig - - fp32_config = AWQConfig(weight_dtype="fp32") - quant_config = AWQConfig( - weight_bits=4, - weight_dtype="int", - weight_sym=False, - weight_group_size=32, - ) - quant_config.set_local("/h.4/mlp/fc_out/MatMul", fp32_config) - qmodel = self._apply_awq(quant_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - def test_quantize_awq_from_dict_advance(self): - quant_config = { - "awq": { - "global": { - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_dtype": "fp32", - } - }, - } - } - qmodel = self._apply_awq(quant_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - quant_config = { - "awq": { - "global": { - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_bits": 8, - "weight_group_size": 32, - } - }, - } - } - qmodel = self._apply_awq(quant_config) - self.assertIsNotNone(qmodel) - for node in qmodel.graph.node: - if node.name == "/h.4/mlp/fc_out/MatMul": - self.assertTrue(node.input[1].endswith("Q8G32")) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/onnxrt/quantization/weight_only/test_gptq.py b/test/3x/onnxrt/quantization/weight_only/test_gptq.py deleted file mode 100644 index 4309af4e654..00000000000 --- a/test/3x/onnxrt/quantization/weight_only/test_gptq.py +++ /dev/null @@ -1,222 +0,0 @@ -import os -import shutil -import unittest - -import torch -from optimum.exporters.onnx import main_export -from transformers 
import AutoTokenizer - -from neural_compressor.common import Logger -from neural_compressor.onnxrt.quantization.calibrate import CalibrationDataReader - -logger = Logger().get_logger() - - -def find_onnx_file(folder_path): - # return first .onnx file path in folder_path - for root, dirs, files in os.walk(folder_path): - for file in files: - if file.endswith(".onnx"): - return os.path.join(root, file) - return None - - -class DummyNLPDataloader(CalibrationDataReader): - def __init__(self, model_name): - self.tokenizer = AutoTokenizer.from_pretrained(model_name) - self.sequence_a = "intel-extension-for-transformers is based in SH" - self.sequence_b = "Where is intel-extension-for-transformers based? NYC or SH" - - self.encoded_list = [] - encoded_input = dict(self.tokenizer(self.sequence_a, self.sequence_b, return_tensors="pt")) - input_shape = encoded_input["input_ids"].shape - encoded_input["position_ids"] = ( - torch.arange(0, input_shape[-1], dtype=torch.long).unsqueeze(0).view(-1, input_shape[-1]) - ) - - # convert torch tensor to numpy - for input_name, input_value in encoded_input.items(): - if isinstance(input_value, torch.Tensor): - encoded_input[input_name] = input_value.numpy() - - self.encoded_list.append(encoded_input) - self.iter_next = iter(self.encoded_list) - - def get_next(self): - return next(self.iter_next, None) - - def rewind(self): - self.iter_next = iter(self.encoded_list) - - -class TestGPTQQuant(unittest.TestCase): - @classmethod - def setUpClass(self): - main_export( - "hf-internal-testing/tiny-random-gptj", - output="gptj", - ) - self.gptj = find_onnx_file("./gptj") - self.calibration_data_reader = DummyNLPDataloader("hf-internal-testing/tiny-random-gptj") - - @classmethod - def tearDownClass(self): - shutil.rmtree("gptj", ignore_errors=True) - - def setUp(self): - # print the test name - logger.info(f"Running ONNXRT TestGPTQQuant test: {self.id()}") - - def _count_woq_matmul(self, q_model, bits=4, group_size=32): - op_names = [ - i.name - for i in q_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(bits, group_size)) - ] - return len(op_names) - - def _check_model_is_quantized(self, model): - node_optypes = [node.op_type for node in model.graph.node] - return "MatMulNBits" in node_optypes or "MatMulFpQ4" in node_optypes - - def _check_node_is_quantized(self, model, node_name): - for node in model.graph.node: - if (node.name == node_name or node.name == node_name + "_Q4") and node.op_type in [ - "MatMulNBits", - "MatMulFpQ4", - ]: - return True - return False - - def _apply_gptq(self, quant_config): - logger.info(f"Test GPTQ with config {quant_config}") - from neural_compressor.onnxrt.quantization.quantize import _quantize - - fp32_model = self.gptj - qmodel = _quantize(fp32_model, quant_config, calibration_data_reader=self.calibration_data_reader) - self.assertIsNotNone(qmodel) - return qmodel - - def test_gptq_params_combination(self): - from neural_compressor.onnxrt import GPTQConfig - - # some tests were skipped to accelerate the CI - # TODO: check params combination. - # TODO: Add number check for group_size. 
- gptq_options = { - "weight_dtype": ["int"], - "weight_bits": [4], - "weight_group_size": [32], - "weight_sym": [True, False], - "act_dtype": ["fp32"], - "accuracy_level": [0], - "percdamp": [0.01], - "blocksize": [128], - "actorder": [True, False], - "mse": [True, False], - "perchannel": [True, False], - } - from itertools import product - - keys = GPTQConfig.params_list - for value in product(*gptq_options.values()): - d = dict(zip(keys, value)) - print(d) - quant_config = GPTQConfig(**d) - qmodel = self._apply_gptq(quant_config) - self.assertEqual(self._count_woq_matmul(qmodel, bits=value[1], group_size=value[2]), 30) - - def test_gptq_config(self): - from neural_compressor.onnxrt.quantization import GPTQConfig - - gptq_config1 = GPTQConfig(weight_bits=4) - quant_config_dict = { - "gptq": {"weight_bits": 4}, - } - gptq_config2 = GPTQConfig.from_dict(quant_config_dict["gptq"]) - self.assertEqual(gptq_config1.to_dict(), gptq_config2.to_dict()) - - def test_quantize_gptq_from_dict_default(self): - from neural_compressor.onnxrt import get_default_gptq_config - - qmodel = self._apply_gptq(quant_config=get_default_gptq_config()) - self.assertIsNotNone(qmodel) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - def test_quantize_gptq_from_dict_beginner(self): - quant_config = { - "gptq": { - "weight_bits": 4, - "weight_group_size": 32, - }, - } - qmodel = self._apply_gptq(quant_config) - self.assertIsNotNone(qmodel) - self.assertIsNotNone(qmodel) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - def test_quantize_gptq_from_class_beginner(self): - from neural_compressor.onnxrt import GPTQConfig - - quant_config = GPTQConfig(weight_bits=4, weight_group_size=32) - qmodel = self._apply_gptq(quant_config) - self.assertIsNotNone(qmodel) - - def test_quantize_gptq_fallback_from_class_beginner(self): - from neural_compressor.onnxrt import GPTQConfig - - fp32_config = GPTQConfig(weight_dtype="fp32") - quant_config = GPTQConfig( - weight_bits=4, - weight_dtype="int", - weight_sym=False, - weight_group_size=32, - ) - quant_config.set_local("/h.4/mlp/fc_out/MatMul", fp32_config) - qmodel = self._apply_gptq(quant_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - def test_quantize_gptq_from_dict_advance(self): - quant_config = { - "gptq": { - "global": { - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_dtype": "fp32", - } - }, - } - } - qmodel = self._apply_gptq(quant_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - quant_config = { - "gptq": { - "global": { - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_bits": 8, - "weight_group_size": 32, - } - }, - } - } - qmodel = self._apply_gptq(quant_config) - self.assertIsNotNone(qmodel) - for node in qmodel.graph.node: - if node.name == "/h.4/mlp/fc_out/MatMul": - self.assertTrue(node.input[1].endswith("Q8G32")) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/onnxrt/quantization/weight_only/test_rtn.py b/test/3x/onnxrt/quantization/weight_only/test_rtn.py deleted file mode 100644 index 11a05bc48da..00000000000 --- a/test/3x/onnxrt/quantization/weight_only/test_rtn.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import shutil -import unittest - -from 
optimum.exporters.onnx import main_export - -from neural_compressor.common import Logger - -logger = Logger().get_logger() - - -def find_onnx_file(folder_path): - # return first .onnx file path in folder_path - for root, dirs, files in os.walk(folder_path): - for file in files: - if file.endswith(".onnx"): - return os.path.join(root, file) - return None - - -class TestRTNQuant(unittest.TestCase): - @classmethod - def setUpClass(self): - main_export( - "hf-internal-testing/tiny-random-gptj", - output="gptj", - ) - self.gptj = find_onnx_file("./gptj") - - @classmethod - def tearDownClass(self): - shutil.rmtree("gptj", ignore_errors=True) - - def setUp(self): - # print the test name - logger.info(f"Running ONNXRT TestRTNQuant test: {self.id()}") - - def _check_model_is_quantized(self, model): - node_optypes = [node.op_type for node in model.graph.node] - return "MatMulNBits" in node_optypes or "MatMulFpQ4" in node_optypes - - def _check_node_is_quantized(self, model, node_name): - for node in model.graph.node: - if (node.name == node_name or node.name == node_name + "_Q4") and node.op_type in [ - "MatMulNBits", - "MatMulFpQ4", - ]: - return True - return False - - def _count_woq_matmul(self, q_model, bits=4, group_size=32): - op_names = [ - i.name - for i in q_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(bits, group_size)) - ] - return len(op_names) - - def _apply_rtn(self, quant_config): - logger.info(f"Test RTN with config {quant_config}") - from neural_compressor.onnxrt.quantization.quantize import _quantize - - fp32_model = self.gptj - qmodel = _quantize(fp32_model, quant_config) - self.assertIsNotNone(qmodel) - return qmodel - - def test_rtn_params_combination(self): - from neural_compressor.onnxrt import RTNConfig - - # some tests were skipped to accelerate the CI - # TODO: check params combination. - # TODO: Add number check for group_size. 
- rtn_options = { - "weight_dtype": ["int"], - "weight_bits": [4, 3, 8], - "weight_group_size": [32], - "weight_sym": [True, False], - "act_dtype": ["fp32"], - } - from itertools import product - - keys = RTNConfig.params_list - for value in product(*rtn_options.values()): - d = dict(zip(keys, value)) - quant_config = RTNConfig(**d) - qmodel = self._apply_rtn(quant_config) - self.assertEqual(self._count_woq_matmul(qmodel, bits=value[1], group_size=value[2]), 30) - - def test_rtn_config(self): - from neural_compressor.onnxrt.quantization import RTNConfig - - rtn_config1 = RTNConfig(weight_bits=4) - quant_config_dict = { - "rtn": {"weight_bits": 4}, - } - rtn_config2 = RTNConfig.from_dict(quant_config_dict["rtn"]) - self.assertEqual(rtn_config1.to_dict(), rtn_config2.to_dict()) - - def test_quantize_rtn_from_dict_default(self): - from neural_compressor.onnxrt import get_default_rtn_config - from neural_compressor.onnxrt.quantization.quantize import _quantize - - qmodel = self._apply_rtn(quant_config=get_default_rtn_config()) - self.assertIsNotNone(qmodel) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - def test_quantize_rtn_from_dict_beginner(self): - from neural_compressor.onnxrt.quantization.quantize import _quantize - - quant_config = { - "rtn": { - "weight_bits": 4, - "weight_group_size": 32, - }, - } - qmodel = self._apply_rtn(quant_config) - self.assertIsNotNone(qmodel) - self.assertIsNotNone(qmodel) - self.assertTrue(self._check_model_is_quantized(qmodel)) - - def test_quantize_rtn_from_class_beginner(self): - from neural_compressor.onnxrt import RTNConfig - from neural_compressor.onnxrt.quantization.quantize import _quantize - - quant_config = RTNConfig(weight_bits=4, weight_group_size=32) - qmodel = self._apply_rtn(quant_config) - self.assertIsNotNone(qmodel) - - def test_quantize_rtn_fallback_from_class_beginner(self): - from neural_compressor.onnxrt import RTNConfig - from neural_compressor.onnxrt.quantization.quantize import _quantize - - fp32_config = RTNConfig(weight_dtype="fp32") - fp32_model = self.gptj - quant_config = RTNConfig( - weight_bits=4, - weight_dtype="int", - weight_sym=False, - weight_group_size=32, - ) - quant_config.set_local("/h.4/mlp/fc_out/MatMul", fp32_config) - qmodel = _quantize(fp32_model, quant_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - def test_quantize_rtn_from_dict_advance(self): - from neural_compressor.onnxrt.quantization.quantize import _quantize - - fp32_model = self.gptj - quant_config = { - "rtn": { - "global": { - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_dtype": "fp32", - } - }, - } - } - qmodel = _quantize(fp32_model, quant_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - fp32_model = self.gptj - quant_config = { - "rtn": { - "global": { - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_bits": 8, - "weight_group_size": 32, - } - }, - } - } - qmodel = _quantize(fp32_model, quant_config) - self.assertIsNotNone(qmodel) - for node in qmodel.graph.node: - if node.name == "/h.4/mlp/fc_out/MatMul": - self.assertTrue(node.input[1].endswith("Q8G32")) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/onnxrt/requirements.txt 
b/test/3x/onnxrt/requirements.txt deleted file mode 100644 index 4165ba5e0a6..00000000000 --- a/test/3x/onnxrt/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -optimum -pytest diff --git a/test/3x/onnxrt/test_autotune.py b/test/3x/onnxrt/test_autotune.py deleted file mode 100644 index 8291d3ef344..00000000000 --- a/test/3x/onnxrt/test_autotune.py +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import os -import shutil -import unittest -from typing import Callable, Dict, List, Optional, Union -from unittest.mock import patch - -import numpy as np -import onnx -import onnxruntime as ort -from optimum.exporters.onnx import main_export - -from neural_compressor.common import Logger -from neural_compressor.common.base_tuning import Evaluator, TuningConfig -from neural_compressor.onnxrt import AWQConfig, CalibrationDataReader, GPTQConfig, RTNConfig, SmoohQuantConfig -from neural_compressor.onnxrt.quantization import autotune - -logger = Logger().get_logger() - - -def _create_evaluator_for_eval_fns(eval_fns: Optional[Union[Callable, Dict, List[Dict]]] = None) -> Evaluator: - evaluator = Evaluator() - evaluator.set_eval_fn_registry(eval_fns) - return evaluator - - -class DataReader(CalibrationDataReader): - def __init__(self, model): - model = onnx.load(model) - batch_size = 1 - sequence_length = 1 - self.data = { - "input_ids": np.random.randint(10, size=(batch_size, sequence_length)).astype("int64"), - "attention_mask": np.zeros((batch_size, sequence_length)).astype("int64"), - } - for inp in model.graph.input: - if inp.name in self.data: - continue - if inp.name == "position_ids": - # model is exported with optimum >= 1.14.0 with new input 'position_ids' - self.data[inp.name] = np.random.randint(10, size=(batch_size, sequence_length)).astype("int64") - - self.enum_data = None - - def get_next(self): - if self.enum_data is None: - self.enum_data = iter([self.data]) - return next(self.enum_data, None) - - def rewind(self): - self.enum_data = None - - -class TestONNXRT3xAutoTune(unittest.TestCase): - @classmethod - def setUpClass(self): - main_export( - "hf-internal-testing/tiny-random-gptj", - output="gptj", - ) - self.gptj = glob.glob(os.path.join("./gptj", "*.onnx"))[0] - self.data_reader = DataReader(self.gptj) - - @classmethod - def tearDownClass(self): - shutil.rmtree("./gptj", ignore_errors=True) - - @patch("logging.Logger.warning") - def test_auto_tune_warning(self, mock_warning): - acc_data = iter([1.0, 0.8, 0.99, 1.0, 0.99, 0.99]) - - def eval_acc_fn(model) -> float: - session = ort.InferenceSession(model.SerializeToString(), providers=["CPUExecutionProvider"]) - return next(acc_data) - - custom_tune_config = TuningConfig(config_set=[SmoohQuantConfig(alpha=0.5), SmoohQuantConfig(alpha=0.6)]) - with self.assertRaises(SystemExit): - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_acc_fn, - 
calibration_data_reader=self.data_reader, - ) - call_args_list = mock_warning.call_args_list - # There may be multiple calls to warning, so we need to check all of them - self.assertIn( - "Please refine your eval_fn to accept model path (str) as input.", [info[0][0] for info in call_args_list] - ) - - def test_sq_auto_tune(self): - acc_data = iter([1.0, 0.8, 0.99, 1.0, 0.99, 0.99]) - - def eval_acc_fn(model) -> float: - return next(acc_data) - - perf_data = iter([1.0, 0.9, 0.99]) - - def eval_perf_fn(model) -> float: - return next(perf_data) - - eval_fns = [ - {"eval_fn": eval_acc_fn, "weight": 0.5, "name": "accuracy"}, - { - "eval_fn": eval_perf_fn, - "weight": 0.5, - }, - ] - - evaluator = _create_evaluator_for_eval_fns(eval_fns) - - def eval_fn_wrapper(model): - result = evaluator.evaluate(model) - return result - - custom_tune_config = TuningConfig(config_set=[SmoohQuantConfig(alpha=0.5), SmoohQuantConfig(alpha=0.6)]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_acc_fn, - calibration_data_reader=self.data_reader, - ) - self.assertIsNotNone(best_model) - - custom_tune_config = TuningConfig(config_set=[SmoohQuantConfig(alpha=[0.5, 0.6])]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_fn_wrapper, - calibration_data_reader=self.data_reader, - ) - self.assertEqual(len(evaluator.eval_fn_registry), 2) - self.assertIsNotNone(best_model) - - def test_rtn_auto_tune(self): - acc_data = iter([1.0, 0.8, 0.6, 1.0, 0.99, 0.9]) - - def eval_acc_fn(model) -> float: - return next(acc_data) - - perf_data = iter([1.0, 0.99, 0.99]) - - def eval_perf_fn(model) -> float: - return next(perf_data) - - eval_fns = [ - {"eval_fn": eval_acc_fn, "weight": 0.5, "name": "accuracy"}, - { - "eval_fn": eval_perf_fn, - "weight": 0.5, - }, - ] - - evaluator = _create_evaluator_for_eval_fns(eval_fns) - - def eval_fn_wrapper(model): - result = evaluator.evaluate(model) - return result - - custom_tune_config = TuningConfig(config_set=[RTNConfig(weight_group_size=32), RTNConfig(weight_group_size=64)]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_acc_fn, - calibration_data_reader=self.data_reader, - ) - self.assertIsNone(best_model) - - custom_tune_config = TuningConfig(config_set=[RTNConfig(weight_group_size=[32, 64])]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_fn_wrapper, - calibration_data_reader=self.data_reader, - ) - self.assertEqual(len(evaluator.eval_fn_registry), 2) - self.assertIsNotNone(best_model) - op_names = [ - i.name - for i in best_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(4, 32)) - ] - self.assertTrue(len(op_names) > 0) - - def test_awq_auto_tune(self): - acc_data = iter([1.0, 0.8, 0.6, 1.0, 0.99, 0.9]) - - def eval_acc_fn(model) -> float: - return next(acc_data) - - perf_data = iter([1.0, 0.99, 0.99]) - - def eval_perf_fn(model) -> float: - return next(perf_data) - - eval_fns = [ - {"eval_fn": eval_acc_fn, "weight": 0.5, "name": "accuracy"}, - { - "eval_fn": eval_perf_fn, - "weight": 0.5, - }, - ] - - evaluator = _create_evaluator_for_eval_fns(eval_fns) - - def eval_fn_wrapper(model): - result = evaluator.evaluate(model) - return result - - custom_tune_config = TuningConfig(config_set=[AWQConfig(weight_group_size=32), AWQConfig(weight_group_size=64)]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - 
eval_fn=eval_acc_fn, - calibration_data_reader=self.data_reader, - ) - self.assertIsNone(best_model) - - custom_tune_config = TuningConfig(config_set=[AWQConfig(weight_group_size=[32, 64])]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_fn_wrapper, - calibration_data_reader=self.data_reader, - ) - self.assertEqual(len(evaluator.eval_fn_registry), 2) - self.assertIsNotNone(best_model) - op_names = [ - i.name - for i in best_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(4, 32)) - ] - self.assertTrue(len(op_names) > 0) - - def test_gptq_auto_tune(self): - acc_data = iter([1.0, 0.8, 0.6, 1.0, 0.99, 0.9]) - - def eval_acc_fn(model) -> float: - return next(acc_data) - - perf_data = iter([1.0, 0.99, 0.99]) - - def eval_perf_fn(model) -> float: - return next(perf_data) - - eval_fns = [ - {"eval_fn": eval_acc_fn, "weight": 0.5, "name": "accuracy"}, - { - "eval_fn": eval_perf_fn, - "weight": 0.5, - }, - ] - evaluator = _create_evaluator_for_eval_fns(eval_fns) - - def eval_fn_wrapper(model): - result = evaluator.evaluate(model) - return result - - custom_tune_config = TuningConfig( - config_set=[GPTQConfig(weight_group_size=32), GPTQConfig(weight_group_size=64)] - ) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_acc_fn, - calibration_data_reader=self.data_reader, - ) - self.assertIsNone(best_model) - - custom_tune_config = TuningConfig(config_set=[GPTQConfig(weight_group_size=[32, 64])]) - best_model = autotune( - model_input=self.gptj, - tune_config=custom_tune_config, - eval_fn=eval_fn_wrapper, - calibration_data_reader=self.data_reader, - ) - self.assertEqual(len(evaluator.eval_fn_registry), 2) - self.assertIsNotNone(best_model) - op_names = [ - i.name - for i in best_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(4, 32)) - ] - self.assertTrue(len(op_names) > 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/onnxrt/test_config.py b/test/3x/onnxrt/test_config.py deleted file mode 100644 index 9b0c49de1b8..00000000000 --- a/test/3x/onnxrt/test_config.py +++ /dev/null @@ -1,251 +0,0 @@ -import copy -import os -import shutil -import unittest - -import numpy as np -import onnx -from optimum.exporters.onnx import main_export - -from neural_compressor.common import Logger - -logger = Logger().get_logger() - - -def find_onnx_file(folder_path): - # return first .onnx file path in folder_path - for root, dirs, files in os.walk(folder_path): - for file in files: - if file.endswith(".onnx"): - return os.path.join(root, file) - return None - - -def build_simple_onnx_model(): - A = onnx.helper.make_tensor_value_info("A", onnx.TensorProto.FLOAT, [1, 5, 5]) - C = onnx.helper.make_tensor_value_info("C", onnx.TensorProto.FLOAT, [1, 5, 2]) - D = onnx.helper.make_tensor_value_info("D", onnx.TensorProto.FLOAT, [1, 5, 2]) - H = onnx.helper.make_tensor_value_info("H", onnx.TensorProto.FLOAT, [1, 5, 2]) - - e_value = np.random.randint(2, size=(10)).astype(np.float32) - B_init = onnx.helper.make_tensor("B", onnx.TensorProto.FLOAT, [5, 2], e_value.reshape(10).tolist()) - E_init = onnx.helper.make_tensor("E", onnx.TensorProto.FLOAT, [1, 5, 2], e_value.reshape(10).tolist()) - - matmul_node = onnx.helper.make_node("MatMul", ["A", "B"], ["C"], name="Matmul") - add = onnx.helper.make_node("Add", ["C", "E"], ["D"], name="add") - - f_value = np.random.randint(2, size=(10)).astype(np.float32) - F_init = 
onnx.helper.make_tensor("F", onnx.TensorProto.FLOAT, [1, 5, 2], e_value.reshape(10).tolist()) - add2 = onnx.helper.make_node("Add", ["D", "F"], ["H"], name="add2") - - graph = onnx.helper.make_graph([matmul_node, add, add2], "test_graph_1", [A], [H], [B_init, E_init, F_init]) - model = onnx.helper.make_model(graph) - model = onnx.helper.make_model(graph, **{"opset_imports": [onnx.helper.make_opsetid("", 13)]}) - return model - - -class TestQuantizationConfig(unittest.TestCase): - @classmethod - def setUpClass(self): - main_export( - "hf-internal-testing/tiny-random-gptj", - output="gptj", - ) - self.gptj = find_onnx_file("./gptj") - - simple_onnx_model = build_simple_onnx_model() - onnx.save(simple_onnx_model, "simple_onnx_model.onnx") - self.simple_onnx_model = "simple_onnx_model.onnx" - - @classmethod - def tearDownClass(self): - shutil.rmtree("gptj", ignore_errors=True) - os.remove("simple_onnx_model.onnx") - - def setUp(self): - # print the test name - logger.info(f"Running TestQuantizationConfig test: {self.id()}") - - def _check_node_is_quantized(self, model, node_name): - for node in model.graph.node: - if (node.name == node_name or node.name == node_name + "_Q4") and node.op_type in [ - "MatMulNBits", - "MatMulFpQ4", - ]: - return True - return False - - def _count_woq_matmul(self, q_model, bits=4, group_size=32): - op_names = [ - i.name - for i in q_model.graph.node - if i.op_type.startswith("MatMul") and i.input[1].endswith("_Q{}G{}".format(bits, group_size)) - ] - return len(op_names) - - def test_config_white_lst(self): - from neural_compressor.onnxrt import RTNConfig - from neural_compressor.onnxrt.quantization.quantize import _quantize - - global_config = RTNConfig(weight_bits=4) - # set operator instance - fc_out_config = RTNConfig(weight_dtype="fp32", white_list=["/h.4/mlp/fc_out/MatMul"]) - # get model and quantize - fp32_model = self.gptj - qmodel = _quantize(fp32_model, quant_config=global_config + fc_out_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 29) - self.assertFalse(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - def test_config_white_lst2(self): - from neural_compressor.onnxrt import RTNConfig - from neural_compressor.onnxrt.quantization.quantize import _quantize - - global_config = RTNConfig(weight_dtype="fp32") - # set operator instance - fc_out_config = RTNConfig(weight_bits=4, white_list=["/h.4/mlp/fc_out/MatMul"]) - # get model and quantize - fp32_model = self.gptj - qmodel = _quantize(fp32_model, quant_config=global_config + fc_out_config) - self.assertIsNotNone(qmodel) - self.assertEqual(self._count_woq_matmul(qmodel), 1) - self.assertTrue(self._check_node_is_quantized(qmodel, "/h.4/mlp/fc_out/MatMul")) - - def test_config_white_lst3(self): - from neural_compressor.onnxrt import RTNConfig - from neural_compressor.onnxrt.utils.utility import get_model_info - - global_config = RTNConfig(weight_bits=4) - # set operator instance - fc_out_config = RTNConfig(weight_bits=8, white_list=["/h.4/mlp/fc_out/MatMul"]) - quant_config = global_config + fc_out_config - # get model and quantize - fp32_model = self.gptj - model_info = get_model_info(fp32_model, white_op_type_list=["MatMul"]) - logger.info(quant_config) - configs_mapping = quant_config.to_config_mapping(model_info=model_info) - logger.info(configs_mapping) - self.assertTrue(configs_mapping[("/h.4/mlp/fc_out/MatMul", "MatMul")].weight_bits == 8) - self.assertTrue(configs_mapping[("/h.4/mlp/fc_in/MatMul", "MatMul")].weight_bits == 4) - - def 
test_config_from_dict(self): - from neural_compressor.onnxrt import RTNConfig - - quant_config = { - "rtn": { - "global": { - "weight_dtype": "int", - "weight_bits": 4, - "weight_group_size": 32, - }, - "local": { - "fc1": { - "weight_dtype": "int", - "weight_bits": 8, - } - }, - } - } - config = RTNConfig.from_dict(quant_config["rtn"]) - self.assertIsNotNone(config.local_config) - - def test_config_to_dict(self): - from neural_compressor.onnxrt import RTNConfig - - quant_config = RTNConfig(weight_bits=4) - fc_out_config = RTNConfig(weight_bits=8) - quant_config.set_local("/h.4/mlp/fc_out/MatMul", fc_out_config) - config_dict = quant_config.to_dict() - self.assertIn("global", config_dict) - self.assertIn("local", config_dict) - - def test_same_type_configs_addition(self): - from neural_compressor.onnxrt import RTNConfig - - quant_config1 = { - "rtn": { - "weight_dtype": "int", - "weight_bits": 4, - "weight_group_size": 32, - }, - } - q_config = RTNConfig.from_dict(quant_config1["rtn"]) - quant_config2 = { - "rtn": { - "global": { - "weight_bits": 8, - "weight_group_size": 32, - }, - "local": { - "/h.4/mlp/fc_out/MatMul": { - "weight_dtype": "int", - "weight_bits": 4, - } - }, - } - } - q_config2 = RTNConfig.from_dict(quant_config2["rtn"]) - q_config3 = q_config + q_config2 - q3_dict = q_config3.to_dict() - for op_name, op_config in quant_config2["rtn"]["local"].items(): - for attr, val in op_config.items(): - self.assertEqual(q3_dict["local"][op_name][attr], val) - self.assertNotEqual(q3_dict["global"]["weight_bits"], quant_config2["rtn"]["global"]["weight_bits"]) - - def test_config_mapping(self): - from neural_compressor.onnxrt import RTNConfig - from neural_compressor.onnxrt.utils.utility import get_model_info - - quant_config = RTNConfig(weight_bits=4) - # set operator instance - fc_out_config = RTNConfig(weight_bits=8) - quant_config.set_local("/h.4/mlp/fc_out/MatMul", fc_out_config) - # get model and quantize - fp32_model = self.gptj - model_info = get_model_info(fp32_model, white_op_type_list=["MatMul"]) - logger.info(quant_config) - configs_mapping = quant_config.to_config_mapping(model_info=model_info) - logger.info(configs_mapping) - self.assertTrue(configs_mapping[("/h.4/mlp/fc_out/MatMul", "MatMul")].weight_bits == 8) - self.assertTrue(configs_mapping[("/h.4/mlp/fc_in/MatMul", "MatMul")].weight_bits == 4) - # test regular matching - fc_config = RTNConfig(weight_bits=3) - quant_config.set_local("/h.[1-4]/mlp/fc_out/MatMul", fc_config) - configs_mapping = quant_config.to_config_mapping(model_info=model_info) - logger.info(configs_mapping) - self.assertTrue(configs_mapping[("/h.4/mlp/fc_out/MatMul", "MatMul")].weight_bits == 3) - self.assertTrue(configs_mapping[("/h.3/mlp/fc_out/MatMul", "MatMul")].weight_bits == 3) - self.assertTrue(configs_mapping[("/h.2/mlp/fc_out/MatMul", "MatMul")].weight_bits == 3) - self.assertTrue(configs_mapping[("/h.1/mlp/fc_out/MatMul", "MatMul")].weight_bits == 3) - - def test_diff_types_configs_addition(self): - from neural_compressor.onnxrt import GPTQConfig, RTNConfig - - quant_config1 = { - "rtn": { - "weight_bits": 4, - "weight_group_size": 32, - }, - } - q_config = RTNConfig.from_dict(quant_config1["rtn"]) - d_config = GPTQConfig(weight_group_size=128) - combined_config = q_config + d_config - combined_config_d = combined_config.to_dict() - logger.info(combined_config) - self.assertIn("rtn", combined_config_d) - self.assertIn("gptq", combined_config_d) - - -class TestQuantConfigForAutotune(unittest.TestCase): - def test_expand_config(self): - # 
test the expand functionalities, the user is not aware it - from neural_compressor.onnxrt import RTNConfig - - tune_config = RTNConfig(weight_bits=[4, 8]) - expand_config_list = RTNConfig.expand(tune_config) - self.assertEqual(expand_config_list[0].weight_bits, 4) - self.assertEqual(expand_config_list[1].weight_bits, 8) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/onnxrt/test_smooth_quant.py b/test/3x/onnxrt/test_smooth_quant.py deleted file mode 100644 index 6974020185b..00000000000 --- a/test/3x/onnxrt/test_smooth_quant.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2023 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import os -import shutil -import unittest - -import numpy as np -import onnx -from optimum.exporters.onnx import main_export - -from neural_compressor.common import Logger -from neural_compressor.onnxrt import CalibrationDataReader, QuantType, SmoohQuantConfig, get_default_sq_config -from neural_compressor.onnxrt.quantization.quantize import _quantize - -logger = Logger().get_logger() - - -class DataReader(CalibrationDataReader): - def __init__(self, model): - model = onnx.load(model) - batch_size = 1 - sequence_length = 1 - self.data = { - "input_ids": np.random.randint(10, size=(batch_size, sequence_length)).astype("int64"), - "attention_mask": np.zeros((batch_size, sequence_length)).astype("int64"), - } - for inp in model.graph.input: - if inp.name in self.data: - continue - if inp.name == "position_ids": - # model is exported with optimum >= 1.14.0 with new input 'position_ids' - self.data[inp.name] = np.random.randint(10, size=(batch_size, sequence_length)).astype("int64") - - self.enum_data = None - - def get_next(self): - if self.enum_data is None: - self.enum_data = iter([self.data]) - return next(self.enum_data, None) - - def rewind(self): - self.enum_data = None - - -class TestONNXRT3xSmoothQuant(unittest.TestCase): - @classmethod - def setUpClass(self): - main_export( - "hf-internal-testing/tiny-random-gptj", - output="gptj", - ) - self.gptj = glob.glob(os.path.join("./gptj", "*.onnx"))[0] - self.data_reader = DataReader(self.gptj) - - @classmethod - def tearDownClass(self): - shutil.rmtree("./gptj", ignore_errors=True) - - def test_sq_from_class_beginner(self): - self.data_reader.rewind() - config = get_default_sq_config() - model = _quantize(self.gptj, config, self.data_reader) - num_muls = len([i for i in model.graph.node if i.name.endswith("_smooth_mul") and i.op_type == "Mul"]) - self.assertEqual(num_muls, 30) - - def test_sq_auto_tune_from_class_beginner(self): - self.data_reader.rewind() - config = SmoohQuantConfig(alpha="auto", scales_per_op=False) - model = _quantize(self.gptj, config, self.data_reader) - num_muls = len([i for i in model.graph.node if i.name.endswith("_smooth_mul") and i.op_type == "Mul"]) - self.assertEqual(num_muls, 15) - - def test_sq_from_dict_beginner(self): - config = { - "smooth_quant": { - "global": { - "alpha": 0.5, - 
"scales_per_op": False, - }, - } - } - self.data_reader.rewind() - model = _quantize(self.gptj, config, self.data_reader) - num_muls = len([i for i in model.graph.node if i.name.endswith("_smooth_mul") and i.op_type == "Mul"]) - self.assertEqual(num_muls, 15) - - def test_sq_auto_tune_from_dict_beginner(self): - config = { - "smooth_quant": { - "global": { - "alpha": "auto", - }, - } - } - self.data_reader.rewind() - model = _quantize(self.gptj, config, self.data_reader) - num_muls = len([i for i in model.graph.node if i.name.endswith("_smooth_mul") and i.op_type == "Mul"]) - self.assertEqual(num_muls, 30) - - def test_sq_ort_param_class_beginner(self): - self.data_reader.rewind() - config = SmoohQuantConfig(weight_type=QuantType.QUInt8, activation_type=QuantType.QUInt8) - model = _quantize(self.gptj, config, self.data_reader) - num_muls = len([i for i in model.graph.node if i.name.endswith("_smooth_mul") and i.op_type == "Mul"]) - self.assertTrue(2 in [i.data_type for i in model.graph.initializer]) - self.assertTrue(3 not in [i.data_type for i in model.graph.initializer]) - self.assertEqual(num_muls, 30) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/tensorflow/keras/test_layers.py b/test/3x/tensorflow/keras/test_layers.py new file mode 100644 index 00000000000..b43b3fd8bf7 --- /dev/null +++ b/test/3x/tensorflow/keras/test_layers.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import os +import shutil +import unittest + +import keras +import numpy as np +import tensorflow as tf + +from neural_compressor.common import Logger +from neural_compressor.tensorflow.utils import version1_gte_version2 + +logger = Logger().get_logger() + + +def build_model1(): + # Load MNIST dataset + mnist = keras.datasets.mnist + + # 60000 images in train and 10000 images in test, but we don't need so much for ut + (train_images, train_labels), (test_images, test_labels) = mnist.load_data() + train_images, train_labels = train_images[:1000], train_labels[:1000] + test_images, test_labels = test_images[:200], test_labels[:200] + + # Normalize the input image so that each pixel value is between 0 to 1. + train_images = train_images / 255.0 + test_images = test_images / 255.0 + + # Define the model architecture. 
+ model = keras.Sequential( + [ + keras.layers.InputLayer(input_shape=(28, 28)), + keras.layers.Reshape(target_shape=(28, 28, 1)), + keras.layers.DepthwiseConv2D(3, 3, activation="relu", name="conv2d"), + keras.layers.MaxPooling2D(pool_size=(2, 2)), + keras.layers.Flatten(), + keras.layers.Dense(10, name="dense"), + ] + ) + # Train the digit classification model + model.compile( + optimizer="adam", loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] + ) + + model.fit( + train_images, + train_labels, + epochs=1, + validation_split=0.1, + ) + + _, baseline_model_accuracy = model.evaluate(test_images, test_labels, verbose=0) + + print("Baseline test accuracy:", baseline_model_accuracy) + if version1_gte_version2(tf.__version__, "2.16.1"): + model.save("baseline_model1.keras") + else: + model.save("baseline_model1") + + +def build_model2(): + # Load MNIST dataset + mnist = keras.datasets.mnist + + # 60000 images in train and 10000 images in test, but we don't need so much for ut + (train_images, train_labels), (test_images, test_labels) = mnist.load_data() + train_images, train_labels = train_images[:1000], train_labels[:1000] + test_images, test_labels = test_images[:200], test_labels[:200] + + # Normalize the input image so that each pixel value is between 0 to 1. + train_images = train_images / 255.0 + test_images = test_images / 255.0 + + # Define the model architecture. + model = keras.Sequential( + [ + keras.layers.InputLayer(input_shape=(28, 28)), + keras.layers.Reshape(target_shape=(28, 28, 1)), + keras.layers.SeparableConv2D(3, 3, activation="relu"), + keras.layers.AveragePooling2D(pool_size=(2, 2)), + keras.layers.Flatten(), + keras.layers.Dense(10, name="dense"), + ] + ) + # Train the digit classification model + model.compile( + optimizer="adam", loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] + ) + + model.fit( + train_images, + train_labels, + epochs=1, + validation_split=0.1, + ) + + _, baseline_model_accuracy = model.evaluate(test_images, test_labels, verbose=0) + + print("Baseline test accuracy:", baseline_model_accuracy) + if version1_gte_version2(tf.__version__, "2.16.1"): + model.save("baseline_model2.keras") + else: + model.save("baseline_model2") + + +class Dataset(object): + def __init__(self, batch_size=1): + self.batch_size = batch_size + mnist = keras.datasets.mnist + (train_images, train_labels), (test_images, test_labels) = mnist.load_data() + train_images, train_labels = train_images[:1000], train_labels[:1000] + test_images, test_labels = test_images[:200], test_labels[:200] + # Normalize the input image so that each pixel value is between 0 to 1. 
+ self.train_images = train_images / 255.0 + self.test_images = test_images / 255.0 + self.train_labels = train_labels + self.test_labels = test_labels + + def __len__(self): + return len(self.test_images) + + def __getitem__(self, idx): + return self.test_images[idx], self.test_labels[idx] + + +class MyDataloader: + def __init__(self, dataset, batch_size=1): + self.dataset = dataset + self.batch_size = batch_size + self.length = math.ceil(len(dataset) / self.batch_size) + + def __iter__(self): + for _, (images, labels) in enumerate(self.dataset): + images = np.expand_dims(images, axis=0) + labels = np.expand_dims(labels, axis=0) + yield (images, labels) + + def __len__(self): + return self.length + + +class TestTF3xNewApi(unittest.TestCase): + @classmethod + def setUpClass(self): + build_model1() + build_model2() + os.environ["ITEX_ONEDNN_GRAPH"] = "1" + self.fp32_model_path1 = ( + "baseline_model1.keras" if version1_gte_version2(tf.__version__, "2.16.1") else "baseline_model1" + ) + self.fp32_model_path2 = ( + "baseline_model2.keras" if version1_gte_version2(tf.__version__, "2.16.1") else "baseline_model2" + ) + + @classmethod + def tearDownClass(self): + if self.fp32_model_path1.endswith(".keras"): + os.remove(self.fp32_model_path1) + os.remove(self.fp32_model_path2) + else: + shutil.rmtree(self.fp32_model_path1, ignore_errors=True) + shutil.rmtree(self.fp32_model_path2, ignore_errors=True) + os.environ["ITEX_ONEDNN_GRAPH"] = "0" + + def test_depthwise_conv2d(self): + logger.info("test_static_quant_from_dict_default") + from neural_compressor.tensorflow import quantize_model + from neural_compressor.tensorflow.keras import get_default_static_quant_config + + calib_dataloader = MyDataloader(dataset=Dataset()) + fp32_model = keras.models.load_model(self.fp32_model_path1) + qmodel = quantize_model(fp32_model, get_default_static_quant_config(), calib_dataloader) + self.assertIsNotNone(qmodel) + + for layer in qmodel.layers: + if layer.name == "conv2d": + self.assertEqual(layer.__class__.__name__, "QDepthwiseConv2D") + break + + def test_seprable_conv2d(self): + logger.info("test_static_quant_from_dict_default") + from neural_compressor.tensorflow import quantize_model + from neural_compressor.tensorflow.keras import get_default_static_quant_config + + calib_dataloader = MyDataloader(dataset=Dataset()) + fp32_model = keras.models.load_model(self.fp32_model_path2) + qmodel = quantize_model(fp32_model, get_default_static_quant_config(), calib_dataloader) + self.assertIsNotNone(qmodel) + + for layer in qmodel.layers: + if layer.name == "conv2d": + self.assertEqual(layer.__class__.__name__, "QSeparableConv2D") + break + + +if __name__ == "__main__": + unittest.main() diff --git a/test/3x/tensorflow/keras/test_model_wrappers.py b/test/3x/tensorflow/keras/test_model_wrappers.py index b9cb3eecfd0..e4d906e27a2 100644 --- a/test/3x/tensorflow/keras/test_model_wrappers.py +++ b/test/3x/tensorflow/keras/test_model_wrappers.py @@ -94,30 +94,6 @@ def test_keras_model(self): self.assertEqual(os.path.isfile("./keras_model.keras"), True) - def test_tf_qat_model(self): - if parse_version(tf.version.VERSION) < parse_version("2.3.0"): - return - - from neural_compressor.tensorflow.utils.model_wrappers import TensorflowQATModel - - keras_model = self.model - model = TensorflowQATModel(keras_model) - self.assertEqual(isinstance(model.model, tf.keras.Model), True) - self.assertEqual(model.model_path, None) - - keras_model.save("./simple_model.keras") - model = TensorflowQATModel("./simple_model.keras") - 
self.assertEqual(isinstance(model.model, tf.keras.Model), True) - self.assertEqual(model.model_path, "./simple_model.keras") - - model.save("./keras_model.keras") - loaded_model = tf.keras.models.load_model("./keras_model.keras") - self.assertEqual(isinstance(loaded_model, tf.keras.Model), True) - - model.save("keras_model.h5") - loaded_model = tf.keras.models.load_model("keras_model.h5") - self.assertEqual(isinstance(loaded_model, tf.keras.Model), True) - if __name__ == "__main__": unittest.main() diff --git a/test/3x/tensorflow/quantization/ptq/newapi/test_graph_fuse_gelu_newapi.py b/test/3x/tensorflow/quantization/ptq/newapi/test_graph_fuse_gelu_newapi.py deleted file mode 100644 index e0194700fc3..00000000000 --- a/test/3x/tensorflow/quantization/ptq/newapi/test_graph_fuse_gelu_newapi.py +++ /dev/null @@ -1,413 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import unittest - -import tensorflow as tf -from tensorflow.compat.v1 import graph_util - -from neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu import FuseGeluOptimizer -from neural_compressor.tensorflow.utils import disable_random - - -class TestGeluFusion(unittest.TestCase): - def gelu(self, input_tensor, mul_value=0.5, addv2_value=1.0, sqrt_value=2.0): - cdf = mul_value * (addv2_value + tf.math.erf(input_tensor / tf.sqrt(sqrt_value))) - return input_tensor * cdf - - def gelu_enable_approximation( - self, - input_tensor, - another_mul_value=0.5, - mul1_value=0.044715, - addv2_value=1.0, - mul2_value=0.7978845608028654, - pow_value=3, - ): - coeff = tf.cast(mul1_value, input_tensor.dtype) - return ( - another_mul_value - * input_tensor - * (addv2_value + tf.tanh(mul2_value * (input_tensor + coeff * tf.pow(input_tensor, pow_value)))) - ) - - def gelu_enable_approximation_varaint( - self, - input_tensor, - another_mul_value=0.5, - mul1_value=0.044715, - addv2_value=1.0, - mul2_value=0.7978845608028654, - pow_value=3, - ): - coeff = tf.cast(mul1_value, input_tensor.dtype) - cdf = another_mul_value * ( - addv2_value + tf.tanh(mul2_value * (input_tensor + coeff * tf.pow(input_tensor, pow_value))) - ) - - return input_tensor * cdf - - def gelu_disable_approximation( - self, - input_tensor, - another_add_value=0.5, - mul1_value=0.044715, - addv2_value=1.0, - mul2_value=0.7978845608028654, - pow_value=3, - ): - coeff = tf.cast(mul1_value, input_tensor.dtype) - return (another_add_value + input_tensor) * ( - addv2_value + tf.tanh(mul2_value * (input_tensor + coeff * tf.pow(input_tensor, pow_value))) - ) - - @disable_random() - def test_gelu_disable_approximation_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_disable_approximation(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - 
self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, True) - - @disable_random() - def test_gelu_approximation_fusion_varaint(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation_varaint(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, True) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_pow_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, pow_value=1.0) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_mul2_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) 
- conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, mul2_value=1.0) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_addv2_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, addv2_value=12.0) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_mul1_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, mul1_value=1.0) - relu = tf.nn.relu(gelu) - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_another_mul(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = 
self.gelu_enable_approximation(conv_bias, another_mul_value=1.0) - relu = tf.nn.relu(gelu) - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion_with_invalid_sqrt(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias, sqrt_value=1.0) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[gelu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion_with_invalid_addv2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias, addv2_value=10.0) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[gelu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion_with_invalid_mul(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias, mul_value=1.0) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[gelu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in 
output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/tensorflow/quantization/ptq/test_get_estimator_graph.py b/test/3x/tensorflow/quantization/ptq/test_get_estimator_graph.py deleted file mode 100644 index b538c34a43d..00000000000 --- a/test/3x/tensorflow/quantization/ptq/test_get_estimator_graph.py +++ /dev/null @@ -1,52 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import platform -import unittest - -import tensorflow as tf - -from neural_compressor.tensorflow.quantization.utils.utility import get_estimator_graph -from neural_compressor.tensorflow.utils import version1_gte_version2 - - -class TestEstimatorGraphConvert(unittest.TestCase): - @classmethod - def setUpClass(self): - if version1_gte_version2(tf.version.VERSION, "2.16.1"): - return - - self.dst_path = "/tmp/.neural_compressor/train.csv" - self.titanic_file = tf.keras.utils.get_file( - self.dst_path, "https://storage.googleapis.com/tf-datasets/titanic/train.csv" - ) - - @unittest.skipIf( - version1_gte_version2(tf.version.VERSION, "2.16.1"), "The estimator APIs are deleted after TF2.16.1" - ) - def test_get_estimator_graph(self): - def train_input_fn(): - titanic = tf.data.experimental.make_csv_dataset(self.titanic_file, batch_size=32, label_name="survived") - titanic_batches = titanic.cache().repeat().shuffle(500).prefetch(tf.data.experimental.AUTOTUNE) - return titanic_batches - - age = tf.feature_column.numeric_column("age") - cls = tf.feature_column.categorical_column_with_vocabulary_list("class", ["First", "Second", "Third"]) - embark = tf.feature_column.categorical_column_with_hash_bucket("embark_town", 32) - import tempfile - - model_dir = tempfile.mkdtemp() - model = tf.estimator.LinearClassifier(model_dir=model_dir, feature_columns=[embark, cls, age], n_classes=2) - model = model.train(input_fn=train_input_fn, steps=100) - result = model.evaluate(train_input_fn, steps=10) - - graph = get_estimator_graph(model, train_input_fn) - - self.assertTrue(isinstance(graph, tf.Graph)) - graph_def = graph.as_graph_def() - self.assertGreater(len(graph_def.node), 1) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/tensorflow/quantization/ptq/test_set_tensor.py b/test/3x/tensorflow/quantization/ptq/test_set_tensor.py deleted file mode 100644 index 99e5f50ff7d..00000000000 --- a/test/3x/tensorflow/quantization/ptq/test_set_tensor.py +++ /dev/null @@ -1,145 +0,0 @@ -import os 
-import shutil -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.tensorflow.algorithms.static_quant.tensorflow import TensorFlowAdaptor -from neural_compressor.tensorflow.utils import disable_random - - -class TestSetTensor(unittest.TestCase): - @classmethod - def tearDownClass(self): - shutil.rmtree("./saved", ignore_errors=True) - - @disable_random() - def test_fp32bias(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias = tf.compat.v1.get_variable( - "bias", [16], dtype=tf.float32, initializer=tf.compat.v1.random_normal_initializer() - ) - - conv_bias = tf.math.add(conv, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.tensorflow import StaticQuantConfig, quantize_model - from neural_compressor.tensorflow.utils import BaseDataLoader, DummyDataset - - dataset = DummyDataset(shape=(100, 56, 56, 16), label=True) - calib_dataloader = BaseDataLoader(dataset) - quant_config = StaticQuantConfig() - q_model = quantize_model(constant_graph, quant_config, calib_dataloader) - - framework_specific_info = { - "device": "cpu", - "workspace_path": "saved", - "random_seed": 1978, - "inputs": ["input"], - "outputs": ["op_to_store"], - "approach": "post_training_static_quant", - "format": "default", - "backend": "default", - } - adaptor = TensorFlowAdaptor(framework_specific_info) - adaptor.set_tensor(q_model, {"bias": np.random.random(16)}) - - from tensorflow.core.framework import attr_value_pb2 - from tensorflow.python.framework import dtypes - - for node in q_model.graph_def.node: - if node.name == "bias": - self.assertEqual(node.attr["dtype"], attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - @disable_random() - def test_int32bias(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias = tf.compat.v1.get_variable("bias", [16], dtype=tf.float32) - - conv_bias = tf.math.add(conv, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="relu_0") - - conv_weights1 = tf.compat.v1.get_variable( - "weight1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(relu6, conv_weights1, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias1 = tf.compat.v1.get_variable("bias1", [16], dtype=tf.float32) - - conv_bias1 = tf.math.add(conv1, conv_bias1) - relu6 = tf.nn.relu6(conv_bias1, name="relu_1") - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(relu6, 
conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias2 = tf.compat.v1.get_variable("bias2", [16], dtype=tf.float32) - - conv_bias2 = tf.math.add(conv2, conv_bias2) - relu6 = tf.nn.relu6(conv_bias2, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - for i in constant_graph.node: - if i.op.find("Add") != -1: - i.op = "Add" - - from neural_compressor.tensorflow import StaticQuantConfig, quantize_model - from neural_compressor.tensorflow.utils import BaseDataLoader, DummyDataset - - dataset = DummyDataset(shape=(100, 56, 56, 16), label=True) - calib_dataloader = BaseDataLoader(dataset) - quant_config = StaticQuantConfig() - q_model = quantize_model(constant_graph, quant_config, calib_dataloader) - - framework_specific_info = { - "device": "cpu", - "workspace_path": "saved", - "random_seed": 1978, - "inputs": ["input"], - "outputs": ["op_to_store"], - "approach": "post_training_static_quant", - "format": "default", - "backend": "default", - } - adaptor = TensorFlowAdaptor(framework_specific_info) - adaptor.set_tensor(q_model, {"bias1": np.random.randint(6, size=2, dtype="int32")}) - from tensorflow.core.framework import attr_value_pb2 - from tensorflow.python.framework import dtypes - - for node in q_model.graph_def.node: - if node.name == "bias2": - self.assertEqual(node.attr["dtype"], attr_value_pb2.AttrValue(type=dtypes.qint32.as_datatype_enum)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/tensorflow/test_config.py b/test/3x/tensorflow/test_config.py index 579f69166d5..4a48387af55 100644 --- a/test/3x/tensorflow/test_config.py +++ b/test/3x/tensorflow/test_config.py @@ -213,6 +213,35 @@ def test_static_quant_from_dict_advance(self): self.assertEqual(conv2d_quantized, False) + def test_static_quant_from_dict_advance2(self): + logger.info("test_static_quant_from_dict_advance2") + from neural_compressor.tensorflow import quantize_model + from neural_compressor.tensorflow.utils import DummyDataset + + dataset = DummyDataset(shape=(100, 32, 32, 3), label=True) + calib_dataloader = MyDataLoader(dataset=dataset) + fp32_model = self.graph + quant_config = { + "static_quant": { + "global": { + "weight_dtype": "int8", + "weight_sym": True, + "weight_granularity": "per_channel", + "act_dtype": "int8", + "act_sym": True, + "act_granularity": "per_channel", + }, + "local": { + "conv1": { + "weight_algorithm": "kl", + "act_algorithm": "kl", + } + }, + } + } + qmodel = quantize_model(fp32_model, quant_config, calib_dataloader) + self.assertIsNotNone(qmodel) + def test_static_quant_from_class_advance(self): logger.info("test_static_quant_from_class_advance") from neural_compressor.tensorflow import StaticQuantConfig, quantize_model diff --git a/test/3x/tensorflow/test_model_wrappers.py b/test/3x/tensorflow/test_model_wrappers.py index da76526e6f9..b5cdb7be9de 100644 --- a/test/3x/tensorflow/test_model_wrappers.py +++ b/test/3x/tensorflow/test_model_wrappers.py @@ -147,8 +147,6 @@ def test_validate_graph_node(self): "Only supports tf previous to the version 2.16.1", ) def test_estimator(self): - from neural_compressor.tensorflow.quantization.utils.utility import get_estimator_graph - model_fn = build_estimator() input_fn = build_input_fn() estimator = tf.estimator.Estimator(model_fn, model_dir=None, config=None, params=None, 
warm_start_from=None) @@ -205,7 +203,7 @@ def test_slim(self): self.assertGreaterEqual(len(model.input_node_names), 1) self.assertEqual(model.model_path, "./slim_ckpt/inception_v1.ckpt") # test net factory - from neural_compressor.tensorflow.utils.nets_factory import TFSlimNetsFactory + from neural_compressor.tensorflow.utils.utility import TFSlimNetsFactory factory = TFSlimNetsFactory() from tf_slim.nets import inception diff --git a/test/3x/tensorflow/test_quantize_model.py b/test/3x/tensorflow/test_quantize_model.py new file mode 100644 index 00000000000..383bb80c0ab --- /dev/null +++ b/test/3x/tensorflow/test_quantize_model.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import shutil +import time +import unittest + +import numpy as np +import tensorflow as tf +from tensorflow import keras + +from neural_compressor.common import logger +from neural_compressor.tensorflow.utils import version1_gte_version2 + + +def build_model(): + # Load MNIST dataset + mnist = keras.datasets.mnist + + # 60000 images in train and 10000 images in test, but we don't need so much for ut + (train_images, train_labels), (test_images, test_labels) = mnist.load_data() + train_images, train_labels = train_images[:1000], train_labels[:1000] + test_images, test_labels = test_images[:200], test_labels[:200] + + # Normalize the input image so that each pixel value is between 0 to 1. + train_images = train_images / 255.0 + test_images = test_images / 255.0 + + # Define the model architecture. + model = keras.Sequential( + [ + keras.layers.InputLayer(input_shape=(28, 28)), + keras.layers.Reshape(target_shape=(28, 28, 1)), + keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu", name="conv2d"), + keras.layers.MaxPooling2D(pool_size=(2, 2)), + keras.layers.Flatten(), + keras.layers.Dense(10, name="dense"), + ] + ) + # Train the digit classification model + model.compile( + optimizer="adam", loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] + ) + + model.fit( + train_images, + train_labels, + epochs=1, + validation_split=0.1, + ) + + _, baseline_model_accuracy = model.evaluate(test_images, test_labels, verbose=0) + + print("Baseline test accuracy:", baseline_model_accuracy) + if version1_gte_version2(tf.__version__, "2.16.1"): + model.export("baseline_model") + else: + model.save("baseline_model") + + +class Dataset(object): + def __init__(self, batch_size=1): + self.batch_size = batch_size + mnist = keras.datasets.mnist + (train_images, train_labels), (test_images, test_labels) = mnist.load_data() + train_images, train_labels = train_images[:1000], train_labels[:1000] + test_images, test_labels = test_images[:200], test_labels[:200] + # Normalize the input image so that each pixel value is between 0 to 1. 
+ self.train_images = train_images / 255.0 + self.test_images = test_images / 255.0 + self.train_labels = train_labels + self.test_labels = test_labels + + def __len__(self): + return len(self.test_images) + + def __getitem__(self, idx): + return self.test_images[idx], self.test_labels[idx] + + +class MyDataloader: + def __init__(self, dataset, batch_size=1): + self.dataset = dataset + self.batch_size = batch_size + self.length = math.ceil(len(dataset) / self.batch_size) + + def __iter__(self): + for _, (images, labels) in enumerate(self.dataset): + images = np.expand_dims(images, axis=0) + labels = np.expand_dims(labels, axis=0) + yield (images, labels) + + def __len__(self): + return self.length + + +def evaluate(model): + input_tensor = model.input_tensor + output_tensor = model.output_tensor if len(model.output_tensor) > 1 else model.output_tensor[0] + + iteration = -1 + calib_dataloader = MyDataloader(dataset=Dataset()) + for idx, (inputs, labels) in enumerate(calib_dataloader): + # dataloader should keep the order and len of inputs same with input_tensor + inputs = np.array([inputs]) + feed_dict = dict(zip(input_tensor, inputs)) + + start = time.time() + predictions = model.sess.run(output_tensor, feed_dict) + end = time.time() + + if idx + 1 == iteration: + break + + +class TestQuantizeModel(unittest.TestCase): + @classmethod + def setUpClass(self): + build_model() + self.fp32_model_path = "baseline_model" + + @classmethod + def tearDownClass(self): + shutil.rmtree(self.fp32_model_path, ignore_errors=True) + + def test_calib_func(self): + logger.info("Run test_calib_func case...") + + from neural_compressor.common import set_random_seed + from neural_compressor.tensorflow import StaticQuantConfig, quantize_model + + set_random_seed(9527) + quant_config = StaticQuantConfig() + q_model = quantize_model(self.fp32_model_path, quant_config, calib_func=evaluate) + quantized = False + for node in q_model.graph_def.node: + if "Quantized" in node.op: + quantized = True + break + + self.assertEqual(quantized, True) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/3x/torch/algorithms/fp8_quant/test_basic.py b/test/3x/torch/algorithms/fp8_quant/test_basic.py new file mode 100644 index 00000000000..98ca06222a5 --- /dev/null +++ b/test/3x/torch/algorithms/fp8_quant/test_basic.py @@ -0,0 +1,56 @@ +import os +import sys +import time + +import habana_frameworks.torch.core as htcore +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from torchvision import datasets, transforms + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.fc1 = nn.Linear(784, 256) + self.fc2 = nn.Linear(256, 64) + self.fc3 = nn.Linear(64, 10) + + def forward(self, x): + out = x.view(-1, 28 * 28) + out = F.relu(self.fc1(out)) + out = F.relu(self.fc2(out)) + out = self.fc3(out) + out = F.log_softmax(out, dim=1) + return out + + +def test_hpu(): + model = Net() + model_link = "https://vault.habana.ai/artifactory/misc/inference/mnist/mnist-epoch_20.pth" + model_path = "/tmp/.neural_compressor/mnist-epoch_20.pth" + os.system("mkdir -p /tmp/.neural_compressor && wget {} -O {} ".format(model_link, model_path)) + checkpoint = torch.load(model_path) + model.load_state_dict(checkpoint) + + model = model.eval() + + model = model.to("hpu") + + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) + + data_path = "./data" + test_kwargs = {"batch_size": 32} + dataset1 = 
datasets.MNIST(data_path, train=False, download=True, transform=transform) + test_loader = torch.utils.data.DataLoader(dataset1, **test_kwargs) + + correct = 0 + for batch_idx, (data, label) in enumerate(test_loader): + data = data.to("hpu") + output = model(data) + htcore.mark_step() + correct += output.max(1)[1].eq(label).sum() + + accuracy = 100.0 * correct / (len(test_loader) * 32) + assert accuracy > 90 diff --git a/test/3x/torch/algorithms/weight_only/test_hqq_quantizer.py b/test/3x/torch/algorithms/weight_only/test_hqq_quantizer.py new file mode 100644 index 00000000000..f17717f8f71 --- /dev/null +++ b/test/3x/torch/algorithms/weight_only/test_hqq_quantizer.py @@ -0,0 +1,106 @@ +import pytest +import torch + +from neural_compressor.torch.algorithms.weight_only.hqq.bitpack import Packer +from neural_compressor.torch.algorithms.weight_only.hqq.config import ( + HQQModuleConfig, + QTensorConfig, + default_hqq_module_config, + default_scale_quant_config, + default_weight_quant_config, + default_zero_quant_config, +) +from neural_compressor.torch.algorithms.weight_only.hqq.qtensor import QTensor, QTensorMetaInfo + + +def test_default_hqq_module_config(): + config = default_hqq_module_config + print(config) + assert isinstance(config, HQQModuleConfig) + assert config.weight == default_weight_quant_config + assert config.zero == default_zero_quant_config + assert config.scale == default_scale_quant_config + + +def test_default_weight_quant_config(): + config = default_weight_quant_config + assert isinstance(config, QTensorConfig) + assert config.nbits == 4 + assert config.channel_wise is True + + +def test_default_zero_quant_config(): + config = default_zero_quant_config + assert isinstance(config, QTensorConfig) + assert config.nbits == 8 + assert config.channel_wise is False + + +def test_default_scale_quant_config(): + config = default_scale_quant_config + assert isinstance(config, QTensorConfig) + assert config.nbits == 8 + assert config.channel_wise is True + + +def test_qtensor_meta_info(): + meta_info = QTensorMetaInfo + print(meta_info) + + +@pytest.mark.parametrize("nbits", [2, 3, 4, 8]) +def test_packer(nbits): + # TODO: add test for 3 bits + range_max = 2**nbits + dims = 16 if nbits != 3 else 10 + W = torch.randint(0, range_max, (dims, dims)).to(torch.uint8) + W_pack = Packer.get_pack_fn(nbits)(W) + W_pack_unpack = Packer.get_unpack_fn(nbits)(W_pack) + assert torch.allclose(W, W_pack_unpack) + print("Packer test passed!") + + +class TestQTensor: + def test_q_tensor(self): + in_feats = 3 + out_feats = 4 + + val = torch.randn(out_feats, in_feats) + scale = torch.randn(out_feats) + zero = torch.randint(1, 10, (out_feats,)) + q_tensor_meta = QTensorMetaInfo(nbits=4, group_size=64, shape=(out_feats, in_feats), axis=0, packing=False) + q_tensor = QTensor(val, scale, zero, q_tensor_meta) + print(q_tensor) + q_tensor_half = q_tensor.half() + print(q_tensor_half) + + def test_q_tensor2(self): + in_feats = 64 + out_feats = 64 + + val = torch.randn(out_feats, in_feats) + scale = torch.randn(out_feats) + zero = torch.randint(1, 10, (out_feats,)) + q_tensor_meta = QTensorMetaInfo(nbits=4, group_size=64, shape=(out_feats, in_feats), axis=0, packing=False) + q_tensor = QTensor(val, scale, zero, q_tensor_meta) + q_scale_meta = QTensorMetaInfo(nbits=8, group_size=64, shape=(out_feats,), axis=0, packing=False) + q_scale_scale = torch.randn(out_feats) + q_scale_zero = torch.randint(1, 10, (1,)) + q_scale = QTensor(scale, q_scale_scale, q_scale_zero, q_tensor_meta) + q_tensor.scale = q_scale + 
print(q_tensor) + print(q_tensor.half()) + + def test_qtensor_meta_info(self): + in_feats = 64 + out_feats = 64 + meta_config = QTensorMetaInfo(nbits=4, group_size=64, shape=(out_feats, in_feats), axis=0, packing=False) + print(meta_config) + print(meta_config.to_dict) + assert meta_config.to_dict() == { + "nbits": 4, + "group_size": 64, + "shape": (out_feats, in_feats), + "axis": 0, + "packing": False, + } diff --git a/test/3x/torch/algorithms/weight_only/test_teq_quantizer.py b/test/3x/torch/algorithms/weight_only/test_teq_quantizer.py index a27ce5ec0f2..4e06cedb284 100644 --- a/test/3x/torch/algorithms/weight_only/test_teq_quantizer.py +++ b/test/3x/torch/algorithms/weight_only/test_teq_quantizer.py @@ -82,8 +82,21 @@ def setUpClass(self): ) self.gptj.seqlen = 512 - def train_func(self): - pass + def test_teq_detect_absorb_layers(self): + example_inputs = torch.ones([1, 512], dtype=torch.long) + test_input = torch.ones([1, 512], dtype=torch.long) + model = copy.deepcopy(self.gptj) + out0 = model(test_input) + + weight_config = { + # 'op_name': (bit, group_size, scheme) + "transformer.h.0.mlp.fc_in": {"bits": 8, "group_size": -1, "scheme": "sym"}, + "transformer.h.0.mlp.fc_out": {"bits": 4, "group_size": 32, "scheme": "asym"}, + } + quantizer = TEQuantizer(quant_config=weight_config, folding=True, example_inputs=example_inputs) + model = quantizer.quantize(copy.deepcopy(self.gptj), run_fn=train) + out1 = model(test_input) + self.assertTrue(torch.allclose(out1[0], out0[0], atol=0.03)) def test_teq(self): example_inputs = torch.ones([1, 512], dtype=torch.long) diff --git a/test/3x/torch/algorithms/weight_only/test_woq_module.py b/test/3x/torch/algorithms/weight_only/test_woq_module.py new file mode 100644 index 00000000000..959e975b8c1 --- /dev/null +++ b/test/3x/torch/algorithms/weight_only/test_woq_module.py @@ -0,0 +1,52 @@ +import copy + +import pytest +import torch + +from neural_compressor.torch.algorithms.weight_only.modules import INCWeightOnlyLinear +from neural_compressor.torch.algorithms.weight_only.utility import quant_tensor + + +class TestWeightOnlyLinear: + @pytest.mark.parametrize( + "bits, compression_dtype", + [ + (8, torch.int8), + (8, torch.int16), + (8, torch.int32), + (8, torch.int64), + (4, torch.int8), + (4, torch.int16), + (4, torch.int32), + (4, torch.int64), + (2, torch.int8), + (2, torch.int16), + (2, torch.int32), + (2, torch.int64), + ], + ) + def test_pack_with_numba(self, bits, compression_dtype): + m = torch.nn.Linear(64, 32) + dtype = "int" + weight = m.weight.detach() + int_weight, scale, zp = quant_tensor( + weight, + dtype=dtype, + bits=bits, + return_int=True, + group_size=32, + ) + new_module = INCWeightOnlyLinear( + m.in_features, + m.out_features, + dtype=dtype, + bits=bits, + group_size=32, + zp=zp is not None, + bias=m.bias is not None, + use_optimum_format=False, + compression_dtype=compression_dtype, + ) + new_module.pack(int_weight, scale, zp, m.bias) + unpacked_int_weight = new_module.unpack_tensor(new_module.qweight) + assert torch.equal(unpacked_int_weight, int_weight) diff --git a/test/3x/torch/quantization/fp8_quant/test_fp8_static_quant.py b/test/3x/torch/quantization/fp8_quant/test_fp8_static_quant.py index eb71a550782..6a42734d23c 100644 --- a/test/3x/torch/quantization/fp8_quant/test_fp8_static_quant.py +++ b/test/3x/torch/quantization/fp8_quant/test_fp8_static_quant.py @@ -3,9 +3,10 @@ import pytest import torch +import torchvision import transformers -from neural_compressor.torch.algorithms.fp8_quant._quant_common.helper_modules 
import PatchedLinear +from neural_compressor.torch.algorithms.fp8_quant._quant_common.helper_modules import PatchedConv2d, PatchedLinear from neural_compressor.torch.quantization import ( FP8Config, convert, @@ -17,6 +18,14 @@ from neural_compressor.torch.utils import is_hpex_available +def change_to_cur_file_dir(): + import os + + current_file_path = os.path.abspath(__file__) + current_directory = os.path.dirname(current_file_path) + os.chdir(current_directory) + + @torch.no_grad() def calib_func(model): example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long).to("hpu") @@ -31,24 +40,55 @@ def setup_class(self): "hf-internal-testing/tiny-random-GPTJForCausalLM", device_map="cpu", ) - self.example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long) + self.example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long).to("hpu") + self.resnet18 = torchvision.models.resnet18(pretrained=True) + self.cv_dummy_inputs = torch.randn([1, 3, 224, 224]).to("hpu") def teardown_class(self): shutil.rmtree("test_ouputs", ignore_errors=True) - def test_one_step_quant(self): + def test_one_step_quant_nlp(self): model = copy.deepcopy(self.tiny_gptj) + model.to("hpu") + fp32_out = model(self.example_inputs)[0] qconfig = FP8Config(fp8_config="E4M3") model = prepare(model, qconfig) assert isinstance(model.transformer.h[0].attn.k_proj, PatchedLinear), "k_proj is not prepared." calib_func(model) model = convert(model) + fp8_out = model(self.example_inputs)[0] assert isinstance(model.transformer.h[0].attn.k_proj, PatchedLinear), "k_proj is not quantized." assert ( model.transformer.h[0].attn.k_proj.quant_input.lp_dtype == torch.float8_e4m3fn ), "k_proj input dtype is not torch.float8_e4m3fn." + assert (fp32_out != fp8_out).any(), "FP32 output should be different with FP8 output" + print((fp32_out - fp8_out).abs().max()) + assert torch.allclose(fp32_out, fp8_out, atol=0.04), "Accuracy gap atol > 0.04 is unexpected." - def test_two_step_quant(self): + # @pytest.mark.skipif(not is_hpex_available(), reason="HPU environment is required!") + def test_one_step_quant_cv(self): + model = copy.deepcopy(self.resnet18) + model.to("hpu") + fp32_out = model(self.cv_dummy_inputs) + # model.to('cpu') + qconfig = FP8Config(fp8_config="E4M3") + model = prepare(model, qconfig) + assert model.fc.weight.device.type == "hpu", "model is not mapped to HPU." + assert isinstance(model.fc, PatchedLinear) and isinstance(model.conv1, PatchedConv2d), "model is not prepared." + # calibration + model(self.cv_dummy_inputs) + model = convert(model) + fp8_out = model(self.cv_dummy_inputs) + assert ( + isinstance(model.fc, PatchedLinear) + and isinstance(model.conv1, PatchedConv2d) + and model.fc.quant_input.lp_dtype == torch.float8_e4m3fn + and model.conv1.quant_input.lp_dtype == torch.float8_e4m3fn + ), "model is not quantized to torch.float8_e4m3fn." + assert (fp32_out != fp8_out).any(), "FP32 output should be different with FP8 output" + + def test_two_step_quant_nlp(self): + change_to_cur_file_dir() # step 1: measurement model = copy.deepcopy(self.tiny_gptj) config = FP8Config.from_json_file("test_fp8_jsons/test_measure.json") @@ -64,3 +104,25 @@ def test_two_step_quant(self): assert ( model.transformer.h[0].attn.k_proj.quant_input.lp_dtype == torch.float8_e4m3fn ), "k_proj input dtype is not torch.float8_e4m3fn." 
+ + def test_two_step_quant_cv(self): + change_to_cur_file_dir() + # step 1: measurement + model = copy.deepcopy(self.resnet18) + config = FP8Config.from_json_file("test_fp8_jsons/test_measure.json") + model = prepare(model, config) + fp32_out = model(self.cv_dummy_inputs) + finalize_calibration(model) + assert isinstance(model.fc, PatchedLinear) and isinstance(model.conv1, PatchedConv2d), "model is not prepared." + # step 2: quantize based on measurement + model = copy.deepcopy(self.resnet18) + config = FP8Config.from_json_file("test_fp8_jsons/test_hw_quant.json") + model = convert(model, config) + fp8_out = model(self.cv_dummy_inputs) + assert ( + isinstance(model.fc, PatchedLinear) + and isinstance(model.conv1, PatchedConv2d) + and model.fc.quant_input.lp_dtype == torch.float8_e4m3fn + and model.conv1.quant_input.lp_dtype == torch.float8_e4m3fn + ), "model is not quantized to torch.float8_e4m3fn." + assert (fp32_out != fp8_out).any(), "FP32 output should be different with FP8 output" diff --git a/test/3x/torch/quantization/test_pt2e_quant.py b/test/3x/torch/quantization/test_pt2e_quant.py index 7d1aab562d3..d55e9004a3a 100644 --- a/test/3x/torch/quantization/test_pt2e_quant.py +++ b/test/3x/torch/quantization/test_pt2e_quant.py @@ -1,6 +1,4 @@ -import os -import unittest -from unittest.mock import patch +import shutil import pytest import torch @@ -17,7 +15,7 @@ prepare, quantize, ) -from neural_compressor.torch.utils import TORCH_VERSION_2_2_2, get_torch_version +from neural_compressor.torch.utils import GT_TORCH_VERSION_2_3_2, TORCH_VERSION_2_2_2, get_torch_version torch.manual_seed(0) @@ -29,10 +27,12 @@ def _is_ipex_imported(): monkeypatch.setattr("neural_compressor.torch.quantization.config.is_ipex_imported", _is_ipex_imported) monkeypatch.setattr("neural_compressor.torch.quantization.algorithm_entry.is_ipex_imported", _is_ipex_imported) - monkeypatch.setattr("neural_compressor.torch.export._export.is_ipex_imported", _is_ipex_imported) + monkeypatch.setattr("neural_compressor.torch.export.pt2e_export.is_ipex_imported", _is_ipex_imported) class TestPT2EQuantization: + def teardown_class(self): + shutil.rmtree("saved_results", ignore_errors=True) @staticmethod def get_toy_model(): @@ -114,11 +114,59 @@ def calib_fn(model): config.freezing = True q_model_out = q_model(*example_inputs) assert torch.allclose(float_model_output, q_model_out, atol=1e-2), "Quantization failed!" 
+ + # test save and load + q_model.save( + example_inputs=example_inputs, + output_dir="./saved_results", + ) + from neural_compressor.torch.quantization import load + + loaded_quantized_model = load("./saved_results") + loaded_q_model_out = loaded_quantized_model(*example_inputs) + assert torch.equal(loaded_q_model_out, q_model_out) + opt_model = torch.compile(q_model) + out = opt_model(*example_inputs) + logger.warning("out shape is %s", out.shape) + assert out is not None + @pytest.mark.skipif(not GT_TORCH_VERSION_2_3_2, reason="Requires torch>=2.3.2") + def test_quantize_simple_model_with_set_local(self, force_not_import_ipex): + model, example_inputs = self.build_simple_torch_model_and_example_inputs() + float_model_output = model(*example_inputs) + quant_config = None + + def calib_fn(model): + for i in range(4): + model(*example_inputs) + + quant_config = get_default_static_config() + quant_config.set_local("fc1", StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) + q_model = quantize(model=model, quant_config=quant_config, run_fn=calib_fn) + + # check the quantize/dequantize nodes + expected_node_occurrence = { + # Only quantize the `fc2` + torch.ops.quantized_decomposed.quantize_per_tensor.default: 2, + torch.ops.quantized_decomposed.dequantize_per_tensor.default: 2, + } + expected_node_occurrence = { + torch_test_quant_common.NodeSpec.call_function(k): v for k, v in expected_node_occurrence.items() + } + node_in_graph = self.get_node_in_graph(q_model) + for node, cnt in expected_node_occurrence.items(): + assert node_in_graph.get(node, 0) == cnt, f"Node {node} should occur {cnt} times, but {node_in_graph[node]}" + + from torch._inductor import config + + config.freezing = True + q_model_out = q_model(*example_inputs) + assert torch.allclose(float_model_output, q_model_out, atol=1e-2), "Quantization failed!" 
+ opt_model = torch.compile(q_model) + out = opt_model(*example_inputs) + assert out is not None + @pytest.mark.skipif(get_torch_version() <= TORCH_VERSION_2_2_2, reason="Requires torch>=2.3.0") @pytest.mark.parametrize("is_dynamic", [False, True]) def test_prepare_and_convert_on_simple_model(self, is_dynamic, force_not_import_ipex): @@ -193,9 +241,9 @@ def get_node_in_graph(graph_module): nodes_in_graph[n] += 1 else: nodes_in_graph[n] = 1 - return + return nodes_in_graph - @pytest.mark.skipif(get_torch_version() <= TORCH_VERSION_2_2_2, reason="Requires torch>=2.3.0") + @pytest.mark.skipif(not GT_TORCH_VERSION_2_3_2, reason="Requires torch>=2.3.0") def test_mixed_fp16_and_int8(self, force_not_import_ipex): model, example_inputs = self.build_model_include_conv_and_linear() model = export(model, example_inputs=example_inputs) @@ -221,9 +269,7 @@ def test_mixed_fp16_and_int8(self, force_not_import_ipex): } node_in_graph = self.get_node_in_graph(converted_model) for node, cnt in expected_node_occurrence.items(): - assert ( - expected_node_occurrence.get(node, 0) == cnt - ), f"Node {node} should occur {cnt} times, but {node_in_graph[node]}" + assert node_in_graph.get(node, 0) == cnt, f"Node {node} should occur {cnt} times, but {node_in_graph[node]}" # inference from torch._inductor import config diff --git a/test/3x/torch/quantization/test_static_quant.py b/test/3x/torch/quantization/test_static_quant.py index 60d0b205371..5bc37180045 100644 --- a/test/3x/torch/quantization/test_static_quant.py +++ b/test/3x/torch/quantization/test_static_quant.py @@ -4,6 +4,14 @@ import pytest import torch +try: + import intel_extension_for_pytorch as ipex + + is_ipex_available = True +except: # pragma: no cover + is_ipex_available = False + assert False, "Please install IPEX for static quantization." + from neural_compressor.torch.quantization import ( StaticQuantConfig, convert, @@ -11,10 +19,9 @@ prepare, quantize, ) -from neural_compressor.torch.utils import is_ipex_available +from neural_compressor.torch.utils.auto_accelerator import auto_detect_accelerator -if is_ipex_available(): - import intel_extension_for_pytorch as ipex +device = auto_detect_accelerator().current_device() def build_simple_torch_model(): @@ -22,13 +29,18 @@ class Model(torch.nn.Module): def __init__(self): super(Model, self).__init__() self.fc1 = torch.nn.Linear(30, 50) - self.fc2 = torch.nn.Linear(50, 30) - self.fc3 = torch.nn.Linear(30, 5) + self.fc2 = torch.nn.Linear(50, 50) + self.fc3 = torch.nn.Linear(50, 30) + self.fc4 = torch.nn.Linear(30, 5) + self.relu = torch.nn.ReLU() def forward(self, x): out = self.fc1(x) out = self.fc2(out) + out = self.relu(out) out = self.fc3(out) + out = out + x + out = self.fc4(out) return out model = Model() @@ -48,7 +60,7 @@ def setup_class(self): def teardown_class(self): shutil.rmtree("saved_results", ignore_errors=True) - @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") def test_static_quant_default(self): fp32_model = copy.deepcopy(self.fp32_model) quant_config = get_default_static_config() @@ -65,36 +77,37 @@ def test_static_quant_default(self): q_model = convert(prepared_model) assert q_model is not None, "Quantization failed!" 
- @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") def test_static_quant_fallback(self): fp32_model = copy.deepcopy(self.fp32_model) quant_config = get_default_static_config() example_inputs = self.input # fallback by op_type - quant_config.set_local(torch.nn.Linear, StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) + quant_config.set_local([torch.nn.Linear, "Linear&add"], StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) prepared_model = prepare(fp32_model, quant_config=quant_config, example_inputs=example_inputs) run_fn(prepared_model) q_model = convert(prepared_model) assert q_model is not None, "Quantization failed!" for op, op_info in q_model.tune_cfg[" "]["q_op_infos"].items(): - if op_info["op_type"] == "": + if op_info["op_type"] == "Linear": dtype = q_model.tune_cfg[" "]["q_op_infos"][op]["input_tensor_infos"][0]["force_dtype"] assert dtype == "torch.float32", "Failed to fallback linear op, please check!" # fallback by op_name - quant_config.set_local("fc1", StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) + quant_config = get_default_static_config() + quant_config.set_local("fc2", StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) prepared_model = prepare(fp32_model, quant_config=quant_config, example_inputs=example_inputs) run_fn(prepared_model) q_model = convert(prepared_model) assert q_model is not None, "Quantization failed!" for op, op_info in q_model.tune_cfg[" "]["q_op_infos"].items(): - if op_info["fqn"] == "fc1": + if op_info["fqn"] == "fc2": dtype = q_model.tune_cfg[" "]["q_op_infos"][op]["input_tensor_infos"][0]["force_dtype"] - assert dtype == "torch.float32", "Failed to fallback fc1 layer, please check!" + assert dtype == "torch.float32", "Failed to fallback fc2 layer, please check!" - @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") @pytest.mark.parametrize( "act_sym, act_algo", [ @@ -113,7 +126,7 @@ def test_static_quant_params(self, act_sym, act_algo): q_model = convert(prepared_model) assert q_model is not None, "Quantization failed!" - @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") def test_static_quant_accuracy(self): class M(torch.nn.Module): def __init__(self): @@ -142,7 +155,7 @@ def run_fn(model): # set a big atol to avoid random issue assert torch.allclose(output1, output2, atol=2e-2), "Accuracy gap atol > 0.02 is unexpected. Please check." 
- @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") def test_static_quant_save_load(self): from intel_extension_for_pytorch.quantization import convert as ipex_convert from intel_extension_for_pytorch.quantization import prepare as ipex_prepare @@ -190,7 +203,7 @@ def run_fn(model): loaded_model = load("saved_results") assert isinstance(loaded_model, torch.jit.ScriptModule) - @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") def test_static_quant_with_quantize_API(self): # quantize API fp32_model = copy.deepcopy(self.fp32_model) @@ -199,11 +212,11 @@ def test_static_quant_with_quantize_API(self): q_model = quantize(fp32_model, quant_config=quant_config, run_fn=run_fn, example_inputs=example_inputs) assert q_model is not None, "Quantization failed!" - @pytest.mark.skipif(not is_ipex_available(), reason="Requires IPEX") + @pytest.mark.skipif(not is_ipex_available or device != "cpu", reason="Requires IPEX on CPU device") def test_static_quant_mixed_precision(self): fp32_model = copy.deepcopy(self.fp32_model) example_inputs = self.input - quant_config = get_default_static_config() + quant_config = StaticQuantConfig(excluded_precisions=["bf16"]) prepared_model = prepare(fp32_model, quant_config=quant_config, example_inputs=example_inputs) run_fn(prepared_model) q_model = convert(prepared_model) @@ -216,8 +229,53 @@ def test_static_quant_mixed_precision(self): q_model = convert(prepared_model) assert q_model is not None, "Quantization failed!" - quant_config.excluded_precisions = ["bf16"] prepared_model = prepare(fp32_model, quant_config=quant_config, example_inputs=example_inputs) run_fn(prepared_model) q_model = convert(prepared_model) assert q_model is not None, "Quantization failed!" + + @pytest.mark.skipif(not is_ipex_available or device == "cpu", reason="Requires IPEX on XPU device") + @pytest.mark.parametrize( + "act_sym, act_algo", + [ + (True, "kl"), + (True, "minmax"), + (False, "kl"), + (False, "minmax"), + ], + ) + def test_static_quant_xpu(self, act_sym, act_algo): + import torchvision.models as models + + model = models.resnet50(pretrained=True) + fp32_model = copy.deepcopy(model) + data = torch.rand(1, 3, 224, 224) + example_inputs = data.to("xpu") + + def run_fn(model): + model(example_inputs) + + quant_config = StaticQuantConfig(act_sym=act_sym, act_algo=act_algo, excluded_precisions=["bf16"]) + # fallback by op_name + quant_config.set_local("conv1", StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) + prepared_model = prepare(fp32_model, quant_config=quant_config, example_inputs=example_inputs) + run_fn(prepared_model) + q_model = convert(prepared_model) + run_fn(q_model) + assert q_model is not None, "Quantization failed!" + + quant_config = StaticQuantConfig(act_sym=act_sym, act_algo=act_algo, excluded_precisions=["bf16"]) + # fallback by op_type + quant_config.set_local("Conv2d", StaticQuantConfig(w_dtype="fp32", act_dtype="fp32")) + prepared_model = prepare(fp32_model, quant_config=quant_config, example_inputs=example_inputs) + run_fn(prepared_model) + q_model = convert(prepared_model) + run_fn(q_model) + assert q_model is not None, "Quantization failed!" 
+ + q_model.save("saved_results") + from neural_compressor.torch.quantization import load + + # load + loaded_model = load("saved_results") + assert isinstance(loaded_model, torch.jit.ScriptModule), "Loading failed!" diff --git a/test/3x/torch/quantization/weight_only/hqq/test_hqq_config.py b/test/3x/torch/quantization/weight_only/hqq/test_hqq_config.py deleted file mode 100644 index bdfd2145aff..00000000000 --- a/test/3x/torch/quantization/weight_only/hqq/test_hqq_config.py +++ /dev/null @@ -1,44 +0,0 @@ -from neural_compressor.torch.algorithms.weight_only.hqq.config import ( - HQQModuleConfig, - QTensorConfig, - default_hqq_module_config, - default_scale_quant_config, - default_weight_quant_config, - default_zero_quant_config, -) -from neural_compressor.torch.algorithms.weight_only.hqq.qtensor import QTensorMetaInfo - - -def test_default_hqq_module_config(): - config = default_hqq_module_config - print(config) - assert isinstance(config, HQQModuleConfig) - assert config.weight == default_weight_quant_config - assert config.zero == default_zero_quant_config - assert config.scale == default_scale_quant_config - - -def test_default_weight_quant_config(): - config = default_weight_quant_config - assert isinstance(config, QTensorConfig) - assert config.nbits == 4 - assert config.channel_wise is True - - -def test_default_zero_quant_config(): - config = default_zero_quant_config - assert isinstance(config, QTensorConfig) - assert config.nbits == 8 - assert config.channel_wise is False - - -def test_default_scale_quant_config(): - config = default_scale_quant_config - assert isinstance(config, QTensorConfig) - assert config.nbits == 8 - assert config.channel_wise is True - - -def test_qtensor_meta_info(): - meta_info = QTensorMetaInfo - print(meta_info) diff --git a/test/3x/torch/quantization/weight_only/hqq/test_hqq_cpu.py b/test/3x/torch/quantization/weight_only/hqq/test_hqq_cpu.py deleted file mode 100644 index 16e390318d9..00000000000 --- a/test/3x/torch/quantization/weight_only/hqq/test_hqq_cpu.py +++ /dev/null @@ -1,145 +0,0 @@ -import os -from copy import deepcopy - -import pytest -import torch -from transformers import AutoModelForCausalLM - -from neural_compressor.torch.algorithms.weight_only.hqq.config import HQQModuleConfig, QTensorConfig, hqq_global_option -from neural_compressor.torch.algorithms.weight_only.hqq.core import HQQLinear - - -def _common_cpu_test(nbits=4, group_size=64, quant_zero=True, quant_scale=False, scale_quant_group_size=128): - # Parse config - weight_qconfig = QTensorConfig( - nbits=nbits, channel_wise=True, group_size=group_size, optimize=True, round_zero=True if nbits == 4 else False - ) - zero_qconfig = None - if quant_zero: - zero_qconfig = QTensorConfig(nbits=8, channel_wise=False, group_size=None, optimize=False) - scale_qconfig = None - if quant_scale: - scale_qconfig = QTensorConfig(nbits=8, channel_wise=True, group_size=scale_quant_group_size, optimize=False) - hqq_quant_config = HQQModuleConfig(weight=weight_qconfig, scale=scale_qconfig, zero=zero_qconfig) - device = "cpu" - - # Create HQQ Linear - bs = 4 - in_features = 64 - out_features = 128 - float_linear = torch.nn.Linear(in_features=in_features, out_features=out_features) - if hqq_global_option.use_half: - print(f"hqq_global_option use half: {hqq_global_option.use_half}") - float_linear = float_linear.half() - float_linear.to(device) - float_linear_copy = deepcopy(float_linear) - hqq_linear = HQQLinear.from_float(float_linear_copy, quant_config=hqq_quant_config) - - # Forward - input = 
torch.randn(bs, in_features, device=device) - if hqq_global_option.use_half: - input = input.half() - float_output = float_linear(input) - input_for_hqq = deepcopy(input) - hqq_output = hqq_linear(input_for_hqq) - hqq_output_2 = hqq_linear(input_for_hqq) - torch.allclose(float_output, hqq_output, atol=0.5) - torch.allclose(hqq_output, hqq_output_2) - del float_linear, hqq_linear - del float_output, hqq_output, hqq_output_2 - - -class TestHQQCPU: - - @classmethod - def setup_class(cls): - torch.manual_seed(0) - - @pytest.fixture - def force_use_cpu(self, monkeypatch): - # Force use CPU - monkeypatch.setenv("FORCE_DEVICE", "cpu") - - @pytest.fixture - def force_not_half(self, monkeypatch): - monkeypatch.setattr(hqq_global_option, "use_half", False) - - def test_hqq_quant(self, force_use_cpu, force_not_half): - from neural_compressor.torch.quantization import convert, get_default_hqq_config, prepare, quantize - - hqq_global_option.use_half = False - fp32_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") - example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long, device="cpu") - # test_default_config - quant_config = get_default_hqq_config() - - # prepare + convert API - model = prepare(deepcopy(fp32_model), quant_config) - model = convert(model) - q_label_1 = model(example_inputs)[0] - - # quantize API - model = quantize(deepcopy(fp32_model), quant_config) - q_label_2 = model(example_inputs)[0] - - # compare the results of calling `convert` + `prepare` and calling `quantize` - assert torch.all( - q_label_1.eq(q_label_2) - ), "The results of calling `convert` + `prepare` and calling `quantize` should be equal." - - def test_hqq_fallback(self, force_use_cpu, force_not_half): - from neural_compressor.torch.quantization import HQQConfig, convert, prepare - - class ToyModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.fc1 = torch.nn.Linear(128, 1024) - self.fc2 = torch.nn.Linear(1024, 512) - - def forward(self, x): - x = self.fc1(x) - x = self.fc2(x) - return x - - quant_config = HQQConfig().set_local("fc1", HQQConfig(dtype="fp32")) - qmodel = convert(prepare(model=ToyModel(), quant_config=quant_config)) - assert type(qmodel.fc1).__name__ == torch.nn.Linear.__name__, f"Expect fallback fc1, but get {type(qmodel.fc1)}" - assert type(qmodel.fc2).__name__ != torch.nn.Linear.__name__, f"Expect quantize fc2, but get {type(qmodel.fc2)}" - - @pytest.mark.parametrize( - "nbits, group_size, quant_zero, quant_scale, scale_quant_group_size", - [ - (4, 64, True, False, 128), - (4, 64, False, False, 128), - (4, 64, True, True, 128), - (4, 64, False, True, 128), - (8, 64, True, False, 128), - (8, 64, False, False, 128), - (8, 64, True, True, 128), - (8, 64, False, True, 128), - (4, 64, True, False, 64), - (4, 64, False, False, 64), - (4, 64, True, True, 64), - (4, 64, False, True, 64), - (4, -1, False, True, 64), - ], - ) - def test_hqq_module_cpu( - self, force_use_cpu, force_not_half, nbits, group_size, quant_zero, quant_scale, scale_quant_group_size - ): - _common_cpu_test( - nbits=nbits, - group_size=group_size, - quant_zero=quant_zero, - quant_scale=quant_scale, - scale_quant_group_size=scale_quant_group_size, - ) - - -# _common_cpu_test( -# nbits=4, -# group_size=64, -# quant_zero=False, -# quant_scale=False, -# scale_quant_group_size=128 -# ) diff --git a/test/3x/torch/quantization/weight_only/hqq/test_hqq_cuda.py b/test/3x/torch/quantization/weight_only/hqq/test_hqq_cuda.py deleted file mode 100644 index 777daf0e60b..00000000000 --- 
a/test/3x/torch/quantization/weight_only/hqq/test_hqq_cuda.py +++ /dev/null @@ -1,130 +0,0 @@ -from copy import deepcopy - -import pytest -import torch -from transformers import AutoModelForCausalLM - -from neural_compressor.torch.algorithms.weight_only.hqq.config import HQQModuleConfig, QTensorConfig, hqq_global_option -from neural_compressor.torch.algorithms.weight_only.hqq.core import HQQLinear -from neural_compressor.torch.algorithms.weight_only.hqq.utility import see_cuda_memory_usage -from neural_compressor.torch.utils.auto_accelerator import auto_detect_accelerator - - -def _common_cuda_test(nbits=4, group_size=64, quant_zero=True, quant_scale=False, scale_quant_group_size=128): - # Parse config - weight_qconfig = QTensorConfig( - nbits=nbits, channel_wise=True, group_size=group_size, optimize=True, round_zero=True if nbits == 4 else False - ) - zero_qconfig = None - if quant_zero: - zero_qconfig = QTensorConfig(nbits=8, channel_wise=False, group_size=None, optimize=False) - scale_qconfig = None - if quant_scale: - scale_qconfig = QTensorConfig(nbits=8, channel_wise=True, group_size=scale_quant_group_size, optimize=False) - hqq_quant_config = HQQModuleConfig(weight=weight_qconfig, scale=scale_qconfig, zero=zero_qconfig) - device = torch.cuda.current_device() - - # Create HQQ Linear - bs = 4 - in_features = 64 - out_features = 128 - see_cuda_memory_usage(message="Before create float linear") - float_linear = torch.nn.Linear(in_features=in_features, out_features=out_features) - if hqq_global_option.use_half: - float_linear = float_linear.half() - see_cuda_memory_usage(message="After create float linear") - float_linear.to(device) - float_linear_copy = deepcopy(float_linear) - see_cuda_memory_usage(message="After copy the float linear") - hqq_linear = HQQLinear.from_float(float_linear_copy, quant_config=hqq_quant_config) - see_cuda_memory_usage(message="After create hqq linear") - - # Forward - input = torch.randn(bs, in_features, device=device) - if hqq_global_option.use_half: - input = input.half() - float_output = float_linear(input) - input_for_hqq = deepcopy(input) - hqq_output = hqq_linear(input_for_hqq) - hqq_output_2 = hqq_linear(input_for_hqq) - float_qdq_diff = 0.1 # hard code it first - torch.allclose(float_output, hqq_output, atol=float_qdq_diff) - torch.allclose(hqq_output, hqq_output_2) - del float_linear, hqq_linear - del float_output, hqq_output, hqq_output_2 - see_cuda_memory_usage("At the end of test") - - -@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU") -class TestHQQCUDA: - @classmethod - def setup_class(cls): - torch.manual_seed(0) - torch.cuda.manual_seed(0) - hqq_global_option.use_half = True - - def test_hqq_quant(self): - from neural_compressor.torch.quantization import convert, get_default_hqq_config, prepare, quantize - - fp32_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") - example_inputs = torch.tensor( - [[10, 20, 30, 40, 50, 60]], dtype=torch.long, device=auto_detect_accelerator().current_device() - ) - # test_default_config - quant_config = get_default_hqq_config() - - # prepare + convert API - model = prepare(deepcopy(fp32_model), quant_config) - model = convert(model) - q_label_1 = model(example_inputs)[0] - - # quantize API - model = quantize(deepcopy(fp32_model), quant_config) - q_label_2 = model(example_inputs)[0] - - # compare the results of calling `convert` + `prepare` and calling `quantize` - assert torch.all( - q_label_1.eq(q_label_2) - ), "The results of calling `convert` + `prepare` and 
calling `quantize` should be equal." - - @pytest.mark.parametrize( - "nbits, group_size, quant_zero, quant_scale, scale_quant_group_size", - [ - (4, 64, True, False, 128), - (4, 64, False, False, 128), - (4, 64, True, True, 128), - (4, 64, False, True, 128), - (8, 64, True, False, 128), - (8, 64, False, False, 128), - (8, 64, True, True, 128), - (8, 64, False, True, 128), - (4, 64, True, False, 64), - (4, 64, False, False, 64), - (4, 64, True, True, 64), - (4, 64, False, True, 64), - ], - ) - def test_hqq_module_cuda( - self, - nbits, - group_size, - quant_zero, - quant_scale, - scale_quant_group_size, - ): - _common_cuda_test( - nbits=nbits, - group_size=group_size, - quant_zero=quant_zero, - quant_scale=quant_scale, - scale_quant_group_size=scale_quant_group_size, - ) - - -# _common_cuda_test( -# nbits=4, -# group_size=64, -# quant_zero=False, -# quant_scale=False, -# scale_quant_group_size=128 -# ) diff --git a/test/3x/torch/quantization/weight_only/hqq/test_packer.py b/test/3x/torch/quantization/weight_only/hqq/test_packer.py deleted file mode 100644 index be471c0e440..00000000000 --- a/test/3x/torch/quantization/weight_only/hqq/test_packer.py +++ /dev/null @@ -1,16 +0,0 @@ -import pytest -import torch - -from neural_compressor.torch.algorithms.weight_only.hqq.bitpack import Packer - - -@pytest.mark.parametrize("nbits", [2, 3, 4, 8]) -def test_packer(nbits): - # TODO: add test for 3 bits - range_max = 2**nbits - dims = 16 if nbits != 3 else 10 - W = torch.randint(0, range_max, (dims, dims)).to(torch.uint8) - W_pack = Packer.get_pack_fn(nbits)(W) - W_pack_unpack = Packer.get_unpack_fn(nbits)(W_pack) - assert torch.allclose(W, W_pack_unpack) - print("Packer test passed!") diff --git a/test/3x/torch/quantization/weight_only/hqq/test_q_tensor.py b/test/3x/torch/quantization/weight_only/hqq/test_q_tensor.py deleted file mode 100644 index 0548c10e3f1..00000000000 --- a/test/3x/torch/quantization/weight_only/hqq/test_q_tensor.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2024 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import torch - -from neural_compressor.torch.algorithms.weight_only.hqq.qtensor import QTensor, QTensorMetaInfo - - -class TestQTensor: - def test_q_tensor(self): - in_feats = 3 - out_feats = 4 - - val = torch.randn(out_feats, in_feats) - scale = torch.randn(out_feats) - zero = torch.randint(1, 10, (out_feats,)) - q_tensor_meta = QTensorMetaInfo(nbits=4, group_size=64, shape=(out_feats, in_feats), axis=0, packing=False) - q_tensor = QTensor(val, scale, zero, q_tensor_meta) - print(q_tensor) - q_tensor_half = q_tensor.half() - print(q_tensor_half) - - def test_q_tensor2(self): - in_feats = 64 - out_feats = 64 - - val = torch.randn(out_feats, in_feats) - scale = torch.randn(out_feats) - zero = torch.randint(1, 10, (out_feats,)) - q_tensor_meta = QTensorMetaInfo(nbits=4, group_size=64, shape=(out_feats, in_feats), axis=0, packing=False) - q_tensor = QTensor(val, scale, zero, q_tensor_meta) - q_scale_meta = QTensorMetaInfo(nbits=8, group_size=64, shape=(out_feats,), axis=0, packing=False) - q_scale_scale = torch.randn(out_feats) - q_scale_zero = torch.randint(1, 10, (1,)) - q_scale = QTensor(scale, q_scale_scale, q_scale_zero, q_tensor_meta) - q_tensor.scale = q_scale - print(q_tensor) - print(q_tensor.half()) - - def test_qtensor_meta_info(self): - in_feats = 64 - out_feats = 64 - meta_config = QTensorMetaInfo(nbits=4, group_size=64, shape=(out_feats, in_feats), axis=0, packing=False) - print(meta_config) - print(meta_config.to_dict) - assert meta_config.to_dict() == { - "nbits": 4, - "group_size": 64, - "shape": (out_feats, in_feats), - "axis": 0, - "packing": False, - } diff --git a/test/3x/torch/quantization/weight_only/test_autoround.py b/test/3x/torch/quantization/weight_only/test_autoround.py index e8120d463dc..88cae7e9384 100644 --- a/test/3x/torch/quantization/weight_only/test_autoround.py +++ b/test/3x/torch/quantization/weight_only/test_autoround.py @@ -49,7 +49,7 @@ def setup_class(self): tokenizer = transformers.AutoTokenizer.from_pretrained( "hf-internal-testing/tiny-random-GPTJForCausalLM", trust_remote_code=True ) - self.dataloader = get_dataloader(tokenizer, 32, dataset_name="NeelNanda/pile-10k", seed=42, bs=8, n_samples=10) + self.dataloader = get_dataloader(tokenizer, 32, dataset_name="NeelNanda/pile-10k", seed=42, bs=8, nsamples=10) self.label = self.gptj(self.inp)[0] def teardown_class(self): @@ -61,7 +61,7 @@ def setup_method(self, method): @pytest.mark.parametrize("quant_lm_head", [True, False]) def test_autoround(self, quant_lm_head): fp32_model = copy.deepcopy(self.gptj) - quant_config = AutoRoundConfig(n_samples=32, seqlen=10, iters=10, scale_dtype="fp32") + quant_config = AutoRoundConfig(nsamples=32, seqlen=10, iters=10, scale_dtype="fp32") if quant_lm_head is False: quant_config.set_local("lm_head", AutoRoundConfig(dtype="fp32")) logger.info(f"Test AutoRound with config {quant_config}") @@ -80,10 +80,27 @@ def test_autoround(self, quant_lm_head): if quant_lm_head is True: assert isinstance(q_model.lm_head, WeightOnlyLinear), "quantization for lm_head failed." 
+ def test_int4_dtype(self): + fp32_model = copy.deepcopy(self.gptj) + quant_config = AutoRoundConfig(dtype="int4", nsamples=32, seqlen=10, iters=10, scale_dtype="fp32") + logger.info(f"Test AutoRound with config {quant_config}") + + # prepare + convert API + model = prepare(model=fp32_model, quant_config=quant_config) + + run_fn(model, self.dataloader) + q_model = convert(model) + out = q_model(self.inp)[0] + assert torch.allclose(out, self.label, atol=1e-1) + assert "transformer.h.0.attn.k_proj" in q_model.autoround_config.keys() + assert "scale" in q_model.autoround_config["transformer.h.0.attn.k_proj"].keys() + assert torch.float32 == q_model.autoround_config["transformer.h.0.attn.k_proj"]["scale_dtype"] + assert isinstance(q_model.transformer.h[0].attn.k_proj, WeightOnlyLinear), "packing model failed." + def test_autoround_with_quantize_API(self): gpt_j_model = copy.deepcopy(self.gptj) - quant_config = AutoRoundConfig(n_samples=32, seqlen=10, iters=10, scale_dtype="fp32") + quant_config = AutoRoundConfig(nsamples=32, seqlen=10, iters=10, scale_dtype="fp32") quant_config.set_local("lm_head", AutoRoundConfig(dtype="fp32")) logger.info(f"Test AutoRound with config {quant_config}") @@ -102,7 +119,7 @@ def test_autoround_with_quantize_API(self): def test_save_and_load(self): fp32_model = copy.deepcopy(self.gptj) # known issue: scale_dtype="fp32" will cause accuracy gap between quantized model (using auto-round WeightOnlyLinear) and reloaded model (using INCWeightOnlyLinear) - quant_config = AutoRoundConfig(n_samples=32, seqlen=10, iters=10, scale_dtype="fp16") + quant_config = AutoRoundConfig(nsamples=32, seqlen=10, iters=10, scale_dtype="fp16") # quant_config.set_local("lm_head", AutoRoundConfig(dtype="fp32")) logger.info(f"Test AutoRound with config {quant_config}") @@ -135,7 +152,7 @@ def test_conv1d(self): text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors="pt") out1 = model(**encoded_input)[0] - quant_config = AutoRoundConfig(n_samples=32, seqlen=10, iters=10, scale_dtype="fp32") + quant_config = AutoRoundConfig(nsamples=32, seqlen=10, iters=10, scale_dtype="fp32") model = prepare(model=model, quant_config=quant_config) run_fn(model, self.dataloader) q_model = convert(model) diff --git a/test/3x/torch/quantization/weight_only/test_awq.py b/test/3x/torch/quantization/weight_only/test_awq.py index 51dabcd3963..b830841fbff 100644 --- a/test/3x/torch/quantization/weight_only/test_awq.py +++ b/test/3x/torch/quantization/weight_only/test_awq.py @@ -24,6 +24,13 @@ def get_gpt_j(): return tiny_gptj +@torch.no_grad() +def calib_func(model): + example_inputs = torch.ones([1, 10], dtype=torch.long).to(device) + for i in range(2): + model(example_inputs) + + def get_woq_linear_num(model, woq_module_type_name): woq_linear_num = 0 for _, module in model.named_modules(): @@ -58,12 +65,6 @@ def teardown_class(self): ) def test_awq(self, bits, use_sym, group_size): model = copy.deepcopy(self.tiny_gptj) - - @torch.no_grad() - def calib_func(model): - for i in range(2): - model(self.example_inputs) - quant_config = AWQConfig(bits=8, group_size=-1) logger.info(f"Test AWQ with config {quant_config}") model = prepare( @@ -85,11 +86,6 @@ def calib_func(model): assert torch.allclose(out, self.label, atol=1e-1), "Accuracy gap atol > 0.01 is unexpected." 
def test_awq_with_quantize_API(self): - @torch.no_grad() - def calib_func(model): - for i in range(2): - model(self.example_inputs) - quant_config = get_default_awq_config() logger.info(f"Test AWQ with config {quant_config}") @@ -127,7 +123,6 @@ def calib_func(model): fp32_model = copy.deepcopy(self.tiny_gptj) quant_config = get_default_awq_config() - # prepare + convert API model = prepare( model=fp32_model, @@ -146,5 +141,73 @@ def calib_func(model): output = loaded_model(self.example_inputs)[0] assert torch.allclose(inc_out, output), "Unexpected result. Please double check." assert ( - get_woq_linear_num(loaded_model, "INCWeightOnlyLinear") == 31 + get_woq_linear_num(loaded_model, "INCWeightOnlyLinear") == 30 ), "Incorrect number of INCWeightOnlyLinear modules" + + def test_quant_lm_head(self): + # tie_word_embeddings=false + gptj_model = transformers.AutoModelForCausalLM.from_pretrained( + "hf-internal-testing/tiny-random-GPTJForCausalLM", + device_map=device, + ) + lm_head_id = id(gptj_model.lm_head.weight) + assert id(gptj_model.transformer.wte.weight) != lm_head_id, "The lm_head weight is tied, please check!" + quant_config = AWQConfig(quant_lm_head=True) + model = prepare(gptj_model, quant_config, example_inputs=self.example_inputs) + calib_func(model) + model = convert(model) + + # tie_word_embeddings=true + opt_model = transformers.AutoModelForCausalLM.from_pretrained( + "trl-internal-testing/tiny-random-OPTForCausalLM", + device_map=device, + ) + lm_head_id = id(opt_model.lm_head.weight) + assert ( + id(opt_model.model.decoder.embed_tokens.weight) == lm_head_id + ), "The lm_head weight is not tied, please check!" + quant_config = AWQConfig(quant_lm_head=True) + model = prepare(opt_model, quant_config, example_inputs=self.example_inputs) + calib_func(model) + model = convert(model) + assert ( + id(model.model.decoder.embed_tokens.weight) == lm_head_id + ), "The tied lm_head weight is not deep copied, please check!" + + def test_awq_absorb_to_layer(self): + absorb_layer_dict = { + "ln_1": ( + "attn.q_proj", + "attn.k_proj", + "attn.v_proj", + "mlp.fc_in", + ), + "attn.out_proj": "attn.out_proj", + "mlp.fc_out": ("mlp.fc_out"), + } + + quant_config = AWQConfig(absorb_layer_dict=absorb_layer_dict) + logger.info(f"Test AWQ with config {quant_config}") + # prepare + convert API + model = prepare( + model=copy.deepcopy(self.tiny_gptj), + quant_config=quant_config, + example_inputs=self.example_inputs, + ) + calib_func(model) + model = convert(model) + out1 = model(self.example_inputs) + quant_config = AWQConfig() + logger.info(f"Test AWQ with config {quant_config}") + + # prepare + convert API + model = prepare( + model=copy.deepcopy(self.tiny_gptj), + quant_config=quant_config, + example_inputs=self.example_inputs, + ) + calib_func(model) + model = convert(model) + out2 = model(self.example_inputs) + + assert torch.all(out1[0].eq(out2[0])), "The results should be equal." 
diff --git a/test/3x/torch/quantization/weight_only/test_gptq.py b/test/3x/torch/quantization/weight_only/test_gptq.py index 4575a29b0e2..b174cc91e65 100644 --- a/test/3x/torch/quantization/weight_only/test_gptq.py +++ b/test/3x/torch/quantization/weight_only/test_gptq.py @@ -18,17 +18,16 @@ device = accelerator.current_device_name() -def run_fn_for_rtn(model): +def run_fn(model): model(torch.tensor([[10, 20, 30]], dtype=torch.long).to(device)) - model(torch.tensor([[40, 50, 60]], dtype=torch.long).to(device)) -def run_fn(model): - # GPTQ uses ValueError to reduce computation when collecting input data of the first block - # It's special for UTs, no need to add this wrapper in examples. - with pytest.raises(ValueError): - model(torch.tensor([[10, 20, 30]], dtype=torch.long).to(device)) - model(torch.tensor([[40, 50, 60]], dtype=torch.long).to(device)) +def get_woq_linear_num(model, woq_module_type_name): + woq_linear_num = 0 + for _, module in model.named_modules(): + if module.__class__.__name__ == woq_module_type_name: + woq_linear_num += 1 + return woq_linear_num def get_woq_linear_num(model, woq_module_type_name): @@ -52,12 +51,26 @@ def setup_class(self): def teardown_class(self): shutil.rmtree("saved_results", ignore_errors=True) + @pytest.mark.skipif(device == "cpu", reason="no available accelerator") + def test_auto_host2device(self): + # if model is on CPU, we move it to device layer-by-layer for acceleration, + # and then move it back to CPU after quantization. + model = copy.deepcopy(self.tiny_gptj).to("cpu") + example_inputs = copy.deepcopy(self.example_inputs).to("cpu") + quant_config = get_default_gptq_config() + model = prepare(model, quant_config) + run_fn(model) + model = convert(model) + gptq_label = model(example_inputs)[0] + gptq_atol = (gptq_label - self.label.to("cpu")).amax() + assert gptq_atol < 0.06, "GPTQ should have low atol." + def test_accuracy_improvement(self): # test_default_rtn_config model = copy.deepcopy(self.tiny_gptj) quant_config = get_default_rtn_config() model = prepare(model, quant_config) - run_fn_for_rtn(model) + run_fn(model) model = convert(model) rtn_label = model(self.example_inputs)[0] rtn_atol = (rtn_label - self.label).amax() @@ -177,13 +190,24 @@ def test_act_order(self): # compare atol, this case is an ideal case. assert atol_false > atol_true, "act_order=True doesn't help accuracy, maybe is reasonable, please double check." - # def test_layer_wise(self): - # model = copy.deepcopy(self.tiny_gptj) - # quant_config = GPTQConfig( - # use_layer_wise=True, - # ) - # model = quantize(model, quant_config, run_fn=run_fn) - # TODO: (Xin) not implemented + def test_layer_wise(self): + model = copy.deepcopy(self.tiny_gptj) + quant_config = GPTQConfig() + model = prepare(model, quant_config) + run_fn(model) + model = convert(model) + q_label = model(self.example_inputs)[0] + + from neural_compressor.torch import load_empty_model + + model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM") + + quant_config = GPTQConfig(use_layer_wise=True, model_path="hf-internal-testing/tiny-random-GPTJForCausalLM") + model = prepare(model, quant_config) + run_fn(model) + model = convert(model) + out = model(self.example_inputs)[0] + assert torch.equal(out, q_label), "use_layer_wise=True output should be same. Please double check." 
@pytest.mark.parametrize("dtype", ["nf4", "int4"]) @pytest.mark.parametrize("double_quant_bits", [6]) @@ -231,14 +255,12 @@ def test_conv1d(self): from transformers import GPT2Model, GPT2Tokenizer tokenizer = GPT2Tokenizer.from_pretrained("sshleifer/tiny-gpt2") - model = GPT2Model.from_pretrained("sshleifer/tiny-gpt2") + model = GPT2Model.from_pretrained("sshleifer/tiny-gpt2").to(device) text = "Replace me by any text you'd like." - encoded_input = tokenizer(text, return_tensors="pt") + encoded_input = tokenizer(text, return_tensors="pt").to(device) def run_fn_conv1d(model): - with pytest.raises(ValueError): - for i in range(2): - model(**encoded_input) + model(**encoded_input) quant_config = get_default_gptq_config() out1 = model(**encoded_input)[0] diff --git a/test/3x/torch/quantization/weight_only/test_hqq.py b/test/3x/torch/quantization/weight_only/test_hqq.py new file mode 100644 index 00000000000..d6e0352c312 --- /dev/null +++ b/test/3x/torch/quantization/weight_only/test_hqq.py @@ -0,0 +1,261 @@ +import copy +import os +import time +from copy import deepcopy + +import pytest +import torch +import transformers +from transformers import AutoModelForCausalLM + +from neural_compressor.common import options +from neural_compressor.common.utils import logger +from neural_compressor.torch.algorithms.weight_only.hqq.config import HQQModuleConfig, QTensorConfig, hqq_global_option +from neural_compressor.torch.algorithms.weight_only.hqq.core import HQQLinear +from neural_compressor.torch.quantization import HQQConfig, convert, get_default_hqq_config, prepare, quantize +from neural_compressor.torch.utils import accelerator + +device = accelerator.current_device_name() + + +def _common_hqq_test( + nbits=4, group_size=64, quant_zero=True, quant_scale=False, scale_quant_group_size=128, device=None +): + # Parse config + weight_qconfig = QTensorConfig( + nbits=nbits, channel_wise=True, group_size=group_size, optimize=True, round_zero=True if nbits == 4 else False + ) + zero_qconfig = None + if quant_zero: + zero_qconfig = QTensorConfig(nbits=8, channel_wise=False, group_size=None, optimize=False) + scale_qconfig = None + if quant_scale: + scale_qconfig = QTensorConfig(nbits=8, channel_wise=True, group_size=scale_quant_group_size, optimize=False) + hqq_quant_config = HQQModuleConfig(weight=weight_qconfig, scale=scale_qconfig, zero=zero_qconfig) + + # Create HQQ Linear + bs = 4 + in_features = 64 + out_features = 128 + float_linear = torch.nn.Linear(in_features=in_features, out_features=out_features) + if hqq_global_option.use_half: + logger.info(f"hqq_global_option use half: {hqq_global_option.use_half}") + float_linear = float_linear.half() + float_linear.to(device) + float_linear_copy = deepcopy(float_linear) + hqq_linear = HQQLinear.from_float(float_linear_copy, quant_config=hqq_quant_config) + + # Forward + input = torch.randn(bs, in_features, device=device) + if hqq_global_option.use_half: + input = input.half() + float_output = float_linear(input) + input_for_hqq = deepcopy(input) + hqq_output = hqq_linear(input_for_hqq) + hqq_output_2 = hqq_linear(input_for_hqq) + torch.allclose(float_output, hqq_output, atol=0.5) + torch.allclose(hqq_output, hqq_output_2) + del float_linear, hqq_linear + del float_output, hqq_output, hqq_output_2 + + +class TestHQQ: + + @classmethod + def setup_class(cls): + torch.manual_seed(0) + + @pytest.fixture + def force_use_cpu(self, monkeypatch): + # Force use CPU + monkeypatch.setenv("FORCE_DEVICE", "cpu") + + @pytest.fixture + def force_not_half(self, 
monkeypatch): + monkeypatch.setattr(hqq_global_option, "use_half", False) + + def test_hqq_quant(self, force_use_cpu, force_not_half): + + hqq_global_option.use_half = False + fp32_model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-random-OPTForCausalLM") + example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long, device="cpu") + # test_default_config + quant_config = get_default_hqq_config() + + # prepare + convert API + model = prepare(deepcopy(fp32_model), quant_config) + model = convert(model) + q_label_1 = model(example_inputs)[0] + + # quantize API + model = quantize(deepcopy(fp32_model), quant_config) + q_label_2 = model(example_inputs)[0] + + # compare the results of calling `convert` + `prepare` and calling `quantize` + assert torch.all( + q_label_1.eq(q_label_2) + ), "The results of calling `convert` + `prepare` and calling `quantize` should be equal." + + def test_hqq_load_save(self, force_use_cpu, force_not_half): + + hqq_global_option.use_half = False + fp32_model = AutoModelForCausalLM.from_pretrained("trl-internal-testing/tiny-random-OPTForCausalLM") + example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long, device="cpu") + # test_default_config + quant_config = get_default_hqq_config() + + # prepare + convert API + model = prepare(deepcopy(fp32_model), quant_config) + qmodel = convert(model) + qmodel_out_ref = model(example_inputs)[0] + save_path = options.workspace + f"/_hqq_model_{time.time()}.pth" + qmodel.save(save_path) + from neural_compressor.torch.quantization import load + + # loading compressed model + loaded_model = load(save_path, copy.deepcopy(fp32_model)) + loaded_model_out = loaded_model(example_inputs)[0] + assert torch.allclose(qmodel_out_ref, loaded_model_out), "Unexpected result. Please double check." + + def test_hqq_fallback(self, force_use_cpu, force_not_half): + + class ToyModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(128, 1024) + self.fc2 = torch.nn.Linear(1024, 512) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + quant_config = HQQConfig().set_local("fc1", HQQConfig(dtype="fp32")) + qmodel = convert(prepare(model=ToyModel(), quant_config=quant_config)) + assert type(qmodel.fc1).__name__ == torch.nn.Linear.__name__, f"Expect fallback fc1, but get {type(qmodel.fc1)}" + assert type(qmodel.fc2).__name__ != torch.nn.Linear.__name__, f"Expect quantize fc2, but get {type(qmodel.fc2)}" + + def test_quant_lm_head(self, force_use_cpu, force_not_half): + # tie_word_embeddings=false + gptj_model = transformers.AutoModelForCausalLM.from_pretrained( + "hf-internal-testing/tiny-random-GPTJForCausalLM", + device_map=device, + ) + lm_head_id = id(gptj_model.lm_head.weight) + assert id(gptj_model.transformer.wte.weight) != lm_head_id, "The lm_head weight is tied, please check!" + quant_config = HQQConfig(quant_lm_head=True) + model = prepare(gptj_model, quant_config) + model = convert(model) + + # tie_word_embeddings=true + opt_model = transformers.AutoModelForCausalLM.from_pretrained( + "facebook/opt-125m", # group_size should be divisible by tensor.numel(). Dummy model cannot work. + device_map=device, + ) + lm_head_id = id(opt_model.lm_head.weight) + assert ( + id(opt_model.model.decoder.embed_tokens.weight) == lm_head_id + ), "The lm_head weight is not tied, please check!" 
+ quant_config = HQQConfig(quant_lm_head=True) + model = prepare(opt_model, quant_config) + model = convert(model) + assert ( + id(model.model.decoder.embed_tokens.weight) == lm_head_id + ), "The tied lm_head weight is not deep copied, please check!" + + @pytest.mark.parametrize("device_name", ["cuda", "cpu"]) + @pytest.mark.parametrize( + "nbits, group_size, quant_zero, quant_scale, scale_quant_group_size", + [ + (4, 64, True, False, 128), + (4, 64, False, False, 128), + (4, 64, True, True, 128), + (4, 64, False, True, 128), + (8, 64, True, False, 128), + (8, 64, False, False, 128), + (8, 64, True, True, 128), + (8, 64, False, True, 128), + (4, 64, True, False, 64), + (4, 64, False, False, 64), + (4, 64, True, True, 64), + (4, 64, False, True, 64), + (4, -1, False, True, 64), + ], + ) + def test_hqq_module( + self, + nbits, + group_size, + quant_zero, + quant_scale, + scale_quant_group_size, + device_name, + ): + if device_name == "cuda" and not torch.cuda.is_available(): + pytest.skip("Skipping CUDA test because cuda is not available") + if device_name == "cpu": + os.environ["FORCE_DEVICE"] = "cpu" + hqq_global_option.use_half = False + + _common_hqq_test( + nbits=nbits, + group_size=group_size, + quant_zero=quant_zero, + quant_scale=quant_scale, + scale_quant_group_size=scale_quant_group_size, + device=torch.device(device_name), + ) + + @pytest.mark.parametrize( + "nbits, group_size, quant_zero, quant_scale, scale_quant_group_size", + [ + (4, 64, True, False, 128), + (4, 64, False, False, 128), + (4, 64, True, True, 128), + (4, 64, False, True, 128), + (8, 64, True, False, 128), + ], + ) + def test_hqq_linear_save_and_load( + self, + nbits, + group_size, + quant_zero, + quant_scale, + scale_quant_group_size, + ): + hqq_global_option.use_half = False + # Parse config + weight_qconfig = QTensorConfig( + nbits=nbits, + channel_wise=True, + group_size=group_size, + optimize=True, + round_zero=True if nbits == 4 else False, + ) + zero_qconfig = None + if quant_zero: + zero_qconfig = QTensorConfig(nbits=8, channel_wise=False, group_size=None, optimize=False) + scale_qconfig = None + if quant_scale: + scale_qconfig = QTensorConfig(nbits=8, channel_wise=True, group_size=scale_quant_group_size, optimize=False) + hqq_quant_config = HQQModuleConfig(weight=weight_qconfig, scale=scale_qconfig, zero=zero_qconfig) + # Create HQQ Linear + bs = 4 + in_features = 64 + out_features = 128 + float_linear = torch.nn.Linear(in_features=in_features, out_features=out_features) + float_linear.to(device) + float_linear_copy = deepcopy(float_linear) + input = torch.randn(bs, in_features, device=device) + hqq_linear = HQQLinear.from_float(float_linear_copy, quant_config=hqq_quant_config) + out_ref = hqq_linear(input) + state_dict = hqq_linear.state_dict() + hqq_module_path = options.workspace + f"/_hqq_linear_{time.time()}.pth" + torch.save(state_dict, hqq_module_path) + reload_state_dict = torch.load(hqq_module_path) + new_float = torch.nn.Linear(in_features=in_features, out_features=out_features) + new_hqq_linear = HQQLinear.from_float(new_float, quant_config=hqq_quant_config) + new_hqq_linear.load_state_dict(reload_state_dict) + out = new_hqq_linear(input) + assert torch.equal(out_ref, out), f"out_ref: {out_ref}, out: {out}" diff --git a/test/3x/torch/quantization/weight_only/test_load.py b/test/3x/torch/quantization/weight_only/test_load.py index 03b58b2adcc..28b2f4ab3d4 100644 --- a/test/3x/torch/quantization/weight_only/test_load.py +++ b/test/3x/torch/quantization/weight_only/test_load.py @@ -16,13 +16,16 @@ 
class TestHFModelLoad: def setup_class(self): self.model_name = "TheBloke/TinyLlama-1.1B-python-v0.1-GPTQ" self.example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long).to(device) + self.local_cache = "local_cache" - self.local_hf_model = "./TinyLlama-1.1B-python-v0.1-GPTQ" + self.local_hf_model = "TinyLlama-1.1B-Chat-v0.1-GPTQ" huggingface_hub.snapshot_download(self.model_name, local_dir=self.local_hf_model) def teardown_class(self): shutil.rmtree("TinyLlama-1.1B-python-v0.1-GPTQ", ignore_errors=True) shutil.rmtree("saved_results", ignore_errors=True) + shutil.rmtree("nc_workspace", ignore_errors=True) + shutil.rmtree("local_cache", ignore_errors=True) def get_woq_linear_num(self, model, woq_module_type_name): woq_linear_num = 0 @@ -54,12 +57,14 @@ def test_load_hf_woq_model_cpu(self): @pytest.mark.skipif(not is_hpex_available(), reason="no hpex in environment here.") def test_load_hf_woq_model_hpu(self): - # 1. use huggingface model_id (format=huggingface, device="hpu") + # use huggingface model_id (format=huggingface, device="hpu") # first load: linear -> INCWeightOnlyLinear -> HPUWeightOnlyLinear, save hpu_model.safetensors to local cache dir model = load( model_name_or_path=self.model_name, format="huggingface", device="hpu", + torch_dtype=torch.bfloat16, + cache_dir=self.local_cache, ) assert ( self.get_woq_linear_num(model, "HPUWeightOnlyLinear") == 154 @@ -71,33 +76,8 @@ def test_load_hf_woq_model_hpu(self): model_name_or_path=self.model_name, format="huggingface", device="hpu", - ) - assert ( - self.get_woq_linear_num(model, "HPUWeightOnlyLinear") == 154 - ), "Incorrect number of HPUWeightOnlyLinear modules" - output2 = model(self.example_inputs)[0] - - assert torch.equal( - output1, output2 - ), "The model loaded the second time is different from the model loaded the first time" - - # 2. use huggingface local model_path (format=huggingface, device="hpu") - # first load: linear -> INCWeightOnlyLinear -> HPUWeightOnlyLinear, save hpu_model.safetensors to local cache dir - model = load( - model_name_or_path=self.local_hf_model, - format="huggingface", - device="hpu", - ) - assert ( - self.get_woq_linear_num(model, "HPUWeightOnlyLinear") == 154 - ), "Incorrect number of HPUWeightOnlyLinear modules" - output1 = model(self.example_inputs)[0] - - # second load: linear -> HPUWeightOnlyLinear using hpu_model.safetensors saved in local cache dir - model = load( - model_name_or_path=self.local_hf_model, - format="huggingface", - device="hpu", + torch_dtype=torch.bfloat16, + cache_dir=self.local_cache, ) assert ( self.get_woq_linear_num(model, "HPUWeightOnlyLinear") == 154 diff --git a/test/3x/torch/quantization/weight_only/test_mixed_algos.py b/test/3x/torch/quantization/weight_only/test_mixed_algos.py index 0d354f29728..7b64824bc1b 100644 --- a/test/3x/torch/quantization/weight_only/test_mixed_algos.py +++ b/test/3x/torch/quantization/weight_only/test_mixed_algos.py @@ -12,18 +12,14 @@ def run_fn(model): - # GPTQ uses ValueError to reduce computation when collecting input data of the first block - # It's special for UTs, no need to add this wrapper in examples. 
- with pytest.raises(ValueError): - model(torch.tensor([[10, 20, 30]], dtype=torch.long).to(device)) - model(torch.tensor([[40, 50, 60]], dtype=torch.long).to(device)) + model(torch.tensor([[10, 20, 30]], dtype=torch.long).to(device)) class TestMixedTwoAlgo: def test_mixed_gptq_and_rtn(self): with patch.object(logger, "info") as mock_info: - rtn_config = RTNConfig(white_list=["lm_head"]) - gptq_config = GPTQConfig(double_quant_bits=4, white_list=["transformer.*"]) + rtn_config = RTNConfig(white_list=[".*mlp.*"]) + gptq_config = GPTQConfig(double_quant_bits=4, white_list=[".*attn.*"]) combined_config = rtn_config + gptq_config logger.info(combined_config) diff --git a/test/3x/torch/quantization/weight_only/test_rtn.py b/test/3x/torch/quantization/weight_only/test_rtn.py index 8e2c521ae43..d63672539ae 100644 --- a/test/3x/torch/quantization/weight_only/test_rtn.py +++ b/test/3x/torch/quantization/weight_only/test_rtn.py @@ -13,7 +13,7 @@ prepare, quantize, ) -from neural_compressor.torch.utils import accelerator +from neural_compressor.torch.utils import accelerator, is_hpex_available device = accelerator.current_device_name() @@ -21,8 +21,8 @@ class ModelConv1d(torch.nn.Module): def __init__(self): super(ModelConv1d, self).__init__() - self.fc1 = transformers.Conv1D(50, 32) - self.fc2 = torch.nn.Linear(50, 32) + self.fc1 = transformers.Conv1D(64, 32) + self.fc2 = torch.nn.Linear(64, 32) self.fc3 = torch.nn.Linear(32, 5) def forward(self, x): @@ -51,7 +51,7 @@ def setup_class(self): self.label = self.tiny_gptj(self.example_inputs)[0] # test_default_config model = copy.deepcopy(self.tiny_gptj) - quant_config = get_default_rtn_config() + quant_config = get_default_rtn_config("Server") model = prepare(model, quant_config) model = convert(model) # record q_label for comparison @@ -83,6 +83,8 @@ def test_int_params(self, bits, use_sym, group_size, group_dim): model = convert(model) out = model(self.example_inputs)[0] assert (out != self.label).any(), "WOQ output should be different with raw output" + if is_hpex_available(): + assert "hpu" in out.device.type, "Neural Compressor should run on HPU when HPEX is available." if (bits, use_sym, group_size, group_dim) == (8, True, -1, 1): assert torch.allclose(out, self.label, atol=0.01), "Accuracy gap atol > 0.01 is unexpected." if (bits, use_sym, group_size, group_dim) == [(4, True, 128, 0), (4, True, 32, 1)]: @@ -143,14 +145,45 @@ def test_mse_search(self): except: assert torch.allclose(atol_false, atol_true, atol=0.012), "atol is very close, double checked the logic." + def test_quant_lm_head(self): + # tie_word_embeddings=false + gptj_model = transformers.AutoModelForCausalLM.from_pretrained( + "hf-internal-testing/tiny-random-GPTJForCausalLM", + device_map=device, + ) + lm_head_id = id(gptj_model.lm_head.weight) + assert id(gptj_model.transformer.wte.weight) != lm_head_id, "The lm_head weight is tied, please check!" + quant_config = RTNConfig(quant_lm_head=True) + model = prepare(gptj_model, quant_config) + model = convert(model) + + # tie_word_embeddings=true + opt_model = transformers.AutoModelForCausalLM.from_pretrained( + "trl-internal-testing/tiny-random-OPTForCausalLM", + device_map=device, + ) + lm_head_id = id(opt_model.lm_head.weight) + assert ( + id(opt_model.model.decoder.embed_tokens.weight) == lm_head_id + ), "The lm_head weight is not tied, please check!" 
+ quant_config = RTNConfig(quant_lm_head=True) + model = prepare(opt_model, quant_config) + model = convert(model) + assert ( + id(model.model.decoder.embed_tokens.weight) == lm_head_id + ), "The tied lm_head weight is not deep copied, please check!" + def test_layer_wise(self): - model = copy.deepcopy(self.tiny_gptj) + from neural_compressor.torch import load_empty_model + + model = load_empty_model("hf-internal-testing/tiny-random-GPTJForCausalLM") quant_config = RTNConfig( use_layer_wise=True, ) model = prepare(model, quant_config) model = convert(model) - # TODO: (Xin) not implemented + out = model(self.example_inputs)[0] + assert torch.equal(out, self.q_label), "use_layer_wise=True output should be same. Please double check." @pytest.mark.parametrize( "dtype", @@ -159,7 +192,7 @@ def test_layer_wise(self): def test_dtype_params(self, dtype): if dtype in ["fp8_e5m2", "fp8_e5m2fnuz", "fp8_e4m3fn", "fp8_e4m3fnuz"]: full_dtype_name = dtype.replace("fp8", "float8") - if not hasattr(torch, full_dtype_name): + if not hasattr(torch, full_dtype_name) or "hpu" in device: return # for low torch version model = copy.deepcopy(self.tiny_gptj) quant_config = RTNConfig( @@ -172,6 +205,19 @@ def test_dtype_params(self, dtype): assert torch.allclose(out, self.label, atol=0.11), "Accuracy gap atol > 0.11 is unexpected." assert torch.allclose(out, out_next), "output should be same" + def test_mix_dtype(self): + model = copy.deepcopy(self.tiny_gptj) + quant_config = RTNConfig() + quant_config.set_local(".*mlp.*", RTNConfig(bits=8)) + quant_config.set_local(".*.out_proj", RTNConfig(bits=6)) + quant_config.set_local(".*.k_proj", RTNConfig(dtype="nf4")) + model = prepare(model, quant_config) + model = convert(model) + out = model(self.example_inputs)[0] + out_next = model(self.example_inputs)[0] + assert torch.allclose(out, self.label, atol=0.08), "Accuracy gap atol > 0.08 is unexpected." + assert torch.allclose(out, out_next), "output should be same" + @pytest.mark.parametrize("dtype", ["int4", "nf4"]) @pytest.mark.parametrize("double_quant_bits", [6]) @pytest.mark.parametrize("double_quant_group_size", [8, 256]) @@ -205,9 +251,10 @@ def test_double_quant_params(self, dtype, double_quant_bits, double_quant_group_ out = model(self.example_inputs)[0] atol_true = (out - self.q_label).amax() # compare atol, this case is an ideal case. - assert ( - atol_false < atol_true - ), "asym for double quant should have smaller atol because scales is bigger than zero, please double check." + if not (dtype, double_quant_bits, double_quant_group_size) == ("nf4", 6, 256): + assert ( + atol_false < atol_true + ), "asym for double quant should have smaller atol because scales is bigger than zero, please double check." def test_double_quant_constants(self): model = copy.deepcopy(self.tiny_gptj) @@ -278,7 +325,8 @@ def test_conv1d(self, bits, use_sym, group_size, group_dim): assert (out2 != out1).any(), "WOQ out2put should be different with raw output" if (bits, use_sym, group_size, group_dim) == (8, True, -1, 1): if "hpu" in device: - assert torch.allclose(out2, out1, atol=0.15), "Accuracy gap atol > 0.15 is unexpected." + # out2 is float16, no idea. + assert torch.allclose(out2.float(), out1.float(), atol=0.15), "Accuracy gap atol > 0.15 is unexpected." else: assert torch.allclose(out2, out1, atol=0.01), "Accuracy gap atol > 0.01 is unexpected." 
if (bits, use_sym, group_size, group_dim) == [(4, True, 128, 0), (4, True, 32, 1)]: @@ -300,9 +348,12 @@ def test_save_and_load(self): # linear -> INCWeightOnlyLinear loaded_model = load("saved_results", copy.deepcopy(self.tiny_gptj)) output = loaded_model(self.example_inputs)[0] - assert torch.allclose(inc_out, output), "Unexpected result. Please double check." + if "hpu" in device: + assert torch.allclose(inc_out, output, atol=0.001), "Unexpected result. Please double check." + else: + assert torch.allclose(inc_out, output), "Unexpected result. Please double check." assert ( - get_woq_linear_num(loaded_model, "INCWeightOnlyLinear") == 31 + get_woq_linear_num(loaded_model, "INCWeightOnlyLinear") == 30 ), "Incorrect number of INCWeightOnlyLinear modules" @pytest.mark.skipif(not is_hpex_available(), reason="no hpex in environment here.") @@ -320,14 +371,14 @@ def test_save_and_load_hpu(self): # first load: linear -> INCWeightOnlyLinear -> HPUWeightOnlyLinear, save quantized_hpu_weight.pt to local cache dir loaded_model = load("saved_results", copy.deepcopy(self.tiny_gptj), device="hpu") assert ( - get_woq_linear_num(loaded_model, "HPUWeightOnlyLinear") == 31 + get_woq_linear_num(loaded_model, "HPUWeightOnlyLinear") == 30 ), "Incorrect number of HPUWeightOnlyLinear modules" output1 = loaded_model(self.example_inputs)[0] # second load: linear -> HPUWeightOnlyLinear using quantized_hpu_weight.pt saved in local cache dir loaded_model = load("saved_results", copy.deepcopy(self.tiny_gptj), device="hpu") assert ( - get_woq_linear_num(loaded_model, "HPUWeightOnlyLinear") == 31 + get_woq_linear_num(loaded_model, "HPUWeightOnlyLinear") == 30 ), "Incorrect number of HPUWeightOnlyLinear modules" output2 = loaded_model(self.example_inputs)[0] @@ -348,3 +399,16 @@ def mock_is_transformers_imported(): model = convert(model) out = model(self.example_inputs)[0] assert torch.allclose(out, self.label, atol=1e-1), "Accuracy gap atol > 0.1 is unexpected." + + @pytest.mark.skipif(device == "cpu", reason="no available accelerator") + def test_auto_host2device(self): + # if model is on CPU, we move it to device layer-by-layer for acceleration, + # and then move it back to CPU after quantization. + model = copy.deepcopy(self.tiny_gptj).to("cpu") + example_inputs = copy.deepcopy(self.example_inputs).to("cpu") + quant_config = get_default_rtn_config() + model = prepare(model, quant_config) + model = convert(model) + rtn_label = model(example_inputs)[0] + rtn_atol = (rtn_label - self.label.to("cpu")).amax() + assert rtn_atol < 0.08, "RTN should have low atol." 
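A minimal sketch of the per-module override pattern that the new test_mix_dtype case above exercises: a global RTNConfig plus regex-scoped set_local() overrides, applied through prepare()/convert(). This is illustrative only and not part of the patch; it assumes the same tiny GPT-J checkpoint and the API exactly as used in the tests above.

import torch
import transformers

from neural_compressor.torch.quantization import RTNConfig, convert, prepare

# Tiny checkpoint used throughout these tests (assumes the HF hub is reachable).
model = transformers.AutoModelForCausalLM.from_pretrained(
    "hf-internal-testing/tiny-random-GPTJForCausalLM"
)

# Global RTN settings, with per-module overrides selected by regex.
quant_config = RTNConfig()                                    # defaults for all other modules
quant_config.set_local(".*mlp.*", RTNConfig(bits=8))          # keep MLP blocks at 8 bits
quant_config.set_local(".*.k_proj", RTNConfig(dtype="nf4"))   # NF4 for the key projections

model = prepare(model, quant_config)
model = convert(model)

# Sanity-check forward pass on the same toy input the tests use.
example_inputs = torch.tensor([[10, 20, 30, 40, 50, 60]], dtype=torch.long)
logits = model(example_inputs)[0]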
diff --git a/test/3x/torch/quantization/weight_only/test_woq_utils.py b/test/3x/torch/quantization/weight_only/test_woq_utils.py index c31d94b823d..3bee40696c8 100644 --- a/test/3x/torch/quantization/weight_only/test_woq_utils.py +++ b/test/3x/torch/quantization/weight_only/test_woq_utils.py @@ -169,7 +169,16 @@ def test_captured_dataloader_iteration(self): result = list(dataloader) - assert result == [(1,), (2,), (3,)] + assert result == [1, 2, 3] + + # Test case when kwargs is empty + args_list = [(1, 2), (2, 3), (3, 4)] + kwargs_list = [{}, {}, {}] + dataloader = CapturedDataloader(args_list, kwargs_list) + + result = list(dataloader) + + assert result == [(1, 2), (2, 3), (3, 4)] # Test case when both args and kwargs are present args_list = [(1,), (2,), (3,)] diff --git a/test/3x/torch/requirements.txt b/test/3x/torch/requirements.txt index 88d2e4ea52a..c17e22d6f77 100644 --- a/test/3x/torch/requirements.txt +++ b/test/3x/torch/requirements.txt @@ -1,4 +1,6 @@ +auto_round @ git+https://github.com/intel/auto-round.git@e24b9074af6cdb099e31c92eb81b7f5e9a4a244e expecttest +intel_extension_for_pytorch numpy prettytable psutil diff --git a/test/3x/torch/test_auto_accelerator.py b/test/3x/torch/test_auto_accelerator.py deleted file mode 100644 index 918a54ebbd5..00000000000 --- a/test/3x/torch/test_auto_accelerator.py +++ /dev/null @@ -1,76 +0,0 @@ -import os - -import pytest -import torch - -from neural_compressor.torch.utils import get_accelerator -from neural_compressor.torch.utils.auto_accelerator import accelerator_registry, auto_detect_accelerator - - -class Test_CPU_Accelerator: - @pytest.fixture - def force_use_cpu(self, monkeypatch): - # Force use CPU - monkeypatch.setenv("FORCE_DEVICE", "cpu") - - def test_cpu_accelerator(self, force_use_cpu): - print(f"FORCE_DEVICE: {os.environ.get('FORCE_DEVICE', None)}") - accelerator = auto_detect_accelerator() - assert accelerator.current_device() == "cpu", f"{accelerator.current_device()}" - assert accelerator.current_device_name() == "cpu" - assert accelerator.is_available() - assert accelerator.set_device(1) is None - assert accelerator.device() is None - assert accelerator.empty_cache() is None - assert accelerator.synchronize() is None - - -@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available") -class Test_CUDA_Accelerator: - - @pytest.fixture - def force_use_cuda(self, monkeypatch): - # Force use CUDA - monkeypatch.setenv("FORCE_DEVICE", "cuda") - - def test_cuda_accelerator(self, force_use_cuda): - print(f"FORCE_DEVICE: {os.environ.get('FORCE_DEVICE', None)}") - accelerator = auto_detect_accelerator() - assert accelerator.current_device() == 0, f"{accelerator.current_device()}" - assert accelerator.current_device_name() == "cuda:0" - assert accelerator.device() is not None - assert accelerator.empty_cache() is None - assert accelerator.synchronize() is None - assert accelerator.set_device(0) is None - assert accelerator.device_name(0) == "cuda:0" - assert accelerator.is_available() is True - assert accelerator.name() == "cuda" - assert accelerator.device_name(1) == "cuda:1" - assert accelerator.set_device(1) is None - assert accelerator.device_name(1) == "cuda:1" - assert accelerator.current_device() == 1 - assert accelerator.current_device_name() == "cuda:1" - assert accelerator.synchronize() is None - assert accelerator.empty_cache() is None - - @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Only one GPU is available") - def test_get_device(self): - accelerator = auto_detect_accelerator() - assert 
accelerator.set_device(1) is None - assert accelerator.current_device_name() == "cuda:1" - cur_device = get_accelerator().current_device_name() - assert cur_device == "cuda:1" - tmp_tensor = torch.tensor([1, 2], device=cur_device) - assert "cuda:1" == str(tmp_tensor.device) - - -class TestAutoAccelerator: - - @pytest.fixture - def set_cuda_available(self, monkeypatch): - monkeypatch.setattr(torch.cuda, "is_available", lambda: True) - - def test_auto_accelerator(self, set_cuda_available): - accelerator = auto_detect_accelerator() - all_accelerators = accelerator_registry.get_sorted_accelerators() - assert accelerator.name() == all_accelerators[0]().name() diff --git a/test/3x/torch/test_autotune.py b/test/3x/torch/test_autotune.py index a9bd8a971c5..ba7465871f8 100644 --- a/test/3x/torch/test_autotune.py +++ b/test/3x/torch/test_autotune.py @@ -6,9 +6,10 @@ import torch import transformers +import neural_compressor.common.utils.utility as inc_utils from neural_compressor.common import logger from neural_compressor.torch.quantization import ( - MixPrecisionConfig, + MixedPrecisionConfig, RTNConfig, TuningConfig, autotune, @@ -163,6 +164,43 @@ def eval_acc_fn(model) -> float: custom_tune_config = TuningConfig(config_set=[RTNConfig(bits=[4, 6])], max_trials=2) best_model = autotune(model=build_simple_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) + print(inc_utils.FUNC_CALL_COUNTS) + self.assertIsNotNone(best_model) + + def test_autotune_return_qmodel_directly(self): + inc_utils.FUNC_CALL_COUNTS.clear() + + baseline = 1 + eval_result = [0.9, 1.1] + acc_list = [baseline] + eval_result + + def eval_acc_fn(model) -> float: + acc = acc_list.pop(0) + return acc + + custom_tune_config = TuningConfig(config_set=[RTNConfig(bits=[4, 6])], max_trials=2) + best_model = autotune(model=build_simple_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) + assert ( + inc_utils.FUNC_CALL_COUNTS.get("quantize") == 2 + ), f"quantize should be called twice, but got {inc_utils.FUNC_CALL_COUNTS.get('quantize')}" + self.assertIsNotNone(best_model) + + def test_autotune_return_re_quant_qmodel(self): + inc_utils.FUNC_CALL_COUNTS.clear() + + baseline = 1 + eval_result = [0.9, 0.8] + acc_list = [baseline] + eval_result + + def eval_acc_fn(model) -> float: + acc = acc_list.pop(0) + return acc + + custom_tune_config = TuningConfig(config_set=[RTNConfig(bits=[4, 6])], max_trials=2) + best_model = autotune(model=build_simple_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) + assert ( + inc_utils.FUNC_CALL_COUNTS.get("quantize") == 3 + ), f"quantize should be called three times, but got {inc_utils.FUNC_CALL_COUNTS.get('quantize')}" self.assertIsNotNone(best_model) @reset_tuning_target @@ -339,8 +377,8 @@ def eval_acc_fn(model): self.assertIsNone(best_model) @reset_tuning_target - def test_autotune_mix_precision_default(self): - from neural_compressor.torch.algorithms.mix_precision import HalfPrecisionModuleWrapper + def test_autotune_mixed_precision_default(self): + from neural_compressor.torch.algorithms.mixed_precision import HalfPrecisionModuleWrapper baseline = [1] acc_res_lst = baseline + [0.9, 0.99, 1] @@ -349,7 +387,9 @@ def eval_acc_fn(model): res = acc_res_lst.pop(0) return res - custom_tune_config = TuningConfig(config_set=[MixPrecisionConfig(dtype=["fp16", "bf16", "fp32"])], max_trials=3) + custom_tune_config = TuningConfig( + config_set=[MixedPrecisionConfig(dtype=["fp16", "bf16", "fp32"])], max_trials=3 + ) best_model = 
autotune(model=build_simple_torch_model(), tune_config=custom_tune_config, eval_fn=eval_acc_fn) self.assertIsNotNone(best_model) @@ -358,9 +398,9 @@ def eval_acc_fn(model): self.assertTrue(isinstance(best_model.fc3, HalfPrecisionModuleWrapper)) @reset_tuning_target - def test_autotune_mix_precision_set_op_name(self): + def test_autotune_mixed_precision_set_op_name(self): from neural_compressor.common.base_config import ComposableConfig, config_registry - from neural_compressor.torch.algorithms.mix_precision import HalfPrecisionModuleWrapper + from neural_compressor.torch.algorithms.mixed_precision import HalfPrecisionModuleWrapper baseline = [1] acc_res_lst = baseline + [0.9, 1.1] @@ -370,7 +410,7 @@ def eval_acc_fn(model): return res config1 = { - "mix_precision": { + "mixed_precision": { "global": { "dtype": "bf16", }, @@ -382,7 +422,7 @@ def eval_acc_fn(model): } } config2 = { - "mix_precision": { + "mixed_precision": { "global": { "dtype": "fp16", }, diff --git a/test/3x/torch/test_config.py b/test/3x/torch/test_config.py index c5bdc5261cf..68e7d5975cc 100644 --- a/test/3x/torch/test_config.py +++ b/test/3x/torch/test_config.py @@ -1,9 +1,11 @@ import copy import unittest +import pytest import torch import transformers +import neural_compressor.torch.utils as torch_utils from neural_compressor.torch.quantization import ( AutoRoundConfig, AWQConfig, @@ -13,6 +15,8 @@ SmoothQuantConfig, StaticQuantConfig, TEQConfig, + get_default_AutoRound_config, + get_default_gptq_config, get_default_hqq_config, get_default_rtn_config, quantize, @@ -331,15 +335,41 @@ def test_hqq_config(self): self.assertEqual(hqq_config.to_dict(), hqq_config2.to_dict()) -class TestQuantConfigForAutotune(unittest.TestCase): - def test_expand_config(self): - # test the expand functionalities, the user is not aware it - - tune_config = RTNConfig(bits=[4, 6]) - expand_config_list = RTNConfig.expand(tune_config) - self.assertEqual(expand_config_list[0].bits, 4) - self.assertEqual(expand_config_list[1].bits, 6) - - -if __name__ == "__main__": - unittest.main() +class TestQuantConfigBasedonProcessorType: + + @pytest.mark.parametrize("config_cls", [RTNConfig, GPTQConfig, AutoRoundConfig]) + def test_get_config_based_on_processor_type(self, config_cls): + config_for_client = config_cls.get_predefined_configs()[torch_utils.ProcessorType.Client] + assert ( + config_for_client.use_layer_wise + ), f"Expect use_layer_wise to be True, got {config_for_client.use_layer_wise}" + + config_for_server = config_cls.get_predefined_configs()[torch_utils.ProcessorType.Server] + assert ( + config_for_server.use_layer_wise is False + ), f"Expect use_layer_wise to be False, got {config_for_server.use_layer_wise}" + + @pytest.fixture + def force_server(self, monkeypatch): + monkeypatch.setattr(torch_utils.utility.cpu_info, "sockets", 2) + + def test_get_default_config_force_server(self, force_server): + rtn_config = get_default_rtn_config() + assert not rtn_config.use_layer_wise, f"Expect use_layer_wise to be `False`, got {rtn_config.use_layer_wise}" + gptq_config = get_default_gptq_config() + assert not gptq_config.use_layer_wise, f"Expect use_layer_wise to be `False`, got {gptq_config.use_layer_wise}" + + @pytest.mark.parametrize("p_type", [None, torch_utils.ProcessorType.Client, torch_utils.ProcessorType.Server]) + def test_get_default_config(self, p_type): + rtn_config = get_default_rtn_config(processor_type=p_type) + assert rtn_config.use_layer_wise == ( + p_type == torch_utils.ProcessorType.Client + ), f"Expect use_layer_wise to be {p_type 
== torch_utils.ProcessorType.Client}, got {rtn_config.use_layer_wise}" + gptq_config = get_default_gptq_config(processor_type=p_type) + assert gptq_config.use_layer_wise == ( + p_type == torch_utils.ProcessorType.Client + ), f"Expect use_layer_wise to be {p_type == torch_utils.ProcessorType.Client}, got {gptq_config.use_layer_wise}" + autoround_config = get_default_AutoRound_config(processor_type=p_type) + assert autoround_config.use_layer_wise == ( + p_type == torch_utils.ProcessorType.Client + ), f"Expect use_layer_wise to be {p_type == torch_utils.ProcessorType.Client}, got {autoround_config.use_layer_wise}" diff --git a/test/3x/torch/test_utils.py b/test/3x/torch/test_utils.py deleted file mode 100644 index 00ca99a5734..00000000000 --- a/test/3x/torch/test_utils.py +++ /dev/null @@ -1,86 +0,0 @@ -import unittest - -import torch - -from neural_compressor.torch.utils import logger - - -def get_gpt_j(): - import transformers - - tiny_gptj = transformers.AutoModelForCausalLM.from_pretrained( - "hf-internal-testing/tiny-random-GPTJForCausalLM", - torchscript=True, - ) - return tiny_gptj - - -def build_simple_torch_model(): - class Model(torch.nn.Module): - def __init__(self): - super(Model, self).__init__() - self.fc1 = torch.nn.Linear(8, 30) - self.fc2 = torch.nn.Linear(30, 60) - self.fc3 = torch.nn.Linear(60, 30) - self.fc4 = torch.nn.Linear(30, 50) - - def forward(self, x): - out = self.fc1(x) - out = self.fc2(out) - out = self.fc3(out) - out = self.fc4(out) - return out - - model = Model() - return model - - -from neural_compressor.torch.utils.utility import fetch_module, set_module - - -class TestTorchUtils(unittest.TestCase): - @classmethod - def setUpClass(self): - self.model = get_gpt_j() - - @classmethod - def tearDownClass(self): - pass - - def setUp(self): - # print the test name - logger.info(f"Running TestTorchUtils test: {self.id()}") - - def test_fetch_module(self): - result = fetch_module(self.model, "transformer.h.2.mlp.fc_in") - self.assertIsInstance(result, torch.nn.Linear) - - def test_set_module(self): - module_name = "transformer.h.2.mlp.fc_in" - mew_value = torch.nn.Linear(32, 128, bias=False) - set_module(self.model, module_name, mew_value) - result = fetch_module(self.model, module_name) - self.assertFalse(result.bias) - - def test_set_module_nonexistent_attribute(self): - new_value = torch.nn.Parameter(torch.Tensor([3.0])) - attr_name = "transformer.nonexistent_attr" - set_module(self.model, attr_name, new_value) - result = fetch_module(self.model, attr_name) - self.assertTrue(torch.equal(result, torch.Tensor([3.0]))) - - def test_fetch_module_nonexistent_attribute(self): - attr_name = "transformer.nonexistent_attr" - result = fetch_module(self.model, attr_name) - self.assertIsNone(result) - - def test_get_model_info(self): - from neural_compressor.torch.utils.utility import get_model_info - - white_module_list = [torch.nn.Linear] - model_info = get_model_info(build_simple_torch_model(), white_module_list) - self.assertEqual(len(model_info), 4) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/3x/torch/utils/test_auto_accelerator.py b/test/3x/torch/utils/test_auto_accelerator.py new file mode 100644 index 00000000000..dea9cdce918 --- /dev/null +++ b/test/3x/torch/utils/test_auto_accelerator.py @@ -0,0 +1,138 @@ +import os + +import pytest +import torch + +from neural_compressor.torch.utils import get_accelerator +from neural_compressor.torch.utils.auto_accelerator import ( + CPU_Accelerator, + CUDA_Accelerator, + HPU_Accelerator, + 
XPU_Accelerator, + accelerator_registry, + auto_detect_accelerator, +) + + +@pytest.mark.skipif(not HPU_Accelerator.is_available(), reason="HPEX is not available") +class TestHPUAccelerator: + def test_cuda_accelerator(self): + assert os.environ.get("FORCE_DEVICE", None) is None, "FORCE_DEVICE shouldn't be set. HPU is the first priority." + accelerator = auto_detect_accelerator() + assert accelerator.current_device() == 0, f"{accelerator.current_device()}" + assert accelerator.current_device_name() == "hpu:0" + assert accelerator.device() is not None + assert accelerator.device_name(0) == "hpu:0" + assert accelerator.is_available() is True + assert accelerator.name() == "hpu" + assert accelerator.device_name(1) == "hpu:1" + assert accelerator.synchronize() is None + assert accelerator.empty_cache() is None + + def test_get_device(self): + if torch.hpu.device_count() < 2: + return + accelerator = auto_detect_accelerator() + assert accelerator.set_device(1) is None + assert accelerator.current_device_name() == "hpu:1" + cur_device = get_accelerator().current_device_name() + assert cur_device == "hpu:1" + tmp_tensor = torch.tensor([1, 2], device=cur_device) + assert "hpu:1" == str(tmp_tensor.device) + + +@pytest.mark.skipif(not XPU_Accelerator.is_available(), reason="XPU is not available") +class TestXPUAccelerator: + + @pytest.fixture + def force_use_xpu(self, monkeypatch): + # Force use xpu + monkeypatch.setenv("FORCE_DEVICE", "xpu") + + def test_xpu_accelerator(self, force_use_xpu): + print(f"FORCE_DEVICE: {os.environ.get('FORCE_DEVICE', None)}") + accelerator = auto_detect_accelerator() + assert accelerator.current_device() == 0, f"{accelerator.current_device()}" + assert accelerator.current_device_name() == "xpu:0" + assert accelerator.device() is not None + assert accelerator.set_device(0) is None + assert accelerator.device_name(0) == "xpu:0" + assert accelerator.is_available() is True + assert accelerator.name() == "xpu" + assert accelerator.device_name(1) == "xpu:1" + assert accelerator.synchronize() is None + assert accelerator.empty_cache() is None + + def test_get_device(self): + if torch.xpu.device_count() < 2: + return + accelerator = auto_detect_accelerator() + assert accelerator.set_device(1) is None + assert accelerator.current_device_name() == "xpu:1" + cur_device = get_accelerator().current_device_name() + assert cur_device == "xpu:1" + tmp_tensor = torch.tensor([1, 2], device=cur_device) + assert "xpu:1" == str(tmp_tensor.device) + + +class TestCPUAccelerator: + @pytest.fixture + def force_use_cpu(self, monkeypatch): + # Force use CPU + monkeypatch.setenv("FORCE_DEVICE", "cpu") + + def test_cpu_accelerator(self, force_use_cpu): + print(f"FORCE_DEVICE: {os.environ.get('FORCE_DEVICE', None)}") + accelerator = auto_detect_accelerator() + assert accelerator.current_device() == "cpu", f"{accelerator.current_device()}" + assert accelerator.current_device_name() == "cpu" + assert accelerator.is_available() + assert accelerator.set_device(1) is None + assert accelerator.device() is None + assert accelerator.empty_cache() is None + assert accelerator.synchronize() is None + + +@pytest.mark.skipif(not CUDA_Accelerator.is_available(), reason="CUDA is not available") +class TestCUDAAccelerator: + + @pytest.fixture + def force_use_cuda(self, monkeypatch): + # Force use CUDA + monkeypatch.setenv("FORCE_DEVICE", "cuda") + + def test_cuda_accelerator(self, force_use_cuda): + print(f"FORCE_DEVICE: {os.environ.get('FORCE_DEVICE', None)}") + accelerator = auto_detect_accelerator() + assert 
accelerator.current_device() == 0, f"{accelerator.current_device()}" + assert accelerator.current_device_name() == "cuda:0" + assert accelerator.device() is not None + assert accelerator.set_device(0) is None + assert accelerator.device_name(0) == "cuda:0" + assert accelerator.is_available() is True + assert accelerator.name() == "cuda" + assert accelerator.device_name(1) == "cuda:1" + assert accelerator.synchronize() is None + assert accelerator.empty_cache() is None + + @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Only one GPU is available") + def test_get_device(self): + accelerator = auto_detect_accelerator() + assert accelerator.set_device(1) is None + assert accelerator.current_device_name() == "cuda:1" + cur_device = get_accelerator().current_device_name() + assert cur_device == "cuda:1" + tmp_tensor = torch.tensor([1, 2], device=cur_device) + assert "cuda:1" == str(tmp_tensor.device) + + +class TestAutoAccelerator: + + @pytest.fixture + def set_cuda_available(self, monkeypatch): + monkeypatch.setattr(torch.cuda, "is_available", lambda: True) + + def test_auto_accelerator(self, set_cuda_available): + accelerator = auto_detect_accelerator() + all_accelerators = accelerator_registry.get_sorted_accelerators() + assert accelerator.name() == all_accelerators[0]().name() diff --git a/test/3x/torch/utils/test_torch_utility.py b/test/3x/torch/utils/test_torch_utility.py new file mode 100644 index 00000000000..b84db61ff7a --- /dev/null +++ b/test/3x/torch/utils/test_torch_utility.py @@ -0,0 +1,79 @@ +import pytest +import torch + +from neural_compressor.torch.utils.utility import get_double_quant_config_dict + + +def get_gpt_j(): + import transformers + + tiny_gptj = transformers.AutoModelForCausalLM.from_pretrained( + "hf-internal-testing/tiny-random-GPTJForCausalLM", + torchscript=True, + ) + return tiny_gptj + + +def build_simple_torch_model(): + class Model(torch.nn.Module): + def __init__(self): + super(Model, self).__init__() + self.fc1 = torch.nn.Linear(8, 30) + self.fc2 = torch.nn.Linear(30, 60) + self.fc3 = torch.nn.Linear(60, 30) + self.fc4 = torch.nn.Linear(30, 50) + + def forward(self, x): + out = self.fc1(x) + out = self.fc2(out) + out = self.fc3(out) + out = self.fc4(out) + return out + + model = Model() + return model + + +from neural_compressor.torch.utils.utility import fetch_module, set_module + + +class TestTorchUtils: + def setup_class(self): + self.model = get_gpt_j() + + def teardown_class(self): + pass + + @pytest.mark.parametrize( + "module_name", + [ + "transformer.h.2.mlp.fc_in", + "transformer.nonexistent_attr", + ], + ) + def test_fetch_set_module(self, module_name): + # fetch + result = fetch_module(self.model, module_name) + if "nonexistent_attr" in module_name: + assert result is None, "result should be None" + else: + assert isinstance(result, torch.nn.Linear), "fetched module should be Linear" + assert result.bias is not None, "The bias of fetched module should not be None." + # set + new_value = torch.nn.Linear(32, 128, bias=False) + set_module(self.model, module_name, new_value) + result = fetch_module(self.model, module_name) + print(result) + assert result.bias is None, "The bias of new module should be None." + + def test_get_model_info(self): + from neural_compressor.torch.utils.utility import get_model_info + + white_module_list = [torch.nn.Linear] + model_info = get_model_info(build_simple_torch_model(), white_module_list) + assert len(model_info) == 4, "The length of model_info should be 4." 
+ + @pytest.mark.parametrize("double_quant_type", ["BNB_NF4", "GGML_TYPE_Q4_K"]) + def test_double_quant_config_dict(self, double_quant_type): + config_dict = get_double_quant_config_dict(double_quant_type) + assert isinstance(config_dict, dict), "The returned object should be a dict." diff --git a/test/adaptor/mxnet_adaptor/test_adaptor_mxnet.py b/test/adaptor/mxnet_adaptor/test_adaptor_mxnet.py deleted file mode 100644 index b576a1f6eb0..00000000000 --- a/test/adaptor/mxnet_adaptor/test_adaptor_mxnet.py +++ /dev/null @@ -1,299 +0,0 @@ -import json -import os -import platform -import shutil -import sys -import unittest -from pathlib import Path -from tempfile import TemporaryDirectory - -import mxnet as mx -import mxnet.gluon.nn as nn -import numpy as np -import yaml - -from neural_compressor.adaptor.mxnet_utils.util import check_mx_version -from neural_compressor.experimental import Quantization, common -from neural_compressor.experimental.metric.metric import MXNetMetrics -from neural_compressor.utils.utility import recover - -WORKSPACE_DIR = Path("./saved") - -MX_NAMESPACE = mx.np if check_mx_version("2.0.0") else mx.nd - - -def build_mxnet(): - fake_yaml = """ - model: - name: imagenet - framework: mxnet - - evaluation: - accuracy: - metric: - topk: 1 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: {} - """.format( - str(WORKSPACE_DIR) - ) - configs = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("mxnet.yaml", "w", encoding="utf-8") as f: - yaml.dump(configs, f) - f.close() - - -def build_mxnet_kl(): - fake_yaml = """ - model: - name: imagenet - framework: mxnet - - quantization: - model_wise: - activation: - algorithm: kl - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: {} - """.format( - str(WORKSPACE_DIR) - ) - configs = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("mxnet_kl.yaml", "w", encoding="utf-8") as f: - yaml.dump(configs, f) - f.close() - - -def are_models_equal(tester, model_a, model_b): - symnet_a, args_a, auxs_a = model_a - symnet_b, args_b, auxs_b = model_b - - nodes_a = [(node["op"], node["inputs"]) for node in json.loads(symnet_a.tojson())["nodes"]] - nodes_b = [(node["op"], node["inputs"]) for node in json.loads(symnet_b.tojson())["nodes"]] - tester.assertEqual(nodes_a, nodes_b) - - args_a = dict(sorted(args_a.items(), key=lambda x: x[0])) - args_b = dict(sorted(args_b.items(), key=lambda x: x[0])) - auxs_a = dict(sorted(auxs_a.items(), key=lambda x: x[0])) - auxs_b = dict(sorted(auxs_b.items(), key=lambda x: x[0])) - - assert len(args_a) == len(args_b) - for val_a, val_b in zip(args_a.values(), args_b.values()): - tester.assertTrue(np.all((val_a == val_b).asnumpy())) - - assert len(auxs_a) == len(auxs_b) - for val_a, val_b in zip(auxs_a.values(), auxs_b.values()): - tester.assertTrue(np.all((val_a == val_b).asnumpy())) - - -class TestAdaptorMXNet(unittest.TestCase): - """Test MXNet adaptor functions.""" - - @classmethod - def setUpClass(self): - if platform.system().lower() == "windows": - self.skipTest(self, "not support mxnet on windows yet") - build_mxnet() - build_mxnet_kl() - - self.data_low = -1000 - self.data_high = 1000 - - @classmethod - def tearDownClass(self): - os.remove("mxnet.yaml") - os.remove("mxnet_kl.yaml") - shutil.rmtree(WORKSPACE_DIR, ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_utils(self): - import neural_compressor.adaptor.mxnet_utils.util as 
utils - - self.assertTrue(utils.isiterable([1, 2, 3])) - self.assertFalse(utils.isiterable(123)) - - def test_mlp_model_quantization(self): - """Use MLP model to test minmax calibration and built-in evaluate function.""" - mlp_input = mx.symbol.Variable("data") - mlp_model = mx.symbol.FullyConnected(data=mlp_input, name="fc1", num_hidden=32) - mlp_model = mx.symbol.Activation(data=mlp_model, act_type="relu") - mlp_model = mx.symbol.FullyConnected(data=mlp_model, name="fc2", num_hidden=16) - mlp_model = mx.symbol.Softmax(mlp_model, name="softmax") - - for shape in [ - (32, 64), - ]: - data = MX_NAMESPACE.random.uniform(self.data_low, self.data_high, shape).astype("float32") - labels = MX_NAMESPACE.ones((shape[0],)) - calib_data = mx.io.NDArrayIter(data=data, label=labels, batch_size=shape[0]) - - with TemporaryDirectory() as tmpdirname: - prefix = str(Path(tmpdirname) / "tmp") - sym_block = mx.gluon.SymbolBlock(mlp_model, [mlp_input]) - sym_block.initialize() - sym_block.forward(data) - sym_block.export(prefix, epoch=0) - fp32_model = mx.model.load_checkpoint(prefix, 0) - - quantizer = Quantization("./mxnet.yaml") - quantizer.model = fp32_model - quantizer.calib_dataloader = calib_data - quantizer.eval_dataloader = calib_data - qmodel = quantizer.fit() - self.assertIsInstance(qmodel.model[0], mx.symbol.Symbol) - - # test inspect_tensor - inspect_tensor = quantizer.strategy.adaptor.inspect_tensor - quantizer.model = fp32_model - - fc_op_name = "sg_{}_fully_connected".format("onednn" if check_mx_version("2.0.0") else "mkldnn") - fc_node_name1 = fc_op_name + "_eltwise_0" - fc_node_name2 = fc_op_name + "_1" - - insp = inspect_tensor( - quantizer.model, - quantizer.calib_dataloader, - op_list=[fc_node_name1, fc_node_name2], - iteration_list=[1, 3], - ) - qinsp = inspect_tensor( - qmodel, quantizer.calib_dataloader, op_list=[fc_node_name1, fc_node_name2], iteration_list=[1, 3] - ) - - self.assertNotEqual(len(insp["activation"]), 0) - self.assertEqual(len(insp["activation"]), len(qinsp["activation"])) - - for tensors, qtensors in zip(insp["activation"], qinsp["activation"]): - for k in set(tensors.keys()) & set(qtensors.keys()): - tensor, qtensor = tensors[k][k], qtensors[k][k] - self.assertEqual(tensor.shape, qtensor.shape) - - # test inspect with an empty iteration_list - inspect_tensor(qmodel, quantizer.calib_dataloader, op_list=[fc_node_name1], iteration_list=[]) - - # test recovery for symbolic model - qmodel_r = recover(fp32_model, WORKSPACE_DIR / "history.snapshot", -1) - are_models_equal(self, qmodel.model, qmodel_r.model) - - # test symbolic model saving - qmodel_r.save(WORKSPACE_DIR / "save_test") - - def test_conv_model_quantization(self): - """Use Conv model to test KL calibration and user specific evaluate function.""" - conv_net = nn.HybridSequential() - conv_net.add(nn.Conv2D(channels=3, kernel_size=(1, 1))) - conv_net.add(nn.BatchNorm()) - conv_net.add(nn.Activation("relu")) - conv_net.add(nn.AvgPool2D(pool_size=(4, 4))) - conv_net.add(nn.Dense(1, activation="sigmoid")) - conv_net.initialize() - - for shape in [ - (32, 3, 224, 224), - ]: - dataShape = (shape[0] * 5, *shape[1:]) - data = MX_NAMESPACE.random.uniform(self.data_low, self.data_high, dataShape, dtype="float32") - label = MX_NAMESPACE.random.randint(0, 2, (dataShape[0], 1)).astype("float32") - dataset = mx.gluon.data.ArrayDataset(data, label) - - def eval(model): - eval_dataloader = mx.gluon.data.DataLoader(dataset, batch_size=8) - metric = MXNetMetrics().metrics["Accuracy"]() - for batch in eval_dataloader: - data, 
labels = batch - preds = model.forward(data) - metric.update(labels.asnumpy(), preds.asnumpy()) - return metric.result() - - calib_dataloader = mx.gluon.data.DataLoader(dataset, batch_size=8) - calib_dataloader.batch_size = 8 - quantizer = Quantization("./mxnet_kl.yaml") - quantizer.model = conv_net - quantizer.calib_dataloader = calib_dataloader - quantizer.eval_func = eval - qnet = quantizer.fit().model - self.assertIsInstance(qnet, mx.gluon.HybridBlock) - - def test_gluon_model(self): - """Use gluon model to test gluon related functions in mxnet adaptor.""" - - # create gluon model - def create_model(params=None): - net = nn.HybridSequential() - net.add(nn.Conv2D(1, (1, 1), activation="relu")) - net.add(nn.Flatten()) - net.add(nn.Dense(64, activation="relu")) - net.add(nn.Dense(10)) - if params is not None: - if check_mx_version("2.0.0"): - net.load_dict({k: v.data() for k, v in params.items()}) - else: - param_keys = sorted(net.collect_params().keys()) - param_values = sorted(params.items(), key=lambda x: x[0]) - params = {k: v.data() for k, (old_k, v) in zip(param_keys, param_values)} - net.collect_params().load_dict(params) - else: - net.initialize() - return net - - class CalibDataset: - def __init__(self, dataset): - self.dataset = dataset - - def __getitem__(self, idx): - if check_mx_version("2.0.0"): - mx_namespace = mx.np - else: - mx_namespace = mx.nd - data, label = self.dataset[idx] - data = mx_namespace.reshape(data, (data.shape[-1], *data.shape[:-1])).astype("float32") - return data, label - - def __len__(self): - return len(self.dataset) - - net = create_model() - dataset = CalibDataset(mx.gluon.data.vision.datasets.FashionMNIST(train=False)) - dataloader = common.DataLoader(dataset, batch_size=8) - quantizer = Quantization("./mxnet.yaml") - quantizer.model = net - quantizer.calib_dataloader = dataloader - quantizer.eval_dataloader = dataloader - qnet = quantizer.fit() - self.assertIsInstance(qnet.model, mx.gluon.HybridBlock) - - # test recovery for gluon model - net = create_model(net.collect_params()) - qnet_r = recover(net, WORKSPACE_DIR / "history.snapshot", -1) - - from neural_compressor.adaptor.mxnet_utils.util import prepare_dataloader, prepare_model - - dataloader = prepare_dataloader(qnet, mx.cpu(), quantizer.calib_dataloader) - - # test calling prepare_dataloader for already prepared dataloader - self.assertIs(dataloader, prepare_dataloader(qnet, mx.cpu(), dataloader)) - - model_a = prepare_model(qnet, mx.cpu(), dataloader.input_desc) - model_b = prepare_model(qnet_r, mx.cpu(), dataloader.input_desc) - are_models_equal(self, model_a, model_b) - - # test gluon model saving - qnet_r.save(WORKSPACE_DIR / "save_test") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py b/test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py index 8f4b9582433..b85ad84531b 100644 --- a/test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py +++ b/test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py @@ -17,329 +17,11 @@ from neural_compressor import PostTrainingQuantConfig, quantization, set_workspace from neural_compressor.adaptor import FRAMEWORKS from neural_compressor.adaptor.pytorch import get_torch_version -from neural_compressor.conf.config import conf from neural_compressor.data import DATALOADERS, DataLoader, Datasets -from neural_compressor.experimental import Benchmark, Quantization, common from neural_compressor.model import Model from neural_compressor.utils.utility import recover -def build_static_yaml(): - fake_yaml = 
""" - model: - name: imagenet - framework: onnxrt_qlinearops - - quantization: - approach: post_training_static_quant - calibration: - sampling_size: 50 - op_wise: { - 'Gather_*': { - 'activation': {'dtype': ['fp32'], 'scheme':['sym']}, - 'weight': {'dtype': ['fp32'], 'scheme':['sym']} - } - } - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: ./nc_workspace/recover/ - """ - with open("qlinear.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - fake_yaml = """ - model: - name: imagenet - framework: onnxrt_qdq - - quantization: - approach: post_training_static_quant - calibration: - sampling_size: 50 - op_wise: { - 'Gather_*': { - 'activation': {'dtype': ['fp32'], 'scheme':['sym']}, - 'weight': {'dtype': ['fp32'], 'scheme':['sym']} - } - } - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: ./nc_workspace/recover/ - """ - with open("qdq.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_benchmark_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: onnxrt_qlinearops - - evaluation: - performance: - warmup: 1 - iteration: 10 - configs: - num_of_instance: 1 - dataloader: - batch_size: 1 - dataset: - ImageFolder: - root: /path/to/evaluation/dataset/ - accuracy: - metric: - topk: 1 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("benchmark.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_dynamic_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: onnxrt_integerops - - quantization: - approach: post_training_dynamic_quant - calibration: - sampling_size: 50 - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: ./nc_workspace/recover/ - - """ - with open("dynamic.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_recipe_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: onnxrt_qlinearops - - quantization: - approach: post_training_static_quant - recipes: - first_conv_or_matmul_quantization: False - last_conv_or_matmul_quantization: False - calibration: - sampling_size: 1 - dataloader: - dataset: - dummy_v2: - input_shape: [100, 4] - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - dataloader: - dataset: - dummy_v2: - input_shape: [100, 4] - - tuning: - accuracy_criterion: - relative: -0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("recipe.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_recipe2_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: onnxrt_qlinearops - - quantization: - approach: post_training_static_quant - recipes: - last_conv_or_matmul_quantization: False - pre_post_process_quantization: False - calibration: - sampling_size: 1 - dataloader: - dataset: - dummy_v2: - input_shape: [100, 4] - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - dataloader: - dataset: - dummy_v2: - input_shape: [100, 4] - - tuning: - accuracy_criterion: - relative: -0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("recipe2.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_gather_yaml(): - 
fake_yaml = """ - model: - name: imagenet - framework: onnxrt_qlinearops - - quantization: - approach: post_training_static_quant - calibration: - sampling_size: 1 - dataloader: - batch_size: 1 - dataset: - dummy_v2: - input_shape: [100, 4] - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - dataloader: - batch_size: 1 - dataset: - dummy_v2: - input_shape: [100, 4] - - tuning: - accuracy_criterion: - relative: -0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("gather.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_rename_yaml(): - fake_yaml = """ - model: - name: test - framework: onnxrt_integerops - - quantization: - approach: post_training_dynamic_quant - calibration: - sampling_size: 1 - - evaluation: - accuracy: - metric: - Accuracy: {} - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("rename.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_non_MSE_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: onnxrt_qlinearops - - quantization: - approach: post_training_static_quant - calibration: - sampling_size: 50 - op_wise: { - 'Gather_*': { - 'activation': {'dtype': ['fp32'], 'scheme':['sym']}, - 'weight': {'dtype': ['fp32'], 'scheme':['sym']} - } - } - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - performance: - warmup: 5 - iteration: 10 - - tuning: - accuracy_criterion: - relative: 0.1 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: ./nc_workspace/recover/ - - """ - with open("non_MSE.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - def eval_func(model): return 1.0 @@ -696,50 +378,6 @@ def build_model_share_init(): return model -def build_benchmark(): - seq = """ -from neural_compressor.experimental import Benchmark -from neural_compressor.data import Datasets, DATALOADERS -from neural_compressor import conf -from onnx import onnx_pb as onnx_proto -from onnx import helper, TensorProto, numpy_helper -from onnxruntime_extensions import onnx_op -import numpy as np - -@onnx_op(op_type="PyReverseMatrix") -def reverse_matrix(x): - # The user custom op implementation here. 
- return np.flip(x, axis=0).astype(np.float32) - -nodes = [] -nodes[0:] = [helper.make_node('Identity', ['input_1'], ['identity1'])] -nodes[1:] = [helper.make_node('PyReverseMatrix', - ['identity1'], ['reversed'], - domain='ai.onnx.contrib')] - -input0 = helper.make_tensor_value_info( - 'input_1', onnx_proto.TensorProto.FLOAT, [None, 2]) -output0 = helper.make_tensor_value_info( - 'reversed', onnx_proto.TensorProto.FLOAT, [None, 2]) - -graph = helper.make_graph(nodes, 'test0', [input0], [output0]) -model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]}) - -datasets = Datasets('onnxrt_qlinearops') -ext_dataset = datasets['dummy'](shape=(10, 2), low=0., high=1., label=True) -ext_dataloader = DATALOADERS['onnxrt_qlinearops'](ext_dataset) - -conf.model.framework = 'onnxrt_qlinearops' -conf.evaluation.accuracy.metric = {'Accuracy': {}} -evaluator = Benchmark(conf) -evaluator.b_dataloader = ext_dataloader -evaluator.model = model -evaluator('performance') - """ - with open("benchmark.py", "w", encoding="utf-8") as f: - f.writelines(seq) - - class MatmulDataset: def __init__(self): self.data = [] @@ -869,14 +507,6 @@ class TestAdaptorONNXRT(unittest.TestCase): @classmethod def setUpClass(self): - build_rename_yaml() - build_static_yaml() - build_dynamic_yaml() - build_gather_yaml() - build_non_MSE_yaml() - build_benchmark_yaml() - build_recipe_yaml() - build_recipe2_yaml() export_onnx_cv_model(self.mb_v2_model, self.mb_v2_export_path, 13) self.mb_v2_model = onnx.load(self.mb_v2_export_path) export_onnx_cv_model(self.rn50_model, self.rn50_export_path, 12) @@ -898,21 +528,10 @@ def setUpClass(self): self.distilbert_model = onnx.load(self.distilbert_export_path) self.albert_model = onnx.load(self.albert_export_path) self.gather_matmul_model = build_matmul_gather_model() - build_benchmark() set_workspace("nc_workspace") @classmethod def tearDownClass(self): - os.remove("qlinear.yaml") - os.remove("qdq.yaml") - os.remove("recipe.yaml") - os.remove("recipe2.yaml") - os.remove("dynamic.yaml") - os.remove("non_MSE.yaml") - os.remove("benchmark.yaml") - os.remove("gather.yaml") - os.remove("rename.yaml") - os.remove("rename_model.onnx") os.remove("rn50_9.onnx") os.remove(self.mb_v2_export_path) os.remove(self.rn50_export_path) @@ -942,128 +561,6 @@ def evaluate(self): with self.assertRaises(ValueError): test() - @unittest.skipIf( - Version(ort.__version__) == Version("1.13.1"), - "This function does not work with ONNX Runtime 1.13.1 for QDQ format quantization of ONNX models.", - ) - def test_inspect_tensor(self): - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1234, - "q_dataloader": None, - "backend": "default", - "format": "default", - "domain": "auto", - "recipes": {}, - "workspace_path": "./nc_workspace/{}/{}/".format("onnxrt", "imagenet"), - } - framework = "onnxrt_qlinearops" - adaptor = FRAMEWORKS[framework](framework_specific_info) - op_list = [i.name for i in self.rn50_model.graph.node if i.op_type == "Conv"] - - data = adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type="activation", op_list=op_list) - self.assertNotEqual(len(data["activation"][0]), 0) - self.assertTrue("weight" not in data) - - adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type="activation", save_to_disk=True) - self.assertTrue(os.path.isfile(framework_specific_info["workspace_path"] + "inspect_result.pkl")) - - data = adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, 
inspect_type="weight", op_list=op_list) - self.assertNotEqual(len(data["weight"]), 0) - self.assertTrue("activation" not in data) - - data = adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, inspect_type="all", op_list=op_list) - self.assertTrue("activation" in data) - self.assertTrue("weight" in data) - self.assertNotEqual(len(data["activation"][0]), 0) - self.assertNotEqual(len(data["weight"]), 0) - - data = adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, [op_list[0]], inspect_type="activation") - self.assertEqual(len(data["activation"][0]), 1) - - op = OrderedDict() - op[(op_list[0], "Conv")] = None - data = adaptor.inspect_tensor(self.rn50_model, self.cv_dataloader, op.keys(), inspect_type="activation") - self.assertEqual(len(data["activation"][0]), 1) - - for fake_yaml in ["qlinear.yaml", "qdq.yaml"]: - quantizer = Quantization(fake_yaml) - quantizer.calib_dataloader = self.cv_dataloader - quantizer.eval_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - adaptor._pre_optimize(common.Model(self.rn50_model)) - opt_model = quantizer.strategy.adaptor.pre_optimized_model - - op_list, _ = quantizer.strategy.adaptor.diagnosis_helper( - opt_model, q_model, None, "./nc_workspace/recover/" - ) - - fp32_tensor = quantizer.strategy.adaptor.inspect_tensor(opt_model.model, self.cv_dataloader, op_list) - int8_tensor = quantizer.strategy.adaptor.inspect_tensor(q_model.model, self.cv_dataloader, op_list) - - self.assertTrue(len(fp32_tensor["activation"]) == len(int8_tensor["activation"])) - self.assertTrue(sorted(fp32_tensor["activation"][0].keys()) == sorted(int8_tensor["activation"][0].keys())) - for op in op_list: - for x, y in zip(fp32_tensor["activation"][0][op].values(), int8_tensor["activation"][0][op].values()): - self.assertTrue(x.shape == y.shape) - - if fake_yaml == "qlinear.yaml": - fp32_tensor = quantizer.strategy.adaptor.inspect_tensor( - opt_model.model, self.cv_dataloader, op_list, inspect_type="weight" - ) - int8_tensor = quantizer.strategy.adaptor.inspect_tensor( - q_model.model, self.cv_dataloader, op_list, inspect_type="weight" - ) - self.assertTrue(len(fp32_tensor["weight"]) == len(int8_tensor["weight"])) - self.assertTrue(sorted(fp32_tensor["weight"].keys()) == sorted(int8_tensor["weight"].keys())) - ai_onnx_domain = [ - opset for opset in q_model.model.opset_import if not opset.domain or opset.domain == "ai.onnx" - ] - if ai_onnx_domain[0].version > 12 or Version(ort.__version__) < Version("1.12.0"): - for op in fp32_tensor["weight"].keys(): - self.assertTrue( - sorted(fp32_tensor["weight"][op].keys()) == sorted(int8_tensor["weight"][op].keys()) - ) - fp32_tensor = quantizer.strategy.adaptor.inspect_tensor( - opt_model.model, self.cv_dataloader, op_list, inspect_type="all" - ) - int8_tensor = quantizer.strategy.adaptor.inspect_tensor( - q_model.model, self.cv_dataloader, op_list, inspect_type="all" - ) - self.assertTrue(len(fp32_tensor["weight"]) == len(int8_tensor["weight"])) - self.assertTrue(len(fp32_tensor["activation"]) == len(int8_tensor["activation"])) - self.assertTrue(sorted(fp32_tensor["weight"].keys()) == sorted(int8_tensor["weight"].keys())) - if ai_onnx_domain[0].version > 12 or Version(ort.__version__) < Version("1.12.0"): - for op in fp32_tensor["weight"].keys(): - self.assertTrue( - sorted(fp32_tensor["weight"][op].keys()) == sorted(int8_tensor["weight"][op].keys()) - ) - self.assertTrue( - sorted(fp32_tensor["activation"][0].keys()) == 
sorted(int8_tensor["activation"][0].keys()) - ) - if ai_onnx_domain[0].version > 12 or Version(ort.__version__) < Version("1.12.0"): - for op in op_list: - self.assertTrue( - sorted(fp32_tensor["activation"][0][op].keys()) - == sorted(int8_tensor["activation"][0][op].keys()) - ) - - config = PostTrainingQuantConfig(approach="static", recipes={"gemm_to_matmul": False}) - q_model = quantization.fit(self.gemm_model, config, calib_dataloader=self.ir3_dataloader) - - fp32_tensor = quantizer.strategy.adaptor.inspect_tensor( - self.gemm_model, self.ir3_dataloader, ["gemm"], inspect_type="weight" - ) - int8_tensor = quantizer.strategy.adaptor.inspect_tensor( - q_model.model, self.ir3_dataloader, ["gemm"], inspect_type="weight" - ) - self.assertTrue(len(fp32_tensor["weight"]) == len(int8_tensor["weight"])) - self.assertTrue(sorted(fp32_tensor["weight"].keys()) == sorted(int8_tensor["weight"].keys())) - def test_set_tensor(self): from neural_compressor.adaptor.ox_utils.util import get_node_original_name, quantize_data_with_scale_zero @@ -1156,31 +653,6 @@ def test_set_tensor(self): (new_tensor == numpy_helper.to_array(q_model.get_initializer(tensor_name + "_quantized"))).all() ) - def test_auto_quant(self): - conf.model.framework = "onnxrt_qlinearops" - conf.quantization.approach = "post_training_auto_quant" - conf.quantization.optype_wise = { - "Add|MatMul|Conv": {"weight": {"algorithm": ["minmax"]}, "activation": {"algorithm": ["minmax"]}} - } - conf.quantization.calibration.sampling_size = 1 - conf.tuning.exit_policy.timeout = 1000000 - conf.tuning.exit_policy.max_trials = 8 - conf.evaluation.accuracy.metric = {"MSE": {"compare_label": False}} - quantizer = Quantization(conf) - quantizer.calib_dataloader = self.cv_dataloader - quantizer.eval_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - conf.model.framework = "onnxrt_qdq" - quantizer = Quantization(conf) - quantizer.calib_dataloader = self.cv_dataloader - quantizer.eval_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - def test_auto_quant_v2(self): from neural_compressor.config import AccuracyCriterion, PostTrainingQuantConfig, TuningCriterion from neural_compressor.quantization import fit @@ -1210,191 +682,6 @@ def test_quantize_data_per_channel(self): new_tensor_value = quantize_data_per_channel(tensor_value, 1, 254, qType, "sym") self.assertEqual(tensor_value.all(), new_tensor_value[-1].all()) - def test_adaptor(self): - from neural_compressor.utils.constant import FP32, INT8_SYM_MINMAX_PERTENSOR, UINT8_ASYM_MINMAX_PERTENSOR - - # check op_wise has higher priority than optype_wise - conf.model.framework = "onnxrt_qlinearops" - conf.quantization.approach = "post_training_static_quant" - conf.quantization.calibration.sampling_size = 1 - conf.quantization.optype_wise = {"Add": FP32} - conf.quantization.op_wise = { - "add": {"weight": INT8_SYM_MINMAX_PERTENSOR, "activation": UINT8_ASYM_MINMAX_PERTENSOR} - } - conf.evaluation.accuracy.metric = {"MSE": {"compare_label": False}} - quantizer = Quantization(conf) - quantizer.calib_dataloader = self.matmul_dataloader - quantizer.eval_dataloader = self.matmul_dataloader - quantizer.model = self.matmul_model - q_model = quantizer.fit() - self.assertTrue("add2" in [i.name for i in q_model.nodes()]) - self.assertTrue("add_quant" in [i.name for i in q_model.nodes()]) - - # check optype_wise has higher priority than model_wise - 
conf.quantization.pop("op_wise") - conf.quantization.model_wise = {"weight": INT8_SYM_MINMAX_PERTENSOR} - conf.quantization.optype_wise = {"MatMul": {"weight": {"granularity": ["per_channel"]}}} - quantizer = Quantization(conf) - quantizer.calib_dataloader = self.matmul_dataloader - quantizer.eval_dataloader = self.matmul_dataloader - quantizer.model = self.matmul_model - q_model = quantizer.fit() - self.assertEqual(len([i for i in q_model.initializer() if i.name == "B_scale"][0].float_data), 2) - - conf.quantization.pop("optype_wise") - conf.quantization.pop("model_wise") - - # check rename renamed nodes function - conf.model.framework = "onnxrt_integerops" - conf.quantization.approach = "post_training_dynamic_quant" - conf.quantization.calibration.sampling_size = 1 - conf.evaluation.accuracy.metric = {"MSE": {"compare_label": False}} - quantizer = Quantization(conf) - quantizer.calib_dataloader = self.rename_dataloader - quantizer.eval_dataloader = self.rename_dataloader - quantizer.model = self.rename_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - self.assertEqual(len([i.name for i in q_model.nodes()]), len(set([i.name for i in q_model.nodes()]))) - - # check large model quantization - conf.model.framework = "onnxrt_integerops" - conf.quantization.approach = "post_training_dynamic_quant" - conf.quantization.calibration.sampling_size = 1 - conf.evaluation.accuracy.metric = {"MSE": {"compare_label": False}} - quantizer = Quantization(conf) - quantizer.calib_dataloader = self.rename_dataloader - quantizer.eval_dataloader = self.rename_dataloader - onnx.save(self.rename_model, "rename_model.onnx") - quantizer.model = "rename_model.onnx" - # force set the model to large model - quantizer.model._is_large_model = True - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - quantizer = Quantization("dynamic.yaml") - quantizer.calib_dataloader = self.cv_dataloader - quantizer.eval_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - import copy - - # check opset version - tmp_model = copy.deepcopy(self.rn50_model) - tmp_model.opset_import[0].version = 10 - quantizer.model = tmp_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - tmp_model.opset_import.extend([onnx.helper.make_opsetid("", 11)]) - quantizer.model = tmp_model - q_model = quantizer.fit() - self.assertEqual(q_model, None) - model = onnx.load("rn50_9.onnx") - quantizer.model = model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - # check query quantizable_ops function - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1234, - "q_dataloader": None, - "backend": "default", - "format": "default", - "domain": "auto", - "recipes": {}, - "workspace_path": "./nc_workspace/{}/{}/".format("onnxrt", "imagenet"), - } - framework = "onnxrt_qlinearops" - adaptor = FRAMEWORKS[framework](framework_specific_info) - tune_cfg = { - "calib_iteration": 1, - "op": { - ("gather", "Gather"): { - "activation": {"dtype": ["uint8"], "quant_mode": "static"}, - "weight": {"dtype": ["uint8"]}, - }, - ("add", "Add"): { - "activation": {"dtype": ["uint8"], "quant_mode": "static"}, - "weight": {"dtype": ["int8"]}, - }, - ("squeeze", "Squeeze"): { - "activation": {"dtype": ["uint8"], "quant_mode": "static"}, - "weight": {"dtype": ["int8"]}, - }, - }, - } - adaptor.quantize(tune_cfg, common.Model(self.gather_model), self.gather_dataloader) - 
self.assertTrue(len(adaptor.quantizable_ops), 2) - - # check int8 + fp16 function - framework_specific_info["device"] = "gpu" - framework_specific_info["backend"] = "onnxrt_cuda_ep" - - tune_cfg = { - "calib_iteration": 1, - "op": { - ("Matmul", "MatMul"): { - "activation": {"dtype": ["uint8"], "quant_mode": "static"}, - "weight": {"dtype": ["int8"]}, - }, - ("add", "Add"): {"activation": {"dtype": "fp16", "quant_mode": "static"}, "weight": {"dtype": "fp16"}}, - ("add2", "Add"): {"activation": {"dtype": "fp16", "quant_mode": "static"}, "weight": {"dtype": "fp16"}}, - }, - } - adaptor = FRAMEWORKS[framework](framework_specific_info) - model = adaptor.quantize(tune_cfg, common.Model(self.matmul_model), self.matmul_dataloader) - self.assertEqual(len([i for i in model.model.graph.node if i.op_type == "Cast"]), 2) - - for fake_yaml in ["gather.yaml"]: - quantizer = Quantization(fake_yaml) - quantizer.model = self.gather_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - quantizer.model = self.matmul_model2 - q_model = quantizer.fit() # error input shape test - self.assertEqual(q_model, None) - - quantizer.eval_dataloader = self.matmul_dataloader - q_model = quantizer.fit() # error input shape test - self.assertEqual(q_model, None) - - quantizer.calib_dataloader = self.matmul_dataloader - quantizer.eval_dataloader = self.matmul_dataloader - quantizer.model = self.matmul_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - quantizer = Quantization("recipe.yaml") - quantizer.model = self.matmul_model - quantizer.calib_dataloader = self.matmul_dataloader - quantizer.eval_dataloader = self.matmul_dataloader - q_model = quantizer.fit() - self.assertTrue("Matmul" in [i.name for i in q_model.nodes()]) - - quantizer = Quantization("recipe2.yaml") - quantizer.model = self.conv_model2 - quantizer.calib_dataloader = self.conv_dataloader - quantizer.eval_dataloader = self.conv_dataloader - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - for fake_yaml in ["non_MSE.yaml"]: - quantizer = Quantization(fake_yaml) - quantizer.calib_dataloader = self.cv_dataloader - quantizer.eval_dataloader = self.cv_dataloader - quantizer.model = self.mb_v2_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - # check recover model function - model = recover(self.mb_v2_model, "./nc_workspace/recover/history.snapshot", 0) - self.assertTrue(model.model == q_model.model) - def test_qdq_settings(self): config = PostTrainingQuantConfig( approach="static", quant_format="QDQ", recipes={"add_qdq_pair_to_weight": True} @@ -1426,43 +713,6 @@ def test_model_name_checking(self): q_model = quantization.fit(self.matmul_model3, config, calib_dataloader=self.matmul_dataloader) self.assertTrue("MatMulInteger" in [i.op_type for i in q_model.nodes()]) - def test_lower_is_better_case(self): - import time - - conf.model.framework = "onnxrt_qlinearops" - conf.quantization.approach = "post_training_static_quant" - conf.quantization.model_wise = { - "weight": {"granularity": ["per_tensor"]}, - "activation": {"granularity": ["per_tensor"]}, - } - conf.tuning.exit_policy.max_trials = 5 - conf.tuning.accuracy_criterion.relative = 0.01 - conf.tuning.accuracy_criterion.higher_is_better = False - conf.tuning.exit_policy.timeout = 100 - - result = [0.0, 0.1, 0.1005, 0.102, 0.1002, 0.102, 0.102] - - def sub_eval(model, result): - time.sleep(0.001 * len(result)) - del result[0] - return result[0] - - def eval(model): - return sub_eval(model, result) - - from 
neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.model = self.matmul_model - quantizer.calib_dataloader = self.matmul_dataloader - quantizer.eval_func = eval - q_model = quantizer.fit() - node_names = [i.name for i in q_model.nodes()] - # This assert it depends on the number of trials, disables it first. - # self.assertTrue('Matmul_quant' in node_names) - # self.assertTrue('add' in node_names) - # self.assertTrue('add2' in node_names) - def test_new_API(self): import time @@ -1564,107 +814,6 @@ def test_smooth_quant_args(self): adaptor.smooth_quant(self.conv_model, self.cv_dataloader, 1, scales_per_op=False) self.assertEqual(len([i for i in adaptor.pre_optimized_model.nodes() if i.op_type == "Mul"]), 1) - def test_multi_metrics(self): - conf.model.framework = "onnxrt_qlinearops" - conf.quantization.approach = "post_training_static_quant" - conf.evaluation.accuracy.multi_metrics = {"Accuracy": {}, "MSE": {"compare_label": False}} - conf.evaluation.accuracy.pop("metric", None) - from neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.eval_dataloader = self.cv_dataloader - quantizer.calib_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - conf.evaluation.accuracy.multi_metrics = { - "Accuracy": {}, - "MSE": {"compare_label": False}, - "higher_is_better": [False, False], - } - conf.tuning.exit_policy.max_trials = 1 - from neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.eval_dataloader = self.cv_dataloader - quantizer.calib_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertEqual(q_model, None) - - conf.tuning.accuracy_criterion.relative = 0.01 - conf.tuning.accuracy_criterion.higher_is_better = True - conf.evaluation.accuracy.multi_metrics = {"Accuracy": {}, "MSE": {"compare_label": False}, "weight": [0.5, 0.5]} - from neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.eval_dataloader = self.cv_dataloader - quantizer.calib_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - conf.evaluation.accuracy.multi_metrics = { - "Accuracy": {}, - "MSE": {"compare_label": False}, - "weight": [0.5, 0.5], - "higher_is_better": [False, False], - } - from neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.eval_dataloader = self.cv_dataloader - quantizer.calib_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - conf.evaluation.accuracy.multi_metrics = { - "Accuracy": {}, - "MSE": {"compare_label": False}, - "weight": [0.5, 0.5], - "higher_is_better": [False, False], - } - conf.tuning.accuracy_criterion.higher_is_better = False - conf.tuning.exit_policy.max_trials = 2 - from neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.eval_dataloader = self.cv_dataloader - quantizer.calib_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertEqual(q_model, None) - - import time - - result = [[0.0, 0.0], [0.0, 0.0], [0.0, 122.0]] - - def sub_eval(model, result): - time.sleep(0.001 * len(result)) - del result[0] - return result[0] - - def eval(model): - return sub_eval(model, 
result) - - conf.evaluation.accuracy.multi_metrics = { - "Accuracy": {}, - "MSE": {"compare_label": False}, - "higher_is_better": [False, False], - } - conf.tuning.exit_policy.max_trials = 1 - conf.tuning.accuracy_criterion = {"absolute": 0.01, "higher_is_better": False} - from neural_compressor.experimental import Quantization - - quantizer = Quantization(conf) - quantizer.eval_func = eval - quantizer.calib_dataloader = self.cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertEqual(q_model, None) - def test_calibrator(self): from neural_compressor.adaptor.ox_utils.calibrator import CALIBRATOR @@ -1749,40 +898,6 @@ def test_query_block_info(self): q_capability = adaptor.query_fw_capability(Model(self.albert_model)) self.assertEqual(len(q_capability["block_wise"]), 12) - def test_dataloader_input(self): - cv_dataloader = DataLoader(framework="onnxruntime", dataset=DummyCVDataset_list(shape=(3, 224, 224))) - - quantizer = Quantization("qlinear.yaml") - quantizer.calib_dataloader = cv_dataloader - quantizer.eval_dataloader = cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - cv_dataloader = DataLoader(framework="pytorch", dataset=DummyCVDataset_dict(shape=(3, 224, 224))) - quantizer = Quantization("qlinear.yaml") - quantizer.calib_dataloader = cv_dataloader - quantizer.eval_dataloader = cv_dataloader - quantizer.model = self.rn50_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - nlp_dataloader = DummyNLPDataloader_list("distilbert-base-uncased-finetuned-sst-2-english") - quantizer = Quantization("qlinear.yaml") - quantizer.calib_dataloader = nlp_dataloader - quantizer.eval_dataloader = nlp_dataloader - quantizer.model = self.distilbert_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - nlp_dataloader = DummyNLPDataloader_dict("distilbert-base-uncased-finetuned-sst-2-english") - quantizer = Quantization("qlinear.yaml") - quantizer.calib_dataloader = nlp_dataloader - quantizer.eval_dataloader = nlp_dataloader - quantizer.model = self.distilbert_model - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - @patch("logging.Logger.warning") def test_backend(self, mock_warning): framework_specific_info = { diff --git a/test/adaptor/onnxrt_adaptor/test_onnxrt_augment.py b/test/adaptor/onnxrt_adaptor/test_onnxrt_augment.py index 382efeaab6c..7137add6d07 100644 --- a/test/adaptor/onnxrt_adaptor/test_onnxrt_augment.py +++ b/test/adaptor/onnxrt_adaptor/test_onnxrt_augment.py @@ -10,7 +10,7 @@ sys.path.append("..") from neural_compressor.adaptor.ox_utils.calibration import ONNXRTAugment from neural_compressor.data import DATALOADERS, Datasets -from neural_compressor.experimental.data.datasets.dataset import Dataset +from neural_compressor.data.datasets.dataset import Dataset from neural_compressor.model.onnx_model import ONNXModel diff --git a/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_1x.py b/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_1x.py deleted file mode 100644 index b13c6ff5a76..00000000000 --- a/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_1x.py +++ /dev/null @@ -1,1209 +0,0 @@ -import copy -import os -import pickle -import shutil -import unittest - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.quantized as nnq -from packaging.version import Version -from torch.quantization import DeQuantStub, QuantStub - -import neural_compressor.adaptor.pytorch as nc_torch -from 
neural_compressor.adaptor import FRAMEWORKS -from neural_compressor.conf.config import QuantConf -from neural_compressor.experimental import Quantization, common -from neural_compressor.model import MODELS -from neural_compressor.utils.pytorch import load -from neural_compressor.utils.utility import LazyImport, recover - -try: - import intel_extension_for_pytorch as ipex - - IPEX = True -except: - IPEX = False - -# improve lazy import UT coverage -resnet18 = LazyImport("torchvision.models.resnet18") -q_resnet18 = LazyImport("torchvision.models.quantization.resnet18") - -PT_VERSION = nc_torch.get_torch_version().release -if PT_VERSION >= Version("1.8.0").release: - FX_MODE = True -else: - FX_MODE = False - - -fake_dyn_yaml = """ - model: - name: imagenet - framework: pytorch - - quantization: - approach: post_training_dynamic_quant - op_wise: { - 'decoder': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - } - } - evaluation: - accuracy: - metric: - topk: 1 - performance: - warmup: 5 - iteration: 10 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - - -fake_ptq_yaml = """ - model: - name: imagenet - framework: pytorch - - quantization: - op_wise: { - - 'layer1.0.conv1': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'layer1.0.conv2': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'layer2.0.conv1': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']}, - 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']} - }, - 'layer3.0.conv1': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']}, - 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']} - }, - 'layer1.0.add_relu': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - } - evaluation: - accuracy: - metric: - topk: 1 - performance: - warmup: 1 - iteration: 10 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - -fake_auto_yaml = """ - model: - name: imagenet - framework: pytorch_fx - - quantization: - approach: post_training_auto_quant - evaluation: - accuracy: - metric: - topk: 1 - performance: - warmup: 1 - iteration: 10 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 1000 - max_trials: 3 - random_seed: 9527 - workspace: - path: saved - """ - - -fake_ptq_yaml_for_fx = """ - model: - name: imagenet - framework: pytorch_fx - - quantization: - approach: post_training_auto_quant - op_wise: { - 'layer1.0.conv1': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'layer1.0.conv2': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'layer2.0.conv1': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']}, - 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']} - }, - 'layer3.0.conv1': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']}, - 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']} - }, - 'layer1.0.add_relu': { - 'activation': {'dtype': ['fp32']}, - 'weight': 
{'dtype': ['fp32']} - }, - 'conv.module': { - 'weight': {'dtype': ['fp32']}, - 'activation': {'dtype': ['fp32']} - }, - 'default_qconfig': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - } - } - evaluation: - accuracy: - metric: - topk: 1 - performance: - warmup: 5 - iteration: 10 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - - -fake_qat_yaml = """ - model: - name: imagenet - framework: pytorch - - quantization: - approach: quant_aware_training - train: - end_epoch: 1 - iteration: 1 - optimizer: - SGD: - learning_rate: 0.0001 - criterion: - CrossEntropyLoss: - reduction: mean - op_wise: { - 'layer1.0.conv1': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'layer1.0.conv2': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - }, - 'layer2.0.conv1': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'granularity': ['per_tensor'], 'scheme':['sym']}, - 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']} - }, - 'layer3.0.conv1': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['kl'], 'granularity': ['per_tensor'], 'scheme':['sym']}, - 'weight': {'dtype': ['int8'], 'algorithm': ['minmax'], 'granularity': ['per_channel'], 'scheme':['sym']} - }, - 'layer1.0.add_relu': { - 'activation': {'dtype': ['fp32']}, - 'weight': {'dtype': ['fp32']} - } - } - evaluation: - accuracy: - metric: - topk: 1 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - - -def build_pytorch_yaml(): - with open("ptq_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_ptq_yaml) - - with open("dynamic_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_dyn_yaml) - - with open("qat_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_qat_yaml) - - with open("auto_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_auto_yaml) - - -def build_pytorch_fx_yaml(): - if PT_VERSION >= Version("1.9.0").release: - fake_fx_ptq_yaml = fake_ptq_yaml_for_fx - else: - fake_fx_ptq_yaml = fake_ptq_yaml.replace("pytorch", "pytorch_fx") - with open("fx_ptq_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_fx_ptq_yaml) - - fake_fx_dyn_yaml = fake_dyn_yaml.replace("pytorch", "pytorch_fx") - with open("fx_dynamic_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_fx_dyn_yaml) - - fake_fx_qat_yaml = fake_qat_yaml.replace("pytorch", "pytorch_fx") - with open("fx_qat_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_fx_qat_yaml) - - -def build_dump_tensors_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: pytorch - - evaluation: - accuracy: - metric: - topk: 1 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - tensorboard: true - """ - with open("dump_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class M(torch.nn.Module): - def __init__(self): - super().__init__() - self.quant = QuantStub() - self.conv = nn.Conv2d(3, 1, 1) - self.linear = nn.Linear(224 * 224, 5) - self.dequant = DeQuantStub() - - def forward(self, x): - x = self.quant(x) - x = self.conv(x) - x = x.view(1, -1) - x = self.linear(x) - x = self.dequant(x) - return x - - -class FP32Model(torch.nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - times = x.size(1) - if times == 1: - return torch.ones(x.shape) - 
return torch.ones(x.shape) + 1 - - -class DynamicModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv2d(1, 1, 1) - - def forward(self, x): - if x is not None: - x = self.conv(x) - return x - - -class SubModel(torch.nn.Module): - def __init__(self, bypass=True): - super().__init__() - self.quant = QuantStub() - self.conv = nn.Conv2d(1, 1, 1) - self.conv1 = nn.Conv2d(1, 1, 1) - self.bn = nn.BatchNorm2d(1) - self.relu = nn.ReLU() - self.fp32 = FP32Model() - self.norm = nn.LayerNorm([1, 224, 224]) - self.dequant = DeQuantStub() - self.bypass = bypass - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.quant(x) - x = self.relu(x) - x = self.conv1(x) - x = self.dequant(x) - if not self.bypass: - x = self.fp32(x) - x = self.norm(x) - return x - - -class PartialQuantModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.quant = QuantStub() - self.conv = nn.Conv2d(3, 1, 1) - self.bn = nn.BatchNorm2d(1) - self.conv1 = nn.Conv2d(1, 1, 1) - self.bn1 = nn.BatchNorm2d(1) - self.conv2 = nn.Conv2d(1, 1, 1) - self.linear = nn.Linear(224 * 224, 1) - self.dequant = DeQuantStub() - self.sub = SubModel(bypass=False) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - x = self.conv1(x) - x = self.bn1(x) - x = self.sub(x) - x = self.quant(x) - x = self.conv2(x) - x = x.view(1, -1) - x = self.linear(x) - x = self.dequant(x) - return x - - -class DynamicControlModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv2d(3, 1, 1) - self.bn = nn.BatchNorm2d(1) - self.linear = nn.Linear(224 * 224, 1) - self.sub = SubModel() - self.fp32 = FP32Model() - self.dyn = DynamicModel() - - def forward(self, x): - x = self.conv(x) - x = self.dyn(x) - x = self.bn(x) - x = self.sub(x) - x = self.fp32(x) - x = x.view(1, -1) - x = self.linear(x) - return x - - -class LSTMModel(nn.Module): - """Container module with an encoder, a recurrent module, and a decoder.""" - - def __init__(self, ntoken=10, ninp=512, nhid=256, nlayers=5, dropout=0.5): - super(LSTMModel, self).__init__() - self.drop = nn.Dropout(dropout) - self.encoder = nn.Embedding(ntoken, ninp) - self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout) - self.decoder = nn.Linear(nhid, ntoken) - self.init_weights() - self.nhid = nhid - self.nlayers = nlayers - - def init_weights(self): - initrange = 0.1 - self.encoder.weight.data.uniform_(-initrange, initrange) - self.decoder.bias.data.zero_() - self.decoder.weight.data.uniform_(-initrange, initrange) - - def forward(self, input): - input = torch.ones((3, 10), dtype=torch.int32) - h0 = torch.randn(2, 10, 256) - c0 = torch.randn(2, 10, 256) - hidden = (h0, c0) - emb = self.encoder(input) - output, hidden = self.rnn(emb, hidden) - output = self.drop(output) - decoded = self.decoder(output) - return decoded, hidden - - -def eval_func(model): - # switch to evaluate mode - model.eval() - with torch.no_grad(): - input = torch.randn(1, 3, 224, 224) - # compute output - output = model(input) - return 0.0 - - -def q_func(model): - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - # switch to evaluate mode - model.train() - input = torch.randn(1, 3, 224, 224) - # compute output - output = model(input) - loss = output.mean() - optimizer.zero_grad() - loss.backward() - optimizer.step() - return model - - -class TestPytorchAdaptor(unittest.TestCase): - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1234, - "q_dataloader": None, - "workspace_path": 
"./", - } - framework = "pytorch" - adaptor = FRAMEWORKS[framework](framework_specific_info) - model = q_resnet18() - nc_model = MODELS["pytorch"](model) - - @classmethod - def setUpClass(self): - build_pytorch_yaml() - build_dump_tensors_yaml() - - @classmethod - def tearDownClass(self): - os.remove("ptq_yaml.yaml") - os.remove("dynamic_yaml.yaml") - os.remove("qat_yaml.yaml") - os.remove("dump_yaml.yaml") - os.remove("auto_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_get_all_weight_name(self): - assert len(list(self.nc_model.get_all_weight_names())) == 62 - - def test_get_weight(self): - for name, param in self.model.named_parameters(): - if name == "layer4.1.conv2.weight": - param.data.fill_(0.0) - if name == "fc.bias": - param.data.fill_(0.1) - assert int(torch.sum(self.nc_model.get_weight("layer4.1.conv2.weight"))) == 0 - assert torch.allclose(torch.sum(self.nc_model.get_weight("fc.bias")), torch.tensor(100.0)) - - def test_get_input(self): - model = MODELS["pytorch"](q_resnet18()) - model.model.eval().fuse_model() - model.register_forward_pre_hook() - rand_input = torch.rand(100, 3, 224, 224).float() - model.model(rand_input) - assert torch.equal(model.get_inputs("x"), rand_input) - model.remove_hooks() - - def test_update_weights(self): - self.nc_model.update_weights("fc.bias", torch.zeros([1000])) - assert int(torch.sum(self.nc_model.get_weight("fc.bias"))) == 0 - - def test_get_gradient(self): - with self.assertRaises(AssertionError): - self.nc_model.get_gradient("fc.bias") - - for name, tensor in self.nc_model._model.named_parameters(): - if name == "fc.bias": - tensor.grad = torch.zeros_like(tensor) - break - assert torch.equal(torch.Tensor(self.nc_model.get_gradient("fc.bias")), torch.zeros_like(tensor)) - - rand_input = torch.rand(100, 3, 224, 224).float() - rand_input.grad = torch.ones_like(rand_input) - assert torch.equal(torch.Tensor(self.nc_model.get_gradient(rand_input)), torch.ones_like(rand_input)) - - def test_report_sparsity(self): - df, total_sparsity = self.nc_model.report_sparsity() - self.assertTrue(total_sparsity > 0) - self.assertTrue(len(df) == 22) - - def test_quantization_saved(self): - for fake_yaml in ["dynamic_yaml.yaml", "qat_yaml.yaml", "ptq_yaml.yaml"]: - model = M() - quantizer = Quantization(fake_yaml) - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = True - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - quantizer.model = model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - q_model = quantizer.fit() - eval_func(q_model) - q_model.save("./saved") - # Load configure and weights by neural_compressor.utils - saved_model = load("./saved", model) - eval_func(saved_model) - # recover int8 model from history - history_file = "./saved/history.snapshot" - model_recover = recover(model, history_file, 0) - eval_func(model_recover) - self.assertEqual(type(saved_model.conv), type(model_recover.conv)) - shutil.rmtree("./saved", ignore_errors=True) - from neural_compressor.experimental import Benchmark - - evaluator = Benchmark("ptq_yaml.yaml") - # Load configure and weights by neural_compressor.model - evaluator.model = model - evaluator.b_dataloader = common.DataLoader(dataset) - evaluator.fit("accuracy") - - for fake_yaml in ["qat_yaml.yaml", "ptq_yaml.yaml"]: - model = copy.deepcopy(self.model) - if fake_yaml == "ptq_yaml.yaml": - model.eval().fuse_model() - conf = QuantConf(fake_yaml) 
- quantizer = Quantization(conf) - dataset = quantizer.dataset("dummy", (100, 3, 224, 224)) - quantizer.model = model - if fake_yaml == "qat_yaml.yaml": - quantizer.q_func = q_func - else: - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_func = eval_func - q_model = quantizer.fit() - q_model.save("./saved") - # Load configure and weights by neural_compressor.utils - saved_model = load("./saved", model) - eval_func(saved_model) - shutil.rmtree("./saved", ignore_errors=True) - - def test_quantization_new_saved(self): - for fake_yaml in ["dynamic_yaml.yaml", "qat_yaml.yaml", "ptq_yaml.yaml"]: - model = M() - quantizer = Quantization(fake_yaml) - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = True - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - quantizer.model = model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - q_model = quantizer.fit() - eval_func(q_model) - torch.save(q_model.quantized_state_dict(), "./saved/model.pt") - # Load configure and weights by neural_compressor.utils - from neural_compressor.experimental.common import Model - - common_model = Model(model) - common_model.load_quantized_state_dict(torch.load("./saved/model.pt")) - eval_func(common_model) - self.assertEqual(type(q_model._model.linear), type(common_model._model.linear)) - shutil.rmtree("./saved", ignore_errors=True) - - @unittest.skipIf(IPEX, "this function is affected by IPEX, Fixing now.") - def test_non_quant_module(self): - for fake_yaml in ["qat_yaml.yaml", "ptq_yaml.yaml"]: - model = PartialQuantModel() - conf = QuantConf(fake_yaml) - quantizer = Quantization(conf) - dataset = quantizer.dataset("dummy", (1, 3, 224, 224)) - non_quant_dict = { - "non_quant_module_name": ["conv", "conv1", "sub.conv"], - "non_quant_module_class": ["BatchNorm2d", "FP32Model"], - } - quantizer.model = common.Model(model, **non_quant_dict) - if fake_yaml == "qat_yaml.yaml": - quantizer.q_func = q_func - else: - quantizer.calib_func = eval_func - quantizer.eval_func = eval_func - q_model = quantizer.fit() - self.assertTrue(isinstance(q_model.model.conv, torch.nn.Conv2d)) - self.assertTrue("quantize" in str(q_model.model.conv2.__class__)) - q_model.save("./saved") - saved_model = load("./saved", model, **non_quant_dict) - eval_func(saved_model) - shutil.rmtree("./saved", ignore_errors=True) - - def test_auto_quant(self): - def eval_func(model): - return 1 - - model_origin = LSTMModel( - ntoken=10, - ninp=512, - nhid=256, - nlayers=2, - ) - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization("auto_yaml.yaml") - dataset = quantizer.dataset("dummy", (3, 10), label=True) - quantizer.eval_func = eval_func - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = common.Model(model_origin) - q_model = quantizer.fit() - self.assertNotEqual(q_model, None) - - def test_workspace_path(self): - model = M() - quantizer = Quantization("ptq_yaml.yaml") - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = True - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - quantizer.model = model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - q_model = quantizer.fit() - eval_func(q_model) - torch.save(q_model.quantized_state_dict(), "./saved/best_model.pt") - # Load configure and weights by workspace_path - from neural_compressor.experimental.common import 
Model - - common_model = Model(model) - common_model.workspace_path = "./saved" - eval_func(common_model) - self.assertEqual(type(q_model._model.linear), type(common_model._model.linear)) - shutil.rmtree("./saved", ignore_errors=True) - - def test_get_graph_info(self): - from neural_compressor.model.torch_model import PyTorchModel - - model = PyTorchModel(self.model) - op_map = model.graph_info - self.assertTrue(op_map["conv1"] == "Conv2d") - - def test_tensorboard(self): - model = copy.deepcopy(self.nc_model) - model.model.eval().fuse_model() - quantizer = Quantization("dump_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - quantizer.model = model.model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_func = eval_func - quantizer.fit() - self.assertTrue(True if os.path.exists("runs/eval/baseline_acc0.0") else False) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.eval_func = None - quantizer.fit() - self.assertTrue(True if os.path.exists("runs/eval/baseline_acc0.0") else False) - - def test_tensor_dump_and_set(self): - model = copy.deepcopy(self.nc_model) - model.model.eval().fuse_model() - quantizer = Quantization("ptq_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = common.DataLoader(dataset) - dataloader = common._generate_common_dataloader(dataloader, "pytorch") - quantizer.eval_dataloader = dataloader - quantizer.calib_dataloader = dataloader - quantizer.model = model.model - q_model = quantizer.fit() - quantizer.strategy.adaptor.inspect_tensor( - model, - dataloader, - op_list=["conv1.0", "layer1.0.conv1.0"], - iteration_list=[1, 2], - inspect_type="all", - save_to_disk=True, - ) - with open("saved/inspect_result.pkl", "rb") as fp: - tensor_dict = pickle.load(fp) - a = tensor_dict["activation"][0] - w = tensor_dict["weight"] - if PT_VERSION >= Version("1.8.0").release: - self.assertTrue(w["conv1.0"]["conv1.0.weight"].shape[0] == a["conv1.0"]["conv1.0.output0"].shape[1]) - else: - self.assertTrue(w["conv1.0"]["conv1.0.weight"].shape[0] == a["conv1.0"]["conv1.1.output0"].shape[1]) - data = np.random.random(w["conv1.0"]["conv1.0.weight"].shape).astype(np.float32) - quantizer.strategy.adaptor.set_tensor(q_model, {"conv1.0.weight": data}) - changed_tensor = q_model.get_weight("conv1.weight") - scales = changed_tensor.q_per_channel_scales() - changed_tensor_fp32 = torch.dequantize(changed_tensor) - self.assertTrue(np.allclose(data, changed_tensor_fp32.numpy(), atol=2 / np.min(scales.numpy()))) - quantizer.strategy.adaptor.inspect_tensor( - q_model, - dataloader, - op_list=["conv1.0", "layer1.0.conv1.0"], - iteration_list=[1, 2], - inspect_type="all", - save_to_disk=False, - ) - - def test_forward_wrapper(self): - vision_model = resnet18() - - class dummymodel(torch.nn.Module): - def __init__(self, model): - super(dummymodel, self).__init__() - self._model = model - - def forward(self, input=None): - return self._model(input) - - data = [ - [{"input": torch.rand(3, 224, 224)}, torch.ones(1, 1)], - ] - # dataloader.batch_size=100 - dataloader = common.DataLoader(data, batch_size=1) - quantizer = Quantization("dynamic_yaml.yaml") - model = dummymodel(vision_model) - quantizer.model = model - quantizer.calib_dataloader = dataloader - quantizer.eval_dataloader = dataloader - model = quantizer.fit() - self.assertTrue(isinstance(model, torch.nn.Module)) - - def test_floatfunctions_fallback(self): - class ModelWithFunctionals(torch.nn.Module): - def __init__(self): - 
super(ModelWithFunctionals, self).__init__() - self.mycat = nnq.FloatFunctional() - self.myadd = nnq.FloatFunctional() - self.myadd_relu = nnq.FloatFunctional() - # Tracing doesn't work yet for c10 ops with scalar inputs - # https://github.com/pytorch/pytorch/issues/27097 - self.my_scalar_add = nnq.FloatFunctional() - self.mymul = nnq.FloatFunctional() - self.my_scalar_mul = nnq.FloatFunctional() - self.quant = QuantStub() - self.dequant = DeQuantStub() - - def forward(self, x): - x = self.quant(x) - y = self.mycat.cat([x, x, x]) - z = self.myadd.add(y, y) - w = self.myadd_relu.add_relu(z, z) - # Tracing doesn't work yet for c10 ops with scalar inputs - # https://github.com/pytorch/pytorch/issues/27097 - w = self.my_scalar_add.add_scalar(w, -0.5) - w = self.mymul.mul(w, w) - w = self.my_scalar_mul.mul_scalar(w, 0.5) - w = self.dequant(w) - return w - - model = ModelWithFunctionals() - model = MODELS["pytorch"](model) - x = torch.rand(10, 1, dtype=torch.float) - y = model.model(x) - fallback_ops = [] - q_capability = self.adaptor.query_fw_capability(model) - for k, v in q_capability["opwise"].items(): - if k[0] != "quant" and k[0] != "dequant": - fallback_ops.append(k[0]) - model.model.qconfig = torch.quantization.default_qconfig - model.model.quant.qconfig = torch.quantization.default_qconfig - if PT_VERSION >= Version("1.8.0").release: - model.model.dequant.qconfig = torch.quantization.default_qconfig - nc_torch._fallback_quantizable_ops_recursively(model.model, "", fallback_ops, op_qcfgs={}) - if PT_VERSION >= Version("2.0.0").release: - from torch.quantization.quantize import _add_observer_ as add_observer_ - else: - from torch.quantization.quantize import add_observer_ - add_observer_(model.model) - model.model(x) - torch.quantization.convert(model.model, self.adaptor.q_mapping, inplace=True) - qy = model.model(x) - tol = {"atol": 1e-01, "rtol": 1e-03} - self.assertTrue(np.allclose(y, qy, **tol)) - - -@unittest.skipIf(not FX_MODE, "Unsupported Fx Mode with PyTorch Version Below 1.8") -class TestPytorchFXAdaptor(unittest.TestCase): - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1234, - "q_dataloader": None, - "workspace_path": "./", - } - framework = "pytorch_fx" - adaptor = FRAMEWORKS[framework](framework_specific_info) - - @classmethod - def setUpClass(self): - build_pytorch_fx_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fx_ptq_yaml.yaml") - os.remove("fx_dynamic_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_fx_quant(self): - for fake_yaml in ["fx_qat_yaml.yaml", "fx_ptq_yaml.yaml"]: - model_origin = resnet18() - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (10, 3, 224, 224), label=True) - quantizer.eval_func = eval_func - if fake_yaml == "fx_qat_yaml.yaml": - quantizer.q_func = q_func - else: - quantizer.calib_func = eval_func - dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = dataloader - quantizer.model = common.Model( - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - q_model = quantizer.fit() - q_model.save("./saved") - # Load configure and weights with neural_compressor.utils - model_fx = load( - "./saved", - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": 
["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule)) - - # recover int8 model with only tune_cfg - history_file = "./saved/history.snapshot" - model_fx_recover = recover( - model_origin, - history_file, - 0, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertEqual(model_fx.code, model_fx_recover.code) - shutil.rmtree("./saved", ignore_errors=True) - - for fake_yaml in ["fx_qat_yaml.yaml", "fx_ptq_yaml.yaml"]: - model_origin = M() - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization(fake_yaml) - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = True - dataset = quantizer.dataset("dummy", (10, 3, 224, 224), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = common.Model( - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - q_model = quantizer.fit() - q_model.save("./saved") - # Load configure and weights with neural_compressor.utils - model_fx = load( - "./saved", - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - "dataloader": quantizer.calib_dataloader, - } - ) - self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule)) - shutil.rmtree("./saved", ignore_errors=True) - - @unittest.skipIf( - PT_VERSION < Version("1.9.0").release, - "Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend", - ) - def test_fx_dynamic_quant(self): - model = LSTMModel( - ntoken=10, - ninp=512, - nhid=256, - nlayers=5, - ) - # run fx_quant in neural_compressor and save the quantized GraphModule - model.eval() - quantizer = Quantization("fx_dynamic_yaml.yaml") - quantizer.model = common.Model( - copy.deepcopy(model), - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - q_model = quantizer.fit() - q_model.save("./saved") - - # Load configure and weights by neural_compressor.utils - model_fx = load( - "./saved", - copy.deepcopy(model), - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule)) - - # Test the functionality of older model saving type - state_dict = torch.load("./saved/best_model.pt") - tune_cfg = state_dict.pop("best_configure") - import yaml - - with open("./saved/best_configure.yaml", "w") as f: - yaml.dump(tune_cfg, f, default_flow_style=False) - torch.save(state_dict, "./saved/best_model_weights.pt") - os.remove("./saved/best_model.pt") - model_fx = load( - "./saved", - copy.deepcopy(model), - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertTrue(isinstance(model_fx, torch.fx.graph_module.GraphModule)) - - # recover int8 model with only tune_cfg - history_file = "./saved/history.snapshot" - model_fx_recover = recover( - model, - history_file, - 0, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, 
- "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertEqual(model_fx.code, model_fx_recover.code) - shutil.rmtree("./saved", ignore_errors=True) - - def test_default_dynamic_quant(self): - def eval_func(model): - return 1 - - def q_func(model): - return model - - # Model Definition - for fake_yaml in ["fx_qat_yaml.yaml", "fx_ptq_yaml.yaml"]: - model_origin = LSTMModel( - ntoken=10, - ninp=512, - nhid=256, - nlayers=2, - ) - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (3, 10), label=True) - quantizer.eval_func = eval_func - if fake_yaml == "fx_qat_yaml.yaml": - quantizer.q_func = q_func - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = common.Model(model_origin) - q_model = quantizer.fit() - self.assertTrue("quantize" in str(type(q_model.model.encoder))) - self.assertTrue("quantize" in str(type(q_model.model.rnn))) - - def test_fx_sub_module_quant(self): - for fake_yaml in ["fx_qat_yaml.yaml", "fx_dynamic_yaml.yaml", "fx_ptq_yaml.yaml"]: - model_origin = DynamicControlModel() - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (1, 3, 224, 224), label=True) - quantizer.eval_func = eval_func - if fake_yaml == "fx_qat_yaml.yaml": - quantizer.q_func = q_func - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = common.Model( - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - q_model = quantizer.fit() - q_model.save("./saved") - # Load configure and weights with neural_compressor.utils - model_fx = load( - "./saved/best_model.pt", - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertTrue(isinstance(model_fx.sub, torch.fx.graph_module.GraphModule)) - - # recover int8 model with only tune_cfg - history_file = "./saved/history.snapshot" - model_fx_recover = recover( - model_origin, - history_file, - 0, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - self.assertEqual(model_fx.sub.code, model_fx_recover.sub.code) - shutil.rmtree("./saved", ignore_errors=True) - - def test_deepcopy_failure(self): - def eval_func(model): - return 1 - - # To build an object t2, which will fail on deepcopy. 
- class T1: - def __init__(self, t1) -> None: - self.t1 = t1 - self.j = 1 - - # required for usage with set in T1 - def __hash__(self): - return hash(self.j) - - t1 = set() - t2 = T1([t1]) - t1.add(t2) - - for fake_yaml in ["fx_ptq_yaml.yaml"]: - model_origin = M() - model_origin.tmp = t2 - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (1, 3, 224, 224), label=True) - quantizer.eval_func = eval_func - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = common.Model(model_origin) - q_model = quantizer.fit() - self.assertTrue(isinstance(q_model.model, torch.fx.graph_module.GraphModule)) - - @unittest.skipIf( - PT_VERSION < Version("1.11.0").release, - "Please use PyTroch 1.11 or higher version for mixed precision with pytorch_fx or pytorch backend", - ) - def test_bf16_capability(self): - model_origin = DynamicControlModel() - os.environ["FORCE_BF16"] = "1" - q_capability = self.adaptor._get_quantizable_ops(model_origin) - del os.environ["FORCE_BF16"] - - self.assertEqual([elem["weight"]["dtype"] for elem in q_capability["optypewise"]["Conv2d"]], [["int8"], "fp32"]) - self.assertEqual( - [elem["activation"]["dtype"] for elem in q_capability["optypewise"]["Conv2d"]], [["uint8"], "fp32"] - ) - self.assertEqual( - [elem["weight"]["dtype"] for elem in q_capability["opwise"][("conv", "Conv2d")]], [["int8"], "fp32"] - ) - self.assertEqual( - [elem["activation"]["dtype"] for elem in q_capability["opwise"][("conv", "Conv2d")]], [["uint8"], "fp32"] - ) - self.assertEqual( - [elem["weight"]["dtype"] for elem in q_capability["opwise"][("linear", "Linear")]], - [["int8"], "fp32", "bf16"], - ) - self.assertEqual( - [elem["activation"]["dtype"] for elem in q_capability["opwise"][("linear", "Linear")]], - [["uint8"], "fp32", "bf16"], - ) - - @unittest.skipIf( - PT_VERSION < Version("1.11.0").release, - "Please use PyTroch 1.11 or higher version for mixed precision with pytorch_fx or pytorch backend", - ) - def test_mix_precision(self): - fake_yaml = "fx_ptq_yaml.yaml" - model_origin = DynamicControlModel() - # run fx_quant in neural_compressor and save the quantized GraphModule - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (1, 3, 224, 224), label=True) - quantizer.eval_func = eval_func - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = common.Model( - model_origin, - **{ - "prepare_custom_config_dict": {"non_traceable_module_name": ["a"]}, - "convert_custom_config_dict": {"preserved_attributes": []}, - } - ) - q_model = quantizer.fit() - tune_cfg = q_model.q_config - tune_cfg["op"][("conv.module", "Conv2d")].clear() - tune_cfg["op"][("conv.module", "Conv2d")] = {"weight": {"dtype": "bf16"}, "activation": {"dtype": "bf16"}} - tune_cfg["bf16_ops_list"].append(("conv.module", "Conv2d")) - from neural_compressor.adaptor.torch_utils.bf16_convert import Convert - - q_model._model = Convert(q_model._model, tune_cfg) - - self.assertEqual(q_model._model.conv.module.module.weight.dtype, torch.bfloat16) - self.assertEqual(q_model._model.conv.module.module.bias.dtype, torch.bfloat16) - - def test_symbolic_trace(self): - from neural_compressor.adaptor.torch_utils.symbolic_trace import symbolic_trace - - model_origin = DynamicControlModel() - traced_model = symbolic_trace(model_origin, is_qat=False) - if PT_VERSION >= Version("1.11.0").release: - self.assertTrue(isinstance(traced_model.sub, torch.nn.Module)) - 
self.assertTrue(isinstance(traced_model.conv, torch.fx.graph_module.GraphModule)) - else: - self.assertTrue(isinstance(traced_model.sub, torch.fx.graph_module.GraphModule)) - traced_model_qat = symbolic_trace(model_origin, is_qat=True) - self.assertTrue(isinstance(traced_model_qat.sub, torch.fx.graph_module.GraphModule)) - - def test_tensor_dump(self): - model = resnet18() - model = MODELS["pytorch"](model) - quantizer = Quantization("fx_ptq_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = common.DataLoader(dataset) - dataloader = common._generate_common_dataloader(dataloader, "pytorch") - quantizer.eval_dataloader = dataloader - quantizer.calib_dataloader = dataloader - quantizer.model = model.model - q_model = quantizer.fit() - op_list, _ = quantizer.strategy.adaptor.diagnosis_helper(model, q_model, None) - quantizer.strategy.adaptor.inspect_tensor( - model, dataloader, op_list=op_list, iteration_list=[1], inspect_type="all", save_to_disk=True - ) - with open("saved/inspect_result.pkl", "rb") as fp: - tensor_dict = pickle.load(fp) - a = tensor_dict["activation"][0] - w = tensor_dict["weight"] - self.assertTrue(w["conv1"]["conv1.weight"].shape[0] == a["conv1"]["conv1.output0"].shape[1]) - quantizer.strategy.adaptor.inspect_tensor( - q_model, - dataloader, - op_list=["conv1", "layer2.0.downsample.0"], - iteration_list=[1, 2], - inspect_type="all", - save_to_disk=True, - ) - with open("saved/inspect_result.pkl", "rb") as fp: - tensor_dict = pickle.load(fp) - a = tensor_dict["activation"][0] - w = tensor_dict["weight"] - self.assertTrue( - w["layer2.0.downsample.0"]["layer2.0.downsample.0.weight"].shape[0] - == a["layer2.0.downsample.0"]["layer2.0.downsample.0.output0"].shape[1] - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_2x.py b/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_2x.py index ea8a18f424e..1bfa38a0bb7 100644 --- a/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_2x.py +++ b/test/adaptor/pytorch_adaptor/test_adaptor_pytorch_2x.py @@ -392,21 +392,15 @@ def test_fx_sub_module_quant(self): "Please use PyTroch 1.11 or higher version for mixed precision with pytorch_fx or pytorch backend", ) def test_mix_precision(self): + os.environ["FORCE_BF16"] = "1" model_origin = DynamicControlModel() - # run fx_quant in neural_compressor and save the quantized GraphModule dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224)) dataloader = DataLoader("pytorch", dataset) set_workspace("./saved") + # fx mode usually has a .module suffix because tracing of the entire model fails, so use conv.* to leverage re.match + ptq_fx_op_name_list["conv.*"] = {"weight": {"dtype": "bf16"}, "activation": {"dtype": "bf16"}} conf = PostTrainingQuantConfig(op_name_dict=ptq_fx_op_name_list) q_model = quantization.fit(model_origin, conf, calib_dataloader=dataloader, calib_func=eval_func) - tune_cfg = q_model.q_config - tune_cfg["op"][("conv.module", "Conv2d")].clear() - tune_cfg["op"][("conv.module", "Conv2d")] = {"weight": {"dtype": "bf16"}, "activation": {"dtype": "bf16"}} - tune_cfg["bf16_ops_list"].append(("conv.module", "Conv2d")) - from neural_compressor.adaptor.torch_utils.bf16_convert import Convert - - q_model._model = Convert(q_model._model, tune_cfg) - self.assertEqual(q_model._model.conv.module.module.weight.dtype, torch.bfloat16) self.assertEqual(q_model._model.conv.module.module.bias.dtype, torch.bfloat16) diff --git
a/test/adaptor/pytorch_adaptor/test_weight_only_adaptor_pytorch.py b/test/adaptor/pytorch_adaptor/test_weight_only_adaptor_pytorch.py index c3839a80b8a..61456b923f8 100644 --- a/test/adaptor/pytorch_adaptor/test_weight_only_adaptor_pytorch.py +++ b/test/adaptor/pytorch_adaptor/test_weight_only_adaptor_pytorch.py @@ -760,7 +760,7 @@ def test_AutoRound_quant(self): tokenizer = transformers.AutoTokenizer.from_pretrained( "hf-internal-testing/tiny-random-GPTJForCausalLM", trust_remote_code=True ) - dataloader = get_dataloader(tokenizer, 32, dataset_name="NeelNanda/pile-10k", seed=42, bs=8, n_samples=20) + dataloader = get_dataloader(tokenizer, 32, dataset_name="NeelNanda/pile-10k", seed=42, bs=8, nsamples=20) fp32_model = copy.deepcopy(self.gptj) conf = PostTrainingQuantConfig( approach="weight_only", diff --git a/test/adaptor/tensorflow_adaptor/test_bf16_convert.py b/test/adaptor/tensorflow_adaptor/test_bf16_convert.py index 4467f709aba..4a3801f7b32 100644 --- a/test/adaptor/tensorflow_adaptor/test_bf16_convert.py +++ b/test/adaptor/tensorflow_adaptor/test_bf16_convert.py @@ -12,113 +12,6 @@ from neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert import BF16Convert -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: final - device: cpu - use_bf16: True - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - exit_policy: - max_trials: 2 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -def build_newapi_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: final - device: cpu - use_bf16: True - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - exit_policy: - max_trials: 2 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - with open("newapi_fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -def build_fake_bf16_rnn_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input_1 - outputs: dense/BiasAdd - device: cpu - use_bf16: True - quantization: - op_wise: { - \"lstm/while/MatMul\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm/while/MatMul_1\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm/while/MatMul_2\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm/while/MatMul_3\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm_1/while/MatMul\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm_1/while/MatMul_1\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm_1/while/MatMul_2\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - \"lstm_1/while/MatMul_3\": { - \"activation\": {\"dtype\": [\"bf16\"]}, - }, - } - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.05 - exit_policy: - performance_only: True - """ - with open("fake_bf16_rnn.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - def create_test_graph(bf16_graph=True): input_node = node_def_pb2.NodeDef() input_node.name = "input" @@ -360,49 +253,11 @@ def setUpClass(self): self.input_graph.ParseFromString(f.read()) self.test_graph = create_test_graph() self.test_fp32_graph = create_test_graph(False) - build_fake_yaml() - build_newapi_fake_yaml() - build_fake_bf16_rnn_yaml() @classmethod def tearDownClass(self): - 
os.remove("fake_yaml.yaml") - os.remove("newapi_fake_yaml.yaml") - os.remove("fake_bf16_rnn.yaml") shutil.rmtree("saved", ignore_errors=True) - def test_bf16_transpose_b_matmul(self): - from tensorflow.core.framework import attr_value_pb2 - - os.environ["FORCE_BF16"] = "1" - DT_BFLOAT16 = attr_value_pb2.AttrValue(type=dtypes.bfloat16.as_datatype_enum) - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=float) - x = tf.compat.v1.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="no_quant_matmul", transpose_b=True) - z = tf.nn.relu6(z, name="op_to_store") - is_bf16 = False - with tf.compat.v1.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == "MatMul" and i.attr["T"] == DT_BFLOAT16: - is_bf16 = True - break - self.assertEqual(is_bf16, True) - @unittest.skipIf(tf.__version__ < "2.0", "currently bf16 convert does not support 1.15up3") def test_rn50_convert(self): bf16_nodes = [node.name for node in self.input_graph.node if node.op in ["Conv2D", "AvgPool", "MatMul"]] @@ -426,75 +281,6 @@ def test_do_transform(self): self.assertEqual(new_relu2.attr["T"].type, dtypes.bfloat16) self.assertEqual(new_conv3.attr["T"].type, dtypes.float32) - def test_bf16_fallback(self): - os.environ["FORCE_BF16"] = "1" - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("newapi_fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(1, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.test_fp32_graph - output_graph = quantizer.fit() - cast_op_count = 0 - for node in output_graph.graph_def.node: - if node.op == "Cast": - cast_op_count += 1 - self.assertTrue(cast_op_count == 0) - - @unittest.skipIf(tf.version.VERSION.find("up") == -1, "Only supports tf 1.x") - def test_bf16_rnn(self): - os.environ["FORCE_BF16"] = "1" - try: - inp = tf.keras.layers.Input(shape=(None, 4)) - lstm_1 = tf.keras.layers.LSTM(units=10, return_sequences=True)(inp) - dropout_1 = tf.keras.layers.Dropout(0.2)(lstm_1) - lstm_2 = tf.keras.layers.LSTM(units=10, return_sequences=False)(dropout_1) - dropout_2 = tf.keras.layers.Dropout(0.2)(lstm_2) - out = tf.keras.layers.Dense(1)(dropout_2) - model = tf.keras.models.Model(inputs=inp, outputs=out) - - model.compile(loss="mse", optimizer=tf.keras.optimizers.RMSprop()) - - # input_names = [t.name.split(":")[0] for t in model.inputs] - output_names = [t.name.split(":")[0] for t in model.outputs] - - q_data = np.random.randn(64, 10, 4) - label = np.random.randn(64, 1) - model.predict(q_data) - - sess = tf.keras.backend.get_session() - - graph = sess.graph - - from tensorflow.compat.v1 import graph_util - - graph_def = graph_util.convert_variables_to_constants( - sess, - graph.as_graph_def(), - output_names, - ) - quant_data = (q_data, label) - evl_data = (q_data, label) - - from 
neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_bf16_rnn.yaml") - quantizer.calib_dataloader = common.DataLoader(dataset=list(zip(quant_data[0], quant_data[1]))) - quantizer.eval_dataloader = common.DataLoader(dataset=list(zip(evl_data[0], evl_data[1]))) - quantizer.model = graph_def - quantized_model = quantizer.fit() - - convert_to_bf16_flag = False - for i in quantized_model.graph_def.node: - if i.name == "lstm/while/MatMul_3" and i.attr["T"].type == dtypes.bfloat16.as_datatype_enum: - convert_to_bf16_flag = True - - self.assertEqual(convert_to_bf16_flag, True) - except NotImplementedError: - # Kernel bug, happens when the version of python is 3.7 and the version of numpy is >= 1.20.0 - pass - if __name__ == "__main__": unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorboard.py b/test/adaptor/tensorflow_adaptor/test_tensorboard.py deleted file mode 100644 index 69b4b1070a8..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorboard.py +++ /dev/null @@ -1,241 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 -from tensorflow.python.framework import dtypes, tensor_util - -from neural_compressor.adaptor.tf_utils.util import version1_gt_version2 - -tf.compat.v1.disable_eager_execution() - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: final - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - tensorboard: true - strategy: - name: basic - exit_policy: - timeout: 200 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - 
bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "conv1_bias_add" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - relu_node = node_def_pb2.NodeDef() - relu_node.op = "Relu" - relu_node.name = "relu" - relu_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node.input.extend([bias_add_node.name]) - - conv2_weight_node = node_def_pb2.NodeDef() - conv2_weight_node.name = "conv2_weights" - conv2_weight_node.op = "Const" - conv2_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv2_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv2_weight_value, conv2_weight_value.dtype.type, conv2_weight_value.shape - ) - ) - ) - - conv2_node = node_def_pb2.NodeDef() - conv2_node.name = "conv2" - conv2_node.op = "Conv2D" - conv2_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_node.input.extend([relu_node.name, conv2_weight_node.name]) - conv2_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv2_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node2 = node_def_pb2.NodeDef() - bias_node2.name = "conv2_bias" - bias_node2.op = "Const" - bias_value2 = np.float32(np.abs(np.random.randn(32))) - bias_node2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node2.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value2, bias_value2.dtype.type, bias_value2.shape) - ) - ) - - bias_add_node2 = node_def_pb2.NodeDef() - bias_add_node2.name = "conv2_bias_add" - bias_add_node2.op = "BiasAdd" - bias_add_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node2.input.extend([conv2_node.name, bias_node2.name]) - bias_add_node2.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node2 = node_def_pb2.NodeDef() - relu_node2.op = "Relu" - relu_node2.name = "relu2" - relu_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node2.input.extend([bias_add_node2.name]) - - conv3_weight_node = node_def_pb2.NodeDef() - conv3_weight_node.name = "conv3_weights" - conv3_weight_node.op = "Const" - conv3_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv3_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv3_weight_value, conv3_weight_value.dtype.type, conv3_weight_value.shape - ) - ) - ) 
- - conv3_node = node_def_pb2.NodeDef() - conv3_node.name = "conv3" - conv3_node.op = "Conv2D" - conv3_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_node.input.extend([relu_node2.name, conv3_weight_node.name]) - conv3_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv3_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - identity_node = node_def_pb2.NodeDef() - identity_node.name = "final" - identity_node.op = "Identity" - identity_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - identity_node.input.extend([conv3_node.name]) - - graph = graph_pb2.GraphDef() - - graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - relu_node, - conv2_weight_node, - conv2_node, - bias_node2, - bias_add_node2, - relu_node2, - conv3_weight_node, - conv3_node, - identity_node, - ] - ) - - return graph - - -class TestTensorboard(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - shutil.rmtree("saved", ignore_errors=True) - shutil.rmtree("runs/", ignore_errors=True) - - @unittest.skipIf( - version1_gt_version2(tf.version.VERSION, "2.5.0"), "Skip test_bf16_fallback case for tf 2.6.0 and above." - ) - def test_run_basic_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (1, 224, 224, 3), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - self.assertTrue(True if len(os.listdir("./runs/eval")) > 2 else False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_calculate_op_sensitivity.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_calculate_op_sensitivity.py deleted file mode 100644 index 076fb27c666..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_calculate_op_sensitivity.py +++ /dev/null @@ -1,150 +0,0 @@ -import os -import shutil -import unittest - -import numpy as np -import tensorflow as tf - - -def build_msev2_yaml(): - mse_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse_v2 - accuracy_criterion: - relative: 0.01 - exit_policy: - max_trials: 10 - timeout: 3600 - """ - with open("mse_yaml.yaml", "w", encoding="utf-8") as f: - f.write(mse_yaml) - - -def build_fake_model(): - try: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d( - input=op, - filters=z, - strides=[1, 
1, 1, 1], - padding="VALID", - ) - last_identity = tf.identity(op2, name="op2_to_store") - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d(input=op, filters=z, strides=[1, 1, 1, 1], padding="VALID") - last_identity = tf.identity(op2, name="op2_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class TestGetOutputTensor(unittest.TestCase): - @classmethod - def setUpClass(self): - build_msev2_yaml() - self.model = build_fake_model() - - @classmethod - def tearDownClass(self): - os.remove("mse_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_get_output_op_names(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("mse_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.model - qmodel = quantizer.fit() - - self.assertEqual(quantizer.strategy.adaptor.get_output_op_names(qmodel), ["Conv2D_dummy_biasadd"]) - - def test_calculate_op_sensitivity(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("mse_yaml.yaml") - quantizer.model = self.model - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.pre_process() - - dataloader = quantizer._calib_dataloader - strategy = quantizer.strategy - adaptor = strategy.adaptor - tune_cfg_generator = strategy.next_tune_cfg() - tune_cfg = strategy._tune_cfg_converter(next(tune_cfg_generator)) - output_op_names = ["Conv2D_dummy_biasadd"] - - op_sensitivity = adaptor.calculate_op_sensitivity( - model=quantizer.model, - dataloader=dataloader, - tune_cfg=tune_cfg, - output_op_names=output_op_names, - confidence_batches=1, - fallback=True, - ) - self.assertIn(("op_to_store", "conv2d"), op_sensitivity) - self.assertIn(("Conv2D", "conv2d"), op_sensitivity) - - tune_cfg["op"][("op_to_store", "conv2d")] = { - "activation": {"dtype": "fp32", "quant_mode": "fp32"}, - "weight": {"dtype": "fp32"}, - } - - op_sensitivity = adaptor.calculate_op_sensitivity( - model=quantizer.model, - dataloader=dataloader, - tune_cfg=tune_cfg, - output_op_names=output_op_names, - confidence_batches=1, - fallback=True, - ) - self.assertNotIn(("op_to_store", "conv2d"), op_sensitivity) - self.assertIn(("Conv2D", "conv2d"), op_sensitivity) - 
- -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_concat.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_concat.py deleted file mode 100644 index da161bd2737..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_concat.py +++ /dev/null @@ -1,153 +0,0 @@ -# -# -# -*- coding: utf-8 -*- -import os -import platform -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -import neural_compressor -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel -from neural_compressor.adaptor.tf_utils.util import disable_random, read_graph - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: predict - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestTensorflowConcat(unittest.TestCase): - mb_model_url = ( - "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/inceptionv3_fp32_pretrained_model.pb" - ) - pb_path = "/tmp/.neural_compressor/inceptionv3_fp32.pb" - platform = platform.system().lower() - if platform == "windows": - pb_path = "C:\\tmp\\.neural_compressor\\inceptionv3_fp32.pb" - - @classmethod - def setUpClass(self): - if not os.path.exists(self.pb_path) and self.platform == "linux": - os.system("mkdir -p /tmp/.neural_compressor && wget {} -O {} ".format(self.mb_model_url, self.pb_path)) - self.op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(neural_compressor.__file__), "adaptor/tensorflow.yaml") - ).get_eightbit_patterns() - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @unittest.skipIf(tf.__version__ < "2.0", "does not support on 1.15up3") - def test_tensorflow_concat_quantization(self): - output_graph_def = read_graph(self.pb_path) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 299, 299, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_quantized_concat_node = False - - target_concat_node_name = "v0/cg/incept_v3_a0/concat_eightbit_quantized_concatv2" - from neural_compressor.adaptor.tf_utils.graph_util import GraphAnalyzer - - cur_graph = GraphAnalyzer() - cur_graph.graph = output_graph.graph_def - graph_info = cur_graph.parse_graph() - found_quantized_concat_node = target_concat_node_name in graph_info - - self.assertEqual(found_quantized_concat_node, True) - min_out, max_out = [], [] - for input_conv_name in graph_info[target_concat_node_name].node.input[:4]: - # print (input_conv_name, graph_info[input_conv_name].node.input) - min_freezed_out_name = graph_info[input_conv_name].node.input[-2] - max_freezed_out_name = graph_info[input_conv_name].node.input[-1] - 
min_freezed_out_value = (graph_info[min_freezed_out_name].node.attr["value"].tensor.float_val)[0] - max_freezed_out_value = (graph_info[max_freezed_out_name].node.attr["value"].tensor.float_val)[0] - min_out.append(min_freezed_out_value) - max_out.append(max_freezed_out_value) - - self.assertEqual(len(set(min_out)), 1) - self.assertEqual(len(set(max_out)), 1) - - @disable_random() - def test_concat_with_different_input_type(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 128, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - sqrt = tf.math.sqrt(x) - relu_sqrt = tf.nn.relu(sqrt) - conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - concat = tf.concat([relu, conv_bias], 1) - final_node = tf.nn.relu(concat, name="op_to_store") - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 128, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantized_concat = False - for i in output_graph.graph_def.node: - if i.op == "QuantizedConcatV2": - quantized_concat = True - self.assertEqual(quantized_concat, False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_as_output.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_as_output.py deleted file mode 100644 index 1cd9fd0f5f6..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_as_output.py +++ /dev/null @@ -1,231 +0,0 @@ -import os -import shutil -import unittest - -import numpy as np -import tensorflow as tf -from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 -from tensorflow.python.framework import dtypes, tensor_util - -from neural_compressor.experimental import Quantization, common - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: conv3 - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - exit_policy: - max_trials: 2 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -def create_test_graph(): - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = 
np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "conv1_bias_add" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node = node_def_pb2.NodeDef() - relu_node.op = "Relu" - relu_node.name = "relu" - relu_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node.input.extend([bias_add_node.name]) - - conv2_weight_node = node_def_pb2.NodeDef() - conv2_weight_node.name = "conv2_weights" - conv2_weight_node.op = "Const" - conv2_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv2_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv2_weight_value, conv2_weight_value.dtype.type, conv2_weight_value.shape - ) - ) - ) - - conv2_node = node_def_pb2.NodeDef() - conv2_node.name = "conv2" - conv2_node.op = "Conv2D" - conv2_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_node.input.extend([relu_node.name, conv2_weight_node.name]) - conv2_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv2_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node2 = node_def_pb2.NodeDef() - bias_node2.name = "conv2_bias" - bias_node2.op = "Const" - bias_value2 = np.float32(np.abs(np.random.randn(32))) - bias_node2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node2.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - 
tensor=tensor_util.make_tensor_proto(bias_value2, bias_value2.dtype.type, bias_value2.shape) - ) - ) - - bias_add_node2 = node_def_pb2.NodeDef() - bias_add_node2.name = "conv2_bias_add" - bias_add_node2.op = "BiasAdd" - bias_add_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node2.input.extend([conv2_node.name, bias_node2.name]) - bias_add_node2.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node2 = node_def_pb2.NodeDef() - relu_node2.op = "Relu" - relu_node2.name = "relu2" - relu_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node2.input.extend([bias_add_node2.name]) - - log_node = node_def_pb2.NodeDef() - log_node.name = "log1" - log_node.op = "Log" - log_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - log_node.input.extend([relu_node2.name]) - - conv3_weight_node = node_def_pb2.NodeDef() - conv3_weight_node.name = "conv3_weights" - conv3_weight_node.op = "Const" - conv3_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv3_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv3_weight_value, conv3_weight_value.dtype.type, conv3_weight_value.shape - ) - ) - ) - - conv3_node = node_def_pb2.NodeDef() - conv3_node.name = "conv3" - conv3_node.op = "Conv2D" - conv3_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_node.input.extend([log_node.name, conv3_weight_node.name]) - conv3_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv3_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - test_graph = graph_pb2.GraphDef() - - test_graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - relu_node, - conv2_weight_node, - conv2_node, - bias_node2, - bias_add_node2, - log_node, - relu_node2, - conv3_weight_node, - conv3_node, - ] - ) - return test_graph - - -@unittest.skipIf(tf.__version__ < "2.8.0", "only support spr-base TF") -class TestConvAsOutput(unittest.TestCase): - @classmethod - def setUpClass(self): - self.test_graph = create_test_graph() - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - shutil.rmtree("saved", ignore_errors=True) - - def test_do_transform(self): - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(1, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = create_test_graph() - output_graph = quantizer.fit() - f = tf.io.gfile.GFile("ut.pb", "wb") - f.write(output_graph.graph_def.SerializeToString()) - for node in output_graph.graph_def.node: - if node.name == "conv3_eightbit_requantize": - self.assertTrue("Quantized" in node.op) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_fusion.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_fusion.py deleted file mode 100644 
index 1d10d5287d8..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_conv_fusion.py +++ /dev/null @@ -1,596 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import platform -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -import neural_compressor -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm import FoldBatchNormNodesOptimizer -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes import StripUnusedNodesOptimizer -from neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestConvBiasAddAddReluFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_conv_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_conv_fusion = False - break - - self.assertEqual(found_conv_fusion, False) - - @disable_random() - @unittest.skipIf(tf.__version__ < "2.0", "does not support on 1.15up3") - def test_depthwiseconv_biasadd_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x_pad, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed 
= tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = normed.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "QuantizedDepthwiseConv2DWithBias": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv_biasadd_fusion_with_negative_input(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x_pad, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = normed.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "QuantizedDepthwiseConv2DWithBias": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, False) - - @unittest.skipUnless( - bool(tf.version.VERSION.find("1.15.0-up") != -1 or tf.version.VERSION >= "2.1.0"), - "not supported the current tf version.", - ) - @disable_random() - def test_conv_biasadd_relu6_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu6 = tf.nn.relu6(normed, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = 
common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu6": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_add_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.nn.bias_add(conv2, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - relu = tf.nn.relu(normed2 + tf.constant([3.0])) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("QuantizedConv2D") != -1: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_squeeze_biasadd_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - squeeze = tf.squeeze(conv2) - normed2 = tf.nn.bias_add(conv2, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - relu = tf.nn.relu(normed2) - identity = tf.identity(relu, name="op_to_store") - - out_name = identity.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - correct_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasAndReluAndRequantize": - correct_conv_fusion = True - break - - self.assertEqual(correct_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_addv2_relu_fallback_fusion_1(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.leaky_relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = 
tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.AddV2(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasAndRequantize": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_addv2_relu_fallback_fusion_2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.AddV2(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def 
test_conv_fusion_with_last_matmul(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - # paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - # x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - reshape = tf.reshape(pooling, [-1, 3136]) - - y_data = np.random.random([3136, 1]) - - y = tf.constant(y_data, dtype=tf.float32, shape=[3136, 1]) - z = tf.matmul(reshape, y) - relu1 = tf.nn.relu(z) - y_data_1 = np.random.random([1, 1]) - y_1 = tf.constant(y_data_1, dtype=tf.float32, shape=[1, 1]) - - z_2nd_matmul = tf.matmul(relu1, y_1) - relu6 = tf.nn.relu6(z_2nd_matmul, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantize_v2_count = 0 - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - break - - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_conv_fusion_with_last_conv(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_weights_2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(pooling, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID") - conv_weights_3 = tf.compat.v1.get_variable( - "weight3", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - relu2 = tf.nn.relu(conv2) - conv3 = tf.nn.conv2d(relu2, conv_weights_3, strides=[1, 2, 2, 1], padding="VALID") - - relu3 = tf.nn.relu(conv3) - relu6 = tf.nn.relu6(relu3, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = 
output_graph_def - output_graph = quantizer.fit() - - quantize_v2_count = 0 - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - break - - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_conv_fusion_with_max_pooling(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - - relu = tf.nn.relu(x) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(pooling, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - biasadd = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = biasadd.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantized_pool_data_type = None - quantized_conv_data_type = None - for i in output_graph.graph_def.node: - if i.op.find("QuantizedMaxPool") != -1: - quantized_pool_data_type = i.attr["T"].type - if i.op.find("QuantizedConv2D") != -1: - quantized_conv_data_type = i.attr["Tinput"].type - - self.assertNotEqual(quantized_pool_data_type, None) - self.assertEqual(quantized_pool_data_type, quantized_conv_data_type) - - -class TestGraphConvFusion(unittest.TestCase): - rn50_fp32_pb_url = ( - "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/resnet50_fp32_pretrained_model.pb" - ) - pb_path = "/tmp/.neural_compressor/resnet50_fp32_pretrained_model.pb" - platform = platform.system().lower() - if platform == "windows": - pb_path = "C:\\tmp\.neural_compressor\\resnet50_fp32_pretrained_model.pb" - inputs = ["input"] - outputs = ["predict"] - - op_wise_config = { - "v0/resnet_v13/conv14/conv2d/Conv2D": (False, "minmax", False, 7.0), - "v0/resnet_v13/conv11/conv2d/Conv2D": (False, "minmax", False, 7.0), - "v0/resnet_v17/conv27/conv2d/Conv2D": (False, "minmax", False, 7.0), - } - - @classmethod - def setUpClass(self): - if not os.path.exists(self.pb_path): - if self.platform == "linux": - os.system( - "mkdir -p /tmp/.neural_compressor && wget {} -O {} ".format(self.rn50_fp32_pb_url, self.pb_path) - ) - elif self.platform == "windows": - os.system("md C:\\tmp\.neural_compressor && cd C:\\tmp\.neural_compressor") - from urllib import request - - request.urlretrieve(self.rn50_fp32_pb_url) - self.input_graph = tf.compat.v1.GraphDef() - with open(self.pb_path, "rb") as f: - self.input_graph.ParseFromString(f.read()) - - def test_conv_biasadd_relu_fusion(self): - tf.compat.v1.disable_eager_execution() - - self._tmp_graph_def = graph_util.remove_training_nodes(self.input_graph, self.outputs) - - self._tmp_graph_def = StripUnusedNodesOptimizer( - self._tmp_graph_def, self.inputs, self.outputs - ).do_transformation() - - self._tmp_graph_def = FoldBatchNormNodesOptimizer(self._tmp_graph_def).do_transformation() - op_wise_sequences = TensorflowQuery( - 
local_config_file=os.path.join(os.path.dirname(neural_compressor.__file__), "adaptor/tensorflow.yaml") - ).get_eightbit_patterns() - - output_graph, _, _ = QuantizeGraphForIntel( - self._tmp_graph_def, self.inputs, self.outputs, self.op_wise_config, op_wise_sequences, "cpu" - ).do_transform() - - node_name_type_mapping = {} - for i in output_graph.node: - node_name_type_mapping[i.name] = i.op - - should_disable_sum_node_name = "v0/resnet_v17/conv27/conv2d/Conv2D_eightbit_quantized_conv" - should_enable_sum_node_name = "v0/resnet_v13/conv11/conv2d/Conv2D_eightbit_quantized_conv" - should_disable_sum_flag = ( - should_disable_sum_node_name in node_name_type_mapping - and node_name_type_mapping[should_disable_sum_node_name] == "QuantizedConv2DWithBias" - ) - should_enable_sum_flag = ( - should_enable_sum_node_name in node_name_type_mapping - and node_name_type_mapping[should_enable_sum_node_name] == "QuantizedConv2DWithBiasSumAndRelu" - ) - self.assertEqual(should_enable_sum_flag, True) - self.assertEqual(should_disable_sum_flag, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_debug_mode.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_debug_mode.py deleted file mode 100644 index e522f47c68e..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_debug_mode.py +++ /dev/null @@ -1,99 +0,0 @@ -import logging -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - -logger = logging.getLogger("neural_compressor") -logger.setLevel(logging.DEBUG) - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestTensorflowGraphAdaptorDebugMode(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_graph_adaptor_debug_mode(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - - conv_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="SAME") - normed = tf.nn.bias_add(conv, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - relu = tf.nn.relu(normed + tf.constant([3.0])) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - 
quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("QuantizedConv2D") != -1: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_dump_tensor.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_dump_tensor.py deleted file mode 100644 index 6dd17807807..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_dump_tensor.py +++ /dev/null @@ -1,161 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import yaml - -np.random.seed(0) - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_kl(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - optimization: - arithmetic: False # optional. grappler arithmetic optimizer,default value is True. - model_wise: - activation: - algorithm: kl - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.99 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_kl.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - - with tf.compat.v1.Session() as sess: - tf.compat.v1.set_random_seed(0) - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - - beta = tf.compat.v1.get_variable(name="beta", shape=[1], initializer=tf.compat.v1.random_normal_initializer()) - gamma = tf.compat.v1.get_variable(name="gamma", shape=[1], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.compat.v1.layers.batch_normalization(conv1) - x = tf.nn.relu(conv_bias) - pool = tf.nn.max_pool(x, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - - final_node = tf.nn.relu(pool, name="op_to_store") - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[final_node.name.split(":")[0]] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class TestGraphDumpToDisk(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph 
= build_fake_model() - build_fake_yaml() - build_fake_yaml_kl() - self.kl_log_path = os.path.join(os.getcwd(), "saved/kl.log") - self.calibration_log_path = os.path.join(os.getcwd(), "saved/requant_min_max.log") - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml_kl.yaml") - os.remove(self.calibration_log_path) - - def test_dump_tensor_to_disk(self): - import tensorflow.compat.v1 as tf - - from neural_compressor.experimental import Quantization, common - - tf.disable_v2_behavior() - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 30, 30, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - with open(self.calibration_log_path) as f: - data = f.readlines() - - found_kl = False - for i in data: - if i.find("Relu_1__print__;__KL:") != -1: - found_kl = True - - self.assertEqual(os.path.exists(self.calibration_log_path), True) - self.assertGreater(len(data), 1) - self.assertEqual(found_kl, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_input_output.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_input_output.py index 1d320201488..2976fd75e28 100644 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_input_output.py +++ b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_input_output.py @@ -12,63 +12,6 @@ from neural_compressor.adaptor.tf_utils.util import get_input_output_node_names -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: bayesian - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - def build_fake_model_1(): with tf.compat.v1.Session(graph=tf.Graph()) as sess: dataset = tf.data.Dataset.range(10) @@ -124,8 +67,6 @@ class TestGraphInputOutputDetection(unittest.TestCase): @classmethod def setUpClass(self): - build_fake_yaml() - build_fake_yaml_2() if not os.path.exists(self.pb_path): if self.platform == "linux": os.system( @@ -146,8 +87,6 @@ def setUpClass(self): @classmethod def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml_2.yaml") os.remove("model_1.pb") os.remove("model_2.pb") os.remove("model_3.pb") @@ -202,40 +141,6 @@ def test_identify_input_output(self): self.assertEqual(inputs, []) self.assertEqual(outputs, []) - def test_no_input_output_config(self): - g = GraphAnalyzer() - g.graph = self.input_graph - g.parse_graph() - - float_graph_def = g.dump_graph() - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = 
quantizer.dataset("dummy", shape=(20, 224, 224, 3), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - self.assertGreater(len(output_graph.graph_def.node), 0) - - def test_invalid_input_output_config(self): - g = GraphAnalyzer() - g.graph = self.input_graph - g.parse_graph() - - float_graph_def = g.dump_graph() - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_2.yaml") - dataset = quantizer.dataset("dummy", shape=(20, 224, 224, 3), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - model = quantizer.fit() - # will detect the right inputs/outputs - self.assertNotEqual(model.input_node_names, ["x"]) - self.assertNotEqual(model.output_node_names, ["op_to_store"]) - if __name__ == "__main__": unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_matmul_fusion.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_matmul_fusion.py deleted file mode 100644 index c17d0facaa6..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_matmul_fusion.py +++ /dev/null @@ -1,484 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow.compat.v1 as tf -import yaml -from tensorflow.python.framework import dtypes - -import neural_compressor -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestGraphMatMulFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - self.op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(neural_compressor.__file__), "adaptor/tensorflow.yaml") - ).get_eightbit_patterns() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_matmul_biasadd_relu_requantize_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.relu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = 
quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "QuantizedMatMulWithBiasAndReluAndRequantize": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_first_matmul_biasadd_relu_fusion(self): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.relu(z, name="op_to_store") - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - found_quantized_matmul = False - for i in output_graph.graph_def.node: - if ( - i.op == "QuantizeV2" - and i.name == "MatMul_eightbit_quantize_x" - and i.attr["T"].type == dtypes.quint8 - ): - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_requantize_dequantize_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.identity(z, name="op_to_store") - found_quantized_matmul = False - if tf.version.VERSION < "2.2.0": - found_quantized_matmul = True - else: - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "QuantizedMatMulWithBiasAndDequantize": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_requantize_dequantize_last_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2], name="op_to_store") - found_quantized_matmul = False - if tf.version.VERSION < "2.2.0": - found_quantized_matmul = True - else: - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - 
quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "QuantizedMatMulWithBiasAndDequantize" and i.name == "op_to_store": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_disable_matmul_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="no_quant_matmul") - z = tf.nn.relu6(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "QuantizedMatMulWithBiasAndDequantize" and i.name == "op_to_store": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, False) - - @disable_random() - def test_disable_matmul_fusion_with_transpose_b_true(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="no_quant_matmul", transpose_b=True) - z = tf.nn.relu6(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "QuantizedMatMulWithBiasAndDequantize" and i.name == "op_to_store": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, False) - - @disable_random() - @unittest.skipIf(float(tf.__version__[:3]) > 2.7, "only tf lower than 2.8 enable dummy biasadd") - def test_matmul_with_dummy_biasadd(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="no_quant_matmul") - z = tf.identity(z, name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", 
shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - @unittest.skipIf(float(tf.__version__[:3]) > 2.7, "only tf lower than 2.8 enable dummy biasadd") - def test_matmul_with_nan(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - nan_array = np.empty((2, 2), dtype=np.float32) - nan_array[:] = np.NaN - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - z = tf.matmul(x, nan_array, name="no_quant_matmul") - z = tf.identity(z, name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_with_reshape_transpose(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.matmul(x, reshape, name="no_quant_matmul") - z = tf.nn.bias_add(z, [1, 2], name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_with_add(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.matmul(x, reshape, name="no_quant_matmul") - z = tf.math.add(z, [1, 2], name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = 
quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_requantize_dequantize_fusion_with_softmax(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - biasadd = tf.nn.bias_add(z, [1, 2]) - biasadd1 = tf.nn.bias_add(biasadd, [1, 1]) - - y1 = tf.constant(x_data, dtype=tf.float32, shape=[2, 2]) - matmul1 = tf.matmul(biasadd1, y1) - - biasadd2 = tf.nn.bias_add(matmul1, [1, 1]) - - z = tf.nn.softmax(biasadd2, name="op_to_store") - found_quantized_matmul = False - if tf.version.VERSION < "2.2.0": - found_quantized_matmul = False - else: - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - count = 0 - for i in output_graph.model.as_graph_def().node: - if i.op == "QuantizedMatMulWithBiasAndDequantize": - count += 1 - found_quantized_matmul = bool(count > 1) - # TF2.6 has enabled matmul_biasadd_requantize_dequantize_fusion_with_softmax - if tf.__version__ < "2.6.0": - self.assertEqual(found_quantized_matmul, False) - else: - self.assertEqual(found_quantized_matmul, True) - - def test_matmul_biasadd_relu_non_const_weight(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.matmul(x, x, name="no_quant_matmul") - biasadd = tf.nn.bias_add(y, [1, 2]) - z = tf.nn.relu(biasadd) - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, False) - - def test_matmul_biasadd_non_const_weight(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.matmul(x, x, name="no_quant_matmul") - z = tf.nn.bias_add(y, [1, 2]) - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental 
import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_meta_pass.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_meta_pass.py deleted file mode 100644 index e676371f7fe..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_meta_pass.py +++ /dev/null @@ -1,202 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestMetaPass(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_tensorflow_graph_meta_pass_with_different_mode(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - sq = tf.squeeze(relu, [0]) - reshape = tf.reshape(sq, [729, 16]) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [16, 729], initializer=tf.compat.v1.random_normal_initializer() - ) - - matmul = tf.matmul(reshape, conv_weights2) - # normed2 = tf.compat.v1.layers.batch_normalization(matmul) - bias = tf.compat.v1.get_variable("bias", [729], initializer=tf.compat.v1.random_normal_initializer()) - normed2 = tf.nn.bias_add(matmul, bias, name="bias_add") - - relu6 = tf.nn.relu6(normed2) - reshape2 = tf.reshape(relu6, [1, 729, 729, 1], name="op_to_store") - - out_name = reshape2.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_reshape = False 
- for i in output_graph.graph_def.node: - if i.op == "Reshape": - found_reshape = True - break - - self.assertEqual(found_reshape, True) - - @disable_random() - def test_tensorflow_graph_meta_pass_with_same_mode(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - sq = tf.squeeze(relu, [0]) - reshape = tf.reshape(sq, [1, 27, 27, 16]) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(reshape, conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - - relu6 = tf.nn.relu6(normed2, name="op_to_store") - - out_name = relu6.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - quantize_count = 0 - dequantize_count = 0 - - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_count += 1 - if i.op == "Dequantize": - dequantize_count += 1 - - self.assertEqual(quantize_count, 1) - self.assertEqual(dequantize_count, 1) - - @disable_random() - def test_tensorflow_graph_meta_with_reshape_only(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - reshape = tf.reshape(relu, [1, 27, 27, 16]) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(reshape, conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - - relu6 = tf.nn.relu6(normed2, name="op_to_store") - - out_name = relu6.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - quantize_count = 0 - dequantize_count = 0 - - for i in output_graph.graph_def.node: - if i.op == 
"QuantizeV2": - quantize_count += 1 - if i.op == "Dequantize": - dequantize_count += 1 - - self.assertEqual(quantize_count, 1) - self.assertEqual(dequantize_count, 1) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_pad_conv.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_pad_conv.py deleted file mode 100644 index 2be01967846..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_pad_conv.py +++ /dev/null @@ -1,179 +0,0 @@ -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestFoldPadConv(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_fold_pad_conv(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed, name="op_to_store") - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_pad = False - - if tf.__version__ >= "2.0.0": - for i in output_graph.graph_def.node: - if i.op == "Pad": - found_pad = True - break - self.assertEqual(found_pad, True) - - @disable_random() - def test_fold_pad_conv2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed) - - paddings2 = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad2 = tf.pad(x, paddings2, "CONSTANT") - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 
16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(x_pad2, conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - relu2 = tf.nn.relu(normed2) - add = tf.math.add(relu, relu2, name="op_to_store") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_pad = False - - if tf.__version__ >= "2.0.0": - for i in output_graph.graph_def.node: - if i.op == "Pad": - found_pad = True - break - self.assertEqual(found_pad, True) - - @disable_random() - def test_fold_pad_conv3(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(x, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - relu2 = tf.nn.relu(normed2) - add = tf.math.add(relu, relu2, name="op_to_store") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_pad = False - - if tf.__version__ >= "2.0.0": - for i in output_graph.graph_def.node: - if i.op == "Pad": - found_pad = True - break - - self.assertEqual(found_pad, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_post_cse_optimize.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_post_cse_optimize.py deleted file mode 100644 index 2732bf2ca6d..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_graph_post_cse_optimize.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - 
device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.9 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestPostCSEOptimizer(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - import tensorflow as tf - - self.enable_s8 = bool(tf.version.VERSION.find("1.15.0-up") != -1 or tf.version.VERSION >= "2.1.0") - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_post_cse(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - x = tf.nn.relu(x) - xw = tf.constant(np.random.random((2, 2, 16, 16)), dtype=tf.float32, name="y") - x = tf.nn.conv2d(input=x, filters=xw, strides=[1, 1, 1, 1], padding="VALID") - - y = tf.constant(np.random.random((1, 55, 55, 16)), dtype=tf.float32, name="y") - - z = tf.math.add(x, y, name="add") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(z, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(z, conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - relu2 = tf.nn.relu(normed2) - add = tf.math.add(relu, relu2, name="op_to_store") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - quantize_v2_count = 0 - - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - - if self.enable_s8: - self.assertEqual(quantize_v2_count, 2) - else: - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_post_cse2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - x = tf.nn.relu(x) - xw = tf.constant(np.random.random((2, 2, 16, 16)), dtype=tf.float32, name="y") - x = tf.nn.conv2d(input=x, filters=xw, strides=[1, 1, 1, 1], padding="VALID") - - y = tf.constant(np.random.random((1, 55, 55, 16)), dtype=tf.float32, name="y") - - z = tf.math.add(x, y, name="add") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(z, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], 
initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(z, conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - relu2 = tf.nn.relu(normed2) - add = tf.math.add(relu, relu2) - ones_const = tf.constant(1, dtype=tf.float32) - ones_const2 = tf.constant(1, dtype=tf.float32) - mul1 = tf.math.multiply(add, ones_const) - mul2 = tf.math.multiply(mul1, ones_const) - mul3 = tf.math.multiply(mul2, ones_const2, name="op_to_store") - out_name = mul3.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - quantize_v2_count = 0 - - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - - if self.enable_s8: - self.assertEqual(quantize_v2_count, 2) - else: - self.assertEqual(quantize_v2_count, 1) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_inspect_tensor.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_inspect_tensor.py deleted file mode 100644 index efec5651687..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_inspect_tensor.py +++ /dev/null @@ -1,277 +0,0 @@ -""" -test_tensorflow_inspect_tensor.py: test inspect_tensor API -1. Create a quantizer for fake tensorflow model -2. Call inspect_tensor to dump the activation in local disk for both fp32 model and quantized model -3. 
Compare the inspecting result between fp32 model and quantized model - -Note: - use '-s' to disable pytest capturing the sys.stderr which will be used in quantization process -""" - -import logging -import os -import pickle -import shutil -import unittest - -import numpy as np -import yaml -from packaging import version - -np.random.seed(0) - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: output - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - - with tf.compat.v1.Session() as sess: - tf.compat.v1.set_random_seed(0) - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 3], name="input") - conv_weights1 = tf.compat.v1.get_variable( - "weight1", [2, 2, 3, 3], initializer=tf.compat.v1.random_normal_initializer() - ) - x = tf.nn.conv2d(x, conv_weights1, strides=[1, 2, 2, 1], padding="SAME", name="conv2d_1") - x = tf.nn.relu(x) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 3], initializer=tf.compat.v1.random_normal_initializer() - ) - x = tf.nn.conv2d(x, conv_weights2, strides=[1, 3, 3, 1], padding="SAME", name="conv2d_2") - x = tf.compat.v1.layers.batch_normalization(x) - x = tf.nn.relu(x) - depthwise_weights = tf.compat.v1.get_variable( - "depthwise_weights", [3, 3, 3, 6], initializer=tf.compat.v1.random_normal_initializer() - ) - x = tf.nn.depthwise_conv2d( - x, depthwise_weights, strides=[1, 1, 1, 1], padding="VALID", name="depthwise_conv2d_1" - ) - x = tf.nn.max_pool(x, ksize=2, strides=[1, 2, 2, 1], padding="SAME", name="pool_1") - # TODO to support inspect max_pool - x = tf.nn.relu(x, name="output") - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[x.name.split(":")[0]] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def build_fake_diagnosis_yaml(): - fake_diagnosis_yaml = """ - model: - name: fake_diagnosis_yaml - framework: tensorflow - inputs: input - outputs: output - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: -0.01 - diagnosis: True - """ - y = yaml.load(fake_diagnosis_yaml, Loader=yaml.SafeLoader) - with open("fake_diagnosis_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_diagnosis_yaml2(): - fake_diagnosis_yaml2 = """ - model: - name: fake_diagnosis_yaml2 - framework: tensorflow - inputs: input - outputs: output - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: -0.01 - diagnosis: True - """ - y = yaml.load(fake_diagnosis_yaml2, Loader=yaml.SafeLoader) - with open("fake_diagnosis_yaml2.yaml", "w", encoding="utf-8") as f: - 
yaml.dump(y, f) - f.close() - - -class TestTensorflowInspectTensor(unittest.TestCase): - @classmethod - def setUpClass(self): - from neural_compressor.config import options - - build_fake_yaml() - build_fake_diagnosis_yaml() - build_fake_diagnosis_yaml2() - self.model = build_fake_model() - self.fp32_dumped_tensor_path = os.path.join(os.getcwd(), "./fake_graph_inspect_res_fp32/") - self.quan_dumped_tensor_path = os.path.join(os.getcwd(), "./fake_graph_inspect_res_quan/") - self.fp32_dumped_tensor_file_path = os.path.join(self.fp32_dumped_tensor_path, "inspect_result.pkl") - self.quan_dumped_tensor_file_path = os.path.join(self.quan_dumped_tensor_path, "inspect_result.pkl") - self.workspace = os.path.abspath(options.workspace) - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove(self.fp32_dumped_tensor_file_path) - os.rmdir(self.fp32_dumped_tensor_path) - os.remove(self.quan_dumped_tensor_file_path) - os.rmdir(self.quan_dumped_tensor_path) - shutil.rmtree(self.workspace) - # shutil.rmtree(os.path.join(os.getcwd(), 'save_path_test')) - - def test_tensorflow_inspect_tensor(self): - import tensorflow.compat.v1 as tf - - from neural_compressor.experimental import Quantization, common - from neural_compressor.utils.utility import load_data_from_pkl - - tf.disable_v2_behavior() - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(128, 64, 64, 3), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.model - q_model = quantizer.fit() - self.quan_graph_def = q_model.graph_def - self.fp32_graph_def = quantizer.model.graph_def - self.dataloader = quantizer.calib_dataloader - self.node_list = ["conv2d_1", "conv2d_2", "depthwise_conv2d_1"] - # Tensorflow 2.5.0 enabled the s8 input for pooling op - # TODO check the specific version - if version.parse(tf.version.VERSION) >= version.parse("2.6.0"): - self.node_list.append("pool_1") - self.quantizer = quantizer - self.iteration_list = [1, 5] - - logging.getLogger().debug(f"Start to inspect tensor :{self.node_list} in fp32 model.") - quantizer = self.quantizer - quantizer.strategy.adaptor.inspect_tensor( - self.fp32_graph_def, - dataloader=self.dataloader, - op_list=self.node_list, - iteration_list=self.iteration_list, - inspect_type="all", - save_to_disk=True, - save_path=self.fp32_dumped_tensor_path, - quantization_cfg=quantizer.strategy.tune_cfg, - ) - self.assertEqual(os.path.exists(self.fp32_dumped_tensor_file_path), True) - - logging.getLogger().debug(f"Start to inspect tensor :{self.node_list} in quan model.") - quantizer = self.quantizer - quantizer.strategy.adaptor.inspect_tensor( - self.quan_graph_def, - dataloader=self.dataloader, - op_list=self.node_list, - iteration_list=self.iteration_list, - inspect_type="all", - save_to_disk=True, - save_path=self.quan_dumped_tensor_path, - quantization_cfg=quantizer.strategy.tune_cfg, - ) - self.assertEqual(os.path.exists(self.quan_dumped_tensor_file_path), True) - - fp32_data = load_data_from_pkl(self.fp32_dumped_tensor_path, "inspect_result.pkl") - quan_data = load_data_from_pkl(self.quan_dumped_tensor_path, "inspect_result.pkl") - self.assertEqual(fp32_data.keys(), quan_data.keys()) - self.assertIn("activation", fp32_data) - self.assertEqual(len(fp32_data["activation"]), len(quan_data["activation"])) # have same itertaion index - self.assertEqual(len(self.iteration_list), len(fp32_data["activation"])) - for iter_indx, iter in 
enumerate(self.iteration_list): - fp32_iter_data = fp32_data["activation"][iter_indx] - quan_iter_data = quan_data["activation"][iter_indx] - for node_name in fp32_iter_data.keys(): - self.assertEqual(fp32_iter_data[node_name][node_name].shape, quan_iter_data[node_name][node_name].shape) - - def test_tensorflow_diagnosis(self): - import tensorflow.compat.v1 as tf - - from neural_compressor.experimental import Quantization, common - - tf.disable_v2_behavior() - quantizer = Quantization("fake_diagnosis_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(128, 64, 64, 3), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.model - quantizer.fit() - - self.assertEqual(os.path.exists(os.path.join(self.workspace, "inspect_saved/fp32/inspect_result.pkl")), True) - self.assertEqual(os.path.exists(os.path.join(self.workspace, "inspect_saved/quan/inspect_result.pkl")), True) - - def test_tensorflow_diagnosis2(self): - import tensorflow.compat.v1 as tf - - from neural_compressor.experimental import Quantization, common - - tf.disable_v2_behavior() - quantizer = Quantization("fake_diagnosis_yaml2.yaml") - dataset = quantizer.dataset("dummy", shape=(128, 64, 64, 3), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.model - quantizer.fit() - self.assertEqual(os.path.exists(os.path.join(self.workspace, "inspect_saved/fp32/inspect_result.pkl")), True) - self.assertEqual(os.path.exists(os.path.join(self.workspace, "inspect_saved/quan/inspect_result.pkl")), True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_inspect_tensor_in_mse_tuning.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_inspect_tensor_in_mse_tuning.py deleted file mode 100644 index d2b7db27d65..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_inspect_tensor_in_mse_tuning.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -test_tensorflow_inspect_tensor_in_mse_tuning.py: -test inspect_tensor API called by mse tuning strategy -1. Create a quantizer for fake tensorflow model -2. The quantizer fitting process will call inspect_tensor API for both fp32 model and quantized model -3. 
Check the inspecting result in local disk - -Note: - use '-s' to disable pytest capturing the sys.stderr which will be used in quantization process -""" - -import logging -import os -import pickle -import platform -import shutil -import unittest - -import numpy as np -import yaml - -np.random.seed(0) - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: output - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - - with tf.compat.v1.Session() as sess: - tf.compat.v1.set_random_seed(0) - x = tf.compat.v1.placeholder(tf.float32, [1, 28, 28, 1], name="input") - conv_weights1 = tf.compat.v1.get_variable( - "weight1", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - x = tf.nn.conv2d(x, conv_weights1, strides=[1, 2, 2, 1], padding="SAME", name="conv2d_1") - x = tf.nn.relu(x) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - x = tf.nn.conv2d(x, conv_weights2, strides=[1, 3, 3, 1], padding="SAME", name="conv2d_2") - x = tf.compat.v1.layers.batch_normalization(x) - x = tf.nn.relu(x) - x = tf.nn.max_pool(x, ksize=1, strides=[1, 2, 2, 1], padding="SAME", name="pool_1") - # TODO to support inspect max_pool - x = tf.nn.relu(x, name="output") - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[x.name.split(":")[0]] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def load_data_from_pkl(path, filename): - try: - file_path = os.path.join(path, filename) - with open(file_path, "rb") as fp: - data = pickle.load(fp) - return data - except FileExistsError: - logging.getLogger().info("Can not open %s." 
% path) - - -class TestTensorflowInspectTensortinMSETuning(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - if platform.system().lower() == "linux": - self.cfg_path = os.path.join(os.getcwd(), "./nc_workspace/") - self.dumped_tensor_path = os.path.join(os.getcwd(), "./nc_workspace/") - else: - self.cfg_path = os.path.join(os.getcwd(), "nc_workspace\\") - self.dumped_tensor_path = os.path.join(os.getcwd(), "nc_workspace\\") - self.cfg_file_path = os.path.join(self.cfg_path, "cfg.pkl") - self.dumped_tensor_file_path = os.path.join(self.dumped_tensor_path, "inspect_result.pkl") - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove(self.dumped_tensor_file_path) - shutil.rmtree(self.dumped_tensor_path) - - def test_tensorflow_inspect_tensort_in_mse_tuning(self): - import tensorflow.compat.v1 as tf - - from neural_compressor.experimental import Quantization, common - - tf.disable_v2_behavior() - model = build_fake_model() - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(128, 28, 28, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = model - quantizer.fit() - self.assertEqual(os.path.exists(self.dumped_tensor_path), True) - data = load_data_from_pkl(self.dumped_tensor_path, "inspect_result.pkl") - self.assertEqual("activation" in data, True) - self.assertEqual(set(data["activation"][0].keys()), set(["pool_1", "conv2d_2", "conv2d_1"])) - self.assertEqual(len(data["activation"][0].keys()), 3) - self.assertEqual(data["activation"][0]["pool_1"]["pool_1"].shape, (1, 3, 3, 1)) - self.assertEqual(data["activation"][0]["conv2d_1"]["conv2d_1"].shape, (1, 14, 14, 1)) - self.assertEqual(data["activation"][0]["conv2d_2"]["conv2d_2"].shape, (1, 5, 5, 1)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_quantize_input.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_quantize_input.py deleted file mode 100644 index d321254a84e..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_quantize_input.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import shutil -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tensorflow import TensorFlowAdaptor -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestQuantizeInput(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - - @disable_random() - @unittest.skipIf( - tf.version.VERSION < "2.1.0", - "Quantize input needs tensorflow 2.1.0 and newer, so test_quantize_input is skipped", - ) - def test_quantize_input(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = 
tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - - conv_bias = tf.math.add(conv, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - for i in constant_graph.node: - if i.op.find("Add") != -1: - i.op = "Add" - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("./fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = constant_graph - q_model = quantizer.fit() - - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1978, - "inputs": ["input"], - "outputs": ["op_to_store"], - "workspace_path": "saved", - "format": "default", - "backend": "default", - } - - quantize_input_graph, _ = TensorFlowAdaptor(framework_specific_info).quantize_input(q_model.graph) - Not_found_QuantizedV2 = True - for i in quantize_input_graph.as_graph_def().node: - if i.op == "QuantizeV2": - Not_found_QuantizedV2 = False - break - self.assertEqual(Not_found_QuantizedV2, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_query_yaml.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_query_yaml.py index d0ef998bb31..fba7bb34dd9 100644 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_query_yaml.py +++ b/test/adaptor/tensorflow_adaptor/test_tensorflow_query_yaml.py @@ -199,50 +199,6 @@ def test_convert_internal_patterns(self): self.assertEqual([["AvgPool"]] in internal_patterns, True) self.assertEqual([["MatMul"], ("BiasAdd",), ("Relu",)] in internal_patterns, True) - @disable_random() - def test_grappler_cfg(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [1], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - relu2 = tf.nn.relu(relu) - pool = tf.nn.max_pool(relu2, ksize=1, strides=[1, 2, 2, 1], name="maxpool", padding="SAME") - conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - x = tf.nn.relu(conv_bias) - final_node = tf.nn.relu(x, name="op_to_store") - - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_grappler.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 30, 30, 1), 
label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - disable_arithmetic = False - for i in output_graph.graph_def.node: - if i.name == "maxpool_eightbit_quantize_Relu_2" and i.input[0] == "Relu_2": - disable_arithmetic = True - # if tf.version.VERSION >= '2.3.0': - # self.assertEqual(False, disable_arithmetic) - # else: - self.assertEqual(True, disable_arithmetic) - class TestFrameworkQueryYaml(unittest.TestCase): @classmethod diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_set_tensor.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_set_tensor.py deleted file mode 100644 index a53f3ae724f..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_set_tensor.py +++ /dev/null @@ -1,174 +0,0 @@ -import os -import shutil -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tensorflow import TensorFlowAdaptor -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestSetTensor(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - - @disable_random() - def test_fp32bias(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias = tf.compat.v1.get_variable( - "bias", [16], dtype=tf.float32, initializer=tf.compat.v1.random_normal_initializer() - ) - - conv_bias = tf.math.add(conv, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("./fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = constant_graph - q_model = quantizer.fit() - - framework_specific_info = { - "device": "cpu", - "workspace_path": "saved", - "random_seed": 1978, - "inputs": ["input"], - "outputs": ["op_to_store"], - "approach": "post_training_static_quant", - "format": "default", - "backend": "default", - } - adaptor = TensorFlowAdaptor(framework_specific_info) - adaptor.set_tensor(q_model, {"bias": np.random.random(16)}) - - from tensorflow.core.framework import attr_value_pb2 - from tensorflow.python.framework import dtypes - - for node in 
q_model.graph_def.node: - if node.name == "bias": - self.assertEqual(node.attr["dtype"], attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - @disable_random() - def test_int32bias(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias = tf.compat.v1.get_variable("bias", [16], dtype=tf.float32) - - conv_bias = tf.math.add(conv, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="relu_0") - - conv_weights1 = tf.compat.v1.get_variable( - "weight1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(relu6, conv_weights1, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias1 = tf.compat.v1.get_variable("bias1", [16], dtype=tf.float32) - - conv_bias1 = tf.math.add(conv1, conv_bias1) - relu6 = tf.nn.relu6(conv_bias1, name="relu_1") - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(relu6, conv_weights2, strides=[1, 2, 2, 1], padding="VALID") - - conv_bias2 = tf.compat.v1.get_variable("bias2", [16], dtype=tf.float32) - - conv_bias2 = tf.math.add(conv2, conv_bias2) - relu6 = tf.nn.relu6(conv_bias2, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - for i in constant_graph.node: - if i.op.find("Add") != -1: - i.op = "Add" - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("./fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = constant_graph - q_model = quantizer.fit() - - framework_specific_info = { - "device": "cpu", - "workspace_path": "saved", - "random_seed": 1978, - "inputs": ["input"], - "outputs": ["op_to_store"], - "approach": "post_training_static_quant", - "format": "default", - "backend": "default", - } - adaptor = TensorFlowAdaptor(framework_specific_info) - adaptor.set_tensor(q_model, {"bias1": np.random.randint(6, size=2, dtype="int32")}) - from tensorflow.core.framework import attr_value_pb2 - from tensorflow.python.framework import dtypes - - for node in q_model.graph_def.node: - if node.name == "bias2": - self.assertEqual(node.attr["dtype"], attr_value_pb2.AttrValue(type=dtypes.qint32.as_datatype_enum)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/adaptor/tensorflow_adaptor/test_tensorflow_strip_equivalent_nodes.py b/test/adaptor/tensorflow_adaptor/test_tensorflow_strip_equivalent_nodes.py deleted file mode 100644 index 8051d92b941..00000000000 --- a/test/adaptor/tensorflow_adaptor/test_tensorflow_strip_equivalent_nodes.py +++ /dev/null @@ -1,95 +0,0 @@ -# -# -*- coding: utf-8 -*- -# - -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random -from neural_compressor.experimental import Quantization, common - 
- -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestConvBiasAddAddReluFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_conv_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - conv2 = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - conv_add = tf.math.add(conv1, conv2) - relu6 = tf.nn.relu6(conv_add) - out_name = relu6.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_num = 0 - for i in output_graph.graph_def.node: - if "QuantizedConv2D" in i.op: - found_conv_num += 1 - self.assertEqual(found_conv_num, 1) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/benchmark/test_benchmark.py b/test/benchmark/test_benchmark.py deleted file mode 100644 index 1f34af16b19..00000000000 --- a/test/benchmark/test_benchmark.py +++ /dev/null @@ -1,284 +0,0 @@ -"""Tests for neural_compressor benchmark.""" - -import os -import platform -import re -import tempfile -import unittest - -import numpy as np -import psutil -import tensorflow as tf -import yaml - -from neural_compressor.adaptor.tf_utils.util import write_graph - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - performance: - warmup: 5 - iteration: 10 - configs: - cores_per_instance: 4 - num_of_instance: 2 - tuning: - accuracy_criterion: - relative: 0.01 - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_benchmark(): - seq = """ -from argparse import ArgumentParser -arg_parser = ArgumentParser(description='Parse args') -arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input odel') -args = arg_parser.parse_args() -from neural_compressor.data import Datasets -dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True) -from 
neural_compressor.experimental import Benchmark, common -from neural_compressor.conf.config import BenchmarkConf -benchmarker = Benchmark('fake_yaml.yaml') -benchmarker.b_dataloader = common.DataLoader(dataset, batch_size=10) -benchmarker.model = args.input_model -benchmarker.fit() - """ - - seq1 = """ -from argparse import ArgumentParser -arg_parser = ArgumentParser(description='Parse args') -arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input odel') -args = arg_parser.parse_args() -from neural_compressor.data import Datasets -dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True) -from neural_compressor.experimental import Benchmark, common -from neural_compressor.conf.config import BenchmarkConf -conf = BenchmarkConf('fake_yaml.yaml') -benchmarker = Benchmark(conf) -benchmarker.b_dataloader = common.DataLoader(dataset, batch_size=10) -benchmarker.model = args.input_model -benchmarker.fit() - """ - - # test normal case - with open("fake.py", "w", encoding="utf-8") as f: - f.writelines(seq) - # test batchsize > len(dataset), use first batch - fake_data_5 = seq.replace("100, 32, 32, 1", "5, 32, 32, 1") - with open("fake_data_5.py", "w", encoding="utf-8") as f: - f.writelines(fake_data_5) - # test batchsize < len(dataset) < 2*batchsize, discard first batch - fake_data_15 = seq1.replace("100, 32, 32, 1", "15, 32, 32, 1") - with open("fake_data_15.py", "w", encoding="utf-8") as f: - f.writelines(fake_data_15) - # test 2*batchsize < len(dataset) < warmup*batchsize, discard last batch - fake_data_25 = seq1.replace("100, 32, 32, 1", "25, 32, 32, 1") - with open("fake_data_25.py", "w", encoding="utf-8") as f: - f.writelines(fake_data_25) - - -def build_benchmark2(): - seq = [ - "from argparse import ArgumentParser\n", - "arg_parser = ArgumentParser(description='Parse args')\n", - "arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n", - "args = arg_parser.parse_args()\n", - "from neural_compressor.data import Datasets\n", - "dataset = Datasets('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n", - "from neural_compressor.experimental import Benchmark, common\n", - "benchmarker = Benchmark()\n", - "benchmarker.model = args.input_model\n", - "benchmarker.b_dataloader = common.DataLoader(dataset)\n", - "benchmarker.fit()\n", - ] - - seq1 = """ -from argparse import ArgumentParser -arg_parser = ArgumentParser(description='Parse args') -arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input odel') -args = arg_parser.parse_args() - -from neural_compressor.conf.config import conf -from neural_compressor.experimental import Benchmark, common -conf.evaluation.performance.dataloader.dataset = {'dummy': {'shape': [100,32,32,1], 'label':True}} -benchmarker = Benchmark(conf) -benchmarker.model = args.input_model -benchmarker.fit() - """ - - seq2 = """ -from argparse import ArgumentParser -arg_parser = ArgumentParser(description='Parse args') -arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model') -args = arg_parser.parse_args() - -class Metric: - def update(self, pred, label): - pass - - def reset(self): - pass - - def result(self): - return 1. 
- -from neural_compressor.conf.config import conf -from neural_compressor.experimental import Benchmark, common -conf.evaluation.accuracy.dataloader.dataset = {'dummy': {'shape': [100,32,32,1], 'label':True}} -benchmarker = Benchmark(conf) -benchmarker.model = args.input_model -benchmarker.metric = Metric() -benchmarker.fit('accuracy') - """ - - with open("fake2.py", "w", encoding="utf-8") as f: - f.writelines(seq) - with open("fake3.py", "w", encoding="utf-8") as f: - f.writelines(seq1) - with open("fake4.py", "w", encoding="utf-8") as f: - f.writelines(seq2) - - -def build_fake_model(): - graph_path = tempfile.mkstemp(suffix=".pb")[1] - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session(graph=graph) as sess: - x = tf.placeholder(tf.float64, shape=(None, 32, 32, 1), name="x") - y_1 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_1") - y_2 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_2") - conv1 = tf.nn.conv2d(input=x, filter=y_1, strides=[1, 1, 1, 1], padding="VALID", name="conv1") - op = tf.nn.conv2d(input=conv1, filter=y_2, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - write_graph(graph_def, graph_path) - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session(graph=graph) as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(None, 32, 32, 1), name="x") - y_1 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_1") - y_2 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_2") - conv1 = tf.nn.conv2d(input=x, filters=y_1, strides=[1, 1, 1, 1], padding="VALID", name="conv1") - op = tf.nn.conv2d(input=conv1, filters=y_2, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - write_graph(graph_def, graph_path) - return graph_path - - -class TestObjective(unittest.TestCase): - @classmethod - def setUpClass(self): - self.graph_path = build_fake_model() - build_fake_yaml() - build_benchmark() - build_benchmark2() - self.cpu_counts = psutil.cpu_count(logical=False) - self.platform = platform.system().lower() - - @classmethod - def tearDownClass(self): - if os.path.exists("fake_yaml.yaml"): - os.remove("fake_yaml.yaml") - if os.path.exists("fake.py"): - os.remove("fake.py") - if os.path.exists("fake2.py"): - os.remove("fake2.py") - if os.path.exists("fake3.py"): - os.remove("fake3.py") - if os.path.exists("fake4.py"): - os.remove("fake4.py") - if os.path.exists("fake_data_5.py"): - os.remove("fake_data_5.py") - if os.path.exists("fake_data_15.py"): - os.remove("fake_data_15.py") - if os.path.exists("fake_data_25.py"): - os.remove("fake_data_25.py") - - def test_benchmark(self): - os.system("python fake.py --input_model={}".format(self.graph_path)) - for i in range(2): - with open(f"2_4_{i}.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) 
images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm *.log") - - def test_benchmark_data_5(self): - os.system("python fake_data_5.py --input_model={}".format(self.graph_path)) - for i in range(2): - with open(f"2_4_{i}.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm *.log") - - def test_benchmark_data_15(self): - os.system("python fake_data_15.py --input_model={}".format(self.graph_path)) - for i in range(2): - with open(f"2_4_{i}.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm *.log") - - def test_benchmark_data_25(self): - os.system("python fake_data_25.py --input_model={}".format(self.graph_path)) - for i in range(2): - with open(f"2_4_{i}.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm *.log") - - def test_benchmark_without_yaml(self): - os.system("python fake2.py --input_model={} 2>&1 | tee benchmark.log".format(self.graph_path)) - with open("benchmark.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput sum: (\d+(\.\d+)?)", line) - self.assertIsNotNone(throughput) - os.system("rm *.log") - - def test_benchmark_with_conf(self): - os.system("python fake3.py --input_model={}".format(self.graph_path)) - with open(f"1_{self.cpu_counts}_0.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm *.log") - - def test_benchmark_with_custom_metric(self): - os.system("python fake4.py --input_model={} 2>&1 | tee benchmark.log".format(self.graph_path)) - with open("benchmark.log", "r") as f: - for line in f: - accuracy = re.search(r"Accuracy is\s+(\d+(\.\d+)?)", line) - self.assertIsNotNone(accuracy) - os.system("rm *.log") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/config/test_config_1x.py b/test/config/test_config_1x.py deleted file mode 100644 index b7637d92234..00000000000 --- a/test/config/test_config_1x.py +++ /dev/null @@ -1,807 +0,0 @@ -"""Tests for 1.x config file.""" - -import os -import unittest - -from neural_compressor.conf import config as conf -from neural_compressor.utils.constant import * - - -def helper(content): - with open("fake_conf.yaml", "w", encoding="utf-8") as f: - f.write(content) - - -class TestConfig(unittest.TestCase): - def test_config(self): - from neural_compressor import PostTrainingQuantConfig - - config = PostTrainingQuantConfig() - self.assertEqual(config.recipes["smooth_quant"], False) - self.assertEqual(config.recipes["fast_bias_correction"], False) - self.assertEqual(config.recipes["weight_correction"], False) - self.assertEqual(config.recipes["dedicated_qdq_pair"], False) - self.assertEqual(config.recipes["add_qdq_pair_to_weight"], False) - self.assertEqual(config.recipes["graph_optimization_level"], None) - - -class TestPyConf(unittest.TestCase): - def test_config(self): - from neural_compressor.conf.config import ( - BenchmarkConf, - DistillationConf, - GraphOptConf, - PruningConf, - QuantConf, - conf, - ) - - conf.tuning.accuracy_criterion.relative = 0.2 - a = QuantConf(conf) - self.assertEqual(a.usr_cfg.tuning.accuracy_criterion.relative, 0.2) - - conf.quantization.op_wise = { - "op1": FP32, - "op2": {"activation": INT8_SYM_KL_PERTENSOR}, - "op3": {"activation": 
INT8_SYM_KL_PERCHANNEL, "weight": INT8_SYM_MINMAX_PERTENSOR}, - } - conf.quantization.model_wise = {"activation": INT8_SYM_KL_PERTENSOR, "weight": INT8_SYM_MINMAX_PERTENSOR} - a = QuantConf(conf) - self.assertEqual(a.usr_cfg.quantization.model_wise.weight.scheme, ["sym"]) - - conf.evaluation.performance.dataloader.dataset = {"dummy": {"shape": "224,224,3"}} - conf.evaluation.accuracy.dataloader.dataset = {"dummy": {"shape": "224,224,3", "low": "0.1"}} - - conf.evaluation.performance.dataloader.transform = { - "Resize": {"size": [100, 100]}, - "BilinearImagenet": {"height": 300, "width": 300, "mean_value": [0.2, 0.2, 0.2]}, - } - conf.evaluation.performance.dataloader.batch_size = 6 - conf.evaluation.accuracy.metric = {"RMSE": {}} - conf.tuning.strategy.name = "mse" - a = BenchmarkConf(conf) - self.assertEqual(a.usr_cfg.evaluation.performance.dataloader.batch_size, 6) - self.assertEqual(a.usr_cfg.evaluation.performance.dataloader.dataset, {"dummy": {"shape": (224, 224, 3)}}) - self.assertEqual(a.usr_cfg.evaluation.accuracy.metric, {"RMSE": {}}) - a = QuantConf(conf) - self.assertEqual(a.usr_cfg.tuning.strategy.name, "mse") - - conf.evaluation.accuracy.metric = {"topk": 5} - conf.graph_optimization.precisions = "bf16" - conf.pruning.train.criterion = {"CrossEntropyLoss": {}} - conf.pruning.train.optimizer = {} - a = PruningConf(conf) - self.assertEqual( - a.usr_cfg.pruning.train.criterion, {"CrossEntropyLoss": {"from_logits": False, "reduction": "mean"}} - ) - - self.assertEqual(a.usr_cfg.evaluation.accuracy.metric, {"topk": 5}) - conf.graph_optimization.op_wise = BF16 - a = GraphOptConf(conf) - self.assertEqual( - a.usr_cfg.graph_optimization.op_wise, {"weight": {"dtype": ["bf16"]}, "activation": {"dtype": ["bf16"]}} - ) - - conf.distillation.train.iteration = 900 - a = DistillationConf(conf) - self.assertEqual(a.usr_cfg.distillation.train.iteration, 900) - - -class TestConf(unittest.TestCase): - @classmethod - def tearDownClass(self): - os.remove("fake_conf.yaml") - - def test_main_key(self): - test = """ - model: - name: main_key_yaml - framework: pytorch - test: cpu - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_framework(self): - test = """ - model: - name: framework_yaml - framework: pytorch, mxnet - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - device: cpu - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_device(self): - test = """ - model: - name: device_yaml - framework: mxnet - device: xpu - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: device_yaml - framework: mxnet - device: cpu, gpu - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_version(self): - test = """ - model: - name: version_yaml - framework: mxnet - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.version, 2.0) - - test = """ - version: 2.0 - - model: - name: version_yaml - framework: mxnet - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.version, 2.0) - - def test_calibration(self): - test = """ - model: - name: calib_yaml - framework: mxnet - quantization: - calibration: - sampling_sizes: 10 - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: calib_yaml - framework: mxnet - quantization: - calibration: - 
sampling_size: - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: calib_yaml - framework: mxnet - quantization: - calibration: - dataloader: - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: calib_yaml - framework: mxnet - quantization: - calibration: - op_wise: { - 'test': { - 'activation': [{'dtype': 'uint8'}, {'algorithm': 'minmax'}] - } - } - - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_quantization(self): - test = """ - model: - name: quant_yaml - framework: mxnet - quantization: - model_wise: - weights: - granularity: per_channel - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: quant_yaml - framework: mxnet - quantization: - model_wise: - approach: - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: quant_yaml - framework: mxnet - quantization: - approach: post_training_static_quant, quant_aware_training - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: quant_yaml - framework: mxnet - quantization: - model_wise: - activation: - scheme: asym - dtype: int8 - weight: - scheme: asym - dtype: int8 - """ - helper(test) - conf.Conf("fake_conf.yaml") - - test = """ - model: - name: quant_yaml - framework: mxnet - quantization: - model_wise: - activation: - scheme: - dtype: int8 - weight: - scheme: asym - dtype: int8 - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_tuning(self): - test = """ - model: - name: tuning_yaml - framework: mxnet - tuning: - accuracy_criterion: - relative: 0.01 - strategy: - name: basic, mse - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: tuning_yaml - framework: mxnet - tuning: - accuracy_criterion: - relative: 0.01 - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: tuning_yaml - framework: mxnet - tuning: - accuracy_criterion: - relative: 0.01 - strategy: - name: fake - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: tuning_yaml - framework: mxnet - tuning: - accuracy_criterion: - relative: - strategy: - name: basic - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: tuning_yaml - framework: mxnet - tuning: - accuracy_criterion: - exit_policy: - timeout: 3 - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: tuning_yaml - framework: mxnet - tuning: - accuracy_criterion: - relative: 0.01 - absolute: 0.01 - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_workspace(self): - test = """ - model: - name: workspace_yaml - framework: mxnet - tuning: - workspace: - -path: ./workspace - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_inputs_outputs(self): - test = """ - model: - name: inout_yaml - framework: mxnet - inputs: x, y - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.model.inputs, ["x", "y"]) - - def test_objective(self): - test = """ - model: - name: inout_yaml - framework: mxnet - 
inputs: x, y - tuning: - multi_objectives: - objective: accuracy - higher_is_better: True - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.tuning.multi_objectives.higher_is_better, [True]) - - test = """ - model: - name: inout_yaml - framework: mxnet - inputs: x, y - tuning: - multi_objectives: - objective: accuracy, performance - higher_is_better: True, False - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.tuning.multi_objectives.higher_is_better, [True, False]) - - test = """ - model: - name: inout_yaml - framework: mxnet - inputs: x, y - tuning: - multi_objectives: - objective: accuracy, performance - higher_is_better: True False - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.tuning.multi_objectives.higher_is_better, [True, False]) - - test = """ - model: - name: inout_yaml - framework: mxnet - inputs: x, y - tuning: - multi_objectives: - objective: accuracy, performance - higher_is_better: [True, False] - """ - helper(test) - config = conf.Conf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.tuning.multi_objectives.higher_is_better, [True, False]) - - test = """ - model: - name: inout_yaml - framework: mxnet - inputs: x, y - tuning: - multi_objectives: - objective: accuracy, performance - higher_is_better: True False - weight: [0.2, 0.1, 0.7] - """ - helper(test) - self.assertRaises(RuntimeError, conf.Conf, "fake_conf.yaml") - - def test_modelwise_conf_merge(self): - test = """ - model: - name: inout_yaml - framework: mxnet - quantization: - model_wise: - weight: - algorithm: minmax - activation: - algorithm: minmax - """ - helper(test) - config = conf.QuantConf("fake_conf.yaml") - - framework_modelwise_capability = { - "CONV2D": { - "activation": { - "dtype": ["uint8", "fp32"], - "scheme": ["asym", "sym"], - "granularity": ["per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": { - "dtype": ["int8", "fp32"], - "scheme": [ - "sym", - ], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax"], - }, - }, - } - - tune_space = config.modelwise_tune_space(framework_modelwise_capability) - self.assertEqual(tune_space["CONV2D"]["activation"]["algorithm"], ["minmax"]) - - def test_metric(self): - test = """ - model: - name: metric_yaml - framework: mxnet - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: {} - """ - helper(test) - - metrics = {"topk": 1, "MSE": {}} - config = conf.QuantConf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.evaluation.accuracy.multi_metrics, metrics) - - test = """ - model: - name: metric_yaml - framework: mxnet - evaluation: - accuracy: - multi_metrics: - weight: 0.5 0.5 0.6 - topk: 1 - MSE: {} - """ - helper(test) - self.assertRaises((AssertionError, RuntimeError), conf.Conf, "fake_conf.yaml") - - test = """ - model: - name: metric_yaml - framework: mxnet - evaluation: - accuracy: - multi_metrics: - higher_is_better: True, False - topk: 1 - MSE: {} - """ - helper(test) - config = conf.QuantConf("fake_conf.yaml") - self.assertEqual(config.usr_cfg.evaluation.accuracy.multi_metrics.higher_is_better, [True, False]) - - def test_modelwise_conf_merge2(self): - test = """ - model: - name: inout_yaml - framework: mxnet - quantization: - model_wise: - weight: - algorithm: minmax - activation: - algorithm: minmax - dtype: ['uint8', 'fp32'] - """ - helper(test) - config = conf.QuantConf("fake_conf.yaml") - - framework_modelwise_capability = { - "CONV2D": { - "activation": { - "dtype": 
["iint8", "fp32"], - "scheme": ["asym", "sym"], - "granularity": ["per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": { - "dtype": ["int8", "fp32"], - "scheme": [ - "sym", - ], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax"], - }, - }, - } - - tune_space = config.modelwise_tune_space(framework_modelwise_capability) - self.assertEqual(tune_space["CONV2D"]["activation"]["dtype"], ["fp32"]) - - def test_prune(self): - test_pytorch_prune = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - train: - start_epoch: 0 - end_epoch: 4 - dataloader: - batch_size: 30 - dataset: - ImageFolder: - root: /path/to/training/dataset - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - names: ['layer1.0.conv1.weight'] - - - !Pruner - start_epoch: 0 - end_epoch: 4 - target_sparsity: 0.6 - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - """ - helper(test_pytorch_prune) - config = conf.Conf("fake_conf.yaml") - test_tensorflow_prune = """ - model: - name: vit - framework: tensorflow - - pruning: - train: - epoch: 15 - optimizer: - AdamW: - learning_rate: 0.001 - weight_decay: 0.0001 - criterion: - CrossEntropyLoss: - reduction: sum_over_batch_size - from_logits: True - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.7 - start_epoch: 0 - end_epoch: 9 - pruners: - - !Pruner - start_epoch: 0 - end_epoch: 9 - prune_type: basic_magnitude - - evaluation: - accuracy: - metric: - topk: 1 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - helper(test_tensorflow_prune) - config = conf.Conf("fake_conf.yaml") - - def test_data_type(self): - test = """ - model: - name: test - framework: tensorflow - - quantization: - calibration: - sampling_size: 20 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [[224,224], [256,256]] - high: [128., 127] - low: 1, 0 - dtype: ['float32', 'int8'] - """ - helper(test) - cfg = conf.Conf("fake_conf.yaml").usr_cfg - dataset = cfg["quantization"]["calibration"]["dataloader"]["dataset"]["dummy"] - self.assertTrue(isinstance(dataset["shape"][0], tuple)) - self.assertTrue(isinstance(dataset["shape"], list)) - self.assertTrue(isinstance(dataset["high"][1], float)) - self.assertTrue(isinstance(dataset["high"][0], float)) - self.assertTrue(isinstance(dataset["low"][0], float)) - - test = """ - model: - name: test - framework: tensorflow - - quantization: - calibration: - sampling_size: 20 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [224,224] - high: 128 - low: 0.1 - dtype: ['float32', 'int8'] - """ - helper(test) - cfg = conf.Conf("fake_conf.yaml").usr_cfg - dataset = cfg["quantization"]["calibration"]["dataloader"]["dataset"]["dummy"] - self.assertTrue(isinstance(dataset["shape"], tuple)) - self.assertTrue(isinstance(dataset["high"], float)) - - test = """ - model: - name: test - framework: tensorflow - - quantization: - calibration: - sampling_size: 20 - dataloader: - batch_size: 1 - dataset: - style_transfer: - content_folder: test - style_folder: test - crop_ratio: 0.5 - resize_shape: 10,10 - transform: - RandomResizedCrop: - size: 10 - scale: [0.07, 0.99] - ratio: [0.6, 0.8] - """ - helper(test) - cfg = conf.Conf("fake_conf.yaml").usr_cfg - shape_cfg = 
cfg["quantization"]["calibration"]["dataloader"]["dataset"]["style_transfer"]["resize_shape"] - self.assertTrue(isinstance(shape_cfg, list)) - transform_cfg = cfg["quantization"]["calibration"]["dataloader"]["transform"]["RandomResizedCrop"] - self.assertTrue(isinstance(transform_cfg["scale"], list)) - self.assertTrue(isinstance(transform_cfg["ratio"], list)) - - test = """ - model: - name: test - framework: tensorflow - - quantization: - calibration: - sampling_size: 20 - dataloader: - batch_size: 1 - dataset: - style_transfer: - content_folder: test - style_folder: test - crop_ratio: 0.5 - resize_shape: [10,10] - """ - helper(test) - cfg = conf.Conf("fake_conf.yaml").usr_cfg - shape_cfg = cfg["quantization"]["calibration"]["dataloader"]["dataset"]["style_transfer"]["resize_shape"] - self.assertTrue(isinstance(shape_cfg, list)) - - test = """ - model: - name: test - framework: tensorflow - - quantization: - calibration: - sampling_size: 20 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: 224,224 - transform: - BilinearImagenet: - height: 224 - width: 224 - mean_value: 123.68 116.78 103.94 - """ - helper(test) - cfg = conf.Conf("fake_conf.yaml").usr_cfg - shape_cfg = cfg["quantization"]["calibration"]["dataloader"]["dataset"]["dummy"]["shape"] - self.assertTrue(isinstance(shape_cfg, tuple)) - transform_cfg = cfg["quantization"]["calibration"]["dataloader"]["transform"]["BilinearImagenet"] - self.assertTrue(isinstance(transform_cfg["mean_value"], list)) - - def test_yaml_detection(self): - try: - cfg = conf.Conf("not_exist.yaml").usr_cfg - except: - pass - - def test_deep_set(self): - from neural_compressor.conf.dotdict import DotDict, deep_set - - cfg = {"evaluation": {"accuracy": {}}} - dot_cfg = DotDict(cfg) - deep_set(dot_cfg, "evaluation.accuracy.metric", "iou") - deep_set(dot_cfg, "evaluation.accuracy.multi_metrics.weight", [0.1, 0, 9]) - deep_set(dot_cfg, "evaluation.accuracy.multi_metrics.mAP.anno_path", "anno_path_test") - self.assertTrue(dot_cfg.evaluation == dot_cfg["evaluation"]) - self.assertTrue(dot_cfg.evaluation.accuracy == dot_cfg["evaluation"]["accuracy"]) - self.assertTrue(dot_cfg.evaluation.accuracy.metric == dot_cfg["evaluation"]["accuracy"]["metric"]) - self.assertTrue(dot_cfg.evaluation.accuracy.multi_metrics == dot_cfg["evaluation"]["accuracy"]["multi_metrics"]) - self.assertTrue(dot_cfg.evaluation.accuracy.multi_metrics.weight == [0.1, 0, 9]) - self.assertTrue(dot_cfg.evaluation.accuracy.multi_metrics.mAP.anno_path == "anno_path_test") - multi_metrics1 = dot_cfg.evaluation.accuracy.multi_metrics - multi_metrics2 = dot_cfg["evaluation"]["accuracy"]["multi_metrics"] - self.assertTrue(multi_metrics1 == multi_metrics2) - self.assertTrue(list(multi_metrics1.keys()) == ["weight", "mAP"]) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/config/test_config_regex.py b/test/config/test_config_regex.py deleted file mode 100644 index 8001aa89778..00000000000 --- a/test/config/test_config_regex.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import tensorflow as tf -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - op_wise: { - \"conv1_[1-2]\": { - \"activation\": {\"dtype\": [\"fp32\"]}, - }, - } - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: 
basic - exit_policy: - timeout: 0 - accuracy_criterion: - relative: 0.05 - exit_policy: - performance_only: True - workspace: - path: saved - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -def build_fake_yaml_invalid_model_wise(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - op_wise: { - \"conv1_[1-2]\": { - \"activation\": {\"dtype\": [\"fp32\"]}, - }, - } - model_wise: - weight: - granularity: per_channel - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - exit_policy: - timeout: 0 - accuracy_criterion: - relative: 0.05 - workspace: - path: saved - """ - with open("fake_yaml_with_invalid_cfg.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -class TestConfigRegex(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - build_fake_yaml_invalid_model_wise() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml_with_invalid_cfg.yaml") - - @disable_random() - def test_config_regex(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID", name="conv1_1") - normed1 = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed1) - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - for i in output_graph_def.node: - if i.op.find("Add") != -1: - i.op = "Add" - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_fp32_conv = False - found_quantized_conv = False - for i in output_graph.graph_def.node: - if i.op == "Conv2D" and i.name == "conv1_1": - found_fp32_conv = True - - if i.op.find("QuantizedConv2D") != -1 and i.name == "conv1_3_eightbit_requantize": - found_quantized_conv = True - - self.assertEqual(found_fp32_conv, True) - self.assertEqual(found_quantized_conv, True) - - def test_config_regex_with_invalid_cfg(self): - tf.compat.v1.disable_eager_execution() - tf.compat.v1.reset_default_graph() - tf.compat.v1.set_random_seed(1) - x = 
tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID", name="conv1_1") - normed1 = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed1) - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - for i in output_graph_def.node: - if i.op.find("Add") != -1: - i.op = "Add" - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_with_invalid_cfg.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_fp32_conv = False - found_quantized_conv = False - for i in output_graph.graph_def.node: - if i.op == "Conv2D" and i.name == "conv1_1": - found_fp32_conv = True - - if i.op.find("QuantizedConv2D") != -1 and i.name == "conv1_3_eightbit_requantize": - found_quantized_conv = True - - self.assertEqual(found_fp32_conv, True) - self.assertEqual(found_quantized_conv, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/config/test_pythonic_config.py b/test/config/test_pythonic_config.py deleted file mode 100644 index 6bfa55288b2..00000000000 --- a/test/config/test_pythonic_config.py +++ /dev/null @@ -1,390 +0,0 @@ -"""Tests for pythonic config file.""" - -import copy -import os -import shutil -import unittest - -import numpy as np -import onnx -import onnxruntime as ort -import tensorflow as tf -import torch -from onnx import TensorProto, helper -from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 -from tensorflow.python.framework import dtypes, tensor_util -from torch import nn - -from neural_compressor.adaptor import FRAMEWORKS -from neural_compressor.adaptor.torch_utils.bf16_convert import BF16ModuleWrapper -from neural_compressor.conf.pythonic_config import ActivationConf, OpQuantConf, WeightConf, config -from neural_compressor.data import Datasets -from neural_compressor.experimental import NAS, Distillation, Quantization, common -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - - -def build_matmul_model(): - A = helper.make_tensor_value_info("A", TensorProto.FLOAT, [1, 1, 5, 5]) - B_init = helper.make_tensor( - "B", TensorProto.FLOAT, [1, 1, 5, 1], 
np.random.random([1, 1, 5, 1]).reshape(5).tolist() - ) - C = helper.make_tensor_value_info("C", TensorProto.FLOAT, [1, 1, 5, 1]) - D = helper.make_tensor_value_info("D", TensorProto.FLOAT, [1, 1, 5, 1]) - H = helper.make_tensor_value_info("H", TensorProto.FLOAT, [1, 1, 5, 1]) - - matmul_node = onnx.helper.make_node("MatMul", ["A", "B"], ["C"], name="Matmul") - e_value = np.random.randint(2, size=(5)).astype(np.float32) - E_init = helper.make_tensor("E", TensorProto.FLOAT, [1, 1, 5, 1], e_value.reshape(5).tolist()) - add = onnx.helper.make_node("Add", ["C", "E"], ["D"], name="add") - f_value = np.random.randint(2, size=(5)).astype(np.float32) - F_init = helper.make_tensor("F", TensorProto.FLOAT, [1, 1, 5, 1], e_value.reshape(5).tolist()) - add2 = onnx.helper.make_node("Add", ["D", "F"], ["H"], name="add2") - graph = helper.make_graph([matmul_node, add, add2], "test_graph_1", [A], [H], [E_init, F_init, B_init]) - model = helper.make_model(graph) - model = helper.make_model(graph, **{"opset_imports": [helper.make_opsetid("", 13)]}) - return model - - -def build_conv2d_model(): - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "out" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - test_graph = graph_pb2.GraphDef() - test_graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - ] - ) - return test_graph - - -class ConvNet(torch.nn.Module): - def __init__(self, channels, dimensions): - super().__init__() - self.conv = torch.nn.Conv2d(3, channels, (3, 3), 
padding=1) - self.avg_pooling = torch.nn.AvgPool2d((64, 64)) - self.dense = torch.nn.Linear(channels, dimensions) - self.out = torch.nn.Linear(dimensions, 2) - self.activation = torch.nn.Softmax() - - def forward(self, inputs): - outputs = self.conv(inputs) - outputs = self.avg_pooling(outputs).squeeze() - outputs = self.dense(outputs) - outputs = self.out(outputs) - outputs = self.activation(outputs) - return outputs - - -def model_builder(model_arch_params): - channels = model_arch_params["channels"] - dimensions = model_arch_params["dimensions"] - return ConvNet(channels, dimensions) - - -class torch_model(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv2d(3, 1, 1) - self.linear = nn.Linear(224 * 224, 5) - - def forward(self, x): - x = self.conv(x) - x = x.view(1, -1) - x = self.linear(x) - return x - - -class TestPythonicConf(unittest.TestCase): - @classmethod - def tearDownClass(self): - shutil.rmtree("./nc_workspace", ignore_errors=True) - - def test_config_setting(self): - config.quantization.inputs = ["image"] - config.quantization.outputs = ["out"] - config.quantization.approach = "post_training_dynamic_quant" - config.quantization.device = "gpu" - config.quantization.op_type_dict = {"Conv": {"weight": {"dtype": ["fp32"]}, "activation": {"dtype": ["fp32"]}}} - config.quantization.op_name_dict = { - "layer1.0.conv1": {"activation": {"dtype": ["fp32"]}, "weight": {"dtype": ["fp32"]}} - } - config.quantization.strategy = "mse" - config.quantization.objective = "accuracy" - config.quantization.timeout = 100 - config.quantization.max_trials = 100 - config.quantization.accuracy_criterion.relative = 0.5 - config.quantization.reduce_range = False - config.quantization.use_bf16 = False - config.benchmark.cores_per_instance = 10 - - self.assertEqual(config.quantization.inputs, ["image"]) - self.assertEqual(config.quantization.outputs, ["out"]) - self.assertEqual(config.quantization.approach, "post_training_dynamic_quant") - self.assertEqual(config.quantization.device, "gpu") - self.assertEqual( - config.quantization.op_type_dict, - {"Conv": {"weight": {"dtype": ["fp32"]}, "activation": {"dtype": ["fp32"]}}}, - ) - self.assertEqual( - config.quantization.op_name_dict, - {"layer1.0.conv1": {"activation": {"dtype": ["fp32"]}, "weight": {"dtype": ["fp32"]}}}, - ) - self.assertEqual(config.quantization.strategy, "mse") - self.assertEqual(config.quantization.objective, "accuracy") - self.assertEqual(config.quantization.timeout, 100) - self.assertEqual(config.quantization.max_trials, 100) - self.assertEqual(config.quantization.accuracy_criterion.relative, 0.5) - self.assertEqual(config.benchmark.cores_per_instance, 10) - - config.quantization.accuracy_criterion.absolute = 0.4 - self.assertEqual(config.quantization.accuracy_criterion.absolute, 0.4) - self.assertEqual(config.quantization.accuracy_criterion.relative, None) - - config.onnxruntime.precisions = ["int8", "uint8"] - config.onnxruntime.graph_optimization_level = "DISABLE_ALL" - q = Quantization(config) - q.model = build_matmul_model() - self.assertEqual(q.conf.usr_cfg.reduce_range, False) - self.assertEqual(q.conf.usr_cfg.use_bf16, False) - q.pre_process() - self.assertEqual(q.strategy.adaptor.query_handler.get_precisions(), ["int8", "uint8"]) - self.assertNotEqual(config.mxnet, None) - self.assertNotEqual(config.tensorflow, None) - self.assertNotEqual(config.pytorch, None) - self.assertNotEqual(config.keras, None) - - def test_weight_activation_op(self): - opconf = OpQuantConf() - 
self.assertEqual(opconf.op_type, None) - - opconf = OpQuantConf("MatMul") - self.assertEqual(opconf.op_type, "MatMul") - self.assertNotEqual(opconf.weight, None) - self.assertNotEqual(opconf.activation, None) - - opconf.weight.datatype = ["int8"] - opconf.activation.datatype = ["uint8"] - opconf.weight.scheme = ["asym"] - opconf.activation.scheme = ["sym"] - opconf.weight.granularity = ["per_channel"] - opconf.activation.granularity = ["per_tensor"] - opconf.weight.algorithm = ["minmax"] - opconf.activation.algorithm = ["minmax"] - self.assertEqual(opconf.weight.datatype, ["int8"]) - self.assertEqual(opconf.activation.datatype, ["uint8"]) - self.assertEqual(opconf.weight.scheme, ["asym"]) - self.assertEqual(opconf.activation.scheme, ["sym"]) - self.assertEqual(opconf.weight.granularity, ["per_channel"]) - self.assertEqual(opconf.activation.granularity, ["per_tensor"]) - self.assertEqual(opconf.weight.algorithm, ["minmax"]) - self.assertEqual(opconf.activation.algorithm, ["minmax"]) - - def test_quantization(self): - q = Quantization(config) - q.model = build_matmul_model() - q_model = q() - self.assertTrue(any([i.name.endswith("_quant") for i in q_model.nodes()])) - - config.onnxruntime.precisions = ["fp32"] - q = Quantization(config) - q.model = build_matmul_model() - q_model = q() - self.assertTrue(all([not i.name.endswith("_quant") for i in q_model.nodes()])) - - def test_distillation(self): - config.quantization.device = "cpu" - distiller = Distillation(config) - model = ConvNet(16, 32) - origin_weight = copy.deepcopy(model.out.weight) - distiller.model = model - distiller.teacher_model = ConvNet(16, 32) - - # Customized train, evaluation - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(32, 3, 64, 64), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - def train_func(model): - epochs = 3 - iters = 10 - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - for image, target in dummy_dataloader: - print(".", end="") - cnt += 1 - output = model(image).unsqueeze(dim=0) - loss = criterion(output, target) - loss = distiller.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - optimizer.step() - if cnt >= iters: - break - - def eval_func(model): - model.eval() - acc = 0 - for image, target in dummy_dataloader: - output = model(image).cpu().detach().numpy() - acc += np.sum(output == target) - return {"acc": acc / len(dummy_dataset)} - - distiller.train_func = train_func - distiller.eval_func = eval_func - model = distiller() - weight = model.model.out.weight - self.assertTrue(torch.any(weight != origin_weight)) - - def test_pruning(self): - prune = Pruning(config) - model = ConvNet(16, 32) - origin_weight = copy.deepcopy(model.out.weight) - prune.model = model - - # Customized train, evaluation - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(32, 3, 64, 64), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - def train_func(model): - epochs = 3 - iters = 10 - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - prune.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - print(".", end="") - cnt += 1 - prune.on_step_begin(cnt) - output = model(image).unsqueeze(dim=0) - loss = criterion(output, target) - 
optimizer.zero_grad() - loss.backward() - optimizer.step() - prune.on_step_end() - if cnt >= iters: - break - prune.on_epoch_end() - - def eval_func(model): - model.eval() - acc = 0 - for image, target in dummy_dataloader: - output = model(image).cpu().detach().numpy() - acc += np.sum(output == target) - return {"acc": acc / len(dummy_dataset)} - - prune.train_func = train_func - prune.eval_func = eval_func - model = prune() - weight = model.model.out.weight - self.assertTrue(torch.any(weight != origin_weight)) - - def test_use_bf16(self): - config.quantization.device = "cpu" - config.quantization.approach = "post_training_dynamic_quant" - config.quantization.use_bf16 = False - q = Quantization(config) - q.model = torch_model() - os.environ["FORCE_BF16"] = "1" - q_model = q() - del os.environ["FORCE_BF16"] - self.assertEqual(isinstance(q_model.model.linear, BF16ModuleWrapper), False) - - def test_quantization_pytorch(self): - config.quantization.device = "cpu" - config.quantization.backend = "default" - config.quantization.approach = "post_training_dynamic_quant" - config.quantization.use_bf16 = False - q = Quantization(config) - q.model = torch_model() - q_model = q() - self.assertEqual(isinstance(q_model.model.linear, torch.nn.quantized.dynamic.modules.linear.Linear), True) - - -class TestTFPyhonicConf(unittest.TestCase): - @classmethod - def tearDownClass(self): - shutil.rmtree("./nc_workspace", ignore_errors=True) - - def test_tf_quantization(self): - config.quantization.inputs = ["input"] - config.quantization.outputs = ["out"] - config.quantization.approach = "post_training_static_quant" - config.quantization.device = "cpu" - config.quantization.strategy = "basic" - config.quantization.objective = "accuracy" - config.quantization.timeout = 100 - config.quantization.accuracy_criterion.relative = 0.5 - config.quantization.reduce_range = False - - q = Quantization(config) - q.model = build_conv2d_model() - dataset = q.dataset("dummy", shape=(1, 224, 224, 3), label=True) - q.calib_dataloader = common.DataLoader(dataset) - q_model = q() - - self.assertTrue(any([i.name.endswith("_requantize") for i in q_model.graph_def.node])) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/data/test_exp_dataloader.py b/test/data/test_exp_dataloader.py deleted file mode 100644 index 52c094aa987..00000000000 --- a/test/data/test_exp_dataloader.py +++ /dev/null @@ -1,239 +0,0 @@ -"""Tests for the dataloader module.""" - -import os -import platform -import shutil -import unittest - -import numpy as np -from PIL import Image - -from neural_compressor.experimental.data import DATALOADERS, TRANSFORMS, Datasets -from neural_compressor.utils.create_obj_from_config import create_dataloader, create_dataset - - -class TestDataloader(unittest.TestCase): - def test_iterable_dataset(self): - class iter_dataset(object): - def __iter__(self): - for i in range(100): - yield np.zeros([256, 256, 3]) - - dataset = iter_dataset() - data_loader = DATALOADERS["tensorflow"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data.shape, (1, 256, 256, 3)) - - def test_tensorflow_dummy(self): - datasets = Datasets("tensorflow") - dataset = datasets["dummy"](shape=(4, 256, 256, 3)) - - data_loader = DATALOADERS["tensorflow"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (1, 256, 256, 3)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - 
self.assertEqual(data[0].shape, (2, 256, 256, 3)) - - with self.assertRaises(AssertionError): - dataset = datasets["dummy"](shape=[(4, 256, 256, 3), (256, 256, 3)]) - with self.assertRaises(AssertionError): - dataset = datasets["dummy"](shape=(4, 256, 256, 3), low=[1.0, 0.0]) - with self.assertRaises(AssertionError): - dataset = datasets["dummy"](shape=(4, 256, 256, 3), high=[128.0, 127.0]) - with self.assertRaises(AssertionError): - dataset = datasets["dummy"](shape=(4, 256, 256, 3), dtype=["float32", "int8"]) - - def test_tensorflow_dummy_v2(self): - datasets = Datasets("tensorflow") - # test with label - dataset = datasets["dummy_v2"](input_shape=(256, 256, 3), label_shape=(1,)) - data_loader = DATALOADERS["tensorflow"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (1, 256, 256, 3)) - self.assertEqual(data[1].shape, (1, 1)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (2, 256, 256, 3)) - self.assertEqual(data[1].shape, (2, 1)) - - # test without label - dataset = datasets["dummy_v2"](input_shape=(256, 256, 3)) - data_loader = DATALOADERS["tensorflow"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data.shape, (1, 256, 256, 3)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data.shape, (2, 256, 256, 3)) - - with self.assertRaises(AssertionError): - dataset = datasets["dummy_v2"](input_shape=(256, 256, 3), low=[1.0, 0.0]) - with self.assertRaises(AssertionError): - dataset = datasets["dummy_v2"](input_shape=(256, 256, 3), high=[128.0, 127.0]) - with self.assertRaises(AssertionError): - dataset = datasets["dummy_v2"](input_shape=(256, 256, 3), dtype=["float32", "int8"]) - - def test_tensorflow_sparse_dummy_v2(self): - datasets = Datasets("tensorflow") - # test with label - dataset = datasets["sparse_dummy_v2"]( - dense_shape=[[10, 20], [5, 3]], label_shape=[[1]], sparse_ratio=[0.98, 0.8] - ) - data_loader = DATALOADERS["tensorflow"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0][0][0].shape, (1, 4, 2)) - self.assertEqual(data[0][0][1].shape, (1, 4)) - self.assertEqual(data[0][1].shape, (1, 1)) - - # test without label - dataset = datasets["sparse_dummy_v2"](dense_shape=(256, 256, 3), sparse_ratio=0.3) - data_loader = DATALOADERS["tensorflow"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0][0].shape, (1, 137626, 3)) - self.assertEqual(data[0][1].shape, (1, 137626)) - - with self.assertRaises(AssertionError): - dataset = datasets["sparse_dummy_v2"](dense_shape=(256, 256, 3), low=[1.0, 0.0]) - with self.assertRaises(AssertionError): - dataset = datasets["sparse_dummy_v2"](dense_shape=(256, 256, 3), high=[128.0, 127.0]) - with self.assertRaises(AssertionError): - dataset = datasets["sparse_dummy_v2"](dense_shape=(256, 256, 3), dtype=["float32", "int8"]) - with self.assertRaises(AssertionError): - dataset = datasets["sparse_dummy_v2"](dense_shape=(256, 256, 3), dtype=["0.3", "0.5"]) - with self.assertRaises(AssertionError): - dataset = datasets["sparse_dummy_v2"](dense_shape=(256, 256, 3), label_shape=[[1], [2], [3]]) - - def test_tensorflow_dataloader_multi_input(self): - import tensorflow as tf - - x = tf.data.Dataset.from_tensor_slices((np.random.random(20), np.random.random(20))) - y = 
tf.data.Dataset.from_tensor_slices(np.random.random(20)) - dataset = tf.data.Dataset.zip((x, y)) - dataloader = DATALOADERS["tensorflow"](dataset) - for i, (x, y) in enumerate(dataloader): - self.assertIsNotNone(x) - self.assertIsNotNone(y) - break - - def test_style_transfer_dataset(self): - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpg") - - datasets = Datasets("tensorflow") - dataset = datasets["style_transfer"](content_folder="./", style_folder="./") - length = len(dataset) - image, label = dataset[0] - self.assertEqual(image[0].shape, (256, 256, 3)) - self.assertEqual(image[1].shape, (256, 256, 3)) - os.remove("test.jpg") - - def test_tensorflow_list_dict(self): - dataset = [{"a": 1, "b": 2, "c": 3, "d": 4}, {"a": 5, "b": 6, "c": 7, "d": 8}] - data_loader = DATALOADERS["tensorflow"](dataset) - - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data, {"a": [1], "b": [2], "c": [3], "d": [4]}) - - # test iterable consistent - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data, {"a": [1], "b": [2], "c": [3], "d": [4]}) - - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data, {"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}) - - def test_pytorch_dummy(self): - datasets = Datasets("pytorch") - transform = TRANSFORMS("pytorch", "preprocess")["Resize"](**{"size": 100}) - dataset = datasets["dummy"]( - shape=[(4, 256, 256, 3), (4, 1)], high=[10.0, 10.0], low=[0.0, 0.0], transform=transform - ) - - data_loader = DATALOADERS["pytorch"](dataset) - iterator = iter(data_loader) - data, label = next(iterator) - self.assertEqual(data[0].shape, (1, 256, 256, 3)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data, label = next(iterator) - self.assertEqual(data[0].shape, (2, 256, 256, 3)) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet") - def test_mxnet_dummy(self): - datasets = Datasets("mxnet") - transform = TRANSFORMS("mxnet", "preprocess")["Resize"](**{"size": 100}) - dataset = datasets["dummy"](shape=(4, 256, 256, 3), transform=transform) - - data_loader = DATALOADERS["mxnet"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (1, 256, 256, 3)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (2, 256, 256, 3)) - - dataset = datasets["dummy"](shape=(4, 256, 256, 3), label=True) - self.assertEqual(dataset[0][1], 0) - - def test_onnxrt_qlinear_dummy(self): - datasets = Datasets("onnxrt_qlinearops") - transform = TRANSFORMS("onnxrt_qlinearops", "preprocess")["Resize"](**{"size": 100}) - dataset = datasets["dummy"](shape=(4, 256, 256, 3), transform=transform) - - data_loader = DATALOADERS["onnxrt_qlinearops"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (1, 256, 256, 3)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (2, 256, 256, 3)) - - dataset = datasets["dummy"](shape=(4, 256, 256, 3), label=False) - data_loader = DATALOADERS["onnxrt_qlinearops"](dataset) - iterator = iter(data_loader) - 
data = next(iterator) - self.assertEqual(len(data), 1) - - with self.assertRaises(AssertionError): - dataset = datasets["dummy"](shape=[(4, 256, 256, 3), (4, 256, 256, 3)], dtype=["float32", "int8", "int8"]) - - def test_onnx_integer_dummy(self): - datasets = Datasets("onnxrt_integerops") - dataset = datasets["dummy"](shape=(4, 256, 256, 3)) - - data_loader = DATALOADERS["onnxrt_integerops"](dataset) - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (1, 256, 256, 3)) - # dynamic batching - data_loader.batch(batch_size=2, last_batch="rollover") - iterator = iter(data_loader) - data = next(iterator) - self.assertEqual(data[0].shape, (2, 256, 256, 3)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/data/test_exp_transformers.py b/test/data/test_exp_transformers.py deleted file mode 100644 index c2265b2ff1b..00000000000 --- a/test/data/test_exp_transformers.py +++ /dev/null @@ -1,1207 +0,0 @@ -"""Tests for the transform module.""" - -import os -import platform -import random -import unittest - -import numpy as np -from PIL import Image - -from neural_compressor.experimental.data import DATALOADERS, TRANSFORMS -from neural_compressor.utils.create_obj_from_config import create_dataset, get_postprocess -from neural_compressor.utils.utility import LazyImport - -mx = LazyImport("mxnet") -tf = LazyImport("tensorflow") -torch = LazyImport("torch") -torchvision = LazyImport("torchvision") - -random.seed(1) -np.random.seed(1) - - -class TestMetrics(unittest.TestCase): - def test_tensorflow_2(self): - image = np.ones([256, 256, 1]) - resize_kwargs = {"size": [224, 224]} - transforms = TRANSFORMS(framework="tensorflow", process="preprocess") - resize = transforms["Resize"](**resize_kwargs) - random_crop_kwargs = {"size": 128} - random_crop = transforms["RandomCrop"](**random_crop_kwargs) - transform_list = [resize, random_crop] - compose = transforms["Compose"](transform_list) - image_result = compose((image, None)) - self.assertEqual(image_result[0].shape, (128, 128)) - - -class TestONNXQLImagenetTransform(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.img = np.random.random_sample([600, 600]) * 255 - - def testResizeCropImagenetTransform(self): - transforms = TRANSFORMS("onnxrt_qlinearops", "preprocess") - transform = transforms["ResizeCropImagenet"](height=224, width=224, random_crop=True) - sample = (self.img, 0) - result = transform(sample) - resized_input = result[0] - self.assertEqual(len(resized_input), 3) - self.assertEqual(len(resized_input[0]), 224) - self.assertEqual(len(resized_input[0][0]), 224) - - -class TestONNXITImagenetTransform(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.img = np.random.random_sample([600, 600, 3]) * 255 - - def testResizeCropImagenetTransform(self): - transforms = TRANSFORMS("onnxrt_integerops", "preprocess") - transform = transforms["ResizeCropImagenet"](height=224, width=224) - sample = (self.img, 0) - result = transform(sample) - resized_input = result[0] - self.assertEqual(len(resized_input), 3) - self.assertEqual(len(resized_input[0]), 224) - self.assertEqual(len(resized_input[0][0]), 224) - - def testResizeWithAspectRatio(self): - transforms = TRANSFORMS("onnxrt_integerops", "preprocess") - transform = transforms["ResizeWithAspectRatio"](height=224, width=224) - sample = (self.img, 0) - result = transform(sample) - resized_input = result[0] - self.assertEqual(len(resized_input), 256) - self.assertEqual(len(resized_input[0]), 256) - 
self.assertEqual(len(resized_input[0][0]), 3) - - -class TestTensorflowImagenetTransform(unittest.TestCase): - tf.compat.v1.disable_v2_behavior() - - def testBilinearImagenetTransform(self): - transforms = TRANSFORMS("tensorflow", "preprocess") - transform = transforms["BilinearImagenet"](height=224, width=224) - rand_input = np.random.random_sample([600, 600, 3]).astype(np.float32) - sample = (rand_input, 0) - result = transform(sample) - resized_input = result[0].eval(session=tf.compat.v1.Session()) - self.assertEqual(len(resized_input), 224) - self.assertEqual(len(resized_input[0]), 224) - self.assertEqual(len(resized_input[0][0]), 3) - - transforms = TRANSFORMS("onnxrt_qlinearops", "preprocess") - transform = transforms["BilinearImagenet"](height=224, width=224) - rand_input = np.random.random_sample([600, 600, 3]).astype(np.float32) - sample = (rand_input, 0) - result = transform(sample) - self.assertEqual(len(resized_input), 224) - self.assertEqual(len(resized_input[0]), 224) - self.assertEqual(len(resized_input[0][0]), 3) - - def testResizeCropImagenetTransform1(self): - transforms = TRANSFORMS("tensorflow", "preprocess") - rand_input = np.random.random_sample([600, 600, 3]).astype(np.float32) - sample = (rand_input, 0) - transform = transforms["ResizeCropImagenet"]( - height=224, width=224, random_crop=True, random_flip_left_right=True - ) - result = transform(sample) - resized_input = result[0].eval(session=tf.compat.v1.Session()) - self.assertEqual(len(resized_input), 224) - self.assertEqual(len(resized_input[0]), 224) - self.assertEqual(len(resized_input[0][0]), 3) - - @unittest.skipIf(tf.version.VERSION < "2.5.0", "Skip tf.experimental.numpy.moveaxis") - def testResizeCropImagenetTransform2(self): - transforms = TRANSFORMS("tensorflow", "preprocess") - rand_input = np.random.random_sample([600, 600, 3]).astype(np.float32) - sample = (rand_input, 0) - transform = transforms["ResizeCropImagenet"]( - height=224, - width=224, - random_crop=False, - random_flip_left_right=False, - data_format="channels_last", - subpixels="RGB", - ) - result = transform(sample) - resized_input1 = result[0].eval(session=tf.compat.v1.Session()) - transform = transforms["ResizeCropImagenet"]( - height=224, - width=224, - random_crop=False, - random_flip_left_right=False, - data_format="channels_last", - subpixels="BGR", - ) - result = transform(sample) - resized_input2 = result[0].eval(session=tf.compat.v1.Session()) - self.assertTrue((resized_input1[..., 0] == resized_input2[..., -1]).all()) - - transform = transforms["ResizeCropImagenet"]( - height=224, - width=224, - random_crop=False, - random_flip_left_right=False, - data_format="channels_first", - subpixels="BGR", - ) - rand_input = np.moveaxis(rand_input, -1, 0) - sample = (rand_input, 0) - result = transform(sample) - resized_input3 = result[0].eval(session=tf.compat.v1.Session()) - self.assertTrue((resized_input1[..., 0] == resized_input3[..., -1]).all()) - - def testLabelShift(self): - transforms = TRANSFORMS("tensorflow", "postprocess") - transform = transforms["LabelShift"](label_shift=1) - rand_input = np.random.random_sample([600, 600, 3]).astype(np.float32) - sample = (rand_input, 1001) - label = transform(sample)[1] - self.assertEqual(label, 1000) - if platform.architecture()[0] == "64bit": - self.assertTrue(isinstance(label, np.int64) or isinstance(label, np.int32)) - else: - self.assertTrue(isinstance(label, np.int32)) - - label = transform((rand_input, [(1, 2, 3)]))[1] - self.assertTrue(isinstance(label, list)) - 
self.assertTrue(isinstance(label[0], tuple)) - - label = transform((rand_input, [[1, 2, 3]]))[1] - self.assertTrue(isinstance(label, list)) - self.assertTrue(isinstance(label[0], list)) - - label = transform((rand_input, [np.array([1, 2, 3])]))[1] - self.assertTrue(isinstance(label, list)) - self.assertTrue(isinstance(label[0], np.ndarray)) - - def testQuantizedInput(self): - transforms = TRANSFORMS("tensorflow", "preprocess") - transform = transforms["QuantizedInput"](dtype="uint8", scale=100) - rand_input = np.random.random_sample([600, 600, 3]).astype(np.float32) - sample = (rand_input, 1001) - result = transform(sample) - quantized_input = result[0].eval(session=tf.compat.v1.Session()) - self.assertLessEqual(quantized_input.max(), 255) - self.assertGreaterEqual(quantized_input.min(), 0) - - transform = transforms["QuantizedInput"](dtype="uint8") - sample = (rand_input, 1001) - result = transform(sample) - quantized_input = result[0] - self.assertLessEqual(quantized_input.max(), 1) - self.assertGreaterEqual(quantized_input.min(), 0) - - -class TestDataConversion(unittest.TestCase): - @classmethod - def setUpClass(cls): - if platform.system().lower() == "windows": - cls.skipTest(cls, "not support mxnet on windows yet") - cls.img = np.random.random_sample([10, 10, 3]) * 255 - cls.mx_trans = TRANSFORMS("mxnet", "preprocess") - cls.pt_trans = TRANSFORMS("pytorch", "preprocess") - - def testToPILImage(self): - trans = TestDataConversion.pt_trans["ToPILImage"]() - image, _ = trans((TestDataConversion.img.astype(np.uint8), None)) - self.assertTrue(isinstance(image, Image.Image)) - - def testToTensor(self): - trans = TestDataConversion.pt_trans["ToTensor"]() - image, _ = trans((TestDataConversion.img.astype(np.uint8), None)) - self.assertTrue(isinstance(image, torch.Tensor)) - - trans = TestDataConversion.mx_trans["ToTensor"]() - image, _ = trans((mx.nd.array(TestDataConversion.img), None)) - self.assertTrue(isinstance(image, mx.ndarray.NDArray)) # pylint: disable=no-member - - def testToNDArray(self): - trans = TestDataConversion.mx_trans["ToNDArray"]() - image, _ = trans((TestDataConversion.img.astype(np.uint8), None)) - self.assertTrue(isinstance(image, mx.ndarray.NDArray)) - - -class TestSameTransfoms(unittest.TestCase): - @classmethod - def setUpClass(cls): - if platform.system().lower() == "windows": - cls.skipTest(cls, "not support mxnet on windows yet") - cls.img = np.random.random_sample([10, 10, 3]) * 255 - cls.tf_trans = TRANSFORMS("tensorflow", "preprocess") - cls.pt_trans = TRANSFORMS("pytorch", "preprocess") - cls.mx_trans = TRANSFORMS("mxnet", "preprocess") - cls.ox_trans = TRANSFORMS("onnxrt_qlinearops", "preprocess") - cls.mx_img = mx.nd.array(cls.img.astype(np.uint8)) - cls.pt_img = Image.fromarray(cls.img.astype(np.uint8)) - cls.tf_img = tf.constant(cls.img) - _ = TRANSFORMS("tensorflow", "postprocess") - _ = TRANSFORMS("pytorch", "postprocess") - _ = TRANSFORMS("mxnet", "postprocess") - _ = TRANSFORMS("onnxrt_qlinearops", "postprocess") - _ = TRANSFORMS("onnxrt_integerops", "postprocess") - - def testCast(self): - args = {"dtype": "int64"} - tf_func = TestSameTransfoms.tf_trans["Cast"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result[0][0][0].dtype, "int64") - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result[0][0][0].dtype, "int64") - mx_func = TestSameTransfoms.mx_trans["Cast"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, 
None)) - self.assertEqual(mx_result[0][0][0].dtype, np.int64) - ox_func = TestSameTransfoms.ox_trans["Cast"](**args) - ox_result = ox_func((TestSameTransfoms.img, None)) - self.assertEqual(ox_result[0][0][0].dtype, "int64") - - totensor = TestSameTransfoms.pt_trans["ToTensor"]() - cast = TestSameTransfoms.pt_trans["Cast"](**args) - pt_func = TestSameTransfoms.pt_trans["Compose"]([totensor, cast]) - pt_result = pt_func((TestSameTransfoms.pt_img, None)) - self.assertEqual(pt_result[0][0][0].dtype, torch.int64) - - def testCropToBoundingBox(self): - args = {"offset_height": 2, "offset_width": 2, "target_height": 5, "target_width": 5} - pt_func = TestSameTransfoms.pt_trans["CropToBoundingBox"](**args) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - self.assertEqual(pt_result.size, (5, 5)) - - ox_func = TestSameTransfoms.ox_trans["CropToBoundingBox"](**args) - ox_result = ox_func((TestSameTransfoms.img, None))[0] - self.assertEqual(ox_result.shape, (5, 5, 3)) - - mx_func = TestSameTransfoms.mx_trans["CropToBoundingBox"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(mx_result.shape, (5, 5, 3)) - - tf_func = TestSameTransfoms.tf_trans["CropToBoundingBox"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (5, 5, 3)) - - def testNormalize(self): - args = {} - normalize = TestSameTransfoms.pt_trans["Normalize"](**args) - totensor = TestSameTransfoms.pt_trans["ToTensor"]() - pt_func = TestSameTransfoms.pt_trans["Compose"]([totensor, normalize]) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - self.assertEqual(TestSameTransfoms.img.astype(np.uint8)[0][0][0] / 255.0, pt_result[0][0][0]) - args = {"std": [0.0]} - with self.assertRaises(ValueError): - TestSameTransfoms.pt_trans["Normalize"](**args) - - def testRescale(self): - ox_func = TestSameTransfoms.ox_trans["Rescale"]() - ox_result = ox_func((TestSameTransfoms.img, None))[0] - self.assertAlmostEqual(ox_result[1][2][0], TestSameTransfoms.img[1][2][0] / 255.0) - - def testTranspose(self): - args = {"perm": [2, 0, 1]} - tf_func = TestSameTransfoms.tf_trans["Transpose"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - ox_func = TestSameTransfoms.ox_trans["Transpose"](**args) - ox_result = ox_func((TestSameTransfoms.img, None))[0] - mx_func = TestSameTransfoms.mx_trans["Transpose"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - pt_transpose = TestSameTransfoms.pt_trans["Transpose"](**args) - pt_totensor = TestSameTransfoms.pt_trans["ToTensor"]() - pt_compose = TestSameTransfoms.pt_trans["Compose"]([pt_totensor, pt_transpose]) - pt_result = pt_compose((TestSameTransfoms.pt_img, None))[0] - - self.assertEqual(tf_result.shape, (3, 10, 10)) - self.assertEqual(ox_result.shape, (3, 10, 10)) - self.assertEqual(mx_result.shape, (3, 10, 10)) - self.assertEqual(pt_result.shape, (10, 3, 10)) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (3, 10, 10)) - - def testCenterCrop(self): - args = {"size": [4, 4]} - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["CenterCrop"](**args) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - 
mx_func = TestSameTransfoms.mx_trans["CenterCrop"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - self.assertEqual(pt_result.size, (4, 4)) - self.assertEqual(mx_result.shape, (4, 4, 3)) - self.assertEqual(np.array(pt_result)[0][0][0], mx_result.asnumpy()[0][0][0]) - self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0])) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (4, 4, 3)) - - tf_result = tf_func((tf.constant(TestSameTransfoms.img.reshape((1, 10, 10, 3))), None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (1, 4, 4, 3)) - - args = {"size": 4} - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["CenterCrop"](**args) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["CenterCrop"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - self.assertEqual(pt_result.size, (4, 4)) - self.assertEqual(mx_result.shape, (4, 4, 3)) - self.assertEqual(np.array(pt_result)[0][0][0], mx_result.asnumpy()[0][0][0]) - self.assertEqual(np.array(pt_result)[0][0][0], int(tf_result[0][0][0])) - - args = {"size": [4]} - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - - with self.assertRaises(ValueError): - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((np.array([[TestSameTransfoms.img]]), None)) - with self.assertRaises(ValueError): - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((tf.constant(TestSameTransfoms.img.reshape((1, 1, 10, 10, 3))), None)) - - args = {"size": [20]} - with self.assertRaises(ValueError): - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((TestSameTransfoms.img, None)) - with self.assertRaises(ValueError): - tf_func = TestSameTransfoms.tf_trans["CenterCrop"](**args) - tf_result = tf_func((TestSameTransfoms.tf_img, None)) - - def testResizeWithRatio(self): - args = {"padding": True} - label = [[0.1, 0.1, 0.5, 0.5], [], [], []] - tf_func = TestSameTransfoms.tf_trans["ResizeWithRatio"](**args) - tf_result = tf_func((TestSameTransfoms.img, label))[0] - self.assertEqual(tf_result.shape, (1365, 1365, 3)) - - args = {"padding": False} - tf_func = TestSameTransfoms.tf_trans["ResizeWithRatio"](**args) - tf_result = tf_func((TestSameTransfoms.img, label))[0] - self.assertTrue((tf_result.shape[0] == 800 or tf_result.shape[1] == 1365)) - - def testResize(self): - tf_func = TestSameTransfoms.tf_trans["Resize"](**{"size": [4, 5]}) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["Resize"](**{"size": [4, 5]}) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["Resize"](**{"size": [4, 5]}) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (5, 4, 3)) - self.assertEqual(pt_result.size, (5, 4)) - self.assertEqual(mx_result.shape, (4, 5, 3)) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (4, 
5, 3)) - - args = {"size": 4} - tf_func = TestSameTransfoms.tf_trans["Resize"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["Resize"](**args) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["Resize"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - self.assertEqual(pt_result.size, (4, 4)) - self.assertEqual(mx_result.shape, (4, 4, 3)) - - args = {"size": [4]} - tf_func = TestSameTransfoms.tf_trans["Resize"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - mx_func = TestSameTransfoms.mx_trans["Resize"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - self.assertEqual(mx_result.shape, (4, 4, 3)) - - args = {"size": 4, "interpolation": "test"} - with self.assertRaises(ValueError): - TestSameTransfoms.tf_trans["Resize"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.pt_trans["Resize"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.mx_trans["Resize"](**args) - - def testRandomResizedCrop(self): - tf_func = TestSameTransfoms.tf_trans["RandomResizedCrop"](**{"size": [4, 5]}) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["RandomResizedCrop"](**{"size": [4, 5]}) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["RandomResizedCrop"](**{"size": [4, 5]}) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (5, 4, 3)) - self.assertEqual(pt_result.size, (5, 4)) - self.assertEqual(mx_result.shape, (4, 5, 3)) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (4, 5, 3)) - - args = {"size": [4]} - tf_func = TestSameTransfoms.tf_trans["RandomResizedCrop"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - mx_func = TestSameTransfoms.mx_trans["RandomResizedCrop"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(mx_result.shape, (4, 4, 3)) - - args = {"size": 4} - tf_func = TestSameTransfoms.tf_trans["RandomResizedCrop"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["RandomResizedCrop"](**args) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["RandomResizedCrop"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertEqual(tf_result.shape, (4, 4, 3)) - self.assertEqual(pt_result.size, (4, 4)) - self.assertEqual(mx_result.shape, (4, 4, 3)) - - args = {"size": 4, "scale": (0.8, 0.2)} - with self.assertRaises(ValueError): - TestSameTransfoms.tf_trans["RandomResizedCrop"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.pt_trans["RandomResizedCrop"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.mx_trans["RandomResizedCrop"](**args) - - args = {"size": 4, "interpolation": "test"} - with self.assertRaises(ValueError): - TestSameTransfoms.tf_trans["RandomResizedCrop"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.pt_trans["RandomResizedCrop"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.mx_trans["RandomResizedCrop"](**args) - - def testCropResize(self): - args = {"x": 0, "y": 0, 
"width": 10, "height": 10, "size": [5, 5]} - tf_func = TestSameTransfoms.tf_trans["CropResize"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - mx_func = TestSameTransfoms.mx_trans["CropResize"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - ox_func = TestSameTransfoms.ox_trans["CropResize"](**args) - ox_result = ox_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["CropResize"](**args) - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - self.assertEqual(mx_result.shape, (5, 5, 3)) - self.assertEqual(ox_result.shape, (5, 5, 3)) - self.assertEqual(pt_result.size, (5, 5)) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (5, 5, 3)) - - args = {"x": 0, "y": 0, "width": 10, "height": 10, "size": 5} - tf_func = TestSameTransfoms.tf_trans["CropResize"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - mx_func = TestSameTransfoms.mx_trans["CropResize"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - ox_func = TestSameTransfoms.ox_trans["CropResize"](**args) - ox_result = ox_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - self.assertEqual(mx_result.shape, (5, 5, 3)) - self.assertEqual(ox_result.shape, (5, 5, 3)) - - args = {"x": 0, "y": 0, "width": 10, "height": 10, "size": [5]} - tf_func = TestSameTransfoms.tf_trans["CropResize"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - mx_func = TestSameTransfoms.mx_trans["CropResize"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - ox_func = TestSameTransfoms.ox_trans["CropResize"](**args) - ox_result = ox_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - self.assertEqual(mx_result.shape, (5, 5, 3)) - self.assertEqual(ox_result.shape, (5, 5, 3)) - - args = {"x": 0, "y": 0, "width": 10, "height": 10, "size": [5, 5]} - tf_func = TestSameTransfoms.tf_trans["CropResize"](**args) - tf_result = tf_func((TestSameTransfoms.img, None))[0] - mx_func = TestSameTransfoms.mx_trans["CropResize"](**args) - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - ox_func = TestSameTransfoms.ox_trans["CropResize"](**args) - ox_result = ox_func((TestSameTransfoms.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - self.assertEqual(mx_result.shape, (5, 5, 3)) - self.assertEqual(ox_result.shape, (5, 5, 3)) - - args = {"x": 0, "y": 0, "width": 10, "height": 10, "size": 5, "interpolation": "test"} - with self.assertRaises(ValueError): - TestSameTransfoms.ox_trans["CropResize"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.mx_trans["CropResize"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.tf_trans["CropResize"](**args) - with self.assertRaises(ValueError): - TestSameTransfoms.pt_trans["CropResize"](**args) - - def testRandomHorizontalFlip(self): - tf_func = TestSameTransfoms.tf_trans["RandomHorizontalFlip"]() - tf_result = tf_func((TestSameTransfoms.img, None))[0] - ox_func = TestSameTransfoms.ox_trans["RandomHorizontalFlip"]() - ox_result = ox_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["RandomHorizontalFlip"]() - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["RandomHorizontalFlip"]() - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - 
self.assertTrue( - (np.array(TestSameTransfoms.pt_img) == np.array(pt_result)).all() - or (np.fliplr(np.array(TestSameTransfoms.pt_img)) == np.array(pt_result)).all() - ) - self.assertTrue( - (TestSameTransfoms.img == tf_result).all() or (np.fliplr(TestSameTransfoms.img) == tf_result).all() - ) - self.assertTrue( - (TestSameTransfoms.img == ox_result).all() or (np.fliplr(TestSameTransfoms.img) == ox_result).all() - ) - self.assertTrue( - (TestSameTransfoms.mx_img.asnumpy() == mx_result.asnumpy()).all() - or (np.fliplr(TestSameTransfoms.mx_img.asnumpy()) == mx_result.asnumpy()).all() - ) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertTrue( - (TestSameTransfoms.img == tf_result).all() or (np.fliplr(TestSameTransfoms.img) == tf_result).all() - ) - - def testRandomVerticalFlip(self): - tf_func = TestSameTransfoms.tf_trans["RandomVerticalFlip"]() - tf_result = tf_func((TestSameTransfoms.img, None))[0] - ox_func = TestSameTransfoms.ox_trans["RandomVerticalFlip"]() - ox_result = ox_func((TestSameTransfoms.img, None))[0] - pt_func = TestSameTransfoms.pt_trans["RandomVerticalFlip"]() - pt_result = pt_func((TestSameTransfoms.pt_img, None))[0] - mx_func = TestSameTransfoms.mx_trans["RandomVerticalFlip"]() - mx_result = mx_func((TestSameTransfoms.mx_img, None))[0] - self.assertTrue( - (np.array(TestSameTransfoms.pt_img) == np.array(pt_result)).all() - or (np.flipud(np.array(TestSameTransfoms.pt_img)) == np.array(pt_result)).all() - ) - self.assertTrue( - (TestSameTransfoms.img == tf_result).all() or (np.flipud(TestSameTransfoms.img) == tf_result).all() - ) - self.assertTrue( - (TestSameTransfoms.img == ox_result).all() or (np.flipud(TestSameTransfoms.img) == ox_result).all() - ) - self.assertTrue( - (TestSameTransfoms.mx_img.asnumpy() == mx_result.asnumpy()).all() - or (np.flipud(TestSameTransfoms.mx_img.asnumpy()) == mx_result.asnumpy()).all() - ) - - tf_result = tf_func((TestSameTransfoms.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertTrue( - (TestSameTransfoms.img == tf_result).all() or (np.flipud(TestSameTransfoms.img) == tf_result).all() - ) - - -class TestTFTransorm(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.img = np.ones([10, 10, 3]) - cls.tf_img = tf.constant(cls.img) - cls.transforms = TRANSFORMS("tensorflow", "preprocess") - cls.tf_img = tf.constant(cls.img) - - def testRandomCrop(self): - args = {"size": [50]} - transform = TestTFTransorm.transforms["RandomCrop"](**args) - self.assertRaises(ValueError, transform, (TestTFTransorm.img, None)) - self.assertRaises(ValueError, transform, (TestTFTransorm.tf_img, None)) - - args = {"size": [5, 5]} - transform = TestTFTransorm.transforms["RandomCrop"](**args) - img_result = transform((TestTFTransorm.img, None))[0] - self.assertEqual(img_result.shape, (5, 5, 3)) - tf_result = transform((tf.constant(TestTFTransorm.img.reshape((1, 10, 10, 3))), None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (1, 5, 5, 3)) - - args = {"size": [10, 10]} - transform = TestTFTransorm.transforms["RandomCrop"](**args) - img_result = transform((TestTFTransorm.img, None))[0] - self.assertEqual(img_result.shape, (10, 10, 3)) - tf_result = transform((TestTFTransorm.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (10, 10, 3)) - - def testPaddedCenterCrop(self): - args = {"size": [4, 4]} - tf_func = 
TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None))[0] - self.assertEqual(tf_result.shape, (10, 10, 3)) - - args = {"size": [4, 4], "crop_padding": 4} - tf_func = TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - - args = {"size": 4} - tf_func = TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None))[0] - self.assertEqual(tf_result.shape, (10, 10, 3)) - - args = {"size": 4, "crop_padding": 4} - tf_func = TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - - args = {"size": [4]} - tf_func = TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None))[0] - self.assertEqual(tf_result.shape, (10, 10, 3)) - - args = {"size": [4], "crop_padding": 4} - tf_func = TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None))[0] - self.assertEqual(tf_result.shape, (5, 5, 3)) - - args = {"size": [4, 5], "crop_padding": 4} - with self.assertRaises(ValueError): - tf_func = TestTFTransorm.transforms["PaddedCenterCrop"](**args) - tf_result = tf_func((TestTFTransorm.img, None)) - - def testRescale(self): - transform = TestTFTransorm.transforms["Rescale"]() - img_result = transform((TestTFTransorm.img, None))[0] - comp_result = np.array(TestTFTransorm.img) / 255.0 - self.assertAlmostEqual(img_result[0][0][0], comp_result[0][0][0], places=5) - - tf_result = transform((TestTFTransorm.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertAlmostEqual(tf_result[0][0][0], comp_result[0][0][0], places=5) - - def testNormalize(self): - args = {"mean": [0.0, 0.0, 0.0], "std": [0.2, 0.5, 0.1]} - normalize = TestTFTransorm.transforms["Normalize"](**args) - img_result = normalize((TestTFTransorm.img, None))[0] - comp_result = np.array(TestTFTransorm.img) / [0.2, 0.5, 0.1] - self.assertAlmostEqual(img_result[0][0][0], comp_result[0][0][0], places=5) - self.assertAlmostEqual(img_result[0][0][1], comp_result[0][0][1], places=5) - self.assertAlmostEqual(img_result[0][0][2], comp_result[0][0][2], places=5) - - tf_result = normalize((TestTFTransorm.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertAlmostEqual(tf_result[0][0][0], comp_result[0][0][0], places=5) - - args = {"mean": [0.0, 0.0, 0.0], "std": [0, 0, 0]} - with self.assertRaises(ValueError): - TestTFTransorm.transforms["Normalize"](**args) - - def testRandomResizedCrop(self): - args = {"size": [50]} - randomresizedcrop = TestTFTransorm.transforms["RandomResizedCrop"](**args) - compose = TestTFTransorm.transforms["Compose"]([randomresizedcrop]) - image_result = compose((TestTFTransorm.img, None))[0] - self.assertEqual(image_result.shape, (50, 50, 3)) - args = {"size": [100, 100]} - randomresizedcrop = TestTFTransorm.transforms["RandomResizedCrop"](**args) - compose = TestTFTransorm.transforms["Compose"]([randomresizedcrop]) - image_result = compose((TestTFTransorm.img, None))[0] - self.assertEqual(image_result.shape, (100, 100, 3)) - tf_result = randomresizedcrop((TestTFTransorm.tf_img, None))[0] - tf_result = tf_result.eval(session=tf.compat.v1.Session()) - self.assertEqual(tf_result.shape, (100, 100, 3)) - args = {"size": [100, 100], "scale": (0.8, 0.1)} - with self.assertRaises(ValueError): - 
TestTFTransorm.transforms["RandomResizedCrop"](**args) - - def testSquadV1(self): - import json - import ssl - import urllib - - ssl._create_default_https_context = ssl._create_unverified_context - - vocab_url = ( - "https://raw.githubusercontent.com/microsoft/SDNet/master/bert_vocab_files/bert-large-uncased-vocab.txt" - ) - urllib.request.urlretrieve(vocab_url, "./vocab.txt") - label = [ - { - "paragraphs": [ - { - "context": "Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season.", - "qas": [ - { - "answers": [ - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - ], - "question": "Which NFL team represented the AFC at Super Bowl 50?", - "id": "56be4db0acb8001400a502ec", - } - ], - } - ] - } - ] - fake_json = json.dumps({"data": label}) - with open("dev.json", "w") as f: - f.write(fake_json) - args = {"label_file": "./dev.json", "vocab_file": "./vocab.txt"} - post_transforms = TRANSFORMS("tensorflow", "postprocess") - squadv1 = post_transforms["SquadV1"](**args) - - preds_0 = np.array([1000000000]) - preds_1 = np.random.uniform(low=-12.3, high=6.8, size=(1, 384)) - preds_2 = np.random.uniform(low=-10.8, high=7.4, size=(1, 384)) - preds = [preds_0, preds_1, preds_2] - result = squadv1((preds, label)) - self.assertTrue(result[1][0]["paragraphs"][0]["qas"][0]["id"] in result[0]) - os.remove("dev.json") - os.remove("vocab.txt") - - -class TestAlignImageChannel(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.img1 = np.random.random_sample([100, 100, 3]) * 255 - cls.img2 = np.random.random_sample([100, 100]) * 255 - cls.img3 = np.random.random_sample([100, 100, 4]) * 255 - cls.pt_img1 = Image.fromarray(cls.img1.astype(np.uint8)) - cls.pt_img2 = Image.fromarray(cls.img2.astype(np.uint8)) - cls.pt_img3 = Image.fromarray(cls.img3.astype(np.uint8)) - - def testTensorflow(self): - transforms = TRANSFORMS("tensorflow", "preprocess") - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.img1.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 1) - - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.img2.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 1) - - align = transforms["AlignImageChannel"](**{"dim": 3}) - image, _ = align((TestAlignImageChannel.img3.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 3) - - align = transforms["AlignImageChannel"](**{"dim": 2}) - self.assertRaises(ValueError, align, (TestAlignImageChannel.img1.astype(np.uint8), None)) - - with self.assertRaises(ValueError): - transforms["AlignImageChannel"](**{"dim": 5}) - - def testONNX(self): - transforms = TRANSFORMS("onnxrt_qlinearops", "preprocess") - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.img1.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 1) - - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.img2.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 1) - - align = transforms["AlignImageChannel"](**{"dim": 3}) - image, _ = align((TestAlignImageChannel.img3.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 3) - - align = transforms["AlignImageChannel"](**{"dim": 2}) - self.assertRaises(ValueError, align, (TestAlignImageChannel.img1.astype(np.uint8), None)) - - with 
self.assertRaises(ValueError): - transforms["AlignImageChannel"](**{"dim": 5}) - - def testPyTorch(self): - transforms = TRANSFORMS("pytorch", "preprocess") - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.pt_img1, None)) - self.assertEqual(image.mode, "L") - - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.pt_img2, None)) - self.assertEqual(image.mode, "L") - - align = transforms["AlignImageChannel"](**{"dim": 3}) - image, _ = align((TestAlignImageChannel.pt_img3, None)) - self.assertEqual(image.mode, "RGB") - - with self.assertRaises(ValueError): - align = transforms["AlignImageChannel"](**{"dim": 2}) - - with self.assertRaises(ValueError): - transforms["AlignImageChannel"](**{"dim": 5}) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet") - def testMXNet(self): - transforms = TRANSFORMS("mxnet", "preprocess") - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.img1.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 1) - - align = transforms["AlignImageChannel"](**{"dim": 1}) - image, _ = align((TestAlignImageChannel.img2.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 1) - - align = transforms["AlignImageChannel"](**{"dim": 3}) - image, _ = align((TestAlignImageChannel.img3.astype(np.uint8), None)) - self.assertEqual(image.shape[-1], 3) - - align = transforms["AlignImageChannel"](**{"dim": 2}) - self.assertRaises(ValueError, align, (TestAlignImageChannel.img1.astype(np.uint8), None)) - - with self.assertRaises(ValueError): - transforms["AlignImageChannel"](**{"dim": 5}) - - -class TestToArray(unittest.TestCase): - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet") - def testParse(self): - random_array = np.random.random_sample([10, 10, 3]) * 255 - random_array = random_array.astype(np.uint8) - img1 = Image.fromarray(random_array) - onnx_transforms = TRANSFORMS("onnxrt_qlinearops", "preprocess") - onnx_parse = onnx_transforms["ToArray"]() - img, _ = onnx_parse((img1, None)) - self.assertTrue(isinstance(img, np.ndarray)) - - mxnet_transforms = TRANSFORMS("mxnet", "preprocess") - mxnet_parse = mxnet_transforms["ToArray"]() - img, _ = mxnet_parse((mx.nd.array(random_array), None)) - self.assertTrue(isinstance(img, np.ndarray)) - self.assertRaises(ValueError, mxnet_parse, ([1, 2], None)) - - -class TestMXNetTransform(unittest.TestCase): - @classmethod - def setUpClass(cls): - if platform.system().lower() == "windows": - cls.skipTest(cls, "not support mxnet on windows yet") - array = np.random.random_sample([100, 100, 3]) * 255 - cls.img = mx.nd.array(array) - cls.transforms = TRANSFORMS("mxnet", "preprocess") - - def testRandomCrop(self): - args = {"size": [50]} - randomcrop = TestMXNetTransform.transforms["RandomCrop"](**args) - compose = TestMXNetTransform.transforms["Compose"]([randomcrop]) - image_result = compose((TestMXNetTransform.img, None)) - self.assertEqual(image_result[0].shape, (50, 50, 3)) - - def testNormalize(self): - args = {"mean": [0.0, 0.0, 0.0], "std": [0.29, 0.24, 0.25]} - normalize = TestMXNetTransform.transforms["Normalize"](**args) - image_result = normalize((TestMXNetTransform.img, None)) - self.assertAlmostEqual( - image_result[0].asnumpy()[0][0][0], (TestMXNetTransform.img.asnumpy() / [0.29])[0][0][0], places=3 - ) - - -class TestONNXTransfrom(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.img = 
np.random.random_sample([100, 100, 3]) * 255 - cls.transforms = TRANSFORMS("onnxrt_qlinearops", "preprocess") - - def testResize(self): - args = {"size": [224]} - resize = TestONNXTransfrom.transforms["Resize"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([resize]) - image_result = compose((self.img, None)) - self.assertEqual(image_result[0].shape, (224, 224, 3)) - args = {"size": [100, 100], "interpolation": "test"} - with self.assertRaises(ValueError): - TestONNXTransfrom.transforms["Resize"](**args) - - args = {"size": 224} - resize = TestONNXTransfrom.transforms["Resize"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([resize]) - image_result = compose((self.img, None)) - self.assertEqual(image_result[0].shape, (224, 224, 3)) - - args = {"size": [224, 224]} - resize = TestONNXTransfrom.transforms["Resize"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([resize]) - image_result = compose((self.img, None)) - self.assertEqual(image_result[0].shape, (224, 224, 3)) - - def testNormalize(self): - args = {"mean": [0.0, 0.0, 0.0], "std": [0.29, 0.24, 0.25]} - normalize = TestONNXTransfrom.transforms["Normalize"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([normalize]) - image_result = compose((TestONNXTransfrom.img, None)) - self.assertTrue((image_result[0] == np.array(TestONNXTransfrom.img) / [0.29, 0.24, 0.25]).all()) - - args = {"mean": [0.0, 0.0, 0.0], "std": [0, 0, 0]} - with self.assertRaises(ValueError): - TestONNXTransfrom.transforms["Normalize"](**args) - - def testRandomCrop(self): - args = {"size": [50]} - randomcrop = TestONNXTransfrom.transforms["RandomCrop"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([randomcrop]) - image_result = compose((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (50, 50, 3)) - args = {"size": [1000, 1000]} - with self.assertRaises(ValueError): - trans = TestONNXTransfrom.transforms["RandomCrop"](**args) - trans((TestONNXTransfrom.img, None)) - - args = {"size": 50} - randomcrop = TestONNXTransfrom.transforms["RandomCrop"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([randomcrop]) - image_result = compose((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (50, 50, 3)) - - args = {"size": [100, 100]} - randomcrop = TestONNXTransfrom.transforms["RandomCrop"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([randomcrop]) - image_result = compose((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (100, 100, 3)) - - def testCenterCrop(self): - args = {"size": [100]} - centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([centercrop]) - image_result = compose((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (100, 100, 3)) - args = {"size": 5} - centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args) - image_result = centercrop((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (5, 5, 3)) - args = {"size": [5, 6]} - centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args) - image_result = centercrop((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (5, 6, 3)) - args = {"size": [150]} - centercrop = TestONNXTransfrom.transforms["CenterCrop"](**args) - with self.assertRaises(ValueError): - centercrop((TestONNXTransfrom.img, None)) - - def testRandomResizedCrop(self): - args = {"size": [150]} - randomresizedcrop = 
TestONNXTransfrom.transforms["RandomResizedCrop"](**args) - compose = TestONNXTransfrom.transforms["Compose"]([randomresizedcrop]) - image_result = compose((TestONNXTransfrom.img, None)) - self.assertEqual(image_result[0].shape, (150, 150, 3)) - args = {"size": [150, 150], "scale": (0.9, 0.3)} - with self.assertRaises(ValueError): - TestONNXTransfrom.transforms["RandomResizedCrop"](**args) - - args = {"size": 150, "interpolation": "test"} - with self.assertRaises(ValueError): - TestONNXTransfrom.transforms["RandomResizedCrop"](**args) - - -class TestImagenetTransform(unittest.TestCase): - def testParseDecodeImagenet(self): - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpeg") - - image = tf.compat.v1.gfile.FastGFile("test.jpeg", "rb").read() - label = 10 - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), - "image/class/label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])), - "image/object/bbox/xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/ymin": tf.train.Feature(float_list=tf.train.FloatList(value=[20])), - "image/object/bbox/xmax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - "image/object/bbox/ymax": tf.train.Feature(float_list=tf.train.FloatList(value=[200])), - } - ) - ) - with tf.io.TFRecordWriter("test-0-of-0") as writer: - writer.write(example.SerializeToString()) - eval_dataset = create_dataset("tensorflow", {"ImageRecord": {"root": "./"}}, {"ParseDecodeImagenet": {}}, None) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 100, 100, 3)) - self.assertEqual(labels[0][0], 10) - break - - from neural_compressor.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet - - func = ParseDecodeImagenet() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - from neural_compressor.experimental.data.datasets.dataset import TensorflowTFRecordDataset - - ds = TensorflowTFRecordDataset("test-0-of-0", func) - dataloader = DATALOADERS["tensorflow"](dataset=ds, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 100, 100, 3)) - self.assertEqual(labels[0][0], 10) - break - - os.remove("test-0-of-0") - os.remove("test.jpeg") - - -class TestCOCOTransform(unittest.TestCase): - def testCOCODecode(self): - tf.compat.v1.disable_eager_execution() - - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpeg") - - image = tf.compat.v1.gfile.FastGFile("test.jpeg", "rb").read() - source_id = "000000397133.jpg".encode("utf-8") - label = "person".encode("utf-8") - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), - "image/object/class/text": tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])), - "image/source_id": tf.train.Feature(bytes_list=tf.train.BytesList(value=[source_id])), - "image/object/bbox/xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/ymin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - 
"image/object/bbox/xmax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - "image/object/bbox/ymax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - } - ) - ) - - with tf.io.TFRecordWriter("test.record") as writer: - writer.write(example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", - {"COCORecord": {"root": "test.record"}}, - { - "ParseDecodeCoco": {}, - "Resize": {"size": 50}, - "Cast": {"dtype": "int64"}, - "CropToBoundingBox": {"offset_height": 2, "offset_width": 2, "target_height": 5, "target_width": 5}, - "CenterCrop": {"size": [4, 4]}, - "RandomResizedCrop": {"size": [4, 5]}, - }, - None, - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 4, 5, 3)) - self.assertEqual(labels[0].shape, (1, 1, 4)) - - from neural_compressor.experimental.data.datasets.coco_dataset import ParseDecodeCoco - from neural_compressor.experimental.data.transforms.transform import TensorflowResizeWithRatio - - func = ParseDecodeCoco() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - func = ParseDecodeCoco() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - func = TensorflowResizeWithRatio(**{"padding": True}) - out = func(out) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (1365, 1365, 3)) - - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), - "image/source_id": tf.train.Feature(bytes_list=tf.train.BytesList(value=[source_id])), - "image/object/bbox/xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/ymin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/xmax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - "image/object/bbox/ymax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - } - ) - ) - - with tf.io.TFRecordWriter("test2.record") as writer: - writer.write(example.SerializeToString()) - self.assertRaises( - ValueError, create_dataset, "tensorflow", {"COCORecord": {"root": "test2.record"}}, None, None - ) - - os.remove("test2.record") - os.remove("test.record") - os.remove("test.jpeg") - - -class TestVOCTransform(unittest.TestCase): - def testVOCDecode(self): - import shutil - - tf.compat.v1.disable_eager_execution() - - def _bytes_list_feature(values): - import six - - def norm2bytes(value): - return value.encode() if isinstance(value, str) and six.PY3 else value - - return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) - - def _int64_list_feature(values): - import collections - import collections.abc - - if not isinstance(values, collections.abc.Iterable): - values = [values] - return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) - - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpg") - random_array = np.random.random_sample([100, 100, 3]) * 0 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.png") - image_data = tf.compat.v1.gfile.GFile("test.jpg", "rb").read() - seg_data = tf.compat.v1.gfile.GFile("test.png", "rb").read() - filename = "test" - - example = 
tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": _bytes_list_feature(image_data), - "image/filename": _bytes_list_feature(filename), - "image/format": _bytes_list_feature("png"), - "image/height": _int64_list_feature(100), - "image/width": _int64_list_feature(100), - "image/channels": _int64_list_feature(3), - "image/segmentation/class/encoded": (_bytes_list_feature(seg_data)), - "image/segmentation/class/format": _bytes_list_feature("png"), - } - ) - ) - - if not os.path.exists("./test_record"): - os.mkdir("./test_record") - with tf.io.TFRecordWriter("./test_record/val-test.record") as writer: - writer.write(example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", {"VOCRecord": {"root": "./test_record"}}, {"ParseDecodeVoc": {}}, None - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 100, 100, 3)) - self.assertEqual(labels[0].shape, (100, 100, 1)) - - from neural_compressor.experimental.data.transforms.transform import ParseDecodeVocTransform - - func = ParseDecodeVocTransform() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - os.remove("./test_record/val-test.record") - os.remove("test.jpg") - os.remove("test.png") - shutil.rmtree("./test_record") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/data/test_tokenization.py b/test/data/test_tokenization.py deleted file mode 100644 index 0a8e1018cdc..00000000000 --- a/test/data/test_tokenization.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import shutil -import unittest - -from neural_compressor.experimental.data.transforms.tokenization import FullTokenizer -from neural_compressor.utils.utility import LazyImport - -tf = LazyImport("tensorflow") - -basic_text = ["un", "##aff", "##able"] - - -class TestFullTokenizer(unittest.TestCase): - @classmethod - def setUpClass(cls): - os.makedirs("val", exist_ok=True) - vocab_file = "val/temp.txt" - with tf.io.gfile.GFile(vocab_file, "w+") as f: - for vocab in basic_text: - f.write(vocab + "\n") - f.close() - - @classmethod - def tearDownClass(cls): - if os.path.exists("val"): - shutil.rmtree("val") - - def test_tokenizer(self): - tokenizer = FullTokenizer("val/temp.txt") - ids = [2, 1, 0] - tokens = basic_text[::-1] - tokens_to_ids = tokenizer.convert_tokens_to_ids(tokens) - self.assertEqual(tokens_to_ids, ids) - ids_to_tokens = tokenizer.convert_ids_to_tokens(ids) - self.assertEqual(ids_to_tokens, tokens) - split_tokens = tokenizer.tokenize("unaffable") - self.assertEqual(split_tokens, basic_text) - split_tokens = tokenizer.tokenize("example") - self.assertEqual(split_tokens, ["[UNK]"]) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/data/test_transform.py b/test/data/test_transform.py index 9775187e9ae..02798127008 100644 --- a/test/data/test_transform.py +++ b/test/data/test_transform.py @@ -996,212 +996,5 @@ def testRandomResizedCrop(self): TestONNXTransfrom.transforms["RandomResizedCrop"](**args) -class TestImagenetTransform(unittest.TestCase): - def testParseDecodeImagenet(self): - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpeg") - - image = tf.compat.v1.gfile.FastGFile("test.jpeg", "rb").read() - label = 10 - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), - "image/class/label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])), - "image/object/bbox/xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/ymin": tf.train.Feature(float_list=tf.train.FloatList(value=[20])), - "image/object/bbox/xmax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - "image/object/bbox/ymax": tf.train.Feature(float_list=tf.train.FloatList(value=[200])), - } - ) - ) - with tf.io.TFRecordWriter("test-0-of-0") as writer: - writer.write(example.SerializeToString()) - eval_dataset = create_dataset("tensorflow", {"ImageRecord": {"root": "./"}}, {"ParseDecodeImagenet": {}}, None) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 100, 100, 3)) - self.assertEqual(labels[0][0], 10) - break - - from neural_compressor.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet - - func = ParseDecodeImagenet() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - from neural_compressor.experimental.data.datasets.dataset import TensorflowTFRecordDataset - - ds = TensorflowTFRecordDataset("test-0-of-0", func) - dataloader = DATALOADERS["tensorflow"](dataset=ds, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 100, 100, 3)) - self.assertEqual(labels[0][0], 10) - break - - os.remove("test-0-of-0") - os.remove("test.jpeg") - - -class TestCOCOTransform(unittest.TestCase): - def testCOCODecode(self): - tf.compat.v1.disable_eager_execution() - - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpeg") - - image = tf.compat.v1.gfile.FastGFile("test.jpeg", "rb").read() - source_id = "000000397133.jpg".encode("utf-8") - label = "person".encode("utf-8") - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), - "image/object/class/text": tf.train.Feature(bytes_list=tf.train.BytesList(value=[label])), - "image/source_id": tf.train.Feature(bytes_list=tf.train.BytesList(value=[source_id])), - "image/object/bbox/xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/ymin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/xmax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - "image/object/bbox/ymax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - } - ) - ) - - with tf.io.TFRecordWriter("test.record") as writer: - writer.write(example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", - {"COCORecord": {"root": "test.record"}}, - { - "ParseDecodeCoco": {}, - "Resize": {"size": 50}, - "Cast": {"dtype": "int64"}, - "CropToBoundingBox": {"offset_height": 2, "offset_width": 2, "target_height": 5, "target_width": 5}, - "CenterCrop": {"size": [4, 4]}, - "RandomResizedCrop": {"size": [4, 5]}, - }, - None, - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 4, 5, 3)) - self.assertEqual(labels[0].shape, (1, 1, 4)) - - from neural_compressor.experimental.data.datasets.coco_dataset import ParseDecodeCoco - from 
neural_compressor.experimental.data.transforms.transform import TensorflowResizeWithRatio - - func = ParseDecodeCoco() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - func = ParseDecodeCoco() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - func = TensorflowResizeWithRatio(**{"padding": True}) - out = func(out) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (1365, 1365, 3)) - - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])), - "image/source_id": tf.train.Feature(bytes_list=tf.train.BytesList(value=[source_id])), - "image/object/bbox/xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/ymin": tf.train.Feature(float_list=tf.train.FloatList(value=[10])), - "image/object/bbox/xmax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - "image/object/bbox/ymax": tf.train.Feature(float_list=tf.train.FloatList(value=[100])), - } - ) - ) - - with tf.io.TFRecordWriter("test2.record") as writer: - writer.write(example.SerializeToString()) - self.assertRaises( - ValueError, create_dataset, "tensorflow", {"COCORecord": {"root": "test2.record"}}, None, None - ) - - os.remove("test2.record") - os.remove("test.record") - os.remove("test.jpeg") - - -class TestVOCTransform(unittest.TestCase): - def testVOCDecode(self): - import shutil - - tf.compat.v1.disable_eager_execution() - - def _bytes_list_feature(values): - import six - - def norm2bytes(value): - return value.encode() if isinstance(value, str) and six.PY3 else value - - return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) - - def _int64_list_feature(values): - import collections - import collections.abc - - if not isinstance(values, collections.abc.Iterable): - values = [values] - return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) - - random_array = np.random.random_sample([100, 100, 3]) * 255 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.jpg") - random_array = np.random.random_sample([100, 100, 3]) * 0 - random_array = random_array.astype(np.uint8) - im = Image.fromarray(random_array) - im.save("test.png") - image_data = tf.compat.v1.gfile.GFile("test.jpg", "rb").read() - seg_data = tf.compat.v1.gfile.GFile("test.png", "rb").read() - filename = "test" - - example = tf.train.Example( - features=tf.train.Features( - feature={ - "image/encoded": _bytes_list_feature(image_data), - "image/filename": _bytes_list_feature(filename), - "image/format": _bytes_list_feature("png"), - "image/height": _int64_list_feature(100), - "image/width": _int64_list_feature(100), - "image/channels": _int64_list_feature(3), - "image/segmentation/class/encoded": (_bytes_list_feature(seg_data)), - "image/segmentation/class/format": _bytes_list_feature("png"), - } - ) - ) - - if not os.path.exists("./test_record"): - os.mkdir("./test_record") - with tf.io.TFRecordWriter("./test_record/val-test.record") as writer: - writer.write(example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", {"VOCRecord": {"root": "./test_record"}}, {"ParseDecodeVoc": {}}, None - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=1) - for inputs, labels in dataloader: - self.assertEqual(inputs.shape, (1, 100, 100, 3)) - 
self.assertEqual(labels[0].shape, (100, 100, 1)) - - from neural_compressor.experimental.data.transforms.transform import ParseDecodeVocTransform - - func = ParseDecodeVocTransform() - out = func(example.SerializeToString()) - self.assertEqual(out[0].eval(session=tf.compat.v1.Session()).shape, (100, 100, 3)) - - os.remove("./test_record/val-test.record") - os.remove("test.jpg") - os.remove("test.png") - shutil.rmtree("./test_record") - - if __name__ == "__main__": unittest.main() diff --git a/test/distillation/test_distillation_1.x.py b/test/distillation/test_distillation_1.x.py deleted file mode 100644 index 802f81148a5..00000000000 --- a/test/distillation/test_distillation_1.x.py +++ /dev/null @@ -1,266 +0,0 @@ -import copy -import os -import shutil -import unittest - -import tensorflow as tf -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.config import DistillationConfig, KnowledgeDistillationLossConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_distillation - framework: pytorch - - distillation: - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - frequency: 1 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - KnowledgeDistillationLoss: - temperature: 1.0 - loss_types: ['CE', 'KL'] - loss_weights: [0.5, 0.5] - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml_1(): - fake_yaml = """ - model: - name: imagenet_distillation - framework: tensorflow - - distillation: - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - frequency: 1 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - KnowledgeDistillationLoss: - temperature: 1.0 - loss_types: ['CE', 'CE'] - loss_weights: [0.5, 0.5] - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 224, 224, 3] - label: True - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 224, 224, 3] - label: True - """ - with open("fake_1.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml_2(): - fake_yaml = """ - model: - name: imagenet_distillation - framework: pytorch - - distillation: - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - IntermediateLayersKnowledgeDistillationLoss: - layer_mappings: [ - ['layer1.0', ], - [['layer1.1.conv1', ''], ['layer1.1.conv1', '0']], - ] - loss_types: ['KL', 'MSE'] - loss_weights: [0.5, 0.5] - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - """ - with open("fake_2.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class TestDistillation(unittest.TestCase): - student_model = torchvision.models.resnet18() - teacher_model = 
torchvision.models.resnet34() - - student_model_tf = tf.keras.applications.mobilenet.MobileNet() - teacher_model_tf = tf.keras.applications.mobilenet_v2.MobileNetV2() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - build_fake_yaml_1() - build_fake_yaml_2() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - os.remove("fake_1.yaml") - os.remove("fake_2.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_distillation(self): - from neural_compressor.conf.config import DistillationConf - from neural_compressor.experimental import Distillation - - conf = DistillationConf("fake.yaml") - distiller = Distillation(conf) - distiller = Distillation() - - from neural_compressor.conf.config import conf - - conf.model.framework = "pytorch" - conf.distillation.train.end_epoch = 3 - conf.distillation.train.iteration = 10 - conf.distillation.train.optimizer = { - "SGD": {"learning_rate": 0.001, "momentum": 0.1, "nesterov": True, "weight_decay": 0.001} - } - conf.distillation.train.dataloader.batch_size = 30 - conf.distillation.train.dataloader.dataset = {"dummy": {"shape": [128, 3, 224, 224], "label": True}} - conf.evaluation.accuracy.dataloader.batch_size = 30 - conf.evaluation.accuracy.dataloader.dataset = {"dummy": {"shape": [128, 3, 224, 224], "label": True}} - distiller = Distillation(conf) - distiller.student_model = self.student_model - distiller.teacher_model = self.teacher_model - print("student model: {}".format(distiller.student_model)) - distilled_model = distiller.fit() - distilled_model.save("./saved") - stat = torch.load("./saved/best_model.pt") - self.student_model.load_state_dict(stat) - - def test_distillation_intermediate_layers(self): - from neural_compressor.conf.config import DistillationConf - from neural_compressor.experimental import Distillation, common - - conf = DistillationConf("fake_2.yaml") - conf.usr_cfg.distillation.train.criterion.IntermediateLayersKnowledgeDistillationLoss.layer_mappings[1][1][ - -1 - ] = lambda x: x[:, :2, ...] 
- distiller = Distillation(conf) - distiller.student_model = common.Model(self.student_model) - distiller.teacher_model = common.Model(self.teacher_model) - print("student model: {}".format(distiller.student_model)) - _ = distiller.fit() - - def test_distillation_external(self): - from neural_compressor.experimental.common.criterion import TensorflowKnowledgeDistillationLossExternal - - criterion = TensorflowKnowledgeDistillationLossExternal() - criterion.teacher_model_forward(None) - y_true = [[0, 1, 0]] - y_pred = [[0.05, 0.95, 0]] - criterion.teacher_student_loss_cal(y_pred, y_true) - criterion.student_targets_loss_cal(y_pred, y_true) - - def test_distillation_external_new_API(self): - from neural_compressor.training import prepare_compression - - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(100, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - criterion = nn.CrossEntropyLoss() - distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) - optimizer = torch.optim.SGD(self.student_model.parameters(), lr=0.0001) - conf = DistillationConfig(self.teacher_model, distillation_criterion) - compression_manager = prepare_compression(copy.deepcopy(self.student_model), conf) - model = compression_manager.model - - epochs = 3 - iters = 10 - for nepoch in range(epochs): - model.train() - cnt = 0 - compression_manager.callbacks.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - compression_manager.callbacks.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - loss = compression_manager.callbacks.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - optimizer.step() - compression_manager.callbacks.on_step_end() - if cnt >= iters: - break - compression_manager.callbacks.on_epoch_end() - - model.save("./saved") - stat = torch.load("./saved/best_model.pt") - opt_model = self.student_model.load_state_dict(stat) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/distillation/test_distillation_2.x.py b/test/distillation/test_distillation_2.x.py index 3d433a5930c..51d57dea8be 100644 --- a/test/distillation/test_distillation_2.x.py +++ b/test/distillation/test_distillation_2.x.py @@ -11,7 +11,6 @@ from neural_compressor.adaptor import FRAMEWORKS from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.conf.dotdict import DotDict from neural_compressor.config import ( DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig, @@ -22,6 +21,7 @@ from neural_compressor.data.dataloaders.tensorflow_dataloader import TensorflowDataLoader from neural_compressor.training import prepare_compression from neural_compressor.utils import create_obj_from_config +from neural_compressor.utils.utility import DotDict class TestDistillation(unittest.TestCase): diff --git a/test/distillation/test_self_distillation_2.x.py b/test/distillation/test_self_distillation_2.x.py index d36a986734e..37dbab9f00d 100644 --- a/test/distillation/test_self_distillation_2.x.py +++ b/test/distillation/test_self_distillation_2.x.py @@ -7,76 +7,12 @@ import torchvision from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: self_distillation - framework: pytorch - - distillation: - train: - start_epoch: 0 - end_epoch: 3 - 
iteration: 10 - frequency: 1 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - SelfKnowledgeDistillationLoss: - layer_mappings: [ - [['resblock.1.feature.output', 'resblock.deepst.feature.output'], - ['resblock.2.feature.output','resblock.deepst.feature.output']], - [['resblock.2.fc','resblock.deepst.fc'], - ['resblock.3.fc','resblock.deepst.fc']], - [['resblock.1.fc','resblock.deepst.fc'], - ['resblock.2.fc','resblock.deepst.fc'], - ['resblock.3.fc','resblock.deepst.fc']] - ] - temperature: 3.0 - loss_types: ['L2', 'KL', 'CE'] - loss_weights: [0.5, 0.05, 0.02] - add_origin_loss: True - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) +from neural_compressor.data.dataloaders.pytorch_dataloader import PyTorchDataLoader class TestSelfDistillation(unittest.TestCase): model = torchvision.models.resnet50() - @classmethod - def setUpClass(cls): - build_fake_yaml() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - def test_self_distillation(self): import copy diff --git a/test/distributed/test_distributed_pt_train.py b/test/distributed/test_distributed_pt_train.py deleted file mode 100644 index 598b7ffae8a..00000000000 --- a/test/distributed/test_distributed_pt_train.py +++ /dev/null @@ -1,149 +0,0 @@ -import os -import shutil -import signal -import subprocess -import unittest - -import torchvision - - -def build_fake_py(): - fake_py = """ -import os -import shutil -import unittest - -import torch -import torchvision -import torch.nn as nn -import horovod.torch as hvd - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - - -class TestPruning(unittest.TestCase): - - model = torchvision.models.resnet18() - - def test_pruning_internal(self): - from neural_compressor.experimental import Pruning, common - prune = Pruning('fake.yaml') - - prune.model = self.model - _ = prune() - print('rank {} in size {}'.format(hvd.rank(), hvd.size())) - -if __name__ == "__main__": - unittest.main() - """ - with open("fake.py", "w", encoding="utf-8") as f: - f.write(fake_py) - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - train: - start_epoch: 0 - end_epoch: 4 - iteration: 10 - dataloader: - batch_size: 30 - distributed: True - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - target_sparsity: 0.6 - prune_type: gradient_sensitivity - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - distributed: True - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class 
TestDistributed(unittest.TestCase): - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - build_fake_py() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - os.remove("fake.py") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_distributed(self): - distributed_cmd = "horovodrun -np 2 python fake.py" - p = subprocess.Popen( - distributed_cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True - ) # nosec - try: - out, error = p.communicate() - import re - - matches = re.findall(r".*rank ([01]) in size 2.*", out.decode("utf-8")) - assert "0" in matches - assert "1" in matches - except KeyboardInterrupt: - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - assert 0 - - def test_single_node(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake.yaml") - - prune.model = self.model - _ = prune() - # assert hvd hook is registered. pruner has 2 on_train_begin hooks: hvd and prune - assert len(prune.hooks_dict["on_train_begin"]) == 2 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/graph_optimization/test_graph_optimization.py b/test/graph_optimization/test_graph_optimization.py deleted file mode 100644 index 072b90f8281..00000000000 --- a/test/graph_optimization/test_graph_optimization.py +++ /dev/null @@ -1,1011 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import platform -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from packaging.version import Version -from tensorflow.compat.v1 import graph_util -from tensorflow.core.framework import graph_pb2 -from tensorflow.python.framework import dtypes - -from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper -from neural_compressor.adaptor.tf_utils.util import disable_random -from neural_compressor.utils.utility import CpuInfo - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - graph_optimization: - precisions: [bf16] - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_2(): - fake_yaml_2 = """ - model: - name: fake_yaml_2 - framework: tensorflow - inputs: input - outputs: op_to_store - graph_optimization: - precisions: [bf16] - """ - y = yaml.load(fake_yaml_2, Loader=yaml.SafeLoader) - with open("fake_yaml_2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_3(): - fake_yaml_3 = """ - model: - name: fake_yaml_3 - framework: tensorflow - inputs: input - outputs: op_to_store - graph_optimization: - precisions: - - bf16 - - fp32 - """ - y = yaml.load(fake_yaml_3, Loader=yaml.SafeLoader) - with open("fake_yaml_3.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_4(): - fake_yaml_4 = """ - model: - name: fake_yaml_4 - framework: pytorch - inputs: input - outputs: op_to_store - graph_optimization: - precisions: [bf16] - """ - y = yaml.load(fake_yaml_4, Loader=yaml.SafeLoader) - with open("fake_yaml_4.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_5(): - fake_yaml = """ - model: - name: fake_yaml_5 - framework: tensorflow - inputs: 
input - outputs: op_to_store - device: cpu - graph_optimization: - precisions: [bf16] - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: - compare_label: False - weight: [1, 0] - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - exit_policy: - max_trials: 3 - timeout: 50 - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_5.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_6(): - fake_yaml = """ - model: - name: fake_yaml_6 - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - graph_optimization: - precisions: [bf16] - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: - compare_label: False - weight: [1, 0] - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - exit_policy: - max_trials: 3 - timeout: 50 - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_6.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class MyMetric(object): - def __init__(self, *args): - self.pred_list = [] - self.label_list = [] - self.samples = 0 - - def update(self, predict, label): - self.pred_list.extend(predict) - self.label_list.extend(label) - self.samples += len(label) - - def reset(self): - self.pred_list = [] - self.label_list = [] - self.samples = 0 - - def result(self): - pred = np.array(self.pred_list) - label = np.array(self.label_list) - ones = np.ones(pred.ndim, dtype=np.int32) - ones[0] = label.shape[0] - label = np.array(self.label_list).reshape(ones) - correct_num = np.sum(pred == label) - return correct_num / self.samples - - -class TestGraphOptimizationOnNonBF16Host(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - @unittest.skipIf(tf.__version__ < "2.0", "does not support on 1.15up3") - def test_bf16_cfg_on_non_bf16_enabled_host(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.conf.config import GraphOptConf - from neural_compressor.experimental import GraphOptimization, common - - conf = GraphOptConf("fake_yaml.yaml") - graph_optimizer = GraphOptimization(conf) - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - 
graph_optimizer.eval_dataloader = common.DataLoader(dataset) - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - if CpuInfo().bf16: - self.assertEqual(found_cast_op, True) - else: - self.assertEqual(found_cast_op, False) - - -@unittest.skipIf(tf.__version__ < "2.0", "does not support on 1.15up3") -class TestGraphOptimization(unittest.TestCase): - @classmethod - def setUpClass(self): - os.environ["FORCE_BF16"] = "1" - if platform.system().lower() == "windows": - self.skipTest(self, "Graph Optimization NOT Support Windows Yet") - build_fake_yaml() - build_fake_yaml_2() - build_fake_yaml_3() - build_fake_yaml_4() - build_fake_yaml_5() - build_fake_yaml_6() - - @classmethod - def tearDownClass(self): - del os.environ["FORCE_BF16"] - os.remove("fake_yaml.yaml") - os.remove("fake_yaml_2.yaml") - os.remove("fake_yaml_3.yaml") - os.remove("fake_yaml_4.yaml") - os.remove("fake_yaml_5.yaml") - os.remove("fake_yaml_6.yaml") - - def test_not_supported_model(self): - import neural_compressor.adaptor.pytorch as nc_torch - - PT_VERSION = nc_torch.get_torch_version() - if PT_VERSION > Version("1.8.0-rc1") and PT_VERSION < Version("1.9.0-rc1"): - pass - else: - import torchvision - - model = torchvision.models.resnet18() - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization("fake_yaml_4.yaml") - graph_optimizer.input = "input" - graph_optimizer.output = "op_to_store" - graph_optimizer.model = model - try: - output_graph = graph_optimizer.fit() - except SystemExit: - pass - - def test_not_supported_model_without_yaml(self): - import neural_compressor.adaptor.pytorch as nc_torch - - PT_VERSION = nc_torch.get_torch_version() - if PT_VERSION > Version("1.8.0-rc1") and PT_VERSION < Version("1.9.0-rc1"): - pass - else: - import torchvision - - model = torchvision.models.resnet18() - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization() - graph_optimizer.input = "input" - graph_optimizer.output = "op_to_store" - try: - graph_optimizer.model = model - except SystemExit: - pass - - def test_not_supported_model_with_conf(self): - import neural_compressor.adaptor.pytorch as nc_torch - - PT_VERSION = nc_torch.get_torch_version() - if PT_VERSION > Version("1.8.0-rc1") and PT_VERSION < Version("1.9.0-rc1"): - pass - else: - import torchvision - - from neural_compressor.conf.config import conf - from neural_compressor.experimental import Graph_Optimization - - model = torchvision.models.resnet18() - - conf.model.inputs = "input" - conf.model.outputs = "op_to_store" - conf.graph_optimization.precisions = "bf16" - graph_optimizer = Graph_Optimization(conf) - try: - graph_optimizer.model = model - except SystemExit: - pass - - @disable_random() - def test_graph_optimization_with_evaluation(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - 
max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import GraphOptimization, common - - graph_optimizer = GraphOptimization("fake_yaml.yaml") - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - graph_optimizer.eval_dataloader = common.DataLoader(dataset) - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_without_evaluation(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization, common - - graph_optimizer = Graph_Optimization("fake_yaml_2.yaml") - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_without_yaml(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = 
tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - relu62 = tf.nn.relu6(conv_bias, name="op2_to_store") - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, - input_graph_def=sess.graph_def, - output_node_names=[relu6.name.split(":")[0], relu62.name.split(":")[0]], - ) - - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization() - graph_optimizer.precisions = "fp32" - graph_optimizer.input = "input" - graph_optimizer.output = "op_to_store, op2_to_store" - - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - input_name = graph_optimizer.input - output_name = graph_optimizer.output - self.assertEqual(found_cast_op, False) - self.assertEqual(input_name, "input") - self.assertEqual(output_name, "op_to_store, op2_to_store") - - @disable_random() - def test_graph_optimization_with_yaml(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization("fake_yaml_3.yaml") - - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_with_custom_metric_without_postprocess(self): - os.environ["FORCE_BF16"] = "1" - - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = 
tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization, common - - graph_optimizer = Graph_Optimization("fake_yaml_3.yaml") - graph_optimizer.metric = common.Metric(MyMetric) - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - graph_optimizer.precisions = ["fp32", "bf16"] - graph_optimizer.eval_dataloader = common.DataLoader(dataset) - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - self.assertIsNotNone(output_graph.graph_def) - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_without_custom_metric_with_postprocess(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.data import Postprocess - from neural_compressor.experimental import Graph_Optimization, common, data - - graph_optimizer = Graph_Optimization("fake_yaml.yaml") - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - graph_optimizer.eval_dataloader = common.DataLoader(dataset) - graph_optimizer.postprocess = Postprocess(data.transforms.transform.TensorflowWrapFunction(np.array)) - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - 
self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_with_eval_func(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization, common - - graph_optimizer = Graph_Optimization("fake_yaml.yaml") - - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - graph_optimizer.eval_dataloader = common.DataLoader(dataset) - graph_optimizer.model = output_graph_def - graph_optimizer.eval_func = None - - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_multimetric_noweight(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization, common - - graph_optimizer = Graph_Optimization("fake_yaml_5.yaml") - - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - graph_optimizer.eval_dataloader = common.DataLoader(dataset) - 
graph_optimizer.model = output_graph_def - graph_optimizer.eval_func = None - - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_multimetric_weight(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 300, 300, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization, common - - graph_optimizer = Graph_Optimization("fake_yaml_6.yaml") - - dataset = graph_optimizer.dataset("dummy", shape=(100, 300, 300, 16), label=True) - graph_optimizer.eval_dataloader = common.DataLoader(dataset) - graph_optimizer.model = output_graph_def - graph_optimizer.eval_func = None - - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_with_force_bf16(self): - os.environ["FORCE_BF16"] = "1" - - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from 
neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization() - graph_optimizer.input = "input" - graph_optimizer.output = "op_to_store" - - graph_optimizer.precisions = "bf16" - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, True) - - @disable_random() - def test_graph_optimization_with_bn(self): - input_constant_name = "input_constant" - relu_name = "relu" - float_graph_def = graph_pb2.GraphDef() - input_constant = Helper.create_constant_node( - input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 6] - ) - float_graph_def.node.extend([input_constant]) - relu_node = Helper.create_node("Relu", relu_name, [input_constant_name]) - Helper.set_attr_dtype(relu_node, "T", dtypes.float32) - float_graph_def.node.extend([relu_node]) - - b_constant_name = "b_constant" - conv2d_name = "conv2d_1" - b_constant = Helper.create_constant_node( - b_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 6] - ) - float_graph_def.node.extend([b_constant]) - - conv2d_node = Helper.create_node("Conv2D", conv2d_name, [relu_name, b_constant_name]) - Helper.set_attr_dtype(conv2d_node, "T", dtypes.float32) - Helper.set_attr_string(conv2d_node, "padding", b"SAME") - Helper.set_attr_int_list(conv2d_node, "strides", [1, 1, 1, 1]) - - float_graph_def.node.extend([conv2d_node]) - - bias_add_name = "bias_add" - offset_constant_name = "offset_constant" - - offset_constant = Helper.create_constant_node( - offset_constant_name, value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6] - ) - float_graph_def.node.extend([offset_constant]) - - bias_add_node = Helper.create_node("BiasAdd", bias_add_name, [conv2d_name, offset_constant_name]) - Helper.set_attr_dtype(bias_add_node, "T", dtypes.float32) - float_graph_def.node.extend([bias_add_node]) - - bn_scale_name = "bn_scale" - bn_scale_node = Helper.create_constant_node( - bn_scale_name, value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6] - ) - bn_offset_name = "bn_offset" - bn_offset_node = Helper.create_constant_node( - bn_offset_name, value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6] - ) - bn_mean_name = "bn_mean" - bn_mean_node = Helper.create_constant_node( - bn_mean_name, - value=[ - 1, - 2, - ], - dtype=dtypes.float32, - shape=[ - 2, - ], - ) - bn_var_name = "bn_var" - bn_var_node = Helper.create_constant_node(bn_var_name, value=[], dtype=dtypes.float32, shape=[0]) - fused_bn_node_name = "bn" - fused_bn_node = Helper.create_node( - "FusedBatchNormV3", - fused_bn_node_name, - [bias_add_name, bn_scale_name, bn_offset_name, bn_mean_name, bn_var_name], - ) - Helper.set_attr_dtype(fused_bn_node, "T", dtypes.float32) - Helper.set_attr_dtype(fused_bn_node, "U", dtypes.float32) - float_graph_def.node.extend([fused_bn_node, bn_scale_node, bn_offset_node, bn_mean_node, bn_var_node]) - - post_relu_name = "post_relu" - post_relu_node = Helper.create_node("Relu", post_relu_name, [fused_bn_node_name]) - Helper.set_attr_dtype(post_relu_node, "T", dtypes.float32) - - float_graph_def.node.extend([post_relu_node]) - - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization() - - graph_optimizer.precisions = "bf16" - graph_optimizer.model = float_graph_def - output_graph = graph_optimizer.fit() - bn_bf16 = False 
- for i in output_graph.graph_def.node: - if i.op == "FusedBatchNormV3" and i.attr["T"].type == dtypes.bfloat16: - bn_bf16 = True - if i.op == "Conv2D" and i.attr["T"].type == dtypes.bfloat16: - bn_bf16 = True - self.assertEqual(bn_bf16, True) - - -class TestGraphOptmizationFP32(unittest.TestCase): - @disable_random() - def test_graph_optimization_without_yaml_without_precisions(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization() - graph_optimizer.input = "input" - graph_optimizer.output = "op_to_store" - - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - precision = graph_optimizer.precisions - self.assertEqual(found_cast_op, False) - self.assertEqual(precision, "fp32") - - @disable_random() - def test_graph_optimization_without_yaml_with_precisions(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization - - 
graph_optimizer = Graph_Optimization() - graph_optimizer.precisions = "fp32" - - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, False) - - @disable_random() - def test_graph_optimization_fp32_only_with_force_bf16(self): - os.environ["FORCE_BF16"] = "1" - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Graph_Optimization - - graph_optimizer = Graph_Optimization() - graph_optimizer.input = "input" - graph_optimizer.output = "op_to_store" - - graph_optimizer.model = output_graph_def - output_graph = graph_optimizer.fit() - found_cast_op = False - - for i in output_graph.graph_def.node: - if i.op == "Cast": - found_cast_op = True - break - - self.assertEqual(found_cast_op, False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/ipex/test_adaptor_ipex.py b/test/ipex/test_adaptor_ipex.py index 7cb3555e2e6..c40700ed45c 100644 --- a/test/ipex/test_adaptor_ipex.py +++ b/test/ipex/test_adaptor_ipex.py @@ -9,9 +9,7 @@ import neural_compressor.adaptor.pytorch as nc_torch from neural_compressor import mix_precision, set_workspace -from neural_compressor.conf.pythonic_config import config from neural_compressor.config import MixedPrecisionConfig -from neural_compressor.experimental import common from neural_compressor.utils.pytorch import load from neural_compressor.utils.utility import LazyImport @@ -95,57 +93,12 @@ def __iter__(self): yield torch.randn(1, 3, 224, 224).to(self.device) -@unittest.skipIf( - PT_VERSION >= Version("1.12.0").release or PT_VERSION < Version("1.10.0").release, - "Please use Intel extension for Pytorch version 1.10 or 1.11", -) -class TestPytorchIPEX_1_10_Adaptor(unittest.TestCase): - @classmethod - def setUpClass(self): - config.quantization.backend = "ipex" - config.quantization.approach = "post_training_static_quant" - config.quantization.use_bf16 = False - set_workspace("./saved") - - @classmethod - def tearDownClass(self): - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_tuning_ipex(self): - from neural_compressor.experimental import Quantization - - model = M() - quantizer = 
Quantization(config) - quantizer.model = model - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = True - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = torch.utils.data.DataLoader(dataset) - quantizer.calib_dataloader = dataloader - quantizer.eval_dataloader = dataloader - nc_model = quantizer.fit() - nc_model.save("./saved") - self.assertTrue(isinstance(nc_model._model, torch.jit.ScriptModule)) - q_model = load("./saved", model, dataloader=dataloader) - from neural_compressor.experimental import Benchmark - - evaluator = Benchmark(config) - evaluator.model = q_model - evaluator.b_dataloader = dataloader - evaluator.fit("accuracy") - - @unittest.skipIf( PT_VERSION < Version("1.12.0").release, "Please use Intel extension for Pytorch version higher or equal to 1.12" ) class TestPytorchIPEX_1_12_Adaptor(unittest.TestCase): @classmethod def setUpClass(self): - config.quantization.backend = "ipex" - config.quantization.accuracy_criterion.tolerable_loss = 0.0001 - config.quantization.accuracy_criterion.higher_is_better = False - config.quantization.approach = "post_training_static_quant" - config.quantization.use_bf16 = False set_workspace("./saved") @classmethod @@ -153,53 +106,6 @@ def tearDownClass(self): shutil.rmtree("./saved", ignore_errors=True) shutil.rmtree("runs", ignore_errors=True) - def test_tuning_ipex(self): - from neural_compressor.experimental import Quantization - - model = M() - quantizer = Quantization(config) - quantizer.model = model - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = False - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = torch.utils.data.DataLoader(dataset) - quantizer.calib_dataloader = dataloader - quantizer.calib_func = calib_func - quantizer.eval_dataloader = dataloader - nc_model = quantizer.fit() - sparsity = nc_model.report_sparsity() - self.assertTrue(sparsity[-1] >= 0.0) - nc_model.save("./saved") - self.assertTrue(isinstance(nc_model._model, torch.jit.ScriptModule)) - q_model = load("./saved", model, dataloader=dataloader) - from neural_compressor.experimental import Benchmark - - evaluator = Benchmark(config) - evaluator.model = q_model - evaluator.b_dataloader = dataloader - evaluator.fit("accuracy") - - def test_tuning_ipex_for_ipex_autotune_func(self): - from neural_compressor.experimental import Quantization - - model = M() - if PT_VERSION < Version("2.1").release: - qconfig = ipex.quantization.default_static_qconfig - else: - qconfig = ipex.quantization.default_static_qconfig_mapping - prepared_model = ipex.quantization.prepare( - model, qconfig, example_inputs=torch.ones(1, 3, 224, 224), inplace=False - ) - quantizer = Quantization(config) - quantizer.model = prepared_model - quantizer.conf.usr_cfg.tuning.exit_policy["max_trials"] = 5 - quantizer.conf.usr_cfg.tuning.exit_policy["timeout"] = 100 - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = torch.utils.data.DataLoader(dataset) - quantizer.calib_dataloader = dataloader - quantizer.eval_dataloader = dataloader - nc_model = quantizer.fit() - self.assertTrue(isinstance(nc_model._model, torch.jit.ScriptModule)) - def test_copy_prepared_model(self): model = M() if PT_VERSION < Version("2.1").release: @@ -212,42 +118,6 @@ def test_copy_prepared_model(self): copy_model = torch_utils.util.auto_copy(prepared_model) self.assertTrue(isinstance(copy_model, torch.nn.Module)) - def test_bf16(self): - from neural_compressor.experimental import Quantization - - model 
= M() - if PT_VERSION < Version("2.1").release: - qconfig = ipex.quantization.default_static_qconfig - else: - qconfig = ipex.quantization.default_static_qconfig_mapping - prepared_model = ipex.quantization.prepare( - model, qconfig, example_inputs=torch.ones(1, 3, 224, 224), inplace=False - ) - config.quantization.use_bf16 = True - config.quantization.performance_only = True - quantizer = Quantization(config) - quantizer.model = model - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = torch.utils.data.DataLoader(dataset) - quantizer.calib_dataloader = dataloader - quantizer.eval_dataloader = dataloader - nc_model = quantizer.fit() - self.assertTrue(isinstance(nc_model._model, torch.jit.ScriptModule)) - - def test_example_inputs(self): - from neural_compressor.experimental import Quantization - - model = M() - config.quantization.example_inputs = torch.randn([1, 3, 224, 224]) - quantizer = Quantization(config) - quantizer.model = model - quantizer.conf.usr_cfg.tuning.exit_policy["performance_only"] = False - dataset = quantizer.dataset("dummy", (100, 3, 224, 224), label=True) - dataloader = torch.utils.data.DataLoader(dataset) - quantizer.calib_dataloader = dataloader - nc_model = quantizer.fit() - self.assertTrue(isinstance(nc_model._model, torch.jit.ScriptModule)) - def test_new_API(self): model = M() from neural_compressor import PostTrainingQuantConfig, quantization diff --git a/test/itex/test_tensorflow_itex_basic.py b/test/itex/test_tensorflow_itex_basic.py deleted file mode 100644 index 7705a4acb09..00000000000 --- a/test/itex/test_tensorflow_itex_basic.py +++ /dev/null @@ -1,496 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import platform -import shutil -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.python.platform import gfile - -from neural_compressor.adaptor.tf_utils.util import disable_random, version1_gte_version2, version1_lt_version2 -from neural_compressor.experimental import Benchmark, Quantization, common - - -def build_fake_yaml(fake_yaml, save_path, **kwargs): - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open(file=save_path, mode=kwargs["mode"], encoding=kwargs["encoding"]) as f: - yaml.dump(y, f) - - -@unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") -class TestItexEnabling(unittest.TestCase): - @classmethod - def setUpClass(self): - os.system("rm *.log") - fake_yaml_1 = """ - model: - name: fake_model_cpu - framework: tensorflow_itex - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: workspace_1 - """ - - fake_yaml_2 = """ - model: - name: fake_model_gpu - framework: tensorflow_itex - inputs: input - device: gpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - performance: - warmup: 10 - iteration: 100 - configs: - cores_per_instance: 1 - num_of_instance: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: workspace_2 - """ - - fake_yaml_3 = """ - model: - name: fake_model_default_device - framework: tensorflow_itex - inputs: 
input - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: workspace_3 - """ - build_fake_yaml(fake_yaml_1, "fake_yaml_1.yaml", mode="w", encoding="utf-8") - build_fake_yaml(fake_yaml_2, "fake_yaml_2.yaml", mode="w", encoding="utf-8") - build_fake_yaml(fake_yaml_3, "fake_yaml_3.yaml", mode="w", encoding="utf-8") - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml_1.yaml") - os.remove("fake_yaml_2.yaml") - os.remove("fake_yaml_3.yaml") - if version1_gte_version2(tf.version.VERSION, "2.8.0"): - shutil.rmtree("workspace_1") - shutil.rmtree("workspace_2") - shutil.rmtree("workspace_3") - - @disable_random() - def test_itex_convert_basic_default_device(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_3.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - dequant_count = 0 - quantize_count = 0 - for i in output_graph.graph_def.node: - if "min" in i.name or "max" in i.name: - self.assertEqual(i.op, "HostConst") - if i.op == "HostConst": - self.assertTrue("min" in i.name or "max" in i.name) - if i.op == "Dequantize": - dequant_count += 1 - if i.op == "QuantizeV2": - quantize_count += 1 - - self.assertEqual(dequant_count, 5) - self.assertEqual(quantize_count, 4) - - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_itex_convert_basic_cpu(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], 
initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - dequant_count = 0 - quantize_count = 0 - for i in output_graph.graph_def.node: - if i.op == "Dequantize": - dequant_count += 1 - if i.op == "QuantizeV2": - quantize_count += 1 - - self.assertEqual(dequant_count, 5) - self.assertEqual(quantize_count, 4) - - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_itex_convert_basic_gpu(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_2.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - dequant_count = 0 - quantize_count = 0 - for i in output_graph.graph_def.node: - if i.op == "HostConst": - self.assertTrue("min" in i.name or "max" in i.name) - if i.op == "Dequantize": - dequant_count += 1 - if i.op == "QuantizeV2": - quantize_count += 1 - - self.assertEqual(dequant_count, 5) - self.assertEqual(quantize_count, 4) - - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_depthwiseconv2d_case(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - 
conv = tf.nn.depthwise_conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - out_name = conv.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - reshape_counter = 0 - - for i in output_graph.graph_def.node: - if i.op == "Reshape": - reshape_counter += 1 - self.assertEqual(reshape_counter, 2) - - @disable_random() - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.8.0") or platform.system().lower() == "windows", - "Only supports tf greater 2.7.0 and Linux", - ) - def test_itex_benchmark_gpu(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - out_name = relu6.name.split(":")[0] - num_of_instance = 1 - cores_per_instance = 1 - log_file = "" - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_2.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - evaluator = Benchmark("fake_yaml_2.yaml") - evaluator.b_dataloader = common.DataLoader(dataset) - num_of_instance = evaluator.conf.usr_cfg.evaluation.performance.configs.num_of_instance - cores_per_instance = evaluator.conf.usr_cfg.evaluation.performance.configs.cores_per_instance - log_file = "{}_{}_{}.log".format(num_of_instance, cores_per_instance, 0) - if gfile.Exists(log_file): - os.remove(log_file) - evaluator.model = output_graph - evaluator("performance") - - found_multi_instance_log = False - for file_name in os.listdir(os.getcwd()): - if file_name == log_file: - found_multi_instance_log = True - break - - self.assertEqual(found_multi_instance_log, False) - - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_itex_convert_shared_y_pattern_normal_case(self): - x = tf.compat.v1.placeholder(tf.float32, 
[1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 1, 1, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.Add(x=top_relu, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - dequant_count = 0 - quantize_count = 0 - for i in output_graph.graph_def.node: - if i.op == "Dequantize": - dequant_count += 1 - if i.op == "QuantizeV2": - quantize_count += 1 - - self.assertEqual(dequant_count, 2) - self.assertEqual(quantize_count, 2) - - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_itex_convert_share_y_pattern_abnormal_case1(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.Add(x=relu, y=relu2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - dequant_count = 0 - quantize_count = 0 - for i in output_graph.graph_def.node: - if i.op == "Dequantize": - dequant_count += 1 - if i.op == "QuantizeV2": - quantize_count += 1 - - self.assertEqual(dequant_count, 4) - self.assertEqual(quantize_count, 3) - - @disable_random() - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Only supports tf greater 2.7.0") - def test_itex_convert_share_y_pattern_abnormal_case2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 
56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - dequant_count = 0 - quantize_count = 0 - for i in output_graph.graph_def.node: - if i.op == "Dequantize": - dequant_count += 1 - if i.op == "QuantizeV2": - quantize_count += 1 - - self.assertEqual(dequant_count, 2) - self.assertEqual(quantize_count, 2) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py b/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py index d9e35afc356..0bc8b94bb8c 100644 --- a/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py +++ b/test/itex/test_tensorflow_qdq_convert_to_onnx_qdq.py @@ -1,59 +1,19 @@ -# -# -*- coding: utf-8 -*- -# -import os import shutil import unittest import tensorflow as tf -import yaml from tensorflow.compat.v1 import graph_util from neural_compressor.adaptor.tf_utils.util import disable_random, version1_gte_version2, version1_lt_version2 -from neural_compressor.experimental import Benchmark, Quantization, common - - -def build_fake_yaml(fake_yaml, save_path, **kwargs): - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open(file=save_path, mode=kwargs["mode"], encoding=kwargs["encoding"]) as f: - yaml.dump(y, f) class TestConvertTensorflowQDQToOnnxQDQ(unittest.TestCase): @classmethod def setUpClass(self): - fake_yaml = """ - model: - name: fake_model_cpu - framework: tensorflow_itex - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: workspace - """ - build_fake_yaml(fake_yaml, "fake_yaml.yaml", mode="w", encoding="utf-8") + pass @classmethod def tearDownClass(self): - os.remove("fake_yaml.yaml") if version1_gte_version2(tf.version.VERSION, "2.8.0"): shutil.rmtree("workspace") diff --git a/test/metric/test_exp_metrics.py b/test/metric/test_exp_metrics.py deleted file mode 100644 index de6eb66c83e..00000000000 --- a/test/metric/test_exp_metrics.py +++ /dev/null @@ -1,1291 +0,0 @@ -"""Tests for the metrics module.""" - -import platform -import unittest - -import numpy as np - -from neural_compressor.experimental.metric import METRICS -from neural_compressor.experimental.metric.evaluate_squad import evaluate as evaluate_squad -from neural_compressor.experimental.metric.f1 import evaluate - - -class InCorrectMetric: - def 
__init__(self): - self.item = None - - -class CorrectMetric: - def __init__(self): - self.item = [] - - def update(self, samples): - self.item.append(samples) - - def result(self): - return 0 - - def reset(self): - self.item = [] - - -class CorrectMetric_v2: - def __init__(self): - self.item = [] - - def update(self, labels, preds): - self.item.append(preds) - - def result(self): - return "res", 0 - - def reset(self): - self.item = [] - - -class TestMetrics(unittest.TestCase): - def testUserMetric(self): - from neural_compressor.experimental import Benchmark, Graph_Optimization, Quantization, common - - for i in [Quantization(), Benchmark(), Graph_Optimization()]: - item = i - with self.assertRaises(AssertionError): - item.metric = InCorrectMetric() - item.framework = "tensorflow" - item.metric = common.Metric(CorrectMetric, str(i)) - - def testmIOU(self): - metrics = METRICS("tensorflow") - miou = metrics["mIOU"]() - preds = np.array([0, 0, 1, 1]) - labels = np.array([0, 1, 0, 1]) - miou.update(preds, labels) - self.assertAlmostEqual(miou.result(), 0.33333334) - - miou.reset() - preds = np.array([0, 0, 1, 1]) - labels = np.array([0, 1, 1, 1]) - miou.update(preds, labels) - self.assertAlmostEqual(miou.result(), 0.58333333) - - def testBLEU(self): - metrics = METRICS("tensorflow") - bleu = metrics["BLEU"]() - preds = ["Gutach: Mehr Sicherheit für Fußgänger"] - labels = ("Gutach: Noch mehr Sicherheit für Fußgänger",) - bleu.update(preds, labels) - self.assertAlmostEqual(bleu.result(), 51.1507809) - bleu.reset() - - preds = ["Dies wurde auch von Peter Arnold vom Offenburg District Office bestätigt."] - labels = ("Dies bestätigt auch Peter Arnold vom Landratsamt Offenburg.",) - bleu.update(preds, labels) - self.assertAlmostEqual(bleu.result(), 16.108992695) - with self.assertRaises(ValueError): - bleu.update(["a", "b"], ("c",)) - - def test_onnxrt_GLUE(self): - metrics = METRICS("onnxrt_qlinearops") - glue = metrics["GLUE"]("mrpc") - preds = [ - np.array( - [ - [-3.2443411, 3.0909934], - [2.0500996, -2.3100944], - [1.870293, -2.0741048], - [-2.8377204, 2.617834], - [2.008347, -2.0215416], - [-2.9693947, 2.7782154], - [-2.9949608, 2.7887983], - [-3.0623112, 2.8748074], - ] - ) - ] - labels = [np.array([1, 0, 0, 1, 0, 1, 0, 1])] - glue.update(preds, labels) - self.assertEqual(glue.result(), 0.875) - preds_2 = [ - np.array( - [ - [-3.1296735, 2.8356276], - [-3.172515, 2.9173899], - [-3.220131, 3.0916846], - [2.1452675, -1.9398905], - [1.5475761, -1.9101546], - [-2.9797182, 2.721741], - [-3.2052834, 2.9934788], - [-2.7451005, 2.622343], - ] - ) - ] - labels_2 = [np.array([1, 1, 1, 0, 0, 1, 1, 1])] - glue.update(preds_2, labels_2) - self.assertEqual(glue.result(), 0.9375) - - glue.reset() - glue.update(preds, labels) - self.assertEqual(glue.result(), 0.875) - - def test_tensorflow_F1(self): - metrics = METRICS("tensorflow") - F1 = metrics["F1"]() - preds = [1, 1, 1, 1] - labels = [0, 1, 1, 0] - - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.5) - - def test_squad_evaluate(self): - label = [ - { - "paragraphs": [ - { - "qas": [ - { - "answers": [ - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - ], - "question": "Which NFL team represented the AFC at Super Bowl 50?", - "id": "56be4db0acb8001400a502ec", - } - ] - } - ] - } - ] - preds = {"56be4db0acb8001400a502ec": "Denver Broncos"} - f1 = evaluate(preds, label) - self.assertEqual(f1, 100.0) - dataset = [ - { - "paragraphs": [ - { - 
"qas": [ - { - "answers": [ - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - ], - "question": "Which NFL team represented the AFC at Super Bowl 50?", - "id": "56be4db0acb8001400a502ec", - } - ] - } - ] - } - ] - predictions = {"56be4db0acb8001400a502ec": "Denver Broncos"} - f1_squad = evaluate_squad(dataset, predictions) - self.assertEqual(f1_squad["f1"], 100.0) - self.assertEqual(f1_squad["exact_match"], 100.0) - - def test_pytorch_F1(self): - metrics = METRICS("pytorch") - F1 = metrics["F1"]() - F1.reset() - preds = [1, 1] - labels = [2, 1, 1] - - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.8) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet") - def test_mxnet_F1(self): - metrics = METRICS("mxnet") - F1 = metrics["F1"]() - preds = [0, 1, 1, 1, 1, 0] - labels = [0, 1, 1, 1] - - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.8) - - def test_onnx_topk(self): - metrics = METRICS("onnxrt_qlinearops") - top1 = metrics["topk"]() - top1.reset() - self.assertEqual(top1.result(), 0) - self.assertEqual(top1.result(), 0) - top2 = metrics["topk"](k=2) - top3 = metrics["topk"](k=3) - - predicts = [[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - - labels = [[0, 1, 0, 0], [0, 0, 1, 0]] - sparse_labels = [2, 2] - single_label = 2 - - # test functionality of one-hot label - top1.update(predicts, labels) - top2.update(predicts, labels) - top3.update(predicts, labels) - self.assertEqual(top1.result(), 0.0) - self.assertEqual(top2.result(), 0.5) - self.assertEqual(top3.result(), 1) - - # test functionality of sparse label - top1.update(predicts, sparse_labels) - top2.update(predicts, sparse_labels) - top3.update(predicts, sparse_labels) - self.assertEqual(top1.result(), 0.25) - self.assertEqual(top2.result(), 0.75) - self.assertEqual(top3.result(), 1) - - # test functionality of single label - top1.update(single_predict, single_label) - top2.update(single_predict, single_label) - top3.update(single_predict, single_label) - self.assertEqual(top1.result(), 0.4) - self.assertEqual(top2.result(), 0.8) - self.assertEqual(top3.result(), 1) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet") - def test_mxnet_topk(self): - metrics = METRICS("mxnet") - top1 = metrics["topk"]() - top1.reset() - self.assertEqual(top1.result(), 0) - top2 = metrics["topk"](k=2) - top3 = metrics["topk"](k=3) - - predicts = [[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - - labels = [[0, 1, 0, 0], [0, 0, 1, 0]] - sparse_labels = [2, 2] - single_label = 2 - - # test functionality of one-hot label - top1.update(predicts, labels) - top2.update(predicts, labels) - top3.update(predicts, labels) - self.assertEqual(top1.result(), 0.0) - self.assertEqual(top2.result(), 0.5) - self.assertEqual(top3.result(), 1) - - # test functionality of sparse label - top1.update(predicts, sparse_labels) - top2.update(predicts, sparse_labels) - top3.update(predicts, sparse_labels) - self.assertEqual(top1.result(), 0.25) - self.assertEqual(top2.result(), 0.75) - self.assertEqual(top3.result(), 1) - - # test functionality of single label - top1.update(single_predict, single_label) - top2.update(single_predict, single_label) - top3.update(single_predict, single_label) - self.assertEqual(top1.result(), 0.4) - self.assertEqual(top2.result(), 0.8) - self.assertEqual(top3.result(), 1) - - 
def test_tensorflow_topk(self): - metrics = METRICS("tensorflow") - top1 = metrics["topk"]() - top1.reset() - self.assertEqual(top1.result(), 0) - top2 = metrics["topk"](k=2) - top3 = metrics["topk"](k=3) - - predicts = [[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - int_predict = 0 - ndarry_predict = np.array([[0, 0.2, 0.9, 0.3], [0, 0.9, 0.8, 0]]) - - labels = [[0, 1, 0, 0], [0, 0, 1, 0]] - sparse_labels = [2, 2] - single_label = 2 - tuple_label = tuple( - [ - [0, 1], - [ - 0, - 0, - ], - ] - ) - list_tuple_label = [ - tuple( - [ - [0, 1], - [ - 0, - 0, - ], - ] - ) - ] - - # test functionality of one-hot label - top1.update(predicts, labels) - top2.update(predicts, labels) - top3.update(predicts, labels) - self.assertEqual(top1.result(), 0.0) - self.assertEqual(top2.result(), 0.5) - self.assertEqual(top3.result(), 1) - - # test functionality of sparse label - top1.update(predicts, sparse_labels) - top2.update(predicts, sparse_labels) - top3.update(predicts, sparse_labels) - self.assertEqual(top1.result(), 0.25) - self.assertEqual(top2.result(), 0.75) - self.assertEqual(top3.result(), 1) - - # test functionality of single label - top1.update(single_predict, single_label) - top2.update(single_predict, single_label) - top3.update(single_predict, single_label) - self.assertEqual(top1.result(), 0.4) - self.assertEqual(top2.result(), 0.8) - self.assertEqual(top3.result(), 1) - - # test functionality of int pred and label - top1.reset() - top1.update(int_predict, single_label) - self.assertEqual(top1.result(), 0) - - # test functionality of ndarray pred and tuple label - top1.reset() - top1.update(ndarry_predict, tuple_label) - self.assertEqual(top1.result(), 0.5) - - # test functionality of ndarray pred and tuple label - top1.reset() - top1.update(ndarry_predict, list_tuple_label) - self.assertEqual(top1.result(), 0.5) - - def test_tensorflow_mAP(self): - import json - import os - - metrics = METRICS("tensorflow") - fake_dict = "dog: 1" - with open("anno.yaml", "w", encoding="utf-8") as f: - f.write(fake_dict) - mAP = metrics["mAP"]("anno.yaml") - self.assertEqual(mAP.category_map_reverse["dog"], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array( - [ - [ - [0.16117382, 0.59801614, 0.81511605, 0.7858219], - [0.5589304, 0.0, 0.98301625, 0.520178], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - [0.40032804, 0.01218696, 0.6924763, 0.30341768], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - ] - ] - ), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - ground_truth = [ - np.array([[[0.5633255, 0.34003124, 0.69857144, 0.4009531], [0.4763466, 0.7769531, 0.54334897, 0.9675937]]]), - np.array([["a", "b"]]), - np.array([[]]), - np.array([b"000000397133.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth) - - detection = [ - np.array( - [[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.62706745, 0.35748824, 0.6892729, 0.41513762]]] - ), - np.array([[0.9267181, 0.8510787]]), - np.array([[1.0, 1.0]]), - ] - ground_truth = [ - np.array( - [[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.62706745, 0.35748824, 0.6892729, 0.41513762]]] - ), - np.array([[b"dog", b"dog"]]), - np.array([[]]), - np.array([b"000000397133.jpg"]), - ] - mAP.update(detection, ground_truth) - mAP.result() - self.assertEqual(format(mAP.result(), ".5f"), "1.00000") - - detection = [ - np.array( - [ - [ - [0.16117382, 0.59801614, 0.81511605, 0.7858219], - [0.5589304, 
0.0, 0.98301625, 0.520178], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - [0.40032804, 0.01218696, 0.6924763, 0.30341768], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - ] - ] - ), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - detection_2 = [ - np.array([[8]]), - np.array( - [ - [ - [0.82776225, 0.5865939, 0.8927653, 0.6302338], - [0.8375764, 0.6424138, 0.9055594, 0.6921875], - [0.57902956, 0.39394334, 0.8342961, 0.5577197], - [0.7949219, 0.6513021, 0.8472295, 0.68427753], - [0.809729, 0.5947042, 0.8539927, 0.62916476], - [0.7258591, 0.08907133, 1.0, 0.86224866], - [0.43100086, 0.37782395, 0.8384069, 0.5616918], - [0.32005906, 0.84334356, 1.0, 1.0], - ] - ] - ), - np.array([[0.86698544, 0.7562499, 0.66414887, 0.64498234, 0.63083494, 0.46618757, 0.3914739, 0.3094324]]), - np.array([[55.0, 55.0, 79.0, 55.0, 55.0, 67.0, 79.0, 82.0]]), - ] - ground_truth = [ - np.array( - [ - [ - [0.5633255, 0.34003124, 0.69857144, 0.4009531], - [0.56262296, 0.0015625, 1.0, 0.5431719], - [0.16374707, 0.60728127, 0.813911, 0.77823436], - [0.5841452, 0.21182813, 0.65156907, 0.24670312], - [0.8056206, 0.048875, 0.90124124, 0.1553125], - [0.6729742, 0.09317187, 0.7696956, 0.21203125], - [0.3848478, 0.002125, 0.61522245, 0.303], - [0.61548007, 0.0, 0.7015925, 0.097125], - [0.6381967, 0.1865625, 0.7184075, 0.22534375], - [0.6274239, 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375], - [0.2673302, 0.245625, 0.3043794, 0.27353126], - [0.7137705, 0.15429688, 0.726815, 0.17114063], - [0.6003747, 0.25942189, 0.6438876, 0.27320313], - [0.68845433, 0.13501562, 0.714637, 0.17245312], - [0.69358313, 0.10959375, 0.7043091, 0.12409375], - [0.493911, 0.0, 0.72571427, 0.299], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466, 0.7769531, 0.54334897, 0.9675937], - ] - ] - ), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51, 56, 50, 56, 56, 79, 57, 81]]), - np.array([b"000000397133.jpg"]), - ] - ground_truth_2 = [ - np.array( - [ - [ - [0.51508695, 0.2911648, 0.5903478, 0.31360796], - [0.9358696, 0.07528409, 0.99891305, 0.25], - [0.8242174, 0.3309659, 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696, 0.8179261], - [0.32582608, 0.8575, 0.98426086, 0.9984659], - [0.77795655, 0.6268466, 0.89930433, 0.73434657], - [0.5396087, 0.39053977, 0.8483913, 0.5615057], - [0.58473915, 0.75661933, 0.5998261, 0.83579546], - [0.80391306, 0.6129829, 0.8733478, 0.66201705], - [0.8737391, 0.6579546, 0.943, 0.7053693], - [0.775, 0.6549716, 0.8227391, 0.6882955], - [0.8130869, 0.58292615, 0.90526086, 0.62551135], - [0.7844348, 0.68735796, 0.98182607, 0.83329546], - [0.872, 0.6190057, 0.9306522, 0.6591761], - ] - ] - ), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b"000000037777.jpg"]), - ] - - mAP = metrics["mAP"]() - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), ".5f"), "0.18182") - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), ".5f"), "0.20347") - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), ".5f"), "0.18182") - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b"000000037777.jpg"]), - ] - 
self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64]]), - np.array([b"000000037700.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.5589304, 0.0, 0.98301625, 0.520178]]]), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b"000000011.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b"000000012.jpg"]), - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.5589304, 0.0, 0.98301625, 0.520178]]]), - np.array([[0.9267181, 0.8510787]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - os.remove("anno.yaml") - - def test_tensorflow_VOCmAP(self): - import os - - metrics = METRICS("tensorflow") - fake_dict = "dog: 1" - with open("anno.yaml", "w", encoding="utf-8") as f: - f.write(fake_dict) - mAP = metrics["VOCmAP"]("anno.yaml") - self.assertEqual(mAP.iou_thrs, 0.5) - self.assertEqual(mAP.map_points, 0) - self.assertEqual(mAP.category_map_reverse["dog"], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array( - [ - [ - [0.16117382, 0.59801614, 0.81511605, 0.7858219], - [0.5589304, 0.0, 0.98301625, 0.520178], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - [0.40032804, 0.01218696, 0.6924763, 0.30341768], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - ] - ] - ), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - ground_truth = [ - np.array([[[0.5633255, 0.34003124, 0.69857144, 0.4009531], [0.4763466, 0.7769531, 0.54334897, 0.9675937]]]), - np.array([["a", "b"]]), - np.array([[]]), - np.array([b"000000397133.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth) - - os.remove("anno.yaml") - - mAP = metrics["VOCmAP"]() - detection = [ - np.array( - [ - [ - [0.16117382, 0.59801614, 0.81511605, 0.7858219], - [0.5589304, 0.0, 0.98301625, 0.520178], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - [0.40032804, 0.01218696, 0.6924763, 0.30341768], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - ] - ] - ), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - detection_2 = [ - np.array([[8]]), - np.array( - [ - [ - [0.82776225, 0.5865939, 0.8927653, 0.6302338], - [0.8375764, 0.6424138, 0.9055594, 0.6921875], - [0.57902956, 0.39394334, 0.8342961, 0.5577197], - [0.7949219, 0.6513021, 0.8472295, 0.68427753], - [0.809729, 0.5947042, 0.8539927, 0.62916476], - [0.7258591, 0.08907133, 1.0, 0.86224866], - [0.43100086, 0.37782395, 0.8384069, 0.5616918], - [0.32005906, 0.84334356, 1.0, 1.0], - ] - ] - ), - np.array([[0.86698544, 0.7562499, 0.66414887, 0.64498234, 0.63083494, 0.46618757, 0.3914739, 0.3094324]]), - np.array([[55.0, 55.0, 79.0, 55.0, 55.0, 67.0, 
79.0, 82.0]]), - ] - ground_truth = [ - np.array( - [ - [ - [0.5633255, 0.34003124, 0.69857144, 0.4009531], - [0.56262296, 0.0015625, 1.0, 0.5431719], - [0.16374707, 0.60728127, 0.813911, 0.77823436], - [0.5841452, 0.21182813, 0.65156907, 0.24670312], - [0.8056206, 0.048875, 0.90124124, 0.1553125], - [0.6729742, 0.09317187, 0.7696956, 0.21203125], - [0.3848478, 0.002125, 0.61522245, 0.303], - [0.61548007, 0.0, 0.7015925, 0.097125], - [0.6381967, 0.1865625, 0.7184075, 0.22534375], - [0.6274239, 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375], - [0.2673302, 0.245625, 0.3043794, 0.27353126], - [0.7137705, 0.15429688, 0.726815, 0.17114063], - [0.6003747, 0.25942189, 0.6438876, 0.27320313], - [0.68845433, 0.13501562, 0.714637, 0.17245312], - [0.69358313, 0.10959375, 0.7043091, 0.12409375], - [0.493911, 0.0, 0.72571427, 0.299], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466, 0.7769531, 0.54334897, 0.9675937], - ] - ] - ), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51, 56, 50, 56, 56, 79, 57, 81]]), - np.array([b"000000397133.jpg"]), - ] - ground_truth_2 = [ - np.array( - [ - [ - [0.51508695, 0.2911648, 0.5903478, 0.31360796], - [0.9358696, 0.07528409, 0.99891305, 0.25], - [0.8242174, 0.3309659, 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696, 0.8179261], - [0.32582608, 0.8575, 0.98426086, 0.9984659], - [0.77795655, 0.6268466, 0.89930433, 0.73434657], - [0.5396087, 0.39053977, 0.8483913, 0.5615057], - [0.58473915, 0.75661933, 0.5998261, 0.83579546], - [0.80391306, 0.6129829, 0.8733478, 0.66201705], - [0.8737391, 0.6579546, 0.943, 0.7053693], - [0.775, 0.6549716, 0.8227391, 0.6882955], - [0.8130869, 0.58292615, 0.90526086, 0.62551135], - [0.7844348, 0.68735796, 0.98182607, 0.83329546], - [0.872, 0.6190057, 0.9306522, 0.6591761], - ] - ] - ), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b"000000037777.jpg"]), - ] - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), ".5f"), "0.18182") - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), ".5f"), "0.20347") - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), ".5f"), "0.18182") - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b"000000037777.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64]]), - np.array([b"000000037700.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.5589304, 0.0, 0.98301625, 0.520178]]]), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b"000000011.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], 
[0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b"000000012.jpg"]), - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.5589304, 0.0, 0.98301625, 0.520178]]]), - np.array([[0.9267181, 0.8510787]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test_tensorflow_COCOmAP(self): - import os - - output_index_mapping = {"num_detections": 0, "boxes": 1, "scores": 2, "classes": 3} - metrics = METRICS("tensorflow") - fake_dict = "dog: 1" - with open("anno.yaml", "w", encoding="utf-8") as f: - f.write(fake_dict) - mAP = metrics["COCOmAP"]("anno.yaml") - mAP2 = metrics["COCOmAPv2"]("anno.yaml", output_index_mapping=output_index_mapping) - self.assertEqual(mAP.category_map_reverse["dog"], 1) - self.assertEqual(mAP2.category_map_reverse["dog"], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array( - [ - [ - [0.16117382, 0.59801614, 0.81511605, 0.7858219], - [0.5589304, 0.0, 0.98301625, 0.520178], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - [0.40032804, 0.01218696, 0.6924763, 0.30341768], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - ] - ] - ), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - ground_truth = [ - np.array([[[0.5633255, 0.34003124, 0.69857144, 0.4009531], [0.4763466, 0.7769531, 0.54334897, 0.9675937]]]), - np.array([["a", "b"]]), - np.array([[]]), - np.array([b"000000397133.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth) - - os.remove("anno.yaml") - - mAP = metrics["COCOmAP"]() - mAP2 = metrics["COCOmAPv2"]() - detection = [ - np.array( - [ - [ - [0.16117382, 0.59801614, 0.81511605, 0.7858219], - [0.5589304, 0.0, 0.98301625, 0.520178], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - [0.40032804, 0.01218696, 0.6924763, 0.30341768], - [0.62706745, 0.35748824, 0.6892729, 0.41513762], - ] - ] - ), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - detection_2 = [ - np.array([[8]]), - np.array( - [ - [ - [0.82776225, 0.5865939, 0.8927653, 0.6302338], - [0.8375764, 0.6424138, 0.9055594, 0.6921875], - [0.57902956, 0.39394334, 0.8342961, 0.5577197], - [0.7949219, 0.6513021, 0.8472295, 0.68427753], - [0.809729, 0.5947042, 0.8539927, 0.62916476], - [0.7258591, 0.08907133, 1.0, 0.86224866], - [0.43100086, 0.37782395, 0.8384069, 0.5616918], - [0.32005906, 0.84334356, 1.0, 1.0], - ] - ] - ), - np.array([[0.86698544, 0.7562499, 0.66414887, 0.64498234, 0.63083494, 0.46618757, 0.3914739, 0.3094324]]), - np.array([[55.0, 55.0, 79.0, 55.0, 55.0, 67.0, 79.0, 82.0]]), - ] - ground_truth = [ - np.array( - [ - [ - [0.5633255, 0.34003124, 0.69857144, 0.4009531], - [0.56262296, 0.0015625, 1.0, 0.5431719], - [0.16374707, 0.60728127, 0.813911, 0.77823436], - [0.5841452, 0.21182813, 0.65156907, 0.24670312], - [0.8056206, 0.048875, 0.90124124, 0.1553125], - [0.6729742, 0.09317187, 0.7696956, 0.21203125], - [0.3848478, 0.002125, 0.61522245, 0.303], - [0.61548007, 0.0, 0.7015925, 0.097125], - [0.6381967, 0.1865625, 0.7184075, 0.22534375], - [0.6274239, 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375], - [0.2673302, 0.245625, 0.3043794, 0.27353126], - [0.7137705, 0.15429688, 0.726815, 0.17114063], - [0.6003747, 0.25942189, 0.6438876, 0.27320313], - [0.68845433, 0.13501562, 0.714637, 0.17245312], - 
[0.69358313, 0.10959375, 0.7043091, 0.12409375], - [0.493911, 0.0, 0.72571427, 0.299], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466, 0.7769531, 0.54334897, 0.9675937], - ] - ] - ), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51, 56, 50, 56, 56, 79, 57, 81]]), - np.array([b"000000397133.jpg"]), - ] - ground_truth_2 = [ - np.array( - [ - [ - [0.51508695, 0.2911648, 0.5903478, 0.31360796], - [0.9358696, 0.07528409, 0.99891305, 0.25], - [0.8242174, 0.3309659, 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696, 0.8179261], - [0.32582608, 0.8575, 0.98426086, 0.9984659], - [0.77795655, 0.6268466, 0.89930433, 0.73434657], - [0.5396087, 0.39053977, 0.8483913, 0.5615057], - [0.58473915, 0.75661933, 0.5998261, 0.83579546], - [0.80391306, 0.6129829, 0.8733478, 0.66201705], - [0.8737391, 0.6579546, 0.943, 0.7053693], - [0.775, 0.6549716, 0.8227391, 0.6882955], - [0.8130869, 0.58292615, 0.90526086, 0.62551135], - [0.7844348, 0.68735796, 0.98182607, 0.83329546], - [0.872, 0.6190057, 0.9306522, 0.6591761], - ] - ] - ), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b"000000037777.jpg"]), - ] - - self.assertEqual(mAP.result(), 0) - self.assertEqual(mAP2.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), ".5f"), "0.14149") - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), ".5f"), "0.13366") - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), ".5f"), "0.14149") - - mAP2.update(detection, ground_truth) - - mAP2.update(detection, ground_truth) - self.assertEqual(format(mAP2.result(), ".5f"), "0.14149") - - mAP2 = metrics["COCOmAPv2"](output_index_mapping=output_index_mapping) - - mAP2.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP2.result(), ".5f"), "0.20520") - mAP2.reset() - mAP2.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP2.result(), ".5f"), "0.20520") - - mAP2 = metrics["COCOmAPv2"]() - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b"000000037777.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - self.assertRaises(ValueError, mAP2.update, detection, ground_truth_1) - - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64]]), - np.array([b"000000037700.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - self.assertRaises(ValueError, mAP2.update, detection, ground_truth_2) - - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.5589304, 0.0, 0.98301625, 0.520178]]]), - np.array([[0.9267181, 0.8510787, 0.60418576, 0.35155892, 0.31158054]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b"000000011.jpg"]), - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - self.assertRaises(ValueError, mAP2.update, detection_1, ground_truth_1) - - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648, 0.5903478, 0.31360796], [0.872, 0.6190057, 0.9306522, 0.6591761]]]), - 
np.array([[]]), - np.array([[64, 62]]), - np.array([b"000000012.jpg"]), - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219], [0.5589304, 0.0, 0.98301625, 0.520178]]]), - np.array([[0.9267181, 0.8510787]]), - np.array([[1.0, 67.0, 51.0, 79.0, 47.0]]), - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - self.assertRaises(ValueError, mAP2.update, detection_2, ground_truth_2) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows now") - def test__accuracy(self): - predicts1 = [1, 0, 1, 1] - labels1 = [0, 1, 1, 1] - - predicts2 = [[0, 0], [0, 0]] - labels2 = [[0, 1], [1, 1]] - - predicts3 = [[[0, 1], [0, 0], [0, 1]], [[0, 1], [0, 1], [0, 1]]] - labels3 = [[[0, 1], [0, 1], [1, 0]], [[1, 0], [1, 0], [1, 0]]] - - predicts4 = [[0.2, 0.8], [0.1, 0.9], [0.3, 0.7], [0.4, 0.6]] # 1,1,1,1 - labels4 = [0, 1, 0, 0] - - predicts5 = [[0], [0]] - labels5 = [0, 1] - - metrics = METRICS("pytorch") - acc = metrics["Accuracy"]() - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts5, labels5) - self.assertEqual(acc.result(), 1.0) - - metrics = METRICS("mxnet") - acc = metrics["Accuracy"]() - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - - metrics = METRICS("onnxrt_qlinearops") - acc = metrics["Accuracy"]() - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - - acc.reset() - acc.update(1, 1) - self.assertEqual(acc.result(), 1.0) - - wrong_predictions = [1, 0, 0] - wrong_labels = [[0, 1, 1]] - self.assertRaises(ValueError, acc.update, wrong_predictions, wrong_labels) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet") - def test_mxnet_accuracy(self): - metrics = METRICS("mxnet") - acc = metrics["Accuracy"]() - predicts = [1, 0, 1, 1] - labels = [0, 1, 1, 1] - acc.update(predicts, labels) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows now") - def test_mse(self): - predicts1 = [1, 0, 0, 1] - labels1 = [0, 1, 0, 0] - predicts2 = [1, 1, 1, 1] - labels2 = [0, 1, 1, 0] - - metrics = METRICS("onnxrt_qlinearops") - mse = metrics["MSE"](compare_label=False) - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - metrics = METRICS("tensorflow") - mse = metrics["MSE"](compare_label=False) - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, 
labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - metrics = METRICS("mxnet") - mse = metrics["MSE"]() - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - metrics = METRICS("pytorch") - mse = metrics["MSE"]() - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows now") - def test_mae(self): - predicts1 = [1, 0, 0, 1] - labels1 = [0, 1, 0, 0] - predicts2 = [1, 1, 1, 1] - labels2 = [1, 1, 1, 0] - - metrics = METRICS("tensorflow") - mae = metrics["MAE"]() - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - mae.update(0, 1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.8) - mae.reset() - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.25) - - metrics = METRICS("pytorch") - mae = metrics["MAE"]() - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.5) - - metrics = METRICS("mxnet") - mae = metrics["MAE"]() - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.5) - - metrics = METRICS("onnxrt_qlinearops") - mae = metrics["MAE"]() - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.5) - - self.assertRaises(AssertionError, mae.update, [1], [1, 2]) - self.assertRaises(AssertionError, mae.update, 1, [1, 2]) - self.assertRaises(AssertionError, mae.update, [1, 2], [1]) - self.assertRaises(AssertionError, mae.update, 1, np.array([1, 2])) - - @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows now") - def test_rmse(self): - predicts1 = [1, 0, 0, 1] - labels1 = [1, 0, 0, 0] - predicts2 = [1, 1, 1, 1] - labels2 = [1, 0, 0, 0] - - metrics = METRICS("tensorflow") - rmse = metrics["RMSE"]() - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.reset() - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.75)) - - metrics = METRICS("pytorch") - rmse = metrics["RMSE"]() - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.5)) - - metrics = METRICS("mxnet") - rmse = metrics["RMSE"]() - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.5)) - - metrics = METRICS("onnxrt_qlinearops") - rmse = metrics["RMSE"]() - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.5)) - - def 
test_loss(self): - metrics = METRICS("pytorch") - loss = metrics["Loss"]() - predicts = [1, 0, 0, 1] - labels = [0, 1, 0, 0] - loss.update(predicts, labels) - loss_result = loss.result() - self.assertEqual(loss_result, 0.5) - predicts = [1, 1, 0, 1] - labels = [0, 1, 0, 0] - loss.update(predicts, labels) - loss_result = loss.result() - self.assertEqual(loss_result, 0.625) - loss.reset() - predicts = [1, 0, 0, 1] - labels = [0, 1, 0, 0] - loss.update(predicts, labels) - self.assertEqual(loss.result(), 0.5) - - metrics = METRICS("onnxrt_qlinearops") - loss = metrics["Loss"]() - predicts = [1, 0, 0, 1] - labels = [0, 1, 0, 0] - loss.update(predicts, labels) - loss_result = loss.result() - self.assertEqual(loss_result, 0.5) - predicts = [1, 1, 0, 1] - labels = [0, 1, 0, 0] - loss.update(predicts, labels) - loss_result = loss.result() - self.assertEqual(loss_result, 0.625) - loss.reset() - predicts = [1, 0, 0, 1] - labels = [0, 1, 0, 0] - loss.update(predicts, labels) - self.assertEqual(loss.result(), 0.5) - - def test_ROC(self): - metrics = METRICS("pytorch") - roc = metrics["ROC"]() - predicts = [[1, 0, 0, 1]] - labels = [[0, 1, 0, 0]] - roc.update(predicts, labels) - roc_result = roc.result() - self.assertEqual(roc_result, 0.25) - predicts = [[1]] - labels = [[0]] - roc.update(predicts, labels) - roc_result = roc.result() - self.assertEqual(roc_result, 0.2) - roc.reset() - predicts = [[1, 0, 0, 1]] - labels = [[0, 1, 0, 0]] - roc.update(predicts, labels) - self.assertEqual(roc.result(), 0.25) - - def test_tensorflow_SquadF1(self): - metrics = METRICS("tensorflow") - squad = metrics["SquadF1"]() - labels = [ - { - "paragraphs": [ - { - "qas": [ - { - "answers": [ - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - ], - "question": "Which NFL team represented the AFC at Super Bowl 50?", - "id": "56be4db0acb8001400a502ec", - } - ] - } - ] - } - ] - predicts = {"56be4db0acb8001400a502ec": "Denver Broncos"} - squad.update(predicts, labels) - self.assertEqual(squad.result(), 100.0) - squad.reset() - squad.update(predicts, labels) - self.assertEqual(squad.result(), 100.0) - - def test_PyTorchLoss(self): - import torch - - from neural_compressor.experimental.metric.metric import PyTorchLoss - - pytorch_loss = PyTorchLoss() - pytorch_loss.update([torch.ones(2, 3), torch.ones(2, 3)]) - self.assertEqual(pytorch_loss.compute(), 3) - pytorch_loss.reset() - self.assertEqual(pytorch_loss._num_examples, 0) - - def test_WrapMetric(self): - metirc = CorrectMetric() - metric_v2 = CorrectMetric_v2() - - from neural_compressor.experimental.metric.metric import WrapPyTorchMetric - - pytorch_metric = WrapPyTorchMetric(metirc) - self.assertIsInstance(pytorch_metric.metric, CorrectMetric) - self.assertIsNone(pytorch_metric.hvd) - pytorch_metric.update([1], [1]) - self.assertEqual(pytorch_metric.result(), 0) - pytorch_metric.reset() - self.assertEqual(len(pytorch_metric.metric.item), 0) - - from neural_compressor.experimental.metric.metric import WrapONNXRTMetric - - onnx_metric = WrapONNXRTMetric(metric_v2) - self.assertIsInstance(onnx_metric.metric, CorrectMetric_v2) - self.assertIsNone(onnx_metric.hvd) - onnx_metric.update([1], [1]) - self.assertEqual(onnx_metric.result(), 0) - onnx_metric.reset() - self.assertEqual(len(onnx_metric.metric.item), 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/metric/test_metrics.py b/test/metric/test_metrics.py index 0e7bdb2308c..e91928b782f 100644 --- 
a/test/metric/test_metrics.py +++ b/test/metric/test_metrics.py @@ -30,15 +30,6 @@ def reset(self): class TestMetrics(unittest.TestCase): - def testUserMetric(self): - from neural_compressor.experimental import Benchmark, Graph_Optimization, Quantization, common - - for i in [Quantization(), Benchmark(), Graph_Optimization()]: - item = i - with self.assertRaises(AssertionError): - item.metric = InCorrectMetric() - item.framework = "tensorflow" - item.metric = common.Metric(CorrectMetric, str(i)) def testmIOU(self): metrics = METRICS("tensorflow") diff --git a/test/metric/test_mse_metric.py b/test/metric/test_mse_metric.py deleted file mode 100644 index 5a266a060f0..00000000000 --- a/test/metric/test_mse_metric.py +++ /dev/null @@ -1,234 +0,0 @@ -import copy -import os -import shutil -import unittest - -import numpy as np -import torch -import torchvision -from packaging.version import Version - -import neural_compressor.adaptor.pytorch as nc_torch -from neural_compressor.adaptor import FRAMEWORKS -from neural_compressor.experimental import Quantization, common -from neural_compressor.model import MODELS - -try: - try: - import intel_pytorch_extension as ipex - except: - import intel_extension_for_pytorch as ipex - TEST_IPEX = True -except: - TEST_IPEX = False - -PT_VERSION = nc_torch.get_torch_version() -if PT_VERSION >= Version("1.8.0-rc1"): - FX_MODE = True -else: - FX_MODE = False - -torch.manual_seed(1) - -fake_ptq_yaml = """ - model: - name: imagenet - framework: pytorch - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - performance: - warmup: 5 - iteration: 10 - - tuning: - accuracy_criterion: - absolute: 100.0 - higher_is_better: False - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - -fake_dynamic_yaml = """ - model: - name: imagenet - framework: pytorch - - quantization: - approach: post_training_dynamic_quant - evaluation: - accuracy: - metric: - MSE: - compare_label: False - performance: - warmup: 5 - iteration: 10 - - tuning: - accuracy_criterion: - absolute: 100.0 - higher_is_better: False - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - - -def build_ptq_yaml(): - with open("ptq_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_ptq_yaml) - - -def build_dynamic_yaml(): - with open("dynamic_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_dynamic_yaml) - - -def build_fx_ptq_yaml(): - fake_fx_ptq_yaml = fake_ptq_yaml.replace("pytorch", "pytorch_fx") - with open("fx_ptq_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_fx_ptq_yaml) - - -def build_fx_dynamic_yaml(): - fake_fx_dynamic_yaml = fake_dynamic_yaml.replace("pytorch", "pytorch_fx") - with open("fx_dynamic_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_fx_dynamic_yaml) - - -def build_ipex_yaml(): - fake_yaml = """ - model: - name: imagenet - framework: pytorch_ipex - - evaluation: - accuracy: - metric: - MSE: - compare_label: False - performance: - warmup: 5 - iteration: 10 - - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - workspace: - path: saved - """ - with open("ipex_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -@unittest.skipIf(TEST_IPEX, "TODO: Please wait to IPEX + PyTorch1.7 release") -class TestPytorchAdaptor(unittest.TestCase): - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1234, - "q_dataloader": None, - "workspace_path": "./", - } - framework = "pytorch" - adaptor = 
FRAMEWORKS[framework](framework_specific_info) - model = torchvision.models.quantization.resnet18() - nc_model = MODELS["pytorch"](model) - - @classmethod - def setUpClass(self): - build_ptq_yaml() - build_dynamic_yaml() - - @classmethod - def tearDownClass(self): - os.remove("ptq_yaml.yaml") - os.remove("dynamic_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_quantization_saved(self): - for fake_yaml in ["dynamic_yaml.yaml", "ptq_yaml.yaml"]: - if fake_yaml in ["dynamic_yaml.yaml"]: - model = torchvision.models.quantization.resnet18() - else: - model = copy.deepcopy(self.model) - if fake_yaml in ["ptq_yaml.yaml"]: - model.eval().fuse_model() - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (100, 3, 256, 256), label=True) - quantizer.model = model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - q_model = quantizer.fit() - self.assertTrue(bool(q_model)) - - -@unittest.skipIf(not FX_MODE, "Unsupported Fx Mode with PyTorch Version Below 1.8") -class TestPytorchFXAdaptor(unittest.TestCase): - framework_specific_info = { - "device": "cpu", - "approach": "post_training_static_quant", - "random_seed": 1234, - "q_dataloader": None, - "workspace_path": "./", - } - framework = "pytorch_fx" - adaptor = FRAMEWORKS[framework](framework_specific_info) - model = torchvision.models.quantization.resnet18() - nc_model = MODELS["pytorch_fx"](model) - - @classmethod - def setUpClass(self): - build_fx_ptq_yaml() - build_fx_dynamic_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fx_ptq_yaml.yaml") - os.remove("fx_dynamic_yaml.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_fx_static_quantization_saved(self): - fake_yaml = "fx_ptq_yaml.yaml" - model = copy.deepcopy(self.model) - model.eval().fuse_model() - quantizer = Quantization(fake_yaml) - dataset = quantizer.dataset("dummy", (100, 3, 256, 256), label=True) - quantizer.model = model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - q_model = quantizer.fit() - self.assertTrue(bool(q_model)) - - @unittest.skipIf( - PT_VERSION < Version("1.9.0-rc1"), - "Please use PyTroch 1.9 or higher version for dynamic quantization with pytorch_fx backend", - ) - def test_fx_dynamic_quantization_saved(self): - fake_yaml = "fx_dynamic_yaml.yaml" - model = torchvision.models.resnet18() - quantizer = Quantization(fake_yaml) - quantizer.model = model - dataset = quantizer.dataset("dummy", (100, 3, 256, 256), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - q_model = quantizer.fit() - self.assertTrue(bool(q_model)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/metric/test_mse_metric_1x.py b/test/metric/test_mse_metric_1x.py deleted file mode 100644 index ee2e6bf4c78..00000000000 --- a/test/metric/test_mse_metric_1x.py +++ /dev/null @@ -1,303 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import torch -import torchvision -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - workspace: - path: 
saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - exit_policy: - max_trials: 5 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml3(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: - compare_label: False - tuning: - strategy: - name: mse - exit_policy: - max_trials: 5 - timeout: 50 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml3.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml4(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: - compare_label: False - weight: [1, 0] - tuning: - strategy: - name: mse - exit_policy: - max_trials: 5 - timeout: 50 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml4.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_ox_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: onnxrt_qlinearops - inputs: input - outputs: output - evaluation: - accuracy: - metric: - Accuracy: {} - tuning: - strategy: - name: mse - accuracy_criterion: - relative: -0.01 - higher_is_better: False - exit_policy: - max_trials: 3 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("ox_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session() as sess: - x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def build_ox_model(): - path = "mb_v2.onnx" - 
model = torchvision.models.mobilenet_v2() - - x = torch.randn(100, 3, 224, 224, requires_grad=True) - torch_out = model(x) - - torch.onnx.export( - model, - x, - path, - export_params=True, - opset_version=12, - do_constant_folding=True, - input_names=["input"], - output_names=["output"], - dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}}, - ) - - -class dataset: - def __init__(self): - self.data = [] - self.label = [] - for i in range(10): - self.data.append(np.zeros((3, 224, 224)).astype(np.float32)) - self.label.append(0) - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.data[index], self.label[index] - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - build_fake_yaml2() - build_ox_model() - build_ox_yaml() - build_fake_yaml3() - build_fake_yaml4() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - os.remove("ox_yaml.yaml") - os.remove("mb_v2.onnx") - os.remove("fake_yaml4.yaml") - os.remove("fake_yaml3.yaml") - - shutil.rmtree("saved", ignore_errors=True) - - def test_ru_mse_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_ru_mse_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_ru_mse_max_trials_multimetric(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml3.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_ru_mse_max_trials_multimetric_weight(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml4.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_ox_mse(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("ox_yaml.yaml") - ds = dataset() - quantizer.calib_dataloader = common.DataLoader(ds) - quantizer.eval_dataloader = common.DataLoader(ds) - quantizer.model = "mb_v2.onnx" - quantizer.fit() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/metric/test_register_metric_transform.py b/test/metric/test_register_metric_transform.py deleted file mode 100644 index e4fa1cc9d11..00000000000 --- a/test/metric/test_register_metric_transform.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Tests for neural_compressor register metric and postprocess.""" - -import os -import platform -import re -import unittest - -import numpy as np 
-import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: resnet_v1_101 - framework: tensorflow - inputs: input - outputs: resnet_v1_101/predictions/Reshape_1 - device: cpu - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_benchmark(): - seq = [ - "from argparse import ArgumentParser\n", - "arg_parser = ArgumentParser(description='Parse args')\n", - "arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n", - "args = arg_parser.parse_args()\n", - "import os\n", - "import numpy as np\n", - "import PIL.Image\n", - "image = np.array(PIL.Image.open('images/cat.jpg'))\n", - "resize_image = np.resize(image, (224, 224, 3))\n", - "mean = [123.68, 116.78, 103.94]\n", - "resize_image = resize_image - mean\n", - "images = np.expand_dims(resize_image, axis=0)\n", - "labels = [768]\n", - "from neural_compressor.data.transforms.imagenet_transform import LabelShift\n", - "from neural_compressor.experimental import Benchmark, common\n", - "from neural_compressor.experimental.common import Metric, Postprocess\n", - "from neural_compressor.metric import TensorflowTopK\n", - "os.environ['NC_ENV_CONF'] = 'True'\n", - "evaluator = Benchmark('fake_yaml.yaml')\n", - "nc_postprocess = Postprocess(LabelShift, 'label_benchmark', label_shift=1)\n", - "evaluator.postprocess = nc_postprocess\n", - "nc_metric = Metric(TensorflowTopK, 'topk_benchmark')\n", - "evaluator.metric = nc_metric\n", - "evaluator.b_dataloader = common.DataLoader(dataset=list(zip(images, labels)))\n", - "evaluator.model = args.input_model\n", - "evaluator.fit()\n", - ] - - with open("fake.py", "w", encoding="utf-8") as f: - f.writelines(seq) - - -def build_benchmark2(): - seq = [ - "from argparse import ArgumentParser\n", - "arg_parser = ArgumentParser(description='Parse args')\n", - "arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n", - "args = arg_parser.parse_args()\n", - "import os\n", - "import numpy as np\n", - "import PIL.Image\n", - "image = np.array(PIL.Image.open('images/cat.jpg'))\n", - "resize_image = np.resize(image, (224, 224, 3))\n", - "mean = [123.68, 116.78, 103.94]\n", - "resize_image = resize_image - mean\n", - "images = np.expand_dims(resize_image, axis=0)\n", - "labels = [768]\n", - "from neural_compressor.data.transforms.imagenet_transform import LabelShift\n", - "from neural_compressor.experimental import Benchmark, common\n", - "from neural_compressor.experimental.common import Metric, Postprocess\n", - "from neural_compressor.metric import TensorflowTopK\n", - "os.environ['NC_ENV_CONF'] = 'True'\n", - "evaluator = Benchmark('fake_yaml.yaml')\n", - "nc_metric = Metric(TensorflowTopK, 'topk_second')\n", - "evaluator.metric = nc_metric\n", - "evaluator.b_dataloader = common.DataLoader(dataset=list(zip(images, labels)))\n", - "evaluator.model = args.input_model\n\n", - "evaluator.fit()\n", - ] - - with open("fake2.py", "w", encoding="utf-8") as f: - f.writelines(seq) - - -class TestRegisterMetric(unittest.TestCase): - model_url = ( - "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/resnet101_fp32_pretrained_model.pb" - ) - pb_path = "/tmp/.neural_compressor/resnet101_fp32_pretrained_model.pb" - # image_path = 'images/1024px-Doll_face_silver_Persian.jpg' - image_path = "images/cat.jpg" - platform = platform.system().lower() - if platform == "windows": - pb_path = 
"C:\\tmp\.neural_compressor\\resnet101_fp32_pretrained_model.pb" - - @classmethod - def setUpClass(self): - build_fake_yaml() - build_benchmark() - build_benchmark2() - if not os.path.exists(self.pb_path) and self.platform == "linux": - os.system("mkdir -p /tmp/.neural_compressor && wget {} -O {}".format(self.model_url, self.pb_path)) - - @classmethod - def tearDownClass(self): - if os.path.exists("fake.py"): - os.remove("fake.py") - if os.path.exists("fake2.py"): - os.remove("fake2.py") - - def test_register_metric_postprocess(self): - os.system("python fake.py --input_model={} 2>&1 | tee benchmark.log".format(self.pb_path)) - with open("benchmark.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm benchmark.log") - - os.system("python fake2.py --input_model={} 2>&1 | tee benchmark.log".format(self.pb_path)) - with open("benchmark.log", "r") as f: - for line in f: - throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?) images/sec", line) - self.assertIsNotNone(throughput) - os.system("rm benchmark.log") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/mixed_precision/test_mixed_precision.py b/test/mixed_precision/test_mixed_precision.py index 1f333dde0fb..10ab9afccf2 100644 --- a/test/mixed_precision/test_mixed_precision.py +++ b/test/mixed_precision/test_mixed_precision.py @@ -331,15 +331,6 @@ def test_mixed_precision_with_evaluation(self): ) self.assertTrue(any([i.op_type == "Cast" for i in output_model.nodes()])) - def test_mixed_precision_with_evaluation_old_api(self): - from neural_compressor.conf.config import MixedPrecision_Conf - from neural_compressor.experimental import MixedPrecision - - converter = MixedPrecision(MixedPrecision_Conf("test.yaml")) - converter.model = self.onnx_model - output_model = converter.fit() - self.assertTrue(any([i.op_type != "Cast" for i in output_model.nodes()])) - def test_mixed_precision_with_eval_func(self): def eval(model): return 0.5 diff --git a/test/nas/test_nas.py b/test/nas/test_nas.py deleted file mode 100644 index 98ccb79fb40..00000000000 --- a/test/nas/test_nas.py +++ /dev/null @@ -1,210 +0,0 @@ -import os -import shutil -import unittest - -import numpy as np -import torch - -from neural_compressor.conf.config import NASConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental import NAS, common -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml(approach=None, search_algorithm=None, metrics=["acc"]): - fake_yaml = """ - model: - name: imagenet_nas - framework: pytorch - - nas: - %s - search: - search_space: {'channels': [16, 32, 64], 'dimensions': [32, 64, 128]} - %s - %s - max_trials: 3 - train: - start_epoch: 0 - end_epoch: 1 - iteration: 10 - optimizer: - SGD: - learning_rate: 0.001 - criterion: - CrossEntropyLoss: - reduction: sum - dataloader: - batch_size: 8 - dataset: - dummy: - shape: [32, 3, 64, 64] - label: True - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 8 - dataset: - dummy: - shape: [32, 3, 64, 64] - label: True - """ % ( - "approach: '{}'".format(approach) if approach else "", - "search_algorithm: '{}'".format(search_algorithm) if search_algorithm else "", - "metrics: [{}]".format(",".join(["'{}'".format(m) for m in metrics])) if metrics else "", - ) - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_dynas_fake_yaml(): - fake_yaml = 
""" - model: - name: imagenet_nas - framework: pytorch - - nas: - approach: dynas - search: - search_algorithm: nsga2 - dynas: - supernet: ofa_resnet50 - metrics: ['accuracy_top1', 'macs'] - results_csv_path: './search_results.csv' - """ - with open("dynas_fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_dynas_results_csv(): - results_csv = """ -Sub-network,Date,Latency (ms), MACs,Top-1 Acc (%) -"{'wid': None, 'ks': [7, 7, 3, 3, 5, 7, 7, 3, 5, 5, 3, 3, 7, 3, 5, 5, 5, 7, 5, 7], 'e': [3, 4, 4, 4, 4, 6, 6, 4, 4, 3, 4, 4, 3, 6, 4, 3, 4, 6, 3, 3], 'd': [2, 4, 4, 2, 3], 'r': [224]}",2022-07-07 03:13:06.306540,39,391813792,77.416 -"{'wid': None, 'ks': [3, 5, 5, 7, 5, 5, 3, 3, 7, 7, 7, 5, 7, 3, 7, 5, 3, 5, 3, 3], 'e': [4, 6, 3, 4, 4, 4, 4, 6, 3, 6, 4, 3, 4, 3, 4, 3, 6, 4, 4, 6], 'd': [4, 3, 3, 2, 3], 'r': [224]}",2022-07-07 03:14:50.398553,41,412962768,77.234 -"{'wid': None, 'ks': [5, 5, 5, 3, 7, 5, 7, 5, 7, 3, 3, 7, 7, 5, 7, 3, 5, 5, 7, 3], 'e': [6, 4, 3, 3, 3, 3, 4, 4, 3, 4, 3, 6, 4, 4, 3, 6, 4, 3, 4, 6], 'd': [4, 4, 4, 2, 4], 'r': [224]}",2022-07-07 03:16:53.105436,44,444295456,77.632 -"{'wid': None, 'ks': [3, 5, 3, 7, 3, 5, 7, 5, 3, 3, 3, 7, 3, 5, 3, 5, 3, 3, 7, 3], 'e': [4, 6, 3, 3, 6, 3, 3, 6, 6, 4, 4, 6, 3, 4, 3, 6, 3, 6, 3, 4], 'd': [4, 4, 2, 2, 4], 'r': [224]}",2022-07-07 03:18:47.301137,41,410969240,76.79 -"{'wid': None, 'ks': [3, 3, 3, 3, 7, 5, 3, 5, 3, 5, 5, 7, 7, 7, 3, 5, 7, 5, 3, 7], 'e': [3, 6, 6, 4, 6, 3, 3, 4, 3, 6, 3, 4, 4, 6, 3, 6, 4, 3, 6, 3], 'd': [2, 3, 4, 4, 2], 'r': [224]}",2022-07-07 03:20:35.391443,40,405868672,77.338 -"{'wid': None, 'ks': [3, 3, 3, 7, 5, 7, 7, 3, 3, 3, 3, 5, 7, 3, 7, 5, 3, 7, 5, 5], 'e': [4, 6, 3, 6, 4, 3, 3, 6, 3, 6, 4, 6, 4, 4, 3, 6, 4, 3, 4, 4], 'd': [3, 4, 4, 2, 2], 'r': [224]}",2022-07-07 03:22:14.504855,37,370501152,76.448 -"{'wid': None, 'ks': [7, 5, 3, 5, 7, 5, 3, 3, 5, 3, 3, 7, 7, 3, 5, 3, 3, 5, 5, 7], 'e': [3, 3, 4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 4, 3, 6, 3, 3, 3, 4], 'd': [4, 4, 3, 4, 2], 'r': [224]}",2022-07-07 03:24:12.500905,48,482299704,77.7 -"{'wid': None, 'ks': [7, 3, 5, 7, 5, 5, 7, 5, 3, 3, 3, 5, 5, 3, 7, 5, 5, 7, 3, 7], 'e': [3, 6, 4, 6, 6, 3, 3, 3, 6, 3, 6, 4, 4, 6, 4, 4, 4, 4, 6, 6], 'd': [4, 4, 2, 2, 2], 'r': [224]}",2022-07-07 03:25:50.198665,42,423721952,76.506 -"{'wid': None, 'ks': [7, 7, 3, 7, 5, 7, 5, 5, 5, 3, 5, 3, 3, 7, 3, 5, 3, 7, 7, 3], 'e': [3, 3, 3, 4, 4, 3, 4, 4, 4, 4, 4, 6, 6, 4, 3, 3, 3, 6, 3, 4], 'd': [4, 2, 2, 3, 3], 'r': [224]}",2022-07-07 03:27:26.901886,37,373770104,77.258 -"{'wid': None, 'ks': [3, 7, 5, 5, 7, 3, 5, 3, 5, 5, 5, 3, 5, 5, 3, 5, 7, 3, 7, 5], 'e': [3, 4, 6, 6, 4, 3, 6, 6, 6, 3, 3, 3, 3, 6, 3, 6, 6, 3, 6, 3], 'd': [3, 2, 3, 2, 3], 'r': [224]}",2022-07-07 03:29:00.989578,36,369186480,77.096 -"{'wid': None, 'ks': [7, 7, 5, 5, 7, 5, 3, 3, 3, 5, 7, 3, 7, 7, 5, 5, 3, 7, 3, 7], 'e': [6, 3, 6, 3, 4, 3, 3, 3, 4, 3, 6, 4, 3, 3, 6, 4, 4, 3, 4, 3], 'd': [4, 4, 3, 4, 4], 'r': [224]}",2022-07-07 03:31:07.608402,51,518341312,78.104 - """ - with open("search_results.csv", "w", encoding="utf-8") as f: - f.write(results_csv) - - -def model_builder(model_arch_params): - channels = model_arch_params["channels"] - dimensions = model_arch_params["dimensions"] - return ConvNet(channels, dimensions) - - -class ConvNet(torch.nn.Module): - def __init__(self, channels, dimensions): - super().__init__() - self.conv = torch.nn.Conv2d(3, channels, (3, 3), padding=1) - self.avg_pooling = torch.nn.AvgPool2d((64, 64)) - self.dense = torch.nn.Linear(channels, dimensions) - self.out = 
torch.nn.Linear(dimensions, 1) - self.activation = torch.nn.Sigmoid() - - def forward(self, inputs): - outputs = self.conv(inputs) - outputs = self.avg_pooling(outputs).squeeze() - outputs = self.dense(outputs) - outputs = self.out(outputs) - outputs = self.activation(outputs) - return outputs - - -class TestNAS(unittest.TestCase): - @classmethod - def setUpClass(cls): - build_fake_yaml() - build_dynas_fake_yaml() - build_dynas_results_csv() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - os.remove("dynas_fake.yaml") - os.remove("search_results.csv") - shutil.rmtree(os.path.join(os.getcwd(), "NASResults"), ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_basic_nas(self): - # Built-in train, evaluation - nas_agent = NAS("fake.yaml") - nas_agent.model_builder = lambda model_arch_params: common.Model(model_builder(model_arch_params)) - best_model_archs = nas_agent() - self.assertTrue(len(best_model_archs) > 0) - - # Customized train, evaluation - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(32, 3, 64, 64), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - def train_func(model): - epochs = 2 - iters = 10 - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - for image, target in dummy_dataloader: - print(".", end="") - cnt += 1 - output = model(image).unsqueeze(dim=0) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - if cnt >= iters: - break - - def eval_func(model): - model.eval() - acc = 0 - for image, target in dummy_dataloader: - output = model(image).cpu().detach().numpy() - acc += np.sum(output == target) - return {"acc": acc / len(dummy_dataset)} - - for approach, search_algorithm in [(None, None), ("basic", "grid"), ("basic", "random"), ("basic", "bo")]: - print("{fix}Search algorithm: {msg}{fix}".format(msg=search_algorithm, fix="=" * 30)) - search_space = {"channels": [16, 32], "dimensions": [32]} - nas_config = NASConfig(approach=approach, search_space=search_space, search_algorithm=search_algorithm) - nas_config.usr_cfg.model.framework = "pytorch" - nas_agent = NAS(nas_config) - nas_agent.model_builder = model_builder - nas_agent.train_func = train_func - nas_agent.eval_func = eval_func - best_model_archs = nas_agent() - self.assertTrue(len(best_model_archs) > 0) - - def test_dynas(self): - nas_agent = NAS("dynas_fake.yaml") - for search_algorithm, supernet in [ - ("nsga2", "ofa_mbv3_d234_e346_k357_w1.2"), - ("age", "ofa_mbv3_d234_e346_k357_w1.2"), - ]: - config = NASConfig(approach="dynas", search_algorithm=search_algorithm) - config.dynas.supernet = supernet - config.dynas.metrics = ["params", "latency"] - config.dynas.population = 10 - config.dynas.num_evals = 10 - config.nas.search.seed = 71 - config.dynas.batch_size = 1 - config.dynas.results_csv_path = "search_results.csv" - nas_agent = NAS(config) - best_model_archs = nas_agent.search() - self.assertTrue(len(best_model_archs) == config.dynas.population) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/neural_coder/test_common.py b/test/neural_coder/test_common.py deleted file mode 100644 index 72142aa18e5..00000000000 --- a/test/neural_coder/test_common.py +++ /dev/null @@ -1,20 +0,0 @@ -import unittest - -from neural_coder.utils import common - - -class TestCommon(unittest.TestCase): - def test_move_element_to_front(self): - f = 
common.move_element_to_front - self.assertEqual(f([1, 2, 3, 4], 0), [1, 2, 3, 4]) - self.assertEqual(f([1, 2, 3, 4], 1), [1, 2, 3, 4]) - self.assertEqual(f([1, 2, 3, 4], 2), [2, 1, 3, 4]) - self.assertEqual(f([1, 2, 3, 4], 3), [3, 1, 2, 4]) - self.assertEqual(f([1, 2, 3, 4], 4), [4, 1, 2, 3]) - self.assertEqual(f([1, 2, 3, 4], "a"), [1, 2, 3, 4]) - self.assertEqual(f(["a", "b", "c", "d"], "d"), ["d", "a", "b", "c"]) - self.assertEqual(f(["ab", "a", "ac", "ad"], "a"), ["a", "ab", "ac", "ad"]) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/neural_coder/test_line_operation.py b/test/neural_coder/test_line_operation.py deleted file mode 100644 index 822621bb39d..00000000000 --- a/test/neural_coder/test_line_operation.py +++ /dev/null @@ -1,46 +0,0 @@ -import unittest - -from neural_coder.utils import line_operation - - -class TestLineOperation(unittest.TestCase): - def test_get_line_indent_level(self): - f = line_operation.get_line_indent_level - self.assertEqual(f(" model(input)"), 4) - self.assertEqual(f(" model(input)"), 8) - self.assertEqual(f("model(input)"), 0) - self.assertEqual(f("# model(input)"), 0) - - def test_single_line_comment_or_empty_line_detection(self): - f = line_operation.single_line_comment_or_empty_line_detection - self.assertEqual(f("# test"), True) - self.assertEqual(f("test # test"), False) - self.assertEqual(f(" "), True) - self.assertEqual(f(" test"), False) - self.assertEqual(f('"""test"""'), True) - - def test_is_eval_func_model_name(self): - f = line_operation.is_eval_func_model_name - self.assertEqual(f("model", "model(input)")[0], True) - self.assertEqual(f("model", "model()")[0], True) - self.assertEqual(f("model", "# model(input)")[0], False) - self.assertEqual(f("model", "test # model(input)")[0], False) - self.assertEqual(f("model", "output = model(input)")[0], True) - self.assertEqual(f("model", "model = Net()")[0], False) - - def test_get_line_left_hand_side(self): - f = line_operation.get_line_left_hand_side - self.assertEqual(f("output = model(input)"), "output") - self.assertEqual(f("output=model(input)"), "output") - self.assertEqual(f("test = num"), "test") - - def test_of_definition_format(self): - f = line_operation.of_definition_format - self.assertEqual(f("output = model(input)"), (True, "output", "model")) - self.assertEqual(f("output=model(input)"), (True, "output", "model")) - self.assertEqual(f("model = Net()"), (True, "model", "Net")) - self.assertEqual(f("model = Net"), (False, "", "")) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/objective/test_objective.py b/test/objective/test_objective.py index 3f926cf862b..c1171031379 100644 --- a/test/objective/test_objective.py +++ b/test/objective/test_objective.py @@ -1,367 +1,9 @@ """Tests for neural_compressor quantization.""" -import importlib -import os -import random -import shutil import unittest -import numpy as np -import yaml - - -def build_fake_yaml_footprint(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - performance: {} - tuning: - objective: footprint - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_footprint.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_model_size(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: 
op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - performance: {} - tuning: - objective: modelsize - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_model_size.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - performance: {} - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session(graph=graph) as sess: - x = tf.placeholder(tf.float64, shape=(1, 256, 256, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - import tensorflow as tf - - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session(graph=graph) as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 256, 256, 1), name="x") - y = tf.compat.v1.constant(np.random.random((3, 3, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def build_fake_model1(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session(graph=graph) as sess: - x = tf.placeholder(tf.float64, shape=(1, 256, 256, 1), name="x") - y_1 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_1") - y_2 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_2") - conv1 = tf.nn.conv2d(input=x, filter=y_1, strides=[1, 1, 1, 1], padding="VALID", name="conv1") - op = tf.nn.conv2d(input=conv1, filter=y_2, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - import tensorflow as tf - - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session(graph=graph) as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 256, 256, 1), name="x") - y_1 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_1") - y_2 = tf.constant(np.random.random((3, 3, 1, 1)), name="y_2") - conv1 = tf.nn.conv2d(input=x, filters=y_1, strides=[1, 1, 1, 1], padding="VALID", name="conv1") - op = tf.nn.conv2d(input=conv1, filters=y_2, 
strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def build_fake_strategy(): - with open( - os.path.join( - os.path.dirname(importlib.util.find_spec("neural_compressor").origin), "experimental/strategy/fake.py" - ), - "w", - encoding="utf-8", - ) as f: - seq = [ - "import time \n", - "import copy \n", - "import numpy as np \n", - "from collections import OrderedDict \n", - "from .strategy import strategy_registry, TuneStrategy \n", - "from ...utils import logger \n", - "from .utils.tuning_sampler import OpTypeWiseTuningSampler, FallbackTuningSampler \n", - "from .utils.tuning_structs import OpTuningConfig \n", - "import copy \n", - "@strategy_registry \n", - "class FakeTuneStrategy(TuneStrategy): \n", - " def __init__(self, model, cfg, q_dataloader, q_func=None, eval_dataloader=None, \n", - " eval_func=None, dicts=None, q_hooks=None): \n", - " self.id = 0 \n", - " self.resume = True if dicts else False \n", - " super(FakeTuneStrategy, self).__init__(model, cfg, q_dataloader, \n", - " q_func, eval_dataloader, eval_func, dicts) \n", - " def __getstate__(self): \n", - " for history in self.tuning_history: \n", - " if self._same_yaml(history['cfg'], self.cfg): \n", - " history['id'] = self.id \n", - " save_dict = super(FakeTuneStrategy, self).__getstate__() \n", - " return save_dict \n", - " def next_tune_cfg(self): \n", - " if self.resume: \n", - " #assert self.id == 1 \n", - " assert len(self.tuning_history) == 1 \n", - " history = self.tuning_history[0] \n", - " assert self._same_yaml(history['cfg'], self.cfg) \n", - " assert len(history['history']) \n", - " for h in history['history']: \n", - " assert h \n", - " from copy import deepcopy \n", - " tuning_space = self.tuning_space \n", - " initial_op_tuning_cfg = {} \n", - " for item in tuning_space.root_item.options: \n", - " if item.item_type == 'op': \n", - " op_name, op_type = item.name \n", - " initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, 'fp32', tuning_space) \n", - " calib_sampling_size_lst = tuning_space.root_item.get_option_by_name('calib_sampling_size').options \n", - " for calib_sampling_size in calib_sampling_size_lst: \n", - " # step1. collect the ops that support static and dynamic \n", - " quant_mode_wise_items = OrderedDict() \n", - " query_order = ['static', 'dynamic', 'bf16', 'fp16', 'fp32'] \n", - " pre_items = set() \n", - " for quant_mode in query_order: \n", - " items = tuning_space.query_items_by_quant_mode(quant_mode) \n", - " filtered_items = [item for item in items if item not in pre_items] \n", - " pre_items = pre_items.union(set(items)) \n", - " quant_mode_wise_items[quant_mode] = filtered_items \n", - " def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict): \n", - " for item in items_lst: \n", - " op_item_dtype_dict[item.name] = target_quant_mode \n", - " op_item_dtype_dict = OrderedDict() \n", - " for quant_mode, quant_mode_items in quant_mode_wise_items.items(): \n", - " initial_op_quant_mode(quant_mode_items, quant_mode, op_item_dtype_dict) \n", - " # step3. 
optype-wise tuning tuning items: the algorithm/scheme/granularity of activation(weight) \n", - " early_stop_tuning = False \n", - " stage1_cnt = 0 \n", - " int8_ops = quant_mode_wise_items['dynamic'] + quant_mode_wise_items['static'] \n", - " stage1_max = min(5, len(int8_ops)) # TODO set a more appropriate value \n", - " op_wise_tuning_sampler = OpTypeWiseTuningSampler(tuning_space, [], [], \n", - " op_item_dtype_dict, initial_op_tuning_cfg) \n", - " for op_tuning_cfg in op_wise_tuning_sampler: \n", - " stage1_cnt += 1 \n", - " if early_stop_tuning and stage1_cnt > stage1_max: \n", - " logger.info('Early stopping the stage 1.') \n", - " break \n", - " op_tuning_cfg['calib_sampling_size'] = calib_sampling_size \n", - " self.id += 1 \n", - " yield op_tuning_cfg \n", - ] - f.writelines(seq) - f.close() - - -class TestObjective(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - self.constant_graph_1 = build_fake_model1() - build_fake_yaml() - build_fake_yaml_footprint() - build_fake_yaml_model_size() - build_fake_strategy() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml_model_size.yaml") - os.remove("fake_yaml_footprint.yaml") - os.remove( - os.path.join( - os.path.dirname(importlib.util.find_spec("neural_compressor").origin), "experimental/strategy/fake.py" - ) - ) - shutil.rmtree("./saved", ignore_errors=True) - - def test_performance(self): - from neural_compressor.data import Datasets - - dataset = Datasets("tensorflow")["dummy"]((100, 256, 256, 1), label=True) - - from neural_compressor.experimental import Quantization, common - from neural_compressor.model import tensorflow_model - - quantizer = Quantization("fake_yaml.yaml") - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - q_model = quantizer.fit() - self.assertTrue(isinstance(q_model, tensorflow_model.TensorflowBaseModel)) - - from neural_compressor.experimental import Benchmark, common - - benchmarker = Benchmark("fake_yaml.yaml") - benchmarker.b_dataloader = common.DataLoader(dataset) - benchmarker.model = self.constant_graph_1 - benchmarker.fit(mode="accuracy") - - def test_model_size(self): - from neural_compressor.data import Datasets - from neural_compressor.experimental import Benchmark, common - - dataset = Datasets("tensorflow")["dummy"]((100, 256, 256, 1), label=True) - - benchmarker = Benchmark("fake_yaml_model_size.yaml") - benchmarker.b_dataloader = common.DataLoader(dataset) - benchmarker.model = self.constant_graph_1 - benchmarker(mode="accuracy") - - def test_footprint(self): - from neural_compressor.data import Datasets - from neural_compressor.experimental import Benchmark, common - - dataset = Datasets("tensorflow")["dummy"]((100, 256, 256, 1), label=True) - - benchmarker = Benchmark("fake_yaml_footprint.yaml") - benchmarker.b_dataloader = common.DataLoader(dataset) - benchmarker.model = self.constant_graph_1 - benchmarker.fit(mode="accuracy") - - -def build_matmul_model(): - from onnx import TensorProto, helper - - A = helper.make_tensor_value_info("A", TensorProto.FLOAT, [1, 1, 5, 5]) - B = helper.make_tensor_value_info("B", TensorProto.FLOAT, [1, 1, 5, 1]) - C = helper.make_tensor_value_info("C", TensorProto.FLOAT, [1, 1, 5, 1]) - matmul_node = helper.make_node("MatMul", ["A", "B"], ["C"], name="Matmul") - graph = helper.make_graph([matmul_node], "test_graph_1", [A, B], [C]) - model = 
helper.make_model(graph) - model = helper.make_model(graph, **{"opset_imports": [helper.make_opsetid("", 13)]}) - return model - class TestObjs(unittest.TestCase): - def test_model(self): - def eval(model): - return random.random() - - model = build_matmul_model() - - from neural_compressor.conf.config import conf - from neural_compressor.experimental import Quantization - from neural_compressor.model import onnx_model - - conf.model.framework = "onnxrt_integerops" - conf.quantization.approach = "post_training_dynamic_quant" - conf.tuning.accuracy_criterion.absolute = 0.3 - conf.tuning.multi_objectives.objective = ["accuracy", "performance"] - conf.tuning.multi_objectives.weight = [0.8, 0.2] - conf.tuning.exit_policy.timeout = 10000 - conf.tuning.exit_policy.max_trials = 2 - quantize = Quantization(conf) - quantize.model = model - quantize.eval_func = eval - q_model = quantize() - self.assertTrue(isinstance(q_model, onnx_model.ONNXModel)) - self.assertTrue("quantize" in str(q_model.model.producer_name)) def test_tune_data(self): from neural_compressor.objective import MultiObjective diff --git a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_experimental.py b/test/pruning_with_pt/pruning_1.x_v1/test_pruning_experimental.py deleted file mode 100644 index 11bd2031344..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_experimental.py +++ /dev/null @@ -1,202 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning import Pruning # old API - - -def build_fake_yaml_basic(): - fake_snip_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression_pytorch: - initial_sparsity: 0.0 - target_sparsity: 0.9 - start_step: 0 - end_step: 10 - excluded_names: ["classifier"] - - update_frequency_on_step: 1 - sparsity_decay_type: "exp" - pruners: - - !Pruner - start_step: 0 - sparsity_decay_type: "cos" - end_step: 10 - prune_type: "magnitude" - names: ['layer1.*'] - extra_excluded_names: ['layer2.*'] - prune_domain: "global" - pattern: "tile_pattern_4x1" - - - !Pruner - start_step: 1 - end_step: 1 - target_sparsity: 0.5 - prune_type: "snip_momentum" - update_frequency: 2 - names: ['layer2.*'] - prune_domain: local - pattern: "tile_pattern_2:4" - - - !Pruner - start_step: 2 - end_step: 8 - target_sparsity: 0.8 - prune_type: "snip" - names: ['layer3.*'] - prune_domain: "local" - pattern: "tile_pattern_16x1" - sparsity_decay_type: "cube" - - """ - with open("fake_snip.yaml", "w", encoding="utf-8") as f: - f.write(fake_snip_yaml) - - -def build_fake_yaml_channel(): - fake_channel_pruning_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression_pytorch: - initial_sparsity: 0.0 - target_sparsity: 0.9 - start_step: 0 - end_step: 10 - excluded_names: ["classifier"] - - update_frequency_on_step: 1 - sparsity_decay_type: "exp" - pruners: - - !Pruner - start_step: 5 - end_step: 5 - prune_type: "pattern_lock" - names: ['layer1.*'] - extra_excluded_names: ['layer2.*'] - prune_domain: "global" - pattern: "channelx1" - - - !Pruner - start_step: 1 - end_step: 1 - target_sparsity: 0.5 - prune_type: "pattern_lock" - update_frequency: 2 - names: ['layer2.*'] - prune_domain: local - pattern: "2:4" - - - !Pruner - start_step: 2 - end_step: 8 - target_sparsity: 0.8 - 
prune_type: "snip" - names: ['layer3.*'] - prune_domain: "local" - pattern: "1xchannel" - sparsity_decay_type: "cube" - - """ - - with open("fake_channel_pruning.yaml", "w", encoding="utf-8") as f: - f.write(fake_channel_pruning_yaml) - - -class TestPytorchPruning(unittest.TestCase): - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml_basic() - build_fake_yaml_channel() - - @classmethod - def tearDownClass(cls): - os.remove("fake_channel_pruning.yaml") - os.remove("fake_snip.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_pytorch_pruning_basic(self): - prune = Pruning("fake_snip.yaml") - prune.update_items_for_all_pruners(start_step=1) - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.prepare() - prune.on_train_begin() - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - def test_pytorch_pruner_channel_pruning(self): - prune = Pruning("fake_channel_pruning.yaml") - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.prepare() - prune.on_train_begin() - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_gradient_sensitivity.py b/test/pruning_with_pt/pruning_1.x_v1/test_pruning_gradient_sensitivity.py deleted file mode 100644 index 05ab9b1e918..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_gradient_sensitivity.py +++ /dev/null @@ -1,258 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: gradient_sensitivity_prune - framework: pytorch - pruning: - approach: - weight_compression: - start_epoch: 0 - end_epoch: 1 - pruners: - - !Pruner - start_epoch: 0 - end_epoch: 1 - prune_type: gradient_sensitivity - update_frequency: 1 - names: [ - 'bert.encoder.layer.*.attention.self.query.weight', - 
'bert.encoder.layer.*.attention.self.query.bias', - 'bert.encoder.layer.*.attention.self.key.weight', - 'bert.encoder.layer.*.attention.self.key.bias', - 'bert.encoder.layer.*.attention.self.value.weight', - 'bert.encoder.layer.*.attention.self.value.bias', - ] - parameters: { - target: 8, - normalize: True, - stride: 64, - transpose: False, - importance_inputs: ['head_mask'], - importance_metric: abs_gradient, - } - - - !Pruner - start_epoch: 0 - end_epoch: 1 - prune_type: gradient_sensitivity - update_frequency: 1 - names: [ - 'bert.encoder.layer.*.attention.output.dense.weight', - ] - parameters: { - target: 8, - normalize: True, - stride: 64, - transpose: True, - importance_inputs: ['head_mask'], - importance_metric: abs_gradient, - } - - - !Pruner - prune_type: gradient_sensitivity - names: [ - 'bert.encoder.layer.*.intermediate.dense.weight', - 'bert.encoder.layer.*.intermediate.dense.bias', - ] - parameters: { - target: 600, - normalize: False, - stride: 1, - transpose: False, - importance_inputs: [ - 'bert.encoder.layer.*.intermediate.dense.weight', - 'bert.encoder.layer.*.intermediate.dense.bias', - ], - importance_metric: 'weighted_gradient', - } - - - !Pruner - prune_type: gradient_sensitivity - names: [ - 'bert.encoder.layer.*.output.dense.weight', - ] - parameters: { - target: 600, - normalize: False, - stride: 1, - transpose: True, - importance_inputs: [ - 'bert.encoder.layer.*.intermediate.dense.weight', - 'bert.encoder.layer.*.intermediate.dense.bias', - ], - importance_metric: 'weighted_gradient', - } - - tuning: - accuracy_criterion: - relative: 0.1 # only verifying workflow, accuracy loss percentage: 10% - exit_policy: - timeout: 0 # tuning timeout (seconds) - random_seed: 9527 # random seed - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml_unstructured(): - fake_yaml_unstructured = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression: - initial_sparsity: 0.0 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - target_sparsity: 0.8 - prune_type: gradient_sensitivity - names: ['layer1.0.conv1.weight'] - - - !Pruner - target_sparsity: 0.6 - prune_type: basic_magnitude - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake_unstructured.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml_unstructured) - - -class TestGradientSensitivity(unittest.TestCase): - @classmethod - def setUpClass(cls): - build_fake_yaml() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_gradient_sensitivity(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake.yaml") - - from transformers import BertForSequenceClassification - - model = BertForSequenceClassification.from_pretrained("bert-base-uncased") - - def training_func_for_nc(model): - inputs = { - "input_ids": torch.rand([1, 12]).long(), - "attention_mask": torch.rand([1, 12]).long(), - "labels": torch.tensor([1]).long(), - } - model.eval() - - # To calculate head prune - prune.on_epoch_begin(0) - head_mask = torch.ones(model.config.num_hidden_layers, model.config.num_attention_heads) - head_mask.requires_grad_(requires_grad=True) - - outputs = model(output_attentions=True, **inputs, head_mask=head_mask) - tmp_eval_loss, logits = outputs[:2] - tmp_eval_loss.backward() - 
prune.on_step_end() - prune.on_epoch_end() - - def eval_func_for_nc(model): - pass - - prune.model = model - prune.pruning_func = training_func_for_nc - prune.eval_func = eval_func_for_nc - _ = prune() - for bertlayer in model.bert.encoder.layer: - self.assertEqual(bertlayer.attention.self.query.weight.shape, (512, 768)) - self.assertEqual(bertlayer.attention.self.key.weight.shape, (512, 768)) - self.assertEqual(bertlayer.attention.self.value.weight.shape, (512, 768)) - self.assertEqual(bertlayer.attention.output.dense.weight.shape, (768, 512)) - self.assertEqual(bertlayer.intermediate.dense.weight.shape, (600, 768)) - self.assertEqual(bertlayer.output.dense.weight.shape, (768, 600)) - - -class TestGradientSensitivityUnstructured(unittest.TestCase): - cv_model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml_unstructured() - - @classmethod - def tearDownClass(cls): - os.remove("fake_unstructured.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_unstructured_pruning(self): - from neural_compressor.experimental import Pruning, common - - prune_cv = Pruning("fake_unstructured.yaml") - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(100, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - def training_func_for_cv(model): - epochs = 5 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - prune_cv.on_train_begin() - for nepoch in range(epochs): - model.train() - cnt = 0 - prune_cv.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - prune_cv.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - prune_cv.on_step_end() - if cnt >= iters: - break - prune_cv.on_epoch_end() - prune_cv.on_train_end() - - prune_cv.model = self.cv_model - prune_cv.pruning_func = training_func_for_cv - prune_cv.eval_dataloader = dummy_dataloader - prune_cv.train_dataloader = dummy_dataloader - _ = prune_cv() - - # assert sparsity ratio - conv1_weight = self.cv_model.layer1[0].conv1.weight - conv2_weight = self.cv_model.layer1[0].conv2.weight - self.assertAlmostEqual((conv1_weight == 0).sum().item() / conv1_weight.numel(), 0.8, delta=0.01) - self.assertAlmostEqual((conv2_weight == 0).sum().item() / conv2_weight.numel(), 0.48, delta=0.01) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_group_lasso.py b/test/pruning_with_pt/pruning_1.x_v1/test_pruning_group_lasso.py deleted file mode 100644 index 9a8325005c5..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_group_lasso.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - train: - start_epoch: 0 - end_epoch: 4 - iteration: 10 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - 
initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: group_lasso - names: ['layer1.0.conv1.weight'] - parameters: { - alpha: 0.006, - pattern: tile_pattern_1x16 - } - - - !Pruner - target_sparsity: 0.6 - prune_type: group_lasso - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - parameters: { - alpha: 0.006, - pattern: tile_pattern_1x16 - } - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class TestPruningGroupLasso(unittest.TestCase): - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_pruning_internal(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake.yaml") - - prune.model = self.model - _ = prune() - - # assert sparsity ratio - conv1_weight = self.model.layer1[0].conv1.weight - conv2_weight = self.model.layer1[0].conv2.weight - self.assertAlmostEqual((conv1_weight == 0).sum().item() / conv1_weight.numel(), 0.97, delta=0.01) - self.assertAlmostEqual((conv2_weight == 0).sum().item() / conv2_weight.numel(), 0.48, delta=0.01) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pattern.py b/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pattern.py deleted file mode 100644 index 0b4f0ff73b6..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pattern.py +++ /dev/null @@ -1,44 +0,0 @@ -import copy -import random -import unittest - -import numpy as np - -from neural_compressor.experimental.pruning_recipes.patterns import patterns - - -class TestPruningPattern(unittest.TestCase): - tensor_4d = np.random.random([560, 560, 3, 3]) - tensor_2d = np.random.random([1280, 640]) - - def test_tile_pattern(self): - for tensor in [self.tensor_2d, self.tensor_4d]: - shape = list(tensor.shape) - size = tensor.size - - for mask_shape in [(1, 1), (2, 2), (1, 16), (4, 1), (1, 2)]: - m0 = mask_shape[0] - m1 = mask_shape[1] - pattern = patterns["tile_pattern_{}x{}".format(m0, m1)]() - new_shape = [shape[0] / m0] + [size // shape[0] / m1] - sparse_tensor = self.sparsify_tensor(tensor, [m0, m1], 0.2) - reduced_tensor = pattern.reduce(sparse_tensor) - self.assertEqual(list(reduced_tensor.shape), new_shape) - self.assertAlmostEqual(pattern.compute_sparsity(sparse_tensor), 0.2, delta=0.01) - mask = reduced_tensor == 0 - repeat_mask = pattern.repeat_mask(mask, ori_shape=tensor.shape) - self.assertEqual(repeat_mask.shape, tensor.shape) - - def sparsify_tensor(self, tensor, mask_shape, ratio): - tensor = copy.deepcopy(tensor) - for i in range(tensor.shape[0] // mask_shape[0]): - for j in range(tensor.shape[1] // mask_shape[1]): - if random.random() < ratio: - tensor[ - i * mask_shape[0] : (i + 1) * mask_shape[0], j * mask_shape[1] : (j + 1) * mask_shape[1], ... 
- ] = 0 - return tensor - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pattern_lock.py b/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pattern_lock.py deleted file mode 100644 index 8b00cbf7bb3..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pattern_lock.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.data.datasets.dummy_dataset import DummyDataset - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression: - start_epoch: 0 - pruners: - - !Pruner - prune_type: pattern_lock - names: ['layer1.0.conv1.weight'] - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class TestPatternLock(unittest.TestCase): - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_pattern_lock(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake.yaml") - - weight = self.model.layer1[0].conv1.weight - mask = torch.ones(weight.numel()) - mask[: round(weight.numel() * 0.9)] = 0.0 - mask = mask[torch.randperm(mask.numel())].view(weight.shape) - weight.data = weight * mask - - self.assertTrue(self.model.layer1[0].conv1.weight.ne(0).eq(mask).all()) - - dummy_dataset = DummyDataset([tuple([100, 3, 256, 256])]) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - def training_func_for_nc(model): - epochs = 2 - iters = 30 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - prune.on_epoch_begin(nepoch) - for i, (image, target) in enumerate(dummy_dataloader): - prune.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - prune.on_step_end() - if cnt >= iters: - break - prune.on_epoch_end() - - dummy_dataset = DummyDataset(tuple([100, 3, 256, 256]), label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - prune.model = self.model - prune.pruning_func = training_func_for_nc - prune.eval_dataloader = dummy_dataloader - prune.train_dataloader = dummy_dataloader - _ = prune() - self.assertTrue(self.model.layer1[0].conv1.weight.ne(0).eq(mask).all()) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pure_yaml.py b/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pure_yaml.py deleted file mode 100644 index 89a22b8865b..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v1/test_pruning_pure_yaml.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - train: - start_epoch: 0 - end_epoch: 
4 - iteration: 10 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - target_sparsity: 0.6 - prune_type: gradient_sensitivity - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 30 - dataset: - dummy: - shape: [128, 3, 224, 224] - label: True - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class TestPruning(unittest.TestCase): - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_pruning_internal(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake.yaml") - - prune.model = self.model - _ = prune() - - # assert sparsity ratio - conv1_weight = self.model.layer1[0].conv1.weight - conv2_weight = self.model.layer1[0].conv2.weight - self.assertAlmostEqual((conv1_weight == 0).sum().item() / conv1_weight.numel(), 0.97, delta=0.01) - self.assertAlmostEqual((conv2_weight == 0).sum().item() / conv2_weight.numel(), 0.48, delta=0.01) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pruning.py b/test/pruning_with_pt/pruning_1.x_v2/test_pruning.py deleted file mode 100644 index 97b0773ab46..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pruning.py +++ /dev/null @@ -1,66 +0,0 @@ -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.conf.pythonic_config import Config, WeightPruningConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - - -class TestPruning(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_pruning_basic(self): - local_configs = [ - { - "op_names": ["layer1.*"], - "target_sparsity": 0.5, - "pattern": "8x2", - "pruning_type": "magnitude_progressive", - "false_key": "this is to test unsupported keys", - }, - {"op_names": ["layer2.*"], "target_sparsity": 0.5, "pattern": "2:4"}, - {"op_names": ["layer3.*"], "target_sparsity": 0.7, "pattern": "5x1", "pruning_type": "snip_progressive"}, - ] - conf = WeightPruningConfig(local_configs, target_sparsity=0.8) - config = Config(quantization=None, benchmark=None, pruning=conf, distillation=None) - prune = Pruning(config) - prune.update_config(start_step=1, end_step=10) - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.on_train_begin() - prune.update_config(pruning_frequency=4) - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in 
dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_config.py b/test/pruning_with_pt/pruning_1.x_v2/test_pruning_config.py deleted file mode 100644 index 7eb2874956b..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_config.py +++ /dev/null @@ -1,77 +0,0 @@ -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.conf.pythonic_config import Config, WeightPruningConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - - -class TestPytorchPruning(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_pruning_class_config(self): - local_configs = [ - { - "op_names": ["layer1.*", "layer2.*"], - "excluded_op_names": ["downsample.*"], - "target_sparsity": 0.6, - "pattern": "channelx1", - "pruning_type": "snip_progressive", - "pruning_scope": "local", - "start_step": 0, - "end_step": 10, - }, - {"op_names": ["layer3.*"], "pruning_type": "pattern_lock"}, - ] - conf = WeightPruningConfig( - local_configs, - pruning_frequency=2, - target_sparsity=0.8, - ) - config = Config(quantization=None, benchmark=None, pruning=conf, distillation=None) - prune = Pruning(config) - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(12, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.update_config(pruning_frequency=4) - prune.on_train_begin() - assert prune.pruners[0].config["pruning_frequency"] == 4 - assert prune.pruners[0].config["target_sparsity"] == 0.6 - assert prune.pruners[1].config["target_sparsity"] == 0.8 - assert prune.pruners[0].config["pattern"] == "channelx1" - assert prune.pruners[1].config["pruning_type"] == "pattern_lock" - - for epoch in range(1): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_criteria.py b/test/pruning_with_pt/pruning_1.x_v2/test_pruning_criteria.py deleted file mode 100644 index ed76f5d90bb..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_criteria.py +++ /dev/null @@ -1,83 +0,0 @@ -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.conf.pythonic_config import Config, WeightPruningConfig -from neural_compressor.data import Datasets -from 
neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - - -class TestPruningCriteria(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_pruning_criteria(self): - local_configs = [ - { - "op_names": ["layer1.*"], - "target_sparsity": 0.4, - "pattern": "8x2", - "pruning_type": "magnitude_progressive", - "pruning_scope": "local", - "sparsity_decay_type": "cube", - }, - { - "op_names": ["layer2.*"], - "target_sparsity": 0.45, - "pattern": "2:4", - "pruning_type": "snip", - "start_step": 6, - "end_step": 6, - }, - { - "op_names": ["layer3.*"], - "excluded_op_names": ["downsample.*"], - "target_sparsity": 0.7, - "pattern": "4x1", - "pruning_type": "snip_momentum_progressive", - "pruning_frequency": 4, - "min_sparsity_ratio_per_op": 0.5, - "max_sparsity_ratio_per_op": 0.8, - }, - ] - conf = WeightPruningConfig(local_configs, target_sparsity=0.8, sparsity_decay_type="cube") - config = Config(quantization=None, benchmark=None, pruning=conf, distillation=None) - prune = Pruning(config) - prune.update_config(start_step=1, end_step=10) - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.on_train_begin() - prune.update_config(pruning_frequency=4) - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_patterns.py b/test/pruning_with_pt/pruning_1.x_v2/test_pruning_patterns.py deleted file mode 100644 index b9db1bbb58f..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_patterns.py +++ /dev/null @@ -1,74 +0,0 @@ -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.conf.pythonic_config import Config, WeightPruningConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - - -class TestPruningPatterns(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_pruning_pattern(self): - local_configs = [ - {"op_names": ["layer1.*"], "target_sparsity": 0.5, "pattern": "5:8", "pruning_type": "magnitude"}, - {"op_names": ["layer2.*"], "pattern": "1xchannel", "pruning_scope": "global"}, - { - "start_step": 2, - "end_step": 20, - "op_names": ["layer3.*"], - "target_sparsity": 0.666666, - "pattern": "4x2", - "pruning_type": "snip_progressive", - "pruning_frequency": 5, - }, - ] - conf = WeightPruningConfig( - local_configs, - target_sparsity=0.8, - sparsity_decay_type="cos", - excluded_op_names=["downsample.*"], - pruning_scope="local", - min_sparsity_ratio_per_op=0.1, - ) - config = Config(quantization=None, benchmark=None, 
pruning=conf, distillation=None) - prune = Pruning(config) - prune.update_config(start_step=1, end_step=10) - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.on_train_begin() - for epoch in range(5): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_schedulers.py b/test/pruning_with_pt/pruning_1.x_v2/test_pruning_schedulers.py deleted file mode 100644 index 6c89e1511a0..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_schedulers.py +++ /dev/null @@ -1,85 +0,0 @@ -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.conf.pythonic_config import Config, WeightPruningConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - -local_schedulers_config = [ - { - "start_step": 0, - "end_step": 2, - "pruning_type": "magnitude", - "op_names": ["layer1.*"], - "excluded_op_names": ["layer2.*"], - "pruning_scope": "global", - "target_sparsity": 0.5, - "pattern": "4x1", - }, - { - "start_step": 1, - "end_step": 10, - "pruning_type": "snip_momentum", - "pruning_frequency": 2, - "op_names": ["layer2.*"], - "pruning_scope": "local", - "target_sparsity": 0.75, - "pattern": "32x1", - "sparsity_decay_type": "exp", - }, -] - -fake_snip_config = WeightPruningConfig( - local_schedulers_config, - target_sparsity=0.9, - start_step=0, - end_step=10, - pruning_frequency=1, - sparsity_decay_type="exp", -) - - -class TestPruningCriteria(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_pruning_schedulers(self): - config = Config(quantization=None, benchmark=None, pruning=fake_snip_config, distillation=None) - prune = Pruning(config) - prune.update_config(start_step=1) - prune.model = self.model - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - prune.on_train_begin() - prune.update_config(pruning_frequency=1) - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - 
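Note: the pruning_1.x_v2 tests deleted above (test_pruning.py, test_pruning_config.py, test_pruning_criteria.py, test_pruning_patterns.py, test_pruning_schedulers.py) all drive the pruner through the same callback sequence: on_train_begin, on_epoch_begin, on_step_begin, on_before_optimizer_step, on_after_optimizer_step, on_step_end, on_epoch_end, on_train_end. For reference, a minimal sketch of that loop against the retained 2.x entry points (prepare_compression and WeightPruningConfig, as exercised by test/pruning_with_pt/pruning_2.x/test_auto_slim.py further down in this diff) is given below; the neural_compressor.config / neural_compressor.training import paths and the random stand-in data are assumptions for illustration, not part of this patch.

import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader, TensorDataset

# Assumed 2.x import paths (not taken from this diff).
from neural_compressor.config import WeightPruningConfig
from neural_compressor.training import prepare_compression

model = torchvision.models.resnet18()

# One local pruner entry plus global defaults, mirroring the deleted 1.x_v2 tests.
config = WeightPruningConfig(
    [{"op_names": ["layer1.*"], "target_sparsity": 0.5, "pattern": "4x1"}],
    target_sparsity=0.8,
    start_step=1,
    end_step=10,
)
compression_manager = prepare_compression(model=model, confs=config)
callbacks = compression_manager.callbacks

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

# Random stand-in data; the deleted tests used the "dummy" dataset from neural_compressor.data.
loader = DataLoader(
    TensorDataset(torch.rand(10, 3, 224, 224), torch.randint(0, 1000, (10,))),
    batch_size=2,
)

# Same callback sequence the deleted tests exercised, now on the compression manager.
callbacks.on_train_begin()
for epoch in range(2):
    model.train()
    callbacks.on_epoch_begin(epoch)
    for step, (image, target) in enumerate(loader):
        callbacks.on_step_begin(step)
        loss = criterion(model(image), target)
        optimizer.zero_grad()
        loss.backward()
        callbacks.on_before_optimizer_step()
        optimizer.step()
        callbacks.on_after_optimizer_step()
        callbacks.on_step_end()
    callbacks.on_epoch_end()
callbacks.on_train_end()

The model is pruned in place, so the same kind of per-layer sparsity check the deleted tests performed on the weights still applies after on_train_end.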
-if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_types.py b/test/pruning_with_pt/pruning_1.x_v2/test_pruning_types.py deleted file mode 100644 index 4dd3f2518d3..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pruning_types.py +++ /dev/null @@ -1,88 +0,0 @@ -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.conf.pythonic_config import Config, WeightPruningConfig -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pruning_v2 import Pruning - -local_types_config = [ - { - "start_step": 0, - "end_step": 0, - "pruning_type": "pattern_lock", - "op_names": ["layer1.*"], - "excluded_op_names": ["layer2.*"], - "pruning_scope": "global", - }, - { - "start_step": 1, - "end_step": 1, - "target_sparsity": 0.5, - "pruning_type": "snip_momentum_progressive", - "pruning_frequency": 2, - "op_names": ["layer2.*"], - "pruning_scope": "local", - "pattern": "4x1", - "sparsity_decay_type": "exp", - }, - { - "start_step": 2, - "end_step": 8, - "target_sparsity": 0.8, - "pruning_type": "snip_progressive", - "pruning_frequency": 1, - "op_names": ["layer3.*"], - "pruning_scope": "local", - "pattern": "16x1", - "sparsity_decay_type": "cube", - }, -] - -fake_snip_config = WeightPruningConfig( - local_types_config, target_sparsity=0.9, start_step=0, end_step=10, pruning_frequency=3, sparsity_decay_type="exp" -) - - -class TestPruningTypes(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_pruning_types(self): - config = Config(quantization=None, benchmark=None, pruning=fake_snip_config, distillation=None) - prune = Pruning(config) - prune.model = self.model - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - prune.on_train_begin() - prune.update_config(pruning_frequency=1) - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_1.x_v2/test_pytorch_pruning_experimental.py b/test/pruning_with_pt/pruning_1.x_v2/test_pytorch_pruning_experimental.py deleted file mode 100644 index 1e7a71da386..00000000000 --- a/test/pruning_with_pt/pruning_1.x_v2/test_pytorch_pruning_experimental.py +++ /dev/null @@ -1,202 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.pytorch_pruner.pruning import Pruning - - -def build_fake_yaml_basic(): - fake_snip_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - 
weight_compression_pytorch: - initial_sparsity: 0.0 - target_sparsity: 0.9 - start_step: 0 - end_step: 10 - excluded_names: ["classifier"] - - update_frequency_on_step: 1 - sparsity_decay_type: "exp" - pruners: - - !Pruner - start_step: 0 - sparsity_decay_type: "cos" - end_step: 10 - prune_type: "magnitude" - names: ['layer1.*'] - extra_excluded_names: ['layer2.*'] - prune_domain: "global" - pattern: "tile_pattern_4x1" - - - !Pruner - start_step: 1 - end_step: 1 - target_sparsity: 0.5 - prune_type: "snip_momentum" - update_frequency: 2 - names: ['layer2.*'] - prune_domain: local - pattern: "tile_pattern_2:4" - - - !Pruner - start_step: 2 - end_step: 8 - target_sparsity: 0.8 - prune_type: "snip" - names: ['layer3.*'] - prune_domain: "local" - pattern: "tile_pattern_16x1" - sparsity_decay_type: "cube" - - """ - with open("fake_snip.yaml", "w", encoding="utf-8") as f: - f.write(fake_snip_yaml) - - -def build_fake_yaml_channel(): - fake_channel_pruning_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - approach: - weight_compression_pytorch: - initial_sparsity: 0.0 - target_sparsity: 0.9 - start_step: 0 - end_step: 10 - excluded_names: ["classifier"] - - update_frequency_on_step: 1 - sparsity_decay_type: "exp" - pruners: - - !Pruner - start_step: 5 - end_step: 5 - prune_type: "pattern_lock" - names: ['layer1.*'] - extra_excluded_names: ['layer2.*'] - prune_domain: "global" - pattern: "channelx1" - - - !Pruner - start_step: 1 - end_step: 1 - target_sparsity: 0.5 - prune_type: "pattern_lock" - update_frequency: 2 - names: ['layer2.*'] - prune_domain: local - pattern: "2:4" - - - !Pruner - start_step: 2 - end_step: 8 - target_sparsity: 0.8 - prune_type: "snip" - names: ['layer3.*'] - prune_domain: "local" - pattern: "1xchannel" - sparsity_decay_type: "cube" - - """ - - with open("fake_channel_pruning.yaml", "w", encoding="utf-8") as f: - f.write(fake_channel_pruning_yaml) - - -class TestPytorchPruning(unittest.TestCase): - model = torchvision.models.resnet18() - - @classmethod - def setUpClass(cls): - build_fake_yaml_basic() - build_fake_yaml_channel() - - @classmethod - def tearDownClass(cls): - os.remove("fake_channel_pruning.yaml") - os.remove("fake_snip.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def test_pytorch_pruning_basic(self): - prune = Pruning("fake_snip.yaml") - prune.update_items_for_all_pruners(start_step=1) - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.prepare() - prune.on_train_begin() - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - prune.get_sparsity_ratio() - prune.on_train_end() - prune.on_before_eval() - prune.on_after_eval() - - def test_pytorch_pruner_channel_pruning(self): - prune = Pruning("fake_channel_pruning.yaml") - prune.model = self.model - - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.model.parameters(), 
lr=0.0001) - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - prune.prepare() - prune.on_train_begin() - for epoch in range(2): - self.model.train() - prune.on_epoch_begin(epoch) - local_step = 0 - for image, target in dummy_dataloader: - prune.on_step_begin(local_step) - output = self.model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - prune.on_before_optimizer_step() - optimizer.step() - prune.on_after_optimizer_step() - prune.on_step_end() - local_step += 1 - - prune.on_epoch_end() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py b/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py index 0eb2d04005a..641525fd26d 100644 --- a/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py +++ b/test/pruning_with_pt/pruning_2.x/test_auto_excluding_classifier.py @@ -24,7 +24,6 @@ def forward(self, x): class TestPruning(unittest.TestCase): def test_pruning_basic(self): - # import pdb;pdb.set_trace() hidden_size = 32 model = NaiveMLP(hidden_size) # import classifier searching functions diff --git a/test/pruning_with_pt/pruning_2.x/test_auto_slim.py b/test/pruning_with_pt/pruning_2.x/test_auto_slim.py index 7af5cf8de20..b5f09a3c41d 100644 --- a/test/pruning_with_pt/pruning_2.x/test_auto_slim.py +++ b/test/pruning_with_pt/pruning_2.x/test_auto_slim.py @@ -50,7 +50,6 @@ def test_pruning_basic(self): # run mha and ffn pruning compression_manager = prepare_compression(model=model, confs=configs) compression_manager.callbacks.on_train_begin() - # import pdb;pdb.set_trace() for epoch in range(3): model.train() compression_manager.callbacks.on_epoch_begin(epoch) diff --git a/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_distributed_pruning.py b/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_distributed_pruning.py deleted file mode 100644 index 6e9fbdb12b9..00000000000 --- a/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_distributed_pruning.py +++ /dev/null @@ -1,464 +0,0 @@ -"""Tests for the TensorFlow pruning with distributed training and inference.""" - -import hashlib -import os -import re -import shutil -import signal -import subprocess -import sys -import time -import unittest -from platform import platform, system - -import cpuinfo -import tensorflow as tf - -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.utils import logger - - -def build_fake_ut(): - fake_ut = ''' -from __future__ import print_function -import tensorflow -from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation -from tensorflow.keras.layers import AveragePooling2D, Input, Flatten -from tensorflow.keras.callbacks import LearningRateScheduler -from tensorflow.keras.callbacks import ReduceLROnPlateau -from tensorflow.keras.regularizers import l2 -from tensorflow.keras.models import Model -from tensorflow.keras.datasets import cifar10 -import numpy as np -import os -import sys -import cpuinfo -import shutil -import unittest -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.utils import logger -from neural_compressor.utils.utility import CpuInfo - -def lr_schedule(epoch): - """Learning Rate Schedule - Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. 
- Called automatically every epoch as part of callbacks during training. - # Arguments - epoch (int): The number of epochs - # Returns - lr (float32): learning rate - """ - lr = 1e-3 - if epoch > 180: - lr *= 0.5e-3 - elif epoch > 160: - lr *= 1e-3 - elif epoch > 120: - lr *= 1e-2 - elif epoch > 80: - lr *= 1e-1 - print('Learning rate: ', lr) - return lr - -def resnet_layer(inputs, - num_filters=8, - kernel_size=3, - strides=1, - activation='relu', - batch_normalization=True, - conv_first=True): - """2D Convolution-Batch Normalization-Activation stack builder - # Arguments - inputs (tensor): input tensor from input image or previous layer - num_filters (int): Conv2D number of filters - kernel_size (int): Conv2D square kernel dimensions - strides (int): Conv2D square stride dimensions - activation (string): activation name - batch_normalization (bool): whether to include batch normalization - conv_first (bool): conv-bn-activation (True) or - bn-activation-conv (False) - # Returns - x (tensor): tensor as input to the next layer - """ - conv = Conv2D(num_filters, - kernel_size=kernel_size, - strides=strides, - padding='same', - use_bias=True, - kernel_initializer='he_normal', - kernel_regularizer=l2(1e-4)) - - x = inputs - if conv_first: - x = conv(x) - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = Activation(activation)(x) - else: - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = Activation(activation)(x) - x = conv(x) - return x - -def resnet_v2(input_shape, depth, num_classes=10): - """ResNet Version 2 Model builder [b] - Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as - bottleneck layer - First shortcut connection per layer is 1 x 1 Conv2D. - Second and onwards shortcut connection is identity. - At the beginning of each stage, the feature map size is halved (downsampled) - by a convolutional layer with strides=2, while the number of filter maps is - doubled. Within each stage, the layers have the same number filters and the - same filter map sizes. - Features maps sizes: - conv1 : 32x32, 16 - stage 0: 32x32, 64 - stage 1: 16x16, 128 - stage 2: 8x8, 256 - # Arguments - input_shape (tensor): shape of input image tensor - depth (int): number of core convolutional layers - num_classes (int): number of classes (CIFAR10 has 10) - # Returns - model (Model): Keras model instance - """ - if (depth - 2) % 9 != 0: - raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])') - # Start model definition. 
- num_filters_in = 4 - num_res_blocks = int((depth - 2) / 9) - - inputs = Input(shape=input_shape) - # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths - x = resnet_layer(inputs=inputs, - num_filters=num_filters_in, - conv_first=True) - - # Instantiate the stack of residual units - for stage in range(1): - for res_block in range(num_res_blocks): - activation = 'relu' - batch_normalization = True - strides = 1 - if stage == 0: - num_filters_out = num_filters_in * 4 - if res_block == 0: # first layer and first stage - activation = None - batch_normalization = False - else: - num_filters_out = num_filters_in * 2 - if res_block == 0: # first layer but not first stage - strides = 2 # downsample - - # bottleneck residual unit - y = resnet_layer(inputs=x, - num_filters=num_filters_in, - kernel_size=1, - strides=strides, - activation=activation, - batch_normalization=batch_normalization, - conv_first=False) - y = resnet_layer(inputs=y, - num_filters=num_filters_in, - conv_first=False) - - y = resnet_layer(inputs=y, - num_filters=num_filters_out, - kernel_size=1, - conv_first=False) - if res_block == 0: - # linear projection residual shortcut connection to match - # changed dims - x = resnet_layer(inputs=x, - num_filters=num_filters_out, - kernel_size=1, - strides=strides, - activation=None, - batch_normalization=False) - x = tensorflow.keras.layers.add([x, y]) - - num_filters_in = num_filters_out - - # Add classifier on top. - # v2 has BN-ReLU before Pooling - # x = BatchNormalization()(x) - x = Activation('relu')(x) - x = AveragePooling2D(pool_size=8)(x) - y = Flatten()(x) - outputs = Dense(num_classes, - activation='softmax', - kernel_initializer='he_normal')(y) - - # Instantiate model. - model = Model(inputs=inputs, outputs=outputs) - return model - -# Training parameters -batch_size = 128 # orig paper trained all networks with batch_size=128 -epochs = 1 -num_classes = 10 - -# Subtracting pixel mean improves accuracy -subtract_pixel_mean = True - -n = 1 -depth = n * 9 + 2 - -def train(): - # Load the CIFAR10 data. - (x_train, y_train), (x_test, y_test) = cifar10.load_data() - - # Input image dimensions. - input_shape = x_train.shape[1:] - # Normalize data. - x_train = x_train.astype('float32') / 255 - x_test = x_test.astype('float32') / 255 - - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) - y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) - - model = resnet_v2(input_shape=input_shape, depth=depth) - - model.compile(loss='categorical_crossentropy', - optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.01), - metrics=['accuracy']) - model.summary() - - lr_scheduler = LearningRateScheduler(lr_schedule) - - lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), - cooldown=0, - patience=5, - min_lr=0.5e-6) - - callbacks = [lr_reducer, lr_scheduler] - - # Run training, with or without data augmentation. - model.fit(x_train, y_train, - batch_size=batch_size, - epochs=epochs, - validation_data=(x_test, y_test), - shuffle=True, - callbacks=callbacks) - - # Score trained model. 
- scores = model.evaluate(x_test, y_test, verbose=1) - print('Test loss:', scores[0]) - print('Test accuracy:', scores[1]) - model.save("baseline_model") - -class TrainDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = cifar10.load_data() - x_train, y_train = x_train[:100], y_train[:100] - x_train = x_train.astype('float32') / 255 - x_test = x_test.astype('float32') / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) - y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - self.train_images = x_train - self.train_labels = y_train - - def __len__(self): - return len(self.train_images) - - def __getitem__(self, idx): - return self.train_images[idx], self.train_labels[idx] - -class EvalDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = cifar10.load_data() - - x_train = x_train.astype('float32') / 255 - x_test = x_test.astype('float32') / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) - y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - - def __len__(self): - return len(self.test_images) - - def __getitem__(self, idx): - return self.test_images[idx], self.test_labels[idx] - -class TestTensorflowPruning(unittest.TestCase): - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\\n") - - def test_tensorflow_pruning(self): - from neural_compressor.experimental import Pruning, common - from neural_compressor.utils import logger - prune = Pruning("./fake_yaml.yaml") - prune.train_distributed = True - prune.evaluation_distributed = True - prune.train_dataloader = common.DataLoader(TrainDataset(), batch_size=16) - prune.eval_dataloader = common.DataLoader(EvalDataset(), batch_size=32) - prune.model = './baseline_model' - pruned_model = prune() - stats, sparsity = pruned_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - self.assertGreater(sparsity, 20) - self.assertGreater(prune.baseline_score, 0.729) - if bool(CpuInfo().bf16): - self.assertGreater(prune.last_score, 0.742) - else: - self.assertGreater(prune.last_score, 0.743) - - -if __name__ == '__main__': - unittest.main() - ''' - with open("fake_ut.py", "w", encoding="utf-8") as f: - f.write(fake_ut) - build_fake_yaml() - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: resnet_v2_prune - framework: tensorflow - pruning: - train: - epoch: 4 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.2 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - 
f.write(fake_yaml) - - -def dir_md5_check(dir): - files_list = [] - md5_list = [] - - def get_files_list(path, list_name): - for file in sorted(os.listdir(path)): - file_path = os.path.join(path, file) - if os.path.isdir(file_path): - get_files_list(file_path, list_name) - else: - list_name.append(file_path) - - get_files_list(dir, files_list) - for file_path in files_list: - with open(file_path, "rb") as fp: - data = fp.read() - file_md5 = hashlib.md5(data).hexdigest() - md5_list.append(file_md5) - return md5_list - - -class TestDistributed(unittest.TestCase): - dst_path = "./baseline_model" - - @classmethod - def setUpClass(cls): - build_fake_ut() - build_fake_yaml() - if system().lower() == "windows": - src_path = "C:\\tmp\\.neural_compressor\\inc_ut\\resnet_v2\\" - elif system().lower() == "linux": - src_path = "/tmp/.neural_compressor/inc_ut/resnet_v2/" - if os.path.exists(src_path): - shutil.copytree(src_path, os.getcwd(), dirs_exist_ok=True) - if not os.path.exists(cls.dst_path): - raise FileNotFoundError(f"'{cls.dst_path}' doesn't exist.") - elif dir_md5_check(cls.dst_path) != [ - "65625fef42f44e6853d4d6d5e4188a49", - "a783396652bf62db3db4c9f647953175", - "c7259753419d9fc053df5b2059aef8c0", - "77f2a1045cffee9f6a43f2594a5627ba", - ]: - logger.warning("resnet_v2 baseline_model md5 verification failed.") - raise ValueError(f"'{cls.dst_path}' md5 verification failed.") - else: - logger.info("resnet_v2 baseline_model for TF distributed pruning md5 verification succeeded.") - - @classmethod - def tearDownClass(cls): - os.remove("fake_ut.py") - os.remove("fake_yaml.yaml") - shutil.rmtree("nc_workspace", ignore_errors=True) - shutil.rmtree("baseline_model", ignore_errors=True) - - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") - def test_tf_distributed_pruning(self): - distributed_cmd = "horovodrun -np 2 python fake_ut.py" - p = subprocess.Popen( - distributed_cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True - ) - try: - out, _ = p.communicate() - for line in out.splitlines(): - print(line.decode().strip()) - matches = re.findall(r"FAILED", out.decode("utf-8")) - self.assertEqual(matches, []) - - matches = re.findall(r"OK", out.decode("utf-8")) - self.assertTrue(len(matches) > 0) - except KeyboardInterrupt: - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - assert 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_pruning.py b/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_pruning.py deleted file mode 100644 index 3ec84d834be..00000000000 --- a/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_pruning.py +++ /dev/null @@ -1,487 +0,0 @@ -"""Tests for the TensorFlow pruning.""" - -from __future__ import print_function - -import hashlib -import os -import shutil -import sys -import types -import unittest -from platform import platform, system - -import cpuinfo -import numpy as np -import tensorflow as tf - -from neural_compressor.adaptor import FRAMEWORKS -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.conf.dotdict import DotDict -from neural_compressor.experimental import Pruning, common -from 
neural_compressor.experimental.pruning import TfPruningCallback -from neural_compressor.utils import logger -from neural_compressor.utils.create_obj_from_config import create_train_func - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: resnet_v2_prune - framework: tensorflow - pruning: - train: - epoch: 4 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.9 - nesterov: True - criterion: - CrossEntropyLoss: - reduction: sum_over_batch_size - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.2 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def lr_schedule(epoch): - """Learning Rate Schedule - Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. - - Called automatically every epoch as part of callbacks during training. - # Arguments - epoch (int): The number of epochs - # Returns - lr (float32): learning rate - """ - lr = 1e-3 - if epoch > 180: - lr *= 0.5e-3 - elif epoch > 160: - lr *= 1e-3 - elif epoch > 120: - lr *= 1e-2 - elif epoch > 80: - lr *= 1e-1 - print("Learning rate: ", lr) - return lr - - -def resnet_layer( - inputs, num_filters=8, kernel_size=3, strides=1, activation="relu", batch_normalization=True, conv_first=True -): - """2D Convolution-Batch Normalization-Activation stack builder - # Arguments - inputs (tensor): input tensor from input image or previous layer - num_filters (int): Conv2D number of filters - kernel_size (int): Conv2D square kernel dimensions - strides (int): Conv2D square stride dimensions - activation (string): activation name - batch_normalization (bool): whether to include batch normalization - conv_first (bool): conv-bn-activation (True) or - bn-activation-conv (False) - # Returns - x (tensor): tensor as input to the next layer.""" - conv = tf.keras.layers.Conv2D( - num_filters, - kernel_size=kernel_size, - strides=strides, - padding="same", - use_bias=True, - kernel_initializer="he_normal", - kernel_regularizer=tf.keras.regularizers.l2(1e-4), - ) - - x = inputs - if conv_first: - x = conv(x) - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = tf.keras.layers.Activation(activation)(x) - else: - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = tf.keras.layers.Activation(activation)(x) - x = conv(x) - return x - - -def resnet_v2(input_shape, depth, num_classes=10): - """ResNet Version 2 Model builder [b] - Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as - bottleneck layer - First shortcut connection per layer is 1 x 1 Conv2D. - - Second and onwards shortcut connection is identity. - At the beginning of each stage, the feature map size is halved (downsampled) - by a convolutional layer with strides=2, while the number of filter maps is - doubled. Within each stage, the layers have the same number filters and the - same filter map sizes. - Features maps sizes: - conv1 : 32x32, 16 - stage 0: 32x32, 64 - stage 1: 16x16, 128 - stage 2: 8x8, 256 - # Arguments - input_shape (tensor): shape of input image tensor - depth (int): number of core convolutional layers - num_classes (int): number of classes (CIFAR10 has 10) - # Returns - model (Model): Keras model instance - """ - if (depth - 2) % 9 != 0: - raise ValueError("depth should be 9n+2 (eg 56 or 110 in [b])") - # Start model definition. 
- num_filters_in = 4 - num_res_blocks = int((depth - 2) / 9) - - inputs = tf.keras.layers.Input(shape=input_shape) - # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths - x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True) - - # Instantiate the stack of residual units - for stage in range(1): - for res_block in range(num_res_blocks): - activation = "relu" - batch_normalization = True - strides = 1 - if stage == 0: - num_filters_out = num_filters_in * 4 - if res_block == 0: # first layer and first stage - activation = None - batch_normalization = False - else: - num_filters_out = num_filters_in * 2 - if res_block == 0: # first layer but not first stage - strides = 2 # downsample - - # bottleneck residual unit - y = resnet_layer( - inputs=x, - num_filters=num_filters_in, - kernel_size=1, - strides=strides, - activation=activation, - batch_normalization=batch_normalization, - conv_first=False, - ) - y = resnet_layer(inputs=y, num_filters=num_filters_in, conv_first=False) - - y = resnet_layer(inputs=y, num_filters=num_filters_out, kernel_size=1, conv_first=False) - if res_block == 0: - # linear projection residual shortcut connection to match - # changed dims - x = resnet_layer( - inputs=x, - num_filters=num_filters_out, - kernel_size=1, - strides=strides, - activation=None, - batch_normalization=False, - ) - x = tf.keras.layers.add([x, y]) - - num_filters_in = num_filters_out - - # Add classifier on top. - # v2 has BN-ReLU before Pooling - # x = BatchNormalization()(x) - x = tf.keras.layers.Activation("relu")(x) - x = tf.keras.layers.AveragePooling2D(pool_size=8)(x) - y = tf.keras.layers.Flatten()(x) - outputs = tf.keras.layers.Dense(num_classes, activation="softmax", kernel_initializer="he_normal")(y) - - # Instantiate model. - model = tf.keras.models.Model(inputs=inputs, outputs=outputs) - return model - - -# Training parameters -batch_size = 128 # orig paper trained all networks with batch_size=128 -epochs = 1 -num_classes = 10 - -# Subtracting pixel mean improves accuracy -subtract_pixel_mean = True - -n = 1 -depth = n * 9 + 2 - - -def train(dst_path): - # Load the CIFAR10 data. - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - - # Input image dimensions. - input_shape = x_train.shape[1:] - # Normalize data. - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tf.keras.utils.to_categorical(y_train, num_classes) - y_test = tf.keras.utils.to_categorical(y_test, num_classes) - - model = resnet_v2(input_shape=input_shape, depth=depth) - - model.compile( - loss="categorical_crossentropy", optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), metrics=["accuracy"] - ) - model.summary() - - lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_schedule) - - lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6) - - callbacks = [lr_reducer, lr_scheduler] - - # Run training, with or without data augmentation. - model.fit( - x_train, - y_train, - batch_size=batch_size, - epochs=epochs, - validation_data=(x_test, y_test), - shuffle=True, - callbacks=callbacks, - ) - - # Score trained model. 
- scores = model.evaluate(x_test, y_test, verbose=1) - print("Test loss:", scores[0]) - print("Test accuracy:", scores[1]) - model.save(dst_path) - - -def dir_md5_check(dir): - files_list = [] - md5_list = [] - - def get_files_list(path, list_name): - for file in sorted(os.listdir(path)): - file_path = os.path.join(path, file) - if os.path.isdir(file_path): - get_files_list(file_path, list_name) - else: - list_name.append(file_path) - - get_files_list(dir, files_list) - for file_path in files_list: - with open(file_path, "rb") as fp: - data = fp.read() - file_md5 = hashlib.md5(data).hexdigest() - md5_list.append(file_md5) - return md5_list - - -class TrainDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - x_train, y_train = x_train[:64], y_train[:64] - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tf.keras.utils.to_categorical(y_train, num_classes) - y_test = tf.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - self.train_images = x_train - self.train_labels = y_train - - def __len__(self): - return len(self.train_images) - - def __getitem__(self, idx): - return self.train_images[idx], self.train_labels[idx] - - -class EvalDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. 
- y_train = tf.keras.utils.to_categorical(y_train, num_classes) - y_test = tf.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - - def __len__(self): - return len(self.test_images) - - def __getitem__(self, idx): - return self.test_images[idx], self.test_labels[idx] - - -class TestTensorflowPruning(unittest.TestCase): - dst_path = "./baseline_model" - - @classmethod - def setUpClass(cls): - build_fake_yaml() - if system().lower() == "windows": - src_path = "C:\\tmp\\.neural_compressor\\inc_ut\\resnet_v2\\" - elif system().lower() == "linux": - src_path = "/tmp/.neural_compressor/inc_ut/resnet_v2/" - if os.path.exists(src_path): - shutil.copytree(src_path, os.getcwd(), dirs_exist_ok=True) - if not os.path.exists(cls.dst_path): - logger.warning("resnet_v2 baseline_model doesn't exist.") - return unittest.skip("resnet_v2 baseline_model doesn't exist")(TestTensorflowPruning) - elif dir_md5_check(cls.dst_path) != [ - "65625fef42f44e6853d4d6d5e4188a49", - "a783396652bf62db3db4c9f647953175", - "c7259753419d9fc053df5b2059aef8c0", - "77f2a1045cffee9f6a43f2594a5627ba", - ]: - logger.warning("resnet_v2 baseline_model md5 verification failed.") - return unittest.skip("resnet_v2 baseline_model md5 verification failed.")(TestTensorflowPruning) - else: - logger.info("resnet_v2 baseline_model for TF pruning md5 verification succeeded.") - - @classmethod - def tearDownClass(cls): - os.remove("fake_yaml.yaml") - shutil.rmtree("nc_workspace", ignore_errors=True) - shutil.rmtree("baseline_model", ignore_errors=True) - - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.3.0"), - "Keras model need tensorflow version >= 2.3.0, so the case is skipped", - ) - def test_create_train_func1(self): - framework = "tensorflow" - framework_specific_info = DotDict( - { - "device": "cpu", - "random_seed": 1978, - "workspace_path": "./nc_workspace/", - "q_dataloader": None, - "inputs": [], - "outputs": [], - "format": "default", - "backend": "default", - } - ) - adaptor = FRAMEWORKS[framework](framework_specific_info) - - dataloader = common.DataLoader(TrainDataset(), batch_size=32) - train_cfg = DotDict( - { - "epoch": 1, - "optimizer": {"AdamW": {"learning_rate": 0.001, "weight_decay": 0.0001}}, - "criterion": {"CrossEntropyLoss": {"reduction": "sum_over_batch_size", "from_logits": True}}, - "execution_mode": "eager", - "start_epoch": 0, - } - ) - callbacks = TfPruningCallback - hooks = {} - pruning_func1 = create_train_func(framework, dataloader, adaptor, train_cfg, hooks, callbacks) - self.assertTrue(isinstance(pruning_func1, types.FunctionType)) - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.3.0"), - "Keras model need tensorflow version >= 2.3.0, so the case is skipped", - ) - def test_create_train_func2(self): - framework = "tensorflow" - framework_specific_info = DotDict( - { - "device": "cpu", - "random_seed": 1978, - "workspace_path": "./nc_workspace/", - "q_dataloader": None, - "inputs": [], - "outputs": [], - "format": "default", - "backend": "default", - } - ) - adaptor = FRAMEWORKS[framework](framework_specific_info) - - dataloader = common.DataLoader(TrainDataset(), batch_size=32) - train_cfg = DotDict( - { - "epoch": 1, - "dataloader": { - "distributed": False, - 
"batch_size": 32, - "dataset": {"ImageRecord": {"root": "./ImageNet"}}, - "transform": { - "ResizeCropImagenet": {"height": 224, "width": 224, "mean_value": [123.68, 116.78, 103.94]} - }, - "last_batch": "rollover", - "shuffle": False, - }, - "postprocess": {"transform": {"LabelShift": 1}}, - "optimizer": {"SGD": {"learning_rate": 0.0001, "momentum": 0.9, "nesterov": True}}, - "criterion": {"SparseCategoricalCrossentropy": {"reduction": "sum_over_batch_size"}}, - "execution_mode": "eager", - "start_epoch": 0, - } - ) - pruning_func2 = create_train_func(framework, dataloader, adaptor, train_cfg) - self.assertTrue(isinstance(pruning_func2, types.FunctionType)) - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.3.0"), - "Keras model need tensorflow version >= 2.3.0, so the case is skipped", - ) - def test_tensorflow_pruning(self): - prune = Pruning("./fake_yaml.yaml") - prune.train_dataloader = common.DataLoader(TrainDataset(), batch_size=32) - prune.eval_dataloader = common.DataLoader(EvalDataset(), batch_size=32) - prune.model = self.dst_path - pruned_model = prune() - stats, sparsity = pruned_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - self.assertGreater(sparsity, 20) - self.assertGreater(prune.baseline_score, 0.72) - self.assertGreater(prune.last_score, 0.73) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_pruning_utility.py b/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_pruning_utility.py deleted file mode 100644 index 00d9bb6acd7..00000000000 --- a/test/pruning_with_tf/pruning_1.x_v1/test_tensorflow_pruning_utility.py +++ /dev/null @@ -1,64 +0,0 @@ -import shutil -import unittest - - -def train_func(): - import tensorflow as tf - from tensorflow import keras - - # Load MNIST dataset - mnist = keras.datasets.mnist - (train_images, train_labels), (test_images, test_labels) = mnist.load_data() - - # Normalize the input image so that each pixel value is between 0 to 1. - train_images = train_images / 255.0 - test_images = test_images / 255.0 - - # Define the model architecture. 
- model = keras.Sequential( - [ - keras.layers.InputLayer(input_shape=(28, 28)), - keras.layers.Reshape(target_shape=(28, 28, 1)), - keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"), - keras.layers.MaxPooling2D(pool_size=(2, 2)), - keras.layers.Flatten(), - keras.layers.Dense(10), - ] - ) - # Train the digit classification model - model.compile( - optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"] - ) - - model.fit( - train_images, - train_labels, - epochs=1, - validation_split=0.1, - ) - - model.save("baseline_model") - - -class TestTensorflowPruning(unittest.TestCase): - @classmethod - def setUpClass(self): - train_func() - - @classmethod - def tearDownClass(self): - shutil.rmtree("baseline_model", ignore_errors=True) - - def test_pruning_utility(self): - from neural_compressor.model import Model - - pruning_model = Model("baseline_model") - all_weights_name = pruning_model.get_all_weight_names() - df, sparsity = pruning_model.report_sparsity() - self.assertEqual(all_weights_name, [1, 4]) - self.assertEqual(df.empty, False) - self.assertNotEqual(sparsity, None) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_tf/pruning_1.x_v2/test_tensorflow_pruning.py b/test/pruning_with_tf/pruning_1.x_v2/test_tensorflow_pruning.py deleted file mode 100644 index ff8d3103602..00000000000 --- a/test/pruning_with_tf/pruning_1.x_v2/test_tensorflow_pruning.py +++ /dev/null @@ -1,487 +0,0 @@ -"""Tests for the TensorFlow pruning.""" - -from __future__ import print_function - -import hashlib -import os -import shutil -import sys -import types -import unittest -from platform import platform, system - -import cpuinfo -import numpy as np -import tensorflow as tf - -from neural_compressor.adaptor import FRAMEWORKS -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.conf.dotdict import DotDict -from neural_compressor.experimental import Pruning, common -from neural_compressor.experimental.pruning_v2 import TfPruningCallback -from neural_compressor.utils import logger -from neural_compressor.utils.create_obj_from_config import create_train_func - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: resnet_v2_prune - framework: tensorflow - pruning: - train: - epoch: 4 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.9 - nesterov: True - criterion: - CrossEntropyLoss: - reduction: sum_over_batch_size - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.2 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def lr_schedule(epoch): - """Learning Rate Schedule - Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. - - Called automatically every epoch as part of callbacks during training. 
- # Arguments - epoch (int): The number of epochs - # Returns - lr (float32): learning rate - """ - lr = 1e-3 - if epoch > 180: - lr *= 0.5e-3 - elif epoch > 160: - lr *= 1e-3 - elif epoch > 120: - lr *= 1e-2 - elif epoch > 80: - lr *= 1e-1 - print("Learning rate: ", lr) - return lr - - -def resnet_layer( - inputs, num_filters=8, kernel_size=3, strides=1, activation="relu", batch_normalization=True, conv_first=True -): - """2D Convolution-Batch Normalization-Activation stack builder - # Arguments - inputs (tensor): input tensor from input image or previous layer - num_filters (int): Conv2D number of filters - kernel_size (int): Conv2D square kernel dimensions - strides (int): Conv2D square stride dimensions - activation (string): activation name - batch_normalization (bool): whether to include batch normalization - conv_first (bool): conv-bn-activation (True) or - bn-activation-conv (False) - # Returns - x (tensor): tensor as input to the next layer.""" - conv = tf.keras.layers.Conv2D( - num_filters, - kernel_size=kernel_size, - strides=strides, - padding="same", - use_bias=True, - kernel_initializer="he_normal", - kernel_regularizer=tf.keras.regularizers.l2(1e-4), - ) - - x = inputs - if conv_first: - x = conv(x) - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = tf.keras.layers.Activation(activation)(x) - else: - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = tf.keras.layers.Activation(activation)(x) - x = conv(x) - return x - - -def resnet_v2(input_shape, depth, num_classes=10): - """ResNet Version 2 Model builder [b] - Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as - bottleneck layer - First shortcut connection per layer is 1 x 1 Conv2D. - - Second and onwards shortcut connection is identity. - At the beginning of each stage, the feature map size is halved (downsampled) - by a convolutional layer with strides=2, while the number of filter maps is - doubled. Within each stage, the layers have the same number filters and the - same filter map sizes. - Features maps sizes: - conv1 : 32x32, 16 - stage 0: 32x32, 64 - stage 1: 16x16, 128 - stage 2: 8x8, 256 - # Arguments - input_shape (tensor): shape of input image tensor - depth (int): number of core convolutional layers - num_classes (int): number of classes (CIFAR10 has 10) - # Returns - model (Model): Keras model instance - """ - if (depth - 2) % 9 != 0: - raise ValueError("depth should be 9n+2 (eg 56 or 110 in [b])") - # Start model definition. 
- num_filters_in = 4 - num_res_blocks = int((depth - 2) / 9) - - inputs = tf.keras.layers.Input(shape=input_shape) - # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths - x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True) - - # Instantiate the stack of residual units - for stage in range(1): - for res_block in range(num_res_blocks): - activation = "relu" - batch_normalization = True - strides = 1 - if stage == 0: - num_filters_out = num_filters_in * 4 - if res_block == 0: # first layer and first stage - activation = None - batch_normalization = False - else: - num_filters_out = num_filters_in * 2 - if res_block == 0: # first layer but not first stage - strides = 2 # downsample - - # bottleneck residual unit - y = resnet_layer( - inputs=x, - num_filters=num_filters_in, - kernel_size=1, - strides=strides, - activation=activation, - batch_normalization=batch_normalization, - conv_first=False, - ) - y = resnet_layer(inputs=y, num_filters=num_filters_in, conv_first=False) - - y = resnet_layer(inputs=y, num_filters=num_filters_out, kernel_size=1, conv_first=False) - if res_block == 0: - # linear projection residual shortcut connection to match - # changed dims - x = resnet_layer( - inputs=x, - num_filters=num_filters_out, - kernel_size=1, - strides=strides, - activation=None, - batch_normalization=False, - ) - x = tf.keras.layers.add([x, y]) - - num_filters_in = num_filters_out - - # Add classifier on top. - # v2 has BN-ReLU before Pooling - # x = BatchNormalization()(x) - x = tf.keras.layers.Activation("relu")(x) - x = tf.keras.layers.AveragePooling2D(pool_size=8)(x) - y = tf.keras.layers.Flatten()(x) - outputs = tf.keras.layers.Dense(num_classes, activation="softmax", kernel_initializer="he_normal")(y) - - # Instantiate model. - model = tf.keras.models.Model(inputs=inputs, outputs=outputs) - return model - - -# Training parameters -batch_size = 128 # orig paper trained all networks with batch_size=128 -epochs = 1 -num_classes = 10 - -# Subtracting pixel mean improves accuracy -subtract_pixel_mean = True - -n = 1 -depth = n * 9 + 2 - - -def train(dst_path): - # Load the CIFAR10 data. - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - - # Input image dimensions. - input_shape = x_train.shape[1:] - # Normalize data. - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tf.keras.utils.to_categorical(y_train, num_classes) - y_test = tf.keras.utils.to_categorical(y_test, num_classes) - - model = resnet_v2(input_shape=input_shape, depth=depth) - - model.compile( - loss="categorical_crossentropy", optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), metrics=["accuracy"] - ) - model.summary() - - lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_schedule) - - lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6) - - callbacks = [lr_reducer, lr_scheduler] - - # Run training, with or without data augmentation. - model.fit( - x_train, - y_train, - batch_size=batch_size, - epochs=epochs, - validation_data=(x_test, y_test), - shuffle=True, - callbacks=callbacks, - ) - - # Score trained model. 
- scores = model.evaluate(x_test, y_test, verbose=1) - print("Test loss:", scores[0]) - print("Test accuracy:", scores[1]) - model.save(dst_path) - - -def dir_md5_check(dir): - files_list = [] - md5_list = [] - - def get_files_list(path, list_name): - for file in sorted(os.listdir(path)): - file_path = os.path.join(path, file) - if os.path.isdir(file_path): - get_files_list(file_path, list_name) - else: - list_name.append(file_path) - - get_files_list(dir, files_list) - for file_path in files_list: - with open(file_path, "rb") as fp: - data = fp.read() - file_md5 = hashlib.md5(data).hexdigest() - md5_list.append(file_md5) - return md5_list - - -class TrainDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - x_train, y_train = x_train[:64], y_train[:64] - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tf.keras.utils.to_categorical(y_train, num_classes) - y_test = tf.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - self.train_images = x_train - self.train_labels = y_train - - def __len__(self): - return len(self.train_images) - - def __getitem__(self, idx): - return self.train_images[idx], self.train_labels[idx] - - -class EvalDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() - - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. 
- y_train = tf.keras.utils.to_categorical(y_train, num_classes) - y_test = tf.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - - def __len__(self): - return len(self.test_images) - - def __getitem__(self, idx): - return self.test_images[idx], self.test_labels[idx] - - -class TestTensorflowPruning(unittest.TestCase): - dst_path = "./baseline_model" - - @classmethod - def setUpClass(cls): - build_fake_yaml() - if system().lower() == "windows": - src_path = "C:\\tmp\\.neural_compressor\\inc_ut\\resnet_v2\\" - elif system().lower() == "linux": - src_path = "/tmp/.neural_compressor/inc_ut/resnet_v2/" - if os.path.exists(src_path): - shutil.copytree(src_path, os.getcwd(), dirs_exist_ok=True) - if not os.path.exists(cls.dst_path): - logger.warning("resnet_v2 baseline_model doesn't exist.") - return unittest.skip("resnet_v2 baseline_model doesn't exist")(TestTensorflowPruning) - elif dir_md5_check(cls.dst_path) != [ - "65625fef42f44e6853d4d6d5e4188a49", - "a783396652bf62db3db4c9f647953175", - "c7259753419d9fc053df5b2059aef8c0", - "77f2a1045cffee9f6a43f2594a5627ba", - ]: - logger.warning("resnet_v2 baseline_model md5 verification failed.") - return unittest.skip("resnet_v2 baseline_model md5 verification failed.")(TestTensorflowPruning) - else: - logger.info("resnet_v2 baseline_model for TF pruning md5 verification succeeded.") - - @classmethod - def tearDownClass(cls): - os.remove("fake_yaml.yaml") - shutil.rmtree("nc_workspace", ignore_errors=True) - shutil.rmtree("baseline_model", ignore_errors=True) - - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.3.0"), - "Keras model need tensorflow version >= 2.3.0, so the case is skipped", - ) - def test_create_train_func1(self): - framework = "tensorflow" - framework_specific_info = DotDict( - { - "device": "cpu", - "random_seed": 1978, - "workspace_path": "./nc_workspace/", - "q_dataloader": None, - "inputs": [], - "outputs": [], - "format": "default", - "backend": "default", - } - ) - adaptor = FRAMEWORKS[framework](framework_specific_info) - - dataloader = common.DataLoader(TrainDataset(), batch_size=32) - train_cfg = DotDict( - { - "epoch": 1, - "optimizer": {"AdamW": {"learning_rate": 0.001, "weight_decay": 0.0001}}, - "criterion": {"CrossEntropyLoss": {"reduction": "sum_over_batch_size", "from_logits": True}}, - "execution_mode": "eager", - "start_epoch": 0, - } - ) - callbacks = TfPruningCallback - hooks = {} - pruning_func1 = create_train_func(framework, dataloader, adaptor, train_cfg, hooks, callbacks) - self.assertTrue(isinstance(pruning_func1, types.FunctionType)) - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.3.0"), - "Keras model need tensorflow version >= 2.3.0, so the case is skipped", - ) - def test_create_train_func2(self): - framework = "tensorflow" - framework_specific_info = DotDict( - { - "device": "cpu", - "random_seed": 1978, - "workspace_path": "./nc_workspace/", - "q_dataloader": None, - "inputs": [], - "outputs": [], - "format": "default", - "backend": "default", - } - ) - adaptor = FRAMEWORKS[framework](framework_specific_info) - - dataloader = common.DataLoader(TrainDataset(), batch_size=32) - train_cfg = DotDict( - { - "epoch": 1, - "dataloader": { - "distributed": False, - 
"batch_size": 32, - "dataset": {"ImageRecord": {"root": "./ImageNet"}}, - "transform": { - "ResizeCropImagenet": {"height": 224, "width": 224, "mean_value": [123.68, 116.78, 103.94]} - }, - "last_batch": "rollover", - "shuffle": False, - }, - "postprocess": {"transform": {"LabelShift": 1}}, - "optimizer": {"SGD": {"learning_rate": 0.0001, "momentum": 0.9, "nesterov": True}}, - "criterion": {"SparseCategoricalCrossentropy": {"reduction": "sum_over_batch_size"}}, - "execution_mode": "eager", - "start_epoch": 0, - } - ) - pruning_func2 = create_train_func(framework, dataloader, adaptor, train_cfg) - self.assertTrue(isinstance(pruning_func2, types.FunctionType)) - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.3.0"), - "Keras model need tensorflow version >= 2.3.0, so the case is skipped", - ) - def test_tensorflow_pruning(self): - prune = Pruning("./fake_yaml.yaml") - prune.train_dataloader = common.DataLoader(TrainDataset(), batch_size=32) - prune.eval_dataloader = common.DataLoader(EvalDataset(), batch_size=32) - prune.model = self.dst_path - pruned_model = prune() - stats, sparsity = pruned_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - self.assertGreater(sparsity, 20) - self.assertGreater(prune.baseline_score, 0.72) - self.assertGreater(prune.last_score, 0.73) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/pruning_with_tf/pruning_2.x/test_pruning_keras.py b/test/pruning_with_tf/pruning_2.x/test_pruning_keras.py index 4f78ff380bc..b4cc81d870b 100644 --- a/test/pruning_with_tf/pruning_2.x/test_pruning_keras.py +++ b/test/pruning_with_tf/pruning_2.x/test_pruning_keras.py @@ -7,11 +7,11 @@ def test_pruning_keras(self): from neural_compressor import WeightPruningConfig from neural_compressor.adaptor import FRAMEWORKS - from neural_compressor.conf.config import default_workspace - from neural_compressor.conf.dotdict import DotDict + from neural_compressor.config import default_workspace from neural_compressor.data import DataLoader, Datasets from neural_compressor.training import prepare_compression from neural_compressor.utils import create_obj_from_config, logger + from neural_compressor.utils.utility import DotDict model = tf.keras.applications.ResNet50V2(weights="imagenet") diff --git a/test/quantization/test_quantization.py b/test/quantization/test_quantization.py deleted file mode 100644 index 878e0669e3f..00000000000 --- a/test/quantization/test_quantization.py +++ /dev/null @@ -1,516 +0,0 @@ -"""Tests for neural_compressor quantization.""" - -import importlib -import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - resume: ./saved/history.snapshot - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - 
f.close() - - -def build_fake_yaml3(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - MSE: - compare_label: False - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml3.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml4(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml4.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml5(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - exit_policy: - max_trials: 10 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml5.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml6(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - tuning: - strategy: - name: fake - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml6.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - from tensorflow.compat.v1 import graph_util - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session() as sess: - x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def build_fake_strategy(): - with open( - os.path.join( - os.path.dirname(importlib.util.find_spec("neural_compressor").origin), "experimental/strategy/fake.py" - ), - "w", - encoding="utf-8", - ) as f: - seq = [ - "import time \n", - "import copy \n", - "import numpy as np \n", - "from collections import OrderedDict \n", - "from .strategy import strategy_registry, TuneStrategy \n", - "from ...utils 
import logger \n", - "from .utils.tuning_sampler import OpTypeWiseTuningSampler, FallbackTuningSampler \n", - "from .utils.tuning_structs import OpTuningConfig \n", - "import copy \n", - "@strategy_registry \n", - "class FakeTuneStrategy(TuneStrategy): \n", - " def __init__(self, model, cfg, q_dataloader, q_func=None, eval_dataloader=None, \n", - " eval_func=None, dicts=None, q_hooks=None): \n", - " self.id = 0 \n", - " self.resume = True if dicts else False \n", - " super(FakeTuneStrategy, self).__init__(model, cfg, q_dataloader, \n", - " q_func, eval_dataloader, eval_func, dicts) \n", - " def __getstate__(self): \n", - " for history in self.tuning_history: \n", - " if self._same_yaml(history['cfg'], self.cfg): \n", - " history['id'] = self.id \n", - " save_dict = super(FakeTuneStrategy, self).__getstate__() \n", - " return save_dict \n", - " def next_tune_cfg(self): \n", - " if self.resume: \n", - " #assert self.id == 1 \n", - " assert len(self.tuning_history) == 1 \n", - " history = self.tuning_history[0] \n", - " assert self._same_yaml(history['cfg'], self.cfg) \n", - " assert len(history['history']) \n", - " for h in history['history']: \n", - " assert h \n", - " from copy import deepcopy \n", - " tuning_space = self.tuning_space \n", - " initial_op_tuning_cfg = {} \n", - " for item in tuning_space.root_item.options: \n", - " if item.item_type == 'op': \n", - " op_name, op_type = item.name \n", - " initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, 'fp32', tuning_space) \n", - " calib_sampling_size_lst = tuning_space.root_item.get_option_by_name('calib_sampling_size').options \n", - " for calib_sampling_size in calib_sampling_size_lst: \n", - " # step1. collect the ops that support static and dynamic \n", - " quant_mode_wise_items = OrderedDict() \n", - " query_order = ['static', 'dynamic', 'bf16', 'fp16', 'fp32'] \n", - " pre_items = set() \n", - " for quant_mode in query_order: \n", - " items = tuning_space.query_items_by_quant_mode(quant_mode) \n", - " filtered_items = [item for item in items if item not in pre_items] \n", - " pre_items = pre_items.union(set(items)) \n", - " quant_mode_wise_items[quant_mode] = filtered_items \n", - " def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict): \n", - " for item in items_lst: \n", - " op_item_dtype_dict[item.name] = target_quant_mode \n", - " op_item_dtype_dict = OrderedDict() \n", - " for quant_mode, quant_mode_items in quant_mode_wise_items.items(): \n", - " initial_op_quant_mode(quant_mode_items, quant_mode, op_item_dtype_dict) \n", - " # step3. 
optype-wise tuning tuning items: the algorithm/scheme/granularity of activation(weight) \n", - " early_stop_tuning = False \n", - " stage1_cnt = 0 \n", - " int8_ops = quant_mode_wise_items['dynamic'] + quant_mode_wise_items['static'] \n", - " stage1_max = min(5, len(int8_ops)) # TODO set a more appropriate value \n", - " op_wise_tuning_sampler = OpTypeWiseTuningSampler(tuning_space, [], [], \n", - " op_item_dtype_dict, initial_op_tuning_cfg) \n", - " for op_tuning_cfg in op_wise_tuning_sampler: \n", - " stage1_cnt += 1 \n", - " if early_stop_tuning and stage1_cnt > stage1_max: \n", - " logger.info('Early stopping the stage 1.') \n", - " break \n", - " op_tuning_cfg['calib_sampling_size'] = calib_sampling_size \n", - " self.id += 1 \n", - " yield op_tuning_cfg \n", - ] - f.writelines(seq) - f.close() - - -class Metric: - def update(self, predict, label): - pass - - def reset(self): - pass - - def result(self): - return 0.5 - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - build_fake_yaml2() - build_fake_yaml3() - build_fake_yaml4() - build_fake_yaml5() - build_fake_yaml6() - build_fake_strategy() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - os.remove("fake_yaml3.yaml") - os.remove("fake_yaml4.yaml") - os.remove("fake_yaml5.yaml") - os.remove("fake_yaml6.yaml") - os.remove( - os.path.join( - os.path.dirname(importlib.util.find_spec("neural_compressor").origin), "experimental/strategy/fake.py" - ) - ) - shutil.rmtree("./saved", ignore_errors=True) - - def test_resume(self): - import tensorflow as tf - from tensorflow.compat.v1 import graph_util - - tf.compat.v1.disable_eager_execution() - tf.compat.v1.reset_default_graph() - tf.compat.v1.set_random_seed(1) - x = tf.compat.v1.placeholder(tf.float32, [1, 32, 32, 3], name="x") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 3], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml5.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 32, 32, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - self.assertTrue(os.path.exists("./saved")) - quantizer = Quantization("fake_yaml2.yaml") - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - # self.assertNotEqual(output_graph, None) # disable this check, the code has bug of recover from resume - - def test_autodump(self): - # test auto_dump using old api - from neural_compressor.experimental import Quantization, common - - quantizer 
= Quantization("fake_yaml3.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_performance_only(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml4.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_fit_method(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml4.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_quantization_without_yaml(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization() - quantizer.model = self.constant_graph - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_invalid_eval_func(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - - def invalid_eval_func(model): - return [[1.0]] - - quantizer.eval_func = invalid_eval_func - output_graph = quantizer.fit() - self.assertEqual(output_graph, None) - - def invalid_eval_func(model): - return "0.1" - - quantizer.eval_func = invalid_eval_func - output_graph = quantizer.fit() - self.assertEqual(output_graph, None) - - def test_custom_metric(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml6.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.metric = Metric() - quantizer.fit() - self.assertEqual(quantizer.strategy.evaluation_result[0], 0.5) - - def test_custom_objective(self): - import tracemalloc - - from neural_compressor.experimental import Quantization, common - from neural_compressor.objective import Objective, objective_registry - - class MyObjective(Objective): - representation = "MyObj" - - def __init__(self): - super().__init__() - - def start(self): - tracemalloc.start() - - def end(self): - _, peak = tracemalloc.get_traced_memory() - tracemalloc.stop() - self._result_list.append(peak // 1048576) - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - 
quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.objective = MyObjective() - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - class MyObjective(Objective): - representation = "Accuracy" - - def __init__(self): - super().__init__() - - def start(self): - tracemalloc.start() - - def end(self): - _, peak = tracemalloc.get_traced_memory() - tracemalloc.stop() - self._result_list.append(peak // 1048576) - - quantizer = Quantization() - with self.assertRaises(ValueError): - quantizer.objective = MyObjective() - - with self.assertRaises(ValueError): - - @objective_registry - class MyObjective(Objective): - representation = "Accuracy" - - def __init__(self): - super().__init__() - - def start(self): - tracemalloc.start() - - def end(self): - _, peak = tracemalloc.get_traced_memory() - tracemalloc.stop() - self._result_list.append(peak // 1048576) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/quantization/test_tensorflow_recipe.py b/test/quantization/test_tensorflow_recipe.py deleted file mode 100644 index 5498126672d..00000000000 --- a/test/quantization/test_tensorflow_recipe.py +++ /dev/null @@ -1,500 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml_disable_first_quantization(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - recipes: - first_conv_or_matmul_quantization: False - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_disable_first_quantization.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_enable_first_quantization(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - recipes: - first_conv_or_matmul_quantization: True - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_enable_first_quantization.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_disable_scale_propagation(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - recipes: - scale_propagation_max_pooling: False - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_disable_scale_propagation.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - 
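# Illustrative sketch (hypothetical helper, not taken from the original file): the recipe
# tests in this file all follow one pattern -- write a fake YAML that toggles a single
# quantization recipe, build a tiny TF v1 graph, quantize it through the (since-deprecated)
# neural_compressor.experimental API with a dummy calibration dataset, then inspect the
# resulting graph_def nodes. The function name `quantize_and_count_ops` and the argument
# `recipe_yaml_path` are illustrative; only calls already used by these tests are relied on.
import tensorflow as tf
from tensorflow.compat.v1 import graph_util


def quantize_and_count_ops(recipe_yaml_path, op_name="Conv2D"):
    """Quantize a one-conv graph with the given recipe YAML and count `op_name` nodes."""
    tf.compat.v1.disable_eager_execution()
    tf.compat.v1.reset_default_graph()

    # Build a minimal float graph: input -> relu -> conv -> relu6 ("op_to_store").
    x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
    conv_weights = tf.compat.v1.get_variable(
        "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
    )
    conv = tf.nn.conv2d(tf.nn.relu(x), conv_weights, strides=[1, 2, 2, 1], padding="VALID")
    relu6 = tf.nn.relu6(conv, name="op_to_store")

    out_name = relu6.name.split(":")[0]
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        graph_def = graph_util.convert_variables_to_constants(
            sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name]
        )

    # Quantize with the experimental API exercised by the tests in this file.
    from neural_compressor.experimental import Quantization, common

    quantizer = Quantization(recipe_yaml_path)
    dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True)
    quantizer.calib_dataloader = common.DataLoader(dataset)
    quantizer.eval_dataloader = common.DataLoader(dataset)
    quantizer.model = graph_def
    output_graph = quantizer.fit()

    # For example, with first_conv_or_matmul_quantization: False a float Conv2D node is
    # expected to remain, whereas with True it should be replaced by a quantized conv op.
    return sum(1 for node in output_graph.graph_def.node if node.op == op_name)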
-def build_fake_yaml_enable_scale_propagation(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - recipes: - scale_propagation_max_pooling: True - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_enable_scale_propagation.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_enable_scale_unification(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - recipes: - scale_propagation_concat: True - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_enable_scale_unification.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_disable_scale_unification(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - recipes: - scale_propagation_concat: False - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: False - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_disable_scale_unification.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestTensorflowInt8Recipe(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml_disable_first_quantization() - build_fake_yaml_enable_first_quantization() - build_fake_yaml_disable_scale_propagation() - build_fake_yaml_enable_scale_propagation() - build_fake_yaml_enable_scale_unification() - build_fake_yaml_disable_scale_unification() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml_disable_first_quantization.yaml") - os.remove("fake_yaml_enable_first_quantization.yaml") - os.remove("fake_yaml_disable_scale_propagation.yaml") - os.remove("fake_yaml_enable_scale_propagation.yaml") - os.remove("fake_yaml_disable_scale_unification.yaml") - os.remove("fake_yaml_enable_scale_unification.yaml") - - @disable_random() - def test_disable_first_quantization(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - 
sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_disable_first_quantization.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_fp32_conv = False - - for i in output_graph.graph_def.node: - if i.op == "Conv2D": - found_fp32_conv = True - break - - self.assertEqual(found_fp32_conv, True) - - @disable_random() - def test_enable_first_quantization(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_enable_first_quantization.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_fp32_conv = False - - for i in output_graph.graph_def.node: - if i.op == "Conv2D": - found_fp32_conv = True - break - - self.assertEqual(found_fp32_conv, False) - - @disable_random() - def test_enable_scale_propagation(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [1], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pool = tf.nn.avg_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - x = tf.nn.relu(conv_bias) - final_node = tf.nn.relu(x, name="op_to_store") - - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_enable_scale_propagation.yaml") - dataset = 
quantizer.dataset("dummy", shape=(100, 30, 30, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - max_freezed_out = [] - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasAndReluAndRequantize": - max_freezed_out.append(i.input[-1]) - - self.assertEqual(1, len(set(max_freezed_out))) - - @disable_random() - def test_disable_scale_propagation(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [1], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pool = tf.nn.avg_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - x = tf.nn.relu(conv_bias) - final_node = tf.nn.relu(x, name="op_to_store") - - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_disable_scale_propagation.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 30, 30, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - max_freezed_out = [] - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasAndReluAndRequantize": - max_freezed_out.append(i.input[-1]) - self.assertEqual(2, len(set(max_freezed_out))) - - @disable_random() - def test_enable_scale_unification(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 128, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - sqrt = tf.math.sqrt(x) - relu_sqrt = tf.nn.relu(sqrt) - conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - relu1 = tf.nn.relu(conv_bias) - concat = tf.concat([relu, relu1], 1) - final_node = tf.nn.relu(concat, name="op_to_store") - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_enable_scale_unification.yaml") - dataset 
= quantizer.dataset("dummy", shape=(100, 128, 128, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - max_freezed_out = [] - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasAndReluAndRequantize": - max_freezed_out.append(i.input[-1]) - self.assertEqual(1, len(set(max_freezed_out))) - - @disable_random() - def test_disable_scale_unification(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [1], initializer=tf.compat.v1.random_normal_initializer()) - - x = tf.nn.relu(x) - sqrt = tf.math.sqrt(x) - relu_sqrt = tf.nn.relu(sqrt) - conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - relu1 = tf.nn.relu(conv_bias) - concat = tf.concat([relu, relu1], 1) - final_node = tf.nn.relu(concat, name="op_to_store") - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml_disable_scale_unification.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 30, 30, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - max_freezed_out = [] - for i in output_graph.graph_def.node: - if i.op == "QuantizedConv2DWithBiasAndReluAndRequantize": - max_freezed_out.append(i.input[-1]) - self.assertEqual(2, len(set(max_freezed_out))) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/quantization/test_tensorflow_recover.py b/test/quantization/test_tensorflow_recover.py deleted file mode 100644 index ad1d84f5962..00000000000 --- a/test/quantization/test_tensorflow_recover.py +++ /dev/null @@ -1,217 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import logging -import os -import shutil -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.python.framework import tensor_util -from tensorflow.python.platform import gfile - -from neural_compressor.adaptor.tf_utils.util import disable_random - -logger = logging.getLogger("neural_compressor") -logger.setLevel(logging.DEBUG) - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - 
graph_optimization: - precisions: [bf16] - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.0001 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestTensorflowRecover(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("test.pb") - shutil.rmtree("./saved", ignore_errors=True) - - @disable_random() - def test_tensorflow_recover(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - with gfile.GFile("./test.pb", "wb") as f: - f.write(constant_graph.SerializeToString()) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("./fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = constant_graph - q_model = quantizer.fit() - - from neural_compressor.utils.utility import recover - - recover_model = recover("./test.pb", "./saved/history.snapshot", 0) - - q_model_const_value = {} - for node in q_model.graph_def.node: - if node.op == "Const": - tensor_value = tensor_util.MakeNdarray(node.attr["value"].tensor) - if not tensor_value.shape: - q_model_const_value[node.name] = tensor_value - for node in recover_model.graph_def.node: - if node.op == "Const": - tensor_value = tensor_util.MakeNdarray(node.attr["value"].tensor) - if node.name in q_model_const_value: - self.assertEqual(tensor_value, q_model_const_value[node.name]) - - -class TestTensorflowRecoverForceBF16(unittest.TestCase): - @classmethod - def setUpClass(self): - os.environ["FORCE_BF16"] = "1" - build_fake_yaml_2() - - @classmethod - def tearDownClass(self): - del os.environ["FORCE_BF16"] - os.remove("fake_yaml_2.yaml") - if os.path.exists("./test.pb"): - os.remove("test.pb") - shutil.rmtree("./saved", ignore_errors=True) - - @disable_random() - @unittest.skipIf(tf.__version__ < "2.0", "currently bf16 converter only support tf > 2.0") - def test_tensorflow_recover_bf16(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], 
[1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_weights_2 = tf.compat.v1.get_variable( - "weight_2", [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID", name="conv1_3") - conv_bias = tf.math.add(conv_1, conv_bias) - relu6 = tf.nn.relu6(conv_bias, name="op_to_store") - - out_name = relu6.name.split(":")[0] - - def eval(model): - return 0.5 - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - with gfile.GFile("./test.pb", "wb") as f: - f.write(constant_graph.SerializeToString()) - - from neural_compressor.experimental import MixedPrecision - - convert = MixedPrecision("./fake_yaml_2.yaml") - convert.model = constant_graph - convert.eval_func = eval - output_model = convert.fit() - found_cast_op = False - - from neural_compressor.utils.utility import recover - - recover_model = recover("./test.pb", "./saved/history.snapshot", 0) - - q_model_const_value = {} - for node in output_model.graph_def.node: - if node.op == "Const": - tensor_value = tensor_util.MakeNdarray(node.attr["value"].tensor) - if not tensor_value.shape: - q_model_const_value[node.name] = tensor_value - for node in recover_model.graph_def.node: - if node.op == "Cast": - found_cast_op = True - continue - if node.op == "Const": - tensor_value = tensor_util.MakeNdarray(node.attr["value"].tensor) - if node.name in q_model_const_value: - self.assertEqual(tensor_value, q_model_const_value[node.name]) - - self.assertEqual(found_cast_op, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/requirements.txt b/test/requirements.txt index 3a24001cfd2..1999f21e668 100644 --- a/test/requirements.txt +++ b/test/requirements.txt @@ -1,6 +1,6 @@ --find-links https://download.pytorch.org/whl/torch_stable.html accelerate==0.21.0 -auto-round +auto-round @ git+https://github.com/intel/auto-round.git@e24b9074af6cdb099e31c92eb81b7f5e9a4a244e dynast==1.6.0rc1 horovod intel-extension-for-pytorch diff --git a/test/scheduler/test_oneshot.py b/test/scheduler/test_oneshot.py deleted file mode 100644 index ac1e914bc20..00000000000 --- a/test/scheduler/test_oneshot.py +++ /dev/null @@ -1,589 +0,0 @@ -import copy -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision -from packaging.version import Version - -import neural_compressor.adaptor.pytorch as nc_torch -from neural_compressor.conf.config import DistillationConf, PruningConf -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.scheduler import Scheduler -from neural_compressor.training import prepare_compression -from neural_compressor.utils import logger -from neural_compressor.utils.pytorch import load - -PT_VERSION = nc_torch.get_torch_version() -if PT_VERSION >= 
Version("1.8.0-rc1"): - FX_MODE = True -else: - FX_MODE = False - -fake_yaml = """ -model: - name: imagenet_prune - framework: pytorch - -pruning: - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 3 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - target_sparsity: 0.6 - prune_type: basic_magnitude - update_frequency: 2 - names: ['layer1.0.conv2.weight'] -evaluation: - accuracy: - metric: - topk: 1 -""" - -fake2_yaml = """ -model: - name: imagenet_qat - framework: pytorch - -quantization: - approach: quant_aware_training - -evaluation: - accuracy: - metric: - topk: 1 -tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 -""" - -fake3_yaml = """ -model: - name: imagenet_distillation - framework: pytorch - -distillation: - train: - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - KnowledgeDistillationLoss: - temperature: 1.0 - loss_types: ['CE', 'KL'] - loss_weights: [0.5, 0.5] - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - -evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True -tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 -""" - - -def build_fake_yaml(): - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml2(): - with open("fake2.yaml", "w", encoding="utf-8") as f: - f.write(fake2_yaml) - - -def build_fake_yaml3(): - with open("fake3.yaml", "w", encoding="utf-8") as f: - f.write(fake3_yaml) - - -def build_fx_fake_yaml(): - fx_fake_yaml = fake_yaml.replace("pytorch", "pytorch_fx") - with open("fx_fake.yaml", "w", encoding="utf-8") as f: - f.write(fx_fake_yaml) - - -def build_fx_fake_yaml2(): - fx_fake2_yaml = fake2_yaml.replace("pytorch", "pytorch_fx") - with open("fx_fake2.yaml", "w", encoding="utf-8") as f: - f.write(fx_fake2_yaml) - - -def build_fx_fake_yaml3(): - fx_fake3_yaml = fake3_yaml.replace("pytorch", "pytorch_fx") - with open("fx_fake3.yaml", "w", encoding="utf-8") as f: - f.write(fx_fake3_yaml) - - -class DynamicControlModel(torch.nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv2d(3, 1, 1) - self.bn = nn.BatchNorm2d(1) - self.linear = nn.Linear(224 * 224, 1) - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - if x.size(1) == 1: - x = x.view(1, -1) - x = self.linear(x) - return x - - -class TestPruning(unittest.TestCase): - model = torchvision.models.resnet18() - q_model = torchvision.models.quantization.resnet18() - q_model.fuse_model() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - build_fake_yaml2() - build_fake_yaml3() - build_fx_fake_yaml() - build_fx_fake_yaml2() - build_fx_fake_yaml3() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - os.remove("fake2.yaml") - os.remove("fake3.yaml") - os.remove("fx_fake.yaml") - os.remove("fx_fake2.yaml") - os.remove("fx_fake3.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - shutil.rmtree("nc_workspace", ignore_errors=True) - - def test_prune_qat_oneshot(self): - from neural_compressor.experimental import Pruning, Quantization - - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, 
label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - q_model = copy.deepcopy(self.q_model) - prune = Pruning("./fake.yaml") - quantizer = Quantization("./fake2.yaml") - scheduler = Scheduler() - scheduler.model = q_model - combination = scheduler.combine(prune, quantizer) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin() - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - opt_model.save("./saved") - logger.info(20 * "=" + "test_prune_qat_oneshot" + 20 * "=") - - try: - conv_weight = opt_model.model.layer1[0].conv1.weight().dequantize() - except: - conv_weight = opt_model.model.layer1[0].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of pruning,quantization") - # reloading int8 model - reloaded_model = load("./saved", copy.deepcopy(self.q_model)) - try: - reloaded_conv_weight = reloaded_model.layer1[0].conv1.weight().dequantize() - except: - reloaded_conv_weight = reloaded_model.layer1[0].conv1.weight - self.assertEqual(reloaded_conv_weight.sum().item(), conv_weight.sum().item()) - - def test_distillation_qat_oneshot(self): - from neural_compressor.experimental import Distillation, Quantization - - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - model = copy.deepcopy(self.model) - q_model = copy.deepcopy(self.q_model) - distiller = Distillation("./fake3.yaml") - quantizer = Quantization("./fake2.yaml") - scheduler = Scheduler() - distiller.teacher_model = model - scheduler.model = q_model - combination = scheduler.combine(distiller, quantizer) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin() - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - loss = combination.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - opt_model.save("./saved") - logger.info(20 * "=" + "test_distillation_qat_oneshot" + 20 * "=") - - self.assertEqual(combination.__repr__().lower(), "combination of distillation,quantization") - # 
reloading int8 model - reloaded_model = load("./saved", copy.deepcopy(self.q_model)) - - def test_prune_qat_distillation_oneshot(self): - from neural_compressor.experimental import Distillation, Pruning, Quantization - - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - model = copy.deepcopy(self.model) - q_model = copy.deepcopy(self.q_model) - prune = Pruning("./fake.yaml") - quantizer = Quantization("./fake2.yaml") - distiller = Distillation("./fake3.yaml") - scheduler = Scheduler() - distiller.teacher_model = model - scheduler.model = q_model - combination = scheduler.combine(prune, quantizer, distiller) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin() - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - loss = combination.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - return model - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - logger.info(20 * "=" + "test_prune_qat_distillation_oneshot" + 20 * "=") - - try: - conv_weight = opt_model.model.layer1[0].conv1.weight().dequantize() - except: - conv_weight = opt_model.model.layer1[0].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of pruning,quantization,distillation") - - def test_prune_qat_oneshot_fx(self): - from neural_compressor.experimental import Pruning, Quantization - - datasets = Datasets("pytorch_fx") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - prune = Pruning("./fx_fake.yaml") - quantizer = Quantization("./fx_fake2.yaml") - scheduler = Scheduler() - model = copy.deepcopy(self.model) - scheduler.model = model - combination = scheduler.combine(prune, quantizer) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin(dummy_dataloader) - model = combination.model.model - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - return model - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - opt_model.save("./saved") - logger.info(20 * "=" + "test_prune_qat_oneshot_fx" + 
20 * "=") - conv_weight = opt_model.model.state_dict()["layer1.0.conv1.weight"] - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of pruning,quantization") - # reloading int8 model - reloaded_model = load("./saved", copy.deepcopy(self.model), dataloader=dummy_dataloader) - reloaded_conv_weight = reloaded_model.state_dict()["layer1.0.conv1.weight"] - self.assertTrue(torch.equal(reloaded_conv_weight, conv_weight)) - - @unittest.skipIf(PT_VERSION < Version("1.9.0-rc1"), "requires higher version of torch than 1.9.0") - def test_distillation_qat_oneshot_fx(self): - from neural_compressor.experimental import Distillation, Quantization - - datasets = Datasets("pytorch_fx") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - model = DynamicControlModel() - distiller = Distillation("./fx_fake3.yaml") - quantizer = Quantization("./fx_fake2.yaml") - scheduler = Scheduler() - distiller.teacher_model = copy.deepcopy(model) - scheduler.model = model - combination = scheduler.combine(distiller, quantizer) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin() - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - loss = combination.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - return model - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - opt_model.save("./saved") - logger.info(20 * "=" + "test_distillation_qat_oneshot_fx" + 20 * "=") - - self.assertEqual(combination.__repr__().lower(), "combination of distillation,quantization") - # reloading int8 model - model = DynamicControlModel() - reloaded_model = load("./saved", model, dataloader=dummy_dataloader) - - def test_distillation_prune_oneshot_fx(self): - from neural_compressor.experimental import Distillation, Pruning - - datasets = Datasets("pytorch_fx") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - distiller = Distillation("./fx_fake3.yaml") - pruner = Pruning("./fx_fake.yaml") - scheduler = Scheduler() - model = copy.deepcopy(self.model) - distiller.teacher_model = copy.deepcopy(model) - scheduler.model = model - combination = scheduler.combine(distiller, pruner) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin(dummy_dataloader) - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - loss = combination.on_after_compute_loss(image, output, loss) - 
optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - return model - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - logger.info(20 * "=" + "test_distillation_prune_oneshot_fx" + 20 * "=") - - try: - conv_weight = dict(opt_model.model.layer1.named_modules())["0"].conv1.weight().dequantize() - except: - conv_weight = dict(opt_model.model.layer1.named_modules())["0"].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of distillation,pruning") - - @unittest.skipIf(PT_VERSION < Version("1.9.0-rc1"), "requires higher version of torch than 1.9.0") - def test_prune_qat_distillation_oneshot_fx(self): - from neural_compressor.experimental import Distillation, Pruning, Quantization - - datasets = Datasets("pytorch_fx") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - model = copy.deepcopy(self.model) - prune = Pruning("./fx_fake.yaml") - quantizer = Quantization("./fx_fake2.yaml") - distiller = Distillation("./fx_fake3.yaml") - scheduler = Scheduler() - distiller.teacher_model = copy.deepcopy(model) - scheduler.model = model - combination = scheduler.combine(prune, quantizer, distiller) - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - combination.on_train_begin() - for nepoch in range(epochs): - model.train() - cnt = 0 - combination.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - combination.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - loss = combination.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - optimizer.step() - combination.on_step_end() - if cnt >= iters: - break - combination.on_epoch_end() - combination.on_train_end() - return model - - combination.train_func = train_func_for_nc - combination.eval_dataloader = dummy_dataloader - combination.train_dataloader = dummy_dataloader - scheduler.append(combination) - opt_model = scheduler() - logger.info(20 * "=" + "test_prune_qat_distillation_oneshot_fx" + 20 * "=") - - try: - conv_weight = dict(opt_model.model.layer1.named_modules())["0"].conv1.weight().dequantize() - except: - conv_weight = dict(opt_model.model.layer1.named_modules())["0"].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of pruning,quantization,distillation") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/scheduler/test_orchestration.py b/test/scheduler/test_orchestration.py deleted file mode 100644 index 60ecef37c5d..00000000000 --- a/test/scheduler/test_orchestration.py +++ /dev/null @@ -1,84 +0,0 @@ -import copy -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision - -from neural_compressor.config import ( - DistillationConfig, - KnowledgeDistillationLossConfig, - QuantizationAwareTrainingConfig, - WeightPruningConfig, -) -from neural_compressor.data import 
Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.training import prepare_compression - - -class TestPruning(unittest.TestCase): - model = torchvision.models.resnet18() - - def test_distillation_prune_qat_oneshot_with_new_API(self): - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.model.to(device) - model = copy.deepcopy(self.model) - distillation_criterion = KnowledgeDistillationLossConfig(loss_types=["CE", "KL"]) - d_conf = DistillationConfig(copy.deepcopy(self.model), distillation_criterion) - p_conf = WeightPruningConfig([{"start_step": 0, "end_step": 2}], target_sparsity=0.64, pruning_scope="local") - q_conf = QuantizationAwareTrainingConfig() - compression_manager = prepare_compression(model=model, confs=[d_conf, p_conf, q_conf]) - compression_manager.callbacks.on_train_begin() - model = compression_manager.model - - def train_func_for_nc(model): - epochs = 3 - iters = 3 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.1, nesterov=True, weight_decay=0.001) - for nepoch in range(epochs): - model.train() - cnt = 0 - compression_manager.callbacks.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - compression_manager.callbacks.on_step_begin(cnt) - print(".", end="") - cnt += 1 - image = image.to(device) - target = target.to(device) - output = model(image) - loss = criterion(output, target) - loss = compression_manager.callbacks.on_after_compute_loss(image, output, loss) - optimizer.zero_grad() - loss.backward() - compression_manager.callbacks.on_before_optimizer_step() - optimizer.step() - compression_manager.callbacks.on_after_optimizer_step() - compression_manager.callbacks.on_step_end() - if cnt >= iters: - break - compression_manager.callbacks.on_epoch_end() - compression_manager.callbacks.on_train_end() - return model - - train_func_for_nc(model) - print(20 * "=" + "test_distillation_prune_qat_oneshot" + 20 * "=") - try: - conv_weight = dict(model.model.layer1.named_modules())["0.conv1"].weight().dequantize() - except: - conv_weight = dict(model.model.layer1.named_modules())["0.conv1"].weight() - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertTrue("quantized" in str(type(dict(model.model.layer1.named_modules())["0.conv1"]))) - self.assertEqual( - str(compression_manager.callbacks.callbacks_list), - "[Distillation Callbacks, Pruning Callbacks, Quantization Aware Training Callbacks]", - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/scheduler/test_scheduler.py b/test/scheduler/test_scheduler.py deleted file mode 100644 index 4dc2b04ed5b..00000000000 --- a/test/scheduler/test_scheduler.py +++ /dev/null @@ -1,468 +0,0 @@ -import os -import shutil -import unittest - -import torch -import torch.nn as nn -import torchvision -from packaging.version import Version - -import neural_compressor.adaptor.pytorch as nc_torch -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader -from neural_compressor.experimental.scheduler import Scheduler - -PT_VERSION = nc_torch.get_torch_version() - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: imagenet_prune - 
framework: pytorch - - pruning: - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 3 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - target_sparsity: 0.6 - prune_type: basic_magnitude - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch - - pruning: - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 3 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - start_epoch: 0 - end_epoch: 3 - target_sparsity: 0.6 - prune_type: basic_magnitude - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - """ - with open("fake2.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml3(): - fake_yaml = """ - model: - name: imagenet_qat - framework: pytorch - - quantization: - approach: quant_aware_training - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("fake3.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml4(): - fake_yaml = """ - model: - name: imagenet_prune - framework: pytorch_fx - - pruning: - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.97 - start_epoch: 0 - end_epoch: 3 - pruners: - - !Pruner - start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - names: ['layer1.0.conv1.weight'] - - - !Pruner - start_epoch: 0 - end_epoch: 3 - target_sparsity: 0.6 - prune_type: basic_magnitude - update_frequency: 2 - names: ['layer1.0.conv2.weight'] - - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - """ - with open("fake4.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml5(): - fake_yaml = """ - model: - name: imagenet_qat - framework: pytorch_fx - - quantization: - approach: quant_aware_training - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - optimizer: - SGD: - learning_rate: 
0.1 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - evaluation: - accuracy: - metric: - topk: 1 - tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 - """ - with open("fake5.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def build_fake_yaml6(): - fake_yaml = """ - model: - name: imagenet_distillation - framework: pytorch - - distillation: - train: - start_epoch: 0 - end_epoch: 3 - iteration: 10 - frequency: 1 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.001 - criterion: - KnowledgeDistillationLoss: - temperature: 1.0 - loss_types: ['CE', 'KL'] - loss_weights: [0.5, 0.5] - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - evaluation: - accuracy: - metric: - topk: 1 - dataloader: - batch_size: 1 - dataset: - dummy: - shape: [16, 3, 224, 224] - label: True - """ - with open("fake6.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -class TestPruning(unittest.TestCase): - model = torchvision.models.resnet18() - q_model = torchvision.models.quantization.resnet18() - q_model_teacher = torchvision.models.quantization.resnet50() - - @classmethod - def setUpClass(cls): - build_fake_yaml() - build_fake_yaml2() - build_fake_yaml3() - build_fake_yaml4() - build_fake_yaml5() - build_fake_yaml6() - - @classmethod - def tearDownClass(cls): - os.remove("fake.yaml") - os.remove("fake2.yaml") - os.remove("fake3.yaml") - os.remove("fake4.yaml") - os.remove("fake5.yaml") - os.remove("fake6.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - shutil.rmtree("nc_workspace", ignore_errors=True) - - def test_pruning(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake.yaml") - scheduler = Scheduler() - scheduler.model = self.model - datasets = Datasets("pytorch") - dummy_dataset = datasets["dummy"](shape=(16, 3, 224, 224), low=0.0, high=1.0, label=True) - dummy_dataloader = PyTorchDataLoader(dummy_dataset) - - def training_func_for_nc(model): - epochs = 2 - iters = 2 - criterion = nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - for nepoch in range(epochs): - model.train() - cnt = 0 - prune.on_epoch_begin(nepoch) - for image, target in dummy_dataloader: - prune.on_step_begin(cnt) - print(".", end="") - cnt += 1 - output = model(image) - loss = criterion(output, target) - optimizer.zero_grad() - loss.backward() - optimizer.step() - prune.on_step_end() - if cnt >= iters: - break - prune.on_epoch_end() - - prune.pruning_func = training_func_for_nc - prune.eval_dataloader = dummy_dataloader - prune.train_dataloader = dummy_dataloader - scheduler.append(prune) - opt_model = scheduler.fit() - - def test_pure_yaml_pruning(self): - from neural_compressor.experimental import Pruning, common - - prune = Pruning("fake2.yaml") - scheduler = Scheduler() - scheduler.model = self.model - scheduler.append(prune) - opt_model = scheduler.fit() - opt_model.report_sparsity() - try: - conv_weight = opt_model.model.layer1[0].conv1.weight.dequantize() - except: - conv_weight = opt_model.model.layer1[0].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - - def test_scheduler_qat_distillation(self): - from neural_compressor.experimental import Distillation, Quantization, common - - self.q_model = torchvision.models.quantization.resnet18() - 
self.q_model.fuse_model() - quantizer = Quantization("./fake3.yaml") - distiller = Distillation("./fake6.yaml") - scheduler = Scheduler() - scheduler.model = self.q_model - distiller.teacher_model = self.q_model_teacher - scheduler.append(distiller) - scheduler.append(quantizer) - opt_model = scheduler.fit() - opt_model.report_sparsity() - try: - conv_weight = opt_model.model.layer1[0].conv1.weight().dequantize() - except: - conv_weight = opt_model.model.layer1[0].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.01, delta=0.01) - - def test_combine_qat_pruning(self): - from neural_compressor.experimental import Pruning, Quantization, common - - self.q_model = torchvision.models.quantization.resnet18() - self.q_model.fuse_model() - quantizer = Quantization("./fake3.yaml") - prune = Pruning("./fake2.yaml") - scheduler = Scheduler() - scheduler.model = self.q_model - combination = scheduler.combine(prune, quantizer) - scheduler.append(combination) - opt_model = scheduler.fit() - opt_model.report_sparsity() - try: - conv_weight = opt_model.model.layer1[0].conv1.weight().dequantize() - except: - conv_weight = opt_model.model.layer1[0].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of pruning,quantization") - - def test_combine_qat_distillation(self): - from neural_compressor.experimental import Distillation, Quantization, common - - self.q_model.fuse_model() - quantizer = Quantization("./fake3.yaml") - distiller = Distillation("./fake6.yaml") - scheduler = Scheduler() - scheduler.model = self.q_model - distiller.teacher_model = self.q_model_teacher - combination = scheduler.combine(distiller, quantizer) - scheduler.append(combination) - opt_model = scheduler.fit() - opt_model.report_sparsity() - try: - conv_weight = opt_model.model.layer1[0].conv1.weight().dequantize() - except: - conv_weight = opt_model.model.layer1[0].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.01, delta=0.01) - self.assertEqual(combination.__repr__().lower(), "combination of distillation,quantization") - - @unittest.skipIf( - PT_VERSION < Version("1.9.0-rc1"), - "Please use PyTroch 1.9 or higher version for Quantization & Pruning with pytorch_fx backend", - ) - def test_combine_fx(self): - from neural_compressor.experimental import Pruning, Quantization, common - - quantizer = Quantization("./fake5.yaml") - prune = Pruning("./fake4.yaml") - scheduler = Scheduler() - scheduler.model = self.model - combination = scheduler.combine(prune, quantizer) - scheduler.append(combination) - opt_model = scheduler.fit() - opt_model.report_sparsity() - try: - conv_weight = dict(opt_model.model.layer1.named_modules())["0"].conv1.weight().dequantize() - except: - conv_weight = dict(opt_model.model.layer1.named_modules())["0"].conv1.weight - self.assertAlmostEqual((conv_weight == 0).sum().item() / conv_weight.numel(), 0.64, delta=0.05) - self.assertEqual(combination.__repr__().lower(), "combination of pruning,quantization") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_basic.py b/test/strategy/test_basic.py index 55ba1fc857d..56e82f6994b 100644 --- a/test/strategy/test_basic.py +++ b/test/strategy/test_basic.py @@ -85,21 +85,6 @@ def fake_eval(model): q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_func=fake_eval) self.assertIsNotNone(q_model) - def 
test_diagnosis(self): - from neural_compressor.config import PostTrainingQuantConfig - from neural_compressor.data import DATALOADERS, Datasets - from neural_compressor.quantization import fit - - # dataset and dataloader - dataset = Datasets("tensorflow")["dummy"](((100, 3, 3, 1))) - dataloader = DATALOADERS["tensorflow"](dataset) - - # tuning and accuracy criterion - conf = PostTrainingQuantConfig(diagnosis=True) - q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_func=lambda model: 1) - self.assertEqual(os.path.exists(os.path.join(self.workspace, "inspect_saved/fp32/inspect_result.pkl")), True) - self.assertEqual(os.path.exists(os.path.join(self.workspace, "inspect_saved/quan/inspect_result.pkl")), True) - def test_run_create_eval_from_metric_and_dataloader(self): from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.data import DATALOADERS, Datasets diff --git a/test/strategy/test_basic_1x.py b/test/strategy/test_basic_1x.py deleted file mode 100644 index f327c55aad2..00000000000 --- a/test/strategy/test_basic_1x.py +++ /dev/null @@ -1,282 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_recipe(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - quantization: - approach: - post_training_auto_quant - tuning: - strategy: - name: basic - exit_policy: - max_trials: 10 - accuracy_criterion: - absolute: -1 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml_recipe.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - exit_policy: - max_trials: 3 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml3(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: - compare_label: False - tuning: - strategy: - name: basic - exit_policy: - max_trials: 3 - timeout: 50 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml3.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml4(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - multi_metrics: - topk: 1 - MSE: - compare_label: False - weight: [1, 0] - tuning: - strategy: - name: basic - exit_policy: - max_trials: 3 - timeout: 
50 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml4.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d( - input=op, - filters=z, - strides=[1, 1, 1, 1], - padding="VALID", - ) - last_identity = tf.identity(op2, name="op2_to_store") - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d(input=op, filters=z, strides=[1, 1, 1, 1], padding="VALID") - last_identity = tf.identity(op2, name="op2_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class TestBasicTuningStrategy(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - build_fake_yaml2() - build_fake_yaml3() - build_fake_yaml4() - build_fake_yaml_recipe() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - os.remove("fake_yaml3.yaml") - os.remove("fake_yaml4.yaml") - os.remove("fake_yaml_recipe.yaml") - shutil.rmtree("saved", ignore_errors=True) - - def test_run_basic_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - # resume tuning history - quantizer.conf.usr_cfg.tuning.workspace.resume = "saved/history.snapshot" - quantizer.fit() - - def test_run_basic_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_run_basic_recipe(self): - from neural_compressor.experimental 
import Quantization, common - - quantizer = Quantization("fake_yaml_recipe.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_run_basic_max_trials_multimetric(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml3.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_run_basic_max_trials_multimetric_weight(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml4.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_bayesian_1x.py b/test/strategy/test_bayesian_1x.py deleted file mode 100644 index 51f25e45dc1..00000000000 --- a/test/strategy/test_bayesian_1x.py +++ /dev/null @@ -1,340 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - quantization: - calibration: - sampling_size: 10 - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: bayesian - exit_policy: - max_trials: 1 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: final - device: cpu - quantization: - calibration: - sampling_size: 10, 20 - op_wise: { - \"conv1\": { - \"activation\": {\"dtype\": [\"fp32\"]}, - }, - } - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: bayesian - exit_policy: - max_trials: 3 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session() as sess: - x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 
1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def create_test_graph(): - from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 - from tensorflow.python.framework import dtypes, tensor_util - - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "conv1_bias_add" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node = node_def_pb2.NodeDef() - relu_node.op = "Relu" - relu_node.name = "relu" - relu_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node.input.extend([bias_add_node.name]) - - conv2_weight_node = node_def_pb2.NodeDef() - conv2_weight_node.name = "conv2_weights" - conv2_weight_node.op = "Const" - conv2_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv2_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv2_weight_value, conv2_weight_value.dtype.type, conv2_weight_value.shape - ) - ) - ) - - conv2_node = node_def_pb2.NodeDef() - conv2_node.name = "conv2" - conv2_node.op = "Conv2D" - 
conv2_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_node.input.extend([relu_node.name, conv2_weight_node.name]) - conv2_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv2_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node2 = node_def_pb2.NodeDef() - bias_node2.name = "conv2_bias" - bias_node2.op = "Const" - bias_value2 = np.float32(np.abs(np.random.randn(32))) - bias_node2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node2.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value2, bias_value2.dtype.type, bias_value2.shape) - ) - ) - - bias_add_node2 = node_def_pb2.NodeDef() - bias_add_node2.name = "conv2_bias_add" - bias_add_node2.op = "BiasAdd" - bias_add_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node2.input.extend([conv2_node.name, bias_node2.name]) - bias_add_node2.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node2 = node_def_pb2.NodeDef() - relu_node2.op = "Relu" - relu_node2.name = "relu2" - relu_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node2.input.extend([bias_add_node2.name]) - - conv3_weight_node = node_def_pb2.NodeDef() - conv3_weight_node.name = "conv3_weights" - conv3_weight_node.op = "Const" - conv3_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv3_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv3_weight_value, conv3_weight_value.dtype.type, conv3_weight_value.shape - ) - ) - ) - - conv3_node = node_def_pb2.NodeDef() - conv3_node.name = "conv3" - conv3_node.op = "Conv2D" - conv3_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_node.input.extend([relu_node2.name, conv3_weight_node.name]) - conv3_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv3_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - identity_node = node_def_pb2.NodeDef() - identity_node.name = "final" - identity_node.op = "Identity" - identity_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - identity_node.input.extend([conv3_node.name]) - - test_graph = graph_pb2.GraphDef() - - test_graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - relu_node, - conv2_weight_node, - conv2_node, - bias_node2, - bias_add_node2, - relu_node2, - conv3_weight_node, - conv3_node, - identity_node, - ] - ) - return test_graph - - -def objective_func(params): - return params["x1"] ** 2 + params["x2"] - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() 
- self.test_graph = create_test_graph() - build_fake_yaml() - build_fake_yaml2() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - - shutil.rmtree("saved", ignore_errors=True) - - def test_run_bayesian_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_run_bayesian_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", shape=(1, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.test_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_bayesian_opt_class(self): - from neural_compressor.experimental.strategy.bayesian import BayesianOptimization - - pbounds = {} - pbounds["x1"] = (0, 1) - pbounds["x2"] = (0, 1) - np.random.seed(9527) - bayes_opt = BayesianOptimization(pbounds=pbounds, random_seed=9527) - for i in range(10): - params = bayes_opt.gen_next_params() - try: - bayes_opt._space.register(params, objective_func(params)) - except KeyError: - pass - self.assertTrue(bayes_opt._space.max()["target"] == 2.0) - self.assertTrue(len(bayes_opt._space.res()) == 8) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_exhaustive_1x.py b/test/strategy/test_exhaustive_1x.py deleted file mode 100644 index 53ccf237be1..00000000000 --- a/test/strategy/test_exhaustive_1x.py +++ /dev/null @@ -1,137 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: exhaustive - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: exhaustive - exit_policy: - max_trials: 5 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session() as sess: - x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - 
graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - build_fake_yaml2() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - - shutil.rmtree("saved", ignore_errors=True) - - def test_ru_exhaustive_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_ru_exhaustive_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_mse_1x.py b/test/strategy/test_mse_1x.py deleted file mode 100644 index 09f8a34641e..00000000000 --- a/test/strategy/test_mse_1x.py +++ /dev/null @@ -1,323 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - quantization: - calibration: - sampling_size: 10 - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - exit_policy: - max_trials: 1 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: final - device: cpu - quantization: - calibration: - sampling_size: 10, 20 - op_wise: { - \"conv1\": { - \"activation\": {\"dtype\": [\"fp32\"]}, - }, - } - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - exit_policy: - max_trials: 3 - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session() as sess: - x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), 
name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -def create_test_graph(): - from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 - from tensorflow.python.framework import dtypes, tensor_util - - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "conv1_bias_add" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node = node_def_pb2.NodeDef() - relu_node.op = "Relu" - relu_node.name = "relu" - 
relu_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node.input.extend([bias_add_node.name]) - - conv2_weight_node = node_def_pb2.NodeDef() - conv2_weight_node.name = "conv2_weights" - conv2_weight_node.op = "Const" - conv2_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv2_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv2_weight_value, conv2_weight_value.dtype.type, conv2_weight_value.shape - ) - ) - ) - - conv2_node = node_def_pb2.NodeDef() - conv2_node.name = "conv2" - conv2_node.op = "Conv2D" - conv2_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv2_node.input.extend([relu_node.name, conv2_weight_node.name]) - conv2_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv2_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv2_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node2 = node_def_pb2.NodeDef() - bias_node2.name = "conv2_bias" - bias_node2.op = "Const" - bias_value2 = np.float32(np.abs(np.random.randn(32))) - bias_node2.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node2.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value2, bias_value2.dtype.type, bias_value2.shape) - ) - ) - - bias_add_node2 = node_def_pb2.NodeDef() - bias_add_node2.name = "conv2_bias_add" - bias_add_node2.op = "BiasAdd" - bias_add_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node2.input.extend([conv2_node.name, bias_node2.name]) - bias_add_node2.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - relu_node2 = node_def_pb2.NodeDef() - relu_node2.op = "Relu" - relu_node2.name = "relu2" - relu_node2.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node2.input.extend([bias_add_node2.name]) - - conv3_weight_node = node_def_pb2.NodeDef() - conv3_weight_node.name = "conv3_weights" - conv3_weight_node.op = "Const" - conv3_weight_value = np.float32(np.abs(np.random.randn(3, 3, 32, 32))) - conv3_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv3_weight_value, conv3_weight_value.dtype.type, conv3_weight_value.shape - ) - ) - ) - - conv3_node = node_def_pb2.NodeDef() - conv3_node.name = "conv3" - conv3_node.op = "Conv2D" - conv3_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv3_node.input.extend([relu_node2.name, conv3_weight_node.name]) - conv3_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv3_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv3_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - identity_node = 
node_def_pb2.NodeDef() - identity_node.name = "final" - identity_node.op = "Identity" - identity_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - identity_node.input.extend([conv3_node.name]) - - test_graph = graph_pb2.GraphDef() - - test_graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - relu_node, - conv2_weight_node, - conv2_node, - bias_node2, - bias_add_node2, - relu_node2, - conv3_weight_node, - conv3_node, - identity_node, - ] - ) - return test_graph - - -def objective_func(params): - return params["x1"] ** 2 + params["x2"] - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - self.test_graph = create_test_graph() - build_fake_yaml() - build_fake_yaml2() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - - shutil.rmtree("saved", ignore_errors=True) - - def test_run_mse_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - def test_run_mse_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", shape=(1, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = self.test_graph - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_mse_v2.py b/test/strategy/test_mse_v2.py deleted file mode 100644 index dd5d1bc2c82..00000000000 --- a/test/strategy/test_mse_v2.py +++ /dev/null @@ -1,171 +0,0 @@ -import copy -import os -import shutil -import unittest - -import numpy as np -import tensorflow as tf -import torchvision - -from neural_compressor.experimental import Quantization, common - - -def build_mse_yaml_tf(): - mse_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse_v2 - accuracy_criterion: - relative: 0.01 - exit_policy: - max_trials: 10 - timeout: 3600 - random_seed: 9527 - """ - with open("mse_yaml_tf.yaml", "w", encoding="utf-8") as f: - f.write(mse_yaml) - - -def build_mse_yaml_pytorch(): - mse_yaml = """ - model: - name: resnet18 - framework: pytorch_fx - - tuning: - strategy: - name: mse_v2 - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - """ - with open("mse_yaml_pytorch.yaml", "w", encoding="utf-8") as f: - f.write(mse_yaml) - - -def build_fake_model(): - try: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d( - 
input=op, - filters=z, - strides=[1, 1, 1, 1], - padding="VALID", - ) - last_identity = tf.identity(op2, name="op2_to_store") - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d(input=op, filters=z, strides=[1, 1, 1, 1], padding="VALID") - last_identity = tf.identity(op2, name="op2_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class Test_MSEV2Strategy_Tensorflow(unittest.TestCase): - @classmethod - def setUpClass(self): - build_mse_yaml_tf() - self.model = build_fake_model() - - @classmethod - def tearDownClass(self): - os.remove("mse_yaml_tf.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - shutil.rmtree("nc_workspace", ignore_errors=True) - - def test_quantization_saved(self): - i = [0] # use a mutable type (list) to wrap the int object - - def fake_eval_func(_): - # 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 - eval_list = [0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1] - i[0] += 1 - return eval_list[i[0]] - - quantizer = Quantization("mse_yaml_tf.yaml") - - quantizer.model = self.model - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.eval_func = fake_eval_func - q_model = quantizer.fit() - self.assertIsNotNone(q_model) - q_model.save("./saved") - - -class Test_MSEV2Strategy_PyTorch(unittest.TestCase): - @classmethod - def setUpClass(self): - build_mse_yaml_pytorch() - self.model = torchvision.models.resnet18() - - @classmethod - def tearDownClass(self): - os.remove("mse_yaml_pytorch.yaml") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - shutil.rmtree("nc_workspace", ignore_errors=True) - - def test_quantization_saved(self): - i = [0] - - def fake_eval_func(model): - acc_lst = [1, 1, 0, 0, 0, 0, 1, 1.1, 1.5, 1.1] - - i[0] += 1 - return acc_lst[i[0]] - - model = copy.deepcopy(self.model) - quantizer = Quantization("mse_yaml_pytorch.yaml") - dataset = quantizer.dataset("dummy", (1, 3, 224, 224)) - quantizer.model = model - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_func = fake_eval_func - q_model = quantizer.fit() - self.assertIsNotNone(q_model) - q_model.save("./saved") - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_random_1x.py b/test/strategy/test_random_1x.py deleted file mode 100644 index 9f4d7f4fa95..00000000000 --- a/test/strategy/test_random_1x.py +++ /dev/null @@ -1,137 +0,0 @@ -"""Tests for quantization.""" - 
-import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: random - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: random - exit_policy: - max_trials: 3 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - with tf.Session() as sess: - x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - build_fake_yaml2() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - - shutil.rmtree("saved", ignore_errors=True) - - def test_ru_random_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_ru_random_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_sigopt_1x.py 
b/test/strategy/test_sigopt_1x.py deleted file mode 100644 index 964add7d80a..00000000000 --- a/test/strategy/test_sigopt_1x.py +++ /dev/null @@ -1,159 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import yaml - -CONDITION = False - - -def build_fake_yaml(sigopt_api_token, sigopt_project_id): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: sigopt - sigopt_api_token: {} - sigopt_project_id: {} - sigopt_experiment_name: nc-tune - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """.format( - sigopt_api_token, sigopt_project_id - ) - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(sigopt_api_token, sigopt_project_id): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op2_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: sigopt - sigopt_api_token: {} - sigopt_project_id: {} - sigopt_experiment_name: nc-tune - exit_policy: - max_trials: 3 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """.format( - sigopt_api_token, sigopt_project_id - ) - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=tf.nn.relu(x), filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d( - input=tf.nn.relu(op), filters=z, strides=[1, 1, 1, 1], padding="VALID", name="op2_to_store" - ) - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="y") - z = tf.constant(np.random.random((1, 1, 1, 1)).astype(np.float32), name="z") - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - op2 = tf.nn.conv2d(input=op, filters=z, strides=[1, 1, 1, 1], padding="VALID", name="op2_to_store") - - sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op2_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -@unittest.skipIf(CONDITION, "missing the env variables 'SIGOPT_API_TOKEN' or 'SIGOPT_PROJECT_ID'") -class TestSigoptTuningStrategy(unittest.TestCase): - @classmethod - def setUpClass(self): - 
sigopt_api_token = os.getenv("SIGOPT_API_TOKEN") - sigopt_project_id = os.getenv("SIGOPT_PROJECT_ID") - self.constant_graph = build_fake_model() - build_fake_yaml(sigopt_api_token, sigopt_project_id) - build_fake_yaml2(sigopt_api_token, sigopt_project_id) - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - shutil.rmtree("saved", ignore_errors=True) - - def test_run_basic_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_run_basic_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_tpe_1x.py b/test/strategy/test_tpe_1x.py deleted file mode 100644 index 9f5b8eec8a4..00000000000 --- a/test/strategy/test_tpe_1x.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Tests for quantization.""" - -import os -import shutil -import unittest - -import numpy as np -import yaml - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: tpe - accuracy_criterion: - relative: 0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml2(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: tpe - exit_policy: - max_trials: 5 - accuracy_criterion: - relative: -0.01 - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_model(): - import tensorflow as tf - - try: - graph = tf.Graph() - graph_def = tf.GraphDef() - - with tf.Session() as sess: - x = tf.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.constant(np.random.random((2, 2, 1, 1)), name="y", dtype=tf.float32) - op = tf.nn.conv2d(input=x, filter=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - sess.run(tf.global_variables_initializer()) - constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["op_to_store"]) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - except: - graph = tf.Graph() - graph_def = tf.compat.v1.GraphDef() - with tf.compat.v1.Session() as sess: - x = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="x") - y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name="y", dtype=tf.float32) - op = tf.nn.conv2d(input=x, filters=y, strides=[1, 1, 1, 1], padding="VALID", name="op_to_store") - - 
sess.run(tf.compat.v1.global_variables_initializer()) - constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants( - sess, sess.graph_def, ["op_to_store"] - ) - - graph_def.ParseFromString(constant_graph.SerializeToString()) - with graph.as_default(): - tf.import_graph_def(graph_def, name="") - return graph - - -class TestQuantization(unittest.TestCase): - @classmethod - def setUpClass(self): - self.constant_graph = build_fake_model() - build_fake_yaml() - build_fake_yaml2() - - @classmethod - def tearDownClass(self): - try: - os.remove("fake_yaml.yaml") - os.remove("fake_yaml2.yaml") - - shutil.rmtree("saved", ignore_errors=True) - except: - print("Error while deleting file ") - - def test_run_tpe_one_trial(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_run_tpe_max_trials(self): - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml2.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - quantizer.fit() - - def test_loss_calculation(self): - from neural_compressor.experimental import Quantization, common - from neural_compressor.experimental.contrib.strategy.tpe import TpeTuneStrategy - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", (100, 3, 3, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = self.constant_graph - - testObject = TpeTuneStrategy(quantizer.model, quantizer.conf, quantizer.calib_dataloader) - testObject._calculate_loss_function_scaling_components(0.01, 2, testObject.loss_function_config) - # check if latency difference between min and max corresponds to 10 points of loss function - tmp_val = testObject.calculate_loss(0.01, 2, testObject.loss_function_config) - tmp_val2 = testObject.calculate_loss(0.01, 1, testObject.loss_function_config) - self.assertTrue(True if int(tmp_val2 - tmp_val) == 10 else False) - # check if 1% of acc difference corresponds to 10 points of loss function - tmp_val = testObject.calculate_loss(0.02, 2, testObject.loss_function_config) - tmp_val2 = testObject.calculate_loss(0.03, 2, testObject.loss_function_config) - self.assertTrue(True if int(tmp_val2 - tmp_val) == 10 else False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_tuning_sampler_1x.py b/test/strategy/test_tuning_sampler_1x.py deleted file mode 100644 index d17188c5b10..00000000000 --- a/test/strategy/test_tuning_sampler_1x.py +++ /dev/null @@ -1,200 +0,0 @@ -import unittest -from collections import OrderedDict -from copy import deepcopy - -from neural_compressor.experimental.strategy.utils.tuning_sampler import ( - FallbackTuningSampler, - ModelWiseTuningSampler, - OpTypeWiseTuningSampler, - OpWiseTuningSampler, -) -from neural_compressor.experimental.strategy.utils.tuning_space import TuningSpace -from neural_compressor.experimental.strategy.utils.tuning_structs import OpTuningConfig - -op_cap = { - ("op_name1", "op_type1"): [ - { - "activation": { - "dtype": 
["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - ("op_name2", "op_type1"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - ("op_name3", "op_type2"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - ("op_name4", "op_type3"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - }, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - }, - }, - { - "activation": {"dtype": "fp32"}, - }, - ], -} - - -class TestTuningSampler(unittest.TestCase): - def test_tuning_sampler(self): - capability = {"calib": {"calib_sampling_size": [1, 10, 50]}, "op": op_cap} - conf = None - tuning_space = TuningSpace(capability, conf) - - initial_op_tuning_cfg = {} - for item in tuning_space.root_item.options: - if item.item_type == "op": - op_name, op_type = item.name - initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, "fp32", tuning_space) - print(initial_op_tuning_cfg[item.name]) - quant_mode_wise_items = OrderedDict() - from neural_compressor.experimental.strategy.utils.constant import auto_query_order as query_order - - pre_items = set() - for quant_mode in query_order: - items = tuning_space.query_items_by_quant_mode(quant_mode) - filtered_items = [item for item in items if item not in pre_items] - pre_items = pre_items.union(set(items)) - quant_mode_wise_items[quant_mode] = filtered_items - - def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict): - for item in items_lst: - op_item_dtype_dict[item.name] = target_quant_mode - - op_item_dtype_dict = OrderedDict() - for quant_mode, quant_mode_items in quant_mode_wise_items.items(): - initial_op_quant_mode(quant_mode_items, quant_mode, op_item_dtype_dict) - - op_wise_tuning_sampler = OpWiseTuningSampler( - deepcopy(tuning_space), [], [], op_item_dtype_dict, initial_op_tuning_cfg - ) - self.assertEqual(len(list(op_wise_tuning_sampler)), 128) - optype_wise_tuning_sampler = OpTypeWiseTuningSampler( - deepcopy(tuning_space), [], [], op_item_dtype_dict, initial_op_tuning_cfg - ) - cfg_lst = 
list(optype_wise_tuning_sampler) - self.assertEqual(len(cfg_lst), 16) - model_wise_tuning_sampler = ModelWiseTuningSampler( - deepcopy(tuning_space), [], [], op_item_dtype_dict, initial_op_tuning_cfg - ) - model_wise_pool = [] - best_tune_cfg = None - for tune_cfg in model_wise_tuning_sampler: - best_tune_cfg = tune_cfg - model_wise_pool.append(tune_cfg) - self.assertEqual(len(model_wise_pool), 8) - - # fallback test - quant_ops = quant_mode_wise_items.get("static", []) - quant_ops += quant_mode_wise_items.get("dynamic", []) - target_dtype = "fp32" - target_type_lst = tuning_space.query_items_by_quant_mode(target_dtype) - fallback_items_lst = [item for item in quant_ops if item in target_type_lst] - if fallback_items_lst: - print(f"Start to fallback op to {target_dtype} one by one.") - fallback_items_name_lst = [item.name for item in fallback_items_lst] - op_dtypes = OrderedDict(zip(fallback_items_name_lst[::-1], [target_dtype] * len(fallback_items_name_lst))) - initial_op_tuning_cfg = deepcopy(best_tune_cfg) - fallback_sampler = FallbackTuningSampler( - tuning_space, - tuning_order_lst=[], - initial_op_tuning_cfg=initial_op_tuning_cfg, - op_dtypes=op_dtypes, - accumulate=False, - ) - fallback_cnt = [] - fp32_lst = [] - for op_cfgs in fallback_sampler: - cnt = 0 - for op_name, op_cfg in op_cfgs.items(): - op_state = op_cfg.get_state() - if "fp32" == op_state["activation"]["dtype"] and ( - "fp32" == op_state["weight"]["dtype"] if "weight" in op_state else True - ): - cnt = cnt + 1 - fp32_lst.append(op_name) - fallback_cnt.append(cnt) - self.assertListEqual(fallback_cnt, [1, 1, 1, 1]) - self.assertListEqual(fp32_lst, fallback_items_name_lst[::-1]) - - fallback_sampler_acc = FallbackTuningSampler( - tuning_space, - tuning_order_lst=[], - initial_op_tuning_cfg=initial_op_tuning_cfg, - op_dtypes=op_dtypes, - accumulate=True, - ) - fallback_cnt = [] - for op_cfgs in fallback_sampler_acc: - cnt = 0 - for op_name, op_cfg in op_cfgs.items(): - op_state = op_cfg.get_state() - if "fp32" == op_state["activation"]["dtype"] and ( - "fp32" == op_state["weight"]["dtype"] if "weight" in op_state else True - ): - cnt = cnt + 1 - fallback_cnt.append(cnt) - self.assertListEqual(fallback_cnt, [2, 3, 4]) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_tuning_space.py b/test/strategy/test_tuning_space.py index 9c4dbff6ecb..9a09f0e8829 100644 --- a/test/strategy/test_tuning_space.py +++ b/test/strategy/test_tuning_space.py @@ -1,9 +1,9 @@ import unittest from copy import deepcopy -from neural_compressor.conf.dotdict import DotDict from neural_compressor.strategy.utils.tuning_space import TuningItem, TuningSpace from neural_compressor.utils import logger +from neural_compressor.utils.utility import DotDict op_cap = { # op have both weight and activation and support static/dynamic/fp32 diff --git a/test/strategy/test_tuning_space_1x.py b/test/strategy/test_tuning_space_1x.py deleted file mode 100644 index ab7fb6ee3c6..00000000000 --- a/test/strategy/test_tuning_space_1x.py +++ /dev/null @@ -1,297 +0,0 @@ -import unittest -from copy import deepcopy - -from neural_compressor.conf.dotdict import DotDict -from neural_compressor.experimental.strategy.utils.tuning_space import TuningSpace -from neural_compressor.utils import logger - -op_cap = { - # op have both weight and activation and support static/dynamic/fp32 - ("op_name1", "op_type1"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - 
"algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - # op have both weight and activation and support static/dynamic/fp32 - ("op_name2", "op_type1"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - # op have both weight and activation and support static/fp32 - ("op_name3", "op_type2"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel"], - "algorithm": ["minmax", "kl"], - }, - "weight": { - "dtype": ["int8"], - "scheme": ["sym"], - "granularity": ["per_channel"], - "algorithm": ["minmax", "kl"], - }, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - # op only have activation and support dynamic/fp32 - ("op_name4", "op_type3"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax"], - }, - }, - { - "activation": {"dtype": "fp32"}, - }, - ], -} - - -op_cap2 = { - # The granularity of op activation do not support per_tensor. 
- ("op_name4", "op_type1"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - ] -} - - -class TestTuningSampler(unittest.TestCase): - def setUp(self) -> None: - self.capability = {"calib": {"calib_sampling_size": [1, 10, 50]}, "op": deepcopy(op_cap)} - # for optype1,'algorithm': ['minmax', 'kl'] -> ['minmax'] - self.optype_wise_user_config = { - "op_type1": { - "activation": { - "algorithm": ["minmax"], - "granularity": ["per_channel", "per_tensor"], - } - } - } - self.model_wise_user_config = { - "activation": { - "granularity": ["per_channel"], - } - } - # fallback op_name4 - self.op_wise_user_config = { - "op_name4": { - "activation": { - "dtype": ["fp32"], - } - } - } - - self.op_wise_user_config2 = { - "op_name4": { - "activation": { - "granularity": ["per_tensor"], - } - } - } - - self.capability2 = {"calib": {"calib_sampling_size": [1, 10]}, "op": deepcopy(op_cap2)} - - def test_tuning_space_merge_op_wise_not_exist(self): - # op-wise - conf = { - "usr_cfg": { - "quantization": { - "op_wise": deepcopy(self.op_wise_user_config2), - } - } - } - conf = DotDict(conf) - tuning_space2 = TuningSpace(deepcopy(self.capability2), deepcopy(conf)) - logger.debug(tuning_space2.root_item.get_details()) - - def test_tuning_space_creation(self): - conf = None - # Test the creation of tuning space - tuning_space = TuningSpace(self.capability, conf) - logger.debug(tuning_space.root_item.get_details()) - # ops supported static - static_items = tuning_space.query_items_by_quant_mode("static") - static_items_name = [item.name for item in static_items] - self.assertEqual(set(static_items_name), set(op_cap.keys())) - # ops supported dynamic - dynamic_items = tuning_space.query_items_by_quant_mode("dynamic") - dynamic_items_name = [item.name for item in dynamic_items] - all_items_name = list(op_cap.keys()) - all_items_name.remove(("op_name3", "op_type2")) - self.assertEqual(set(dynamic_items_name), set(all_items_name)) - # ops supported fp32 - fp32_items = tuning_space.query_items_by_quant_mode("fp32") - fp32_items_name = [item.name for item in fp32_items] - self.assertEqual(set(fp32_items_name), set(op_cap.keys())) - # all optype - self.assertEqual(list(tuning_space.op_type_wise_items.keys()), ["op_type1", "op_type2", "op_type3"]) - - def test_tuning_space_merge_model_wise(self): - # Test merge with user config, model-wise, optype-wise, op-wise - # model-wise - self.capability = {"calib": {"calib_sampling_size": [1, 10, 50]}, "op": op_cap} - conf = { - "usr_cfg": { - "quantization": { - "model_wise": self.model_wise_user_config, - } - } - } - conf = DotDict(conf) - tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space2.root_item.get_details()) - found_per_tensor = False - for quant_mode in ["static", "dynamic"]: - for op_item in tuning_space2.query_items_by_quant_mode(quant_mode): - for path in tuning_space2.ops_path_set[op_item.name]: - mode_item = tuning_space2.query_quant_mode_item_by_full_path(op_item.name, path) - act_algo_item = mode_item.get_option_by_name(("activation", "granularity")) - if act_algo_item and "per_tensor" in act_algo_item.options: - found_per_tensor = True - break - self.assertFalse(found_per_tensor) - - def test_tuning_space_merge_optype_wise(self): - # optype-wise - conf = { - "usr_cfg": { - "quantization": { - 
"optype_wise": self.optype_wise_user_config, - } - } - } - conf = DotDict(conf) - tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space2.root_item.get_details()) - found_act_algo_kl_optype1 = False - found_act_algo_kl_others = False - for quant_mode in ["static", "dynamic"]: - for op_item in tuning_space2.query_items_by_quant_mode(quant_mode): - for path in tuning_space2.ops_path_set[op_item.name]: - mode_item = tuning_space2.query_quant_mode_item_by_full_path(op_item.name, path) - act_algo_item = mode_item.get_option_by_name(("activation", "algorithm")) - if act_algo_item and op_item.name[1] == "op_type1" and "kl" in act_algo_item.options: - found_act_algo_kl_optype1 = True - break - if act_algo_item and op_item.name[1] != "op_type1" and "kl" in act_algo_item.options: - found_act_algo_kl_others = True - self.assertFalse(found_act_algo_kl_optype1) - self.assertTrue(found_act_algo_kl_others) - - def test_tuning_space_merge_op_wise(self): - # op-wise - conf = { - "usr_cfg": { - "quantization": { - "op_wise": self.op_wise_user_config, - } - } - } - conf = DotDict(conf) - tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space2.root_item.get_details()) - found_quant_op_name4 = False - found_fp32_op_name4 = False - for quant_mode in ["static", "dynamic"]: - for item in tuning_space2.query_items_by_quant_mode(quant_mode): - if "op_name4" in item.name: - found_quant_op_name4 = True - break - - for item in tuning_space2.query_items_by_quant_mode("fp32"): - if "op_name4" in item.name: - found_fp32_op_name4 = True - break - self.assertFalse(found_quant_op_name4) - self.assertTrue(found_fp32_op_name4) - - def test_tuning_space_merge_model_wise_and_opty_wise(self): - # Test mode-wise + optype-wise - conf = { - "usr_cfg": { - "quantization": { - "model_wise": self.model_wise_user_config, - "optype_wise": self.optype_wise_user_config, - } - } - } - # the optype_wise config will overwrite the model-wise config - conf = DotDict(conf) - tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space2.root_item.get_details()) - found_per_tensor = False - for quant_mode in ["static", "dynamic"]: - for op_item in tuning_space2.query_items_by_quant_mode(quant_mode): - for path in tuning_space2.ops_path_set[op_item.name]: - mode_item = tuning_space2.query_quant_mode_item_by_full_path(op_item.name, path) - act_algo_item = mode_item.get_option_by_name(("activation", "granularity")) - if act_algo_item and "per_tensor" in act_algo_item.options: - found_per_tensor = True - break - self.assertTrue(found_per_tensor) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/strategy/test_tuning_space_v2.py b/test/strategy/test_tuning_space_v2.py index c89e30d864f..9e7111d42fe 100644 --- a/test/strategy/test_tuning_space_v2.py +++ b/test/strategy/test_tuning_space_v2.py @@ -1,9 +1,9 @@ import unittest from copy import deepcopy -from neural_compressor.conf.dotdict import DotDict from neural_compressor.strategy.utils.tuning_space import TuningItem, TuningSpace from neural_compressor.utils import logger +from neural_compressor.utils.utility import DotDict op_cap = { # op1 have both weight and activation and support static/dynamic/fp32/b16 diff --git a/test/strategy/test_tuning_space_v2_1x.py b/test/strategy/test_tuning_space_v2_1x.py deleted file mode 100644 index feab5fed6c1..00000000000 --- a/test/strategy/test_tuning_space_v2_1x.py +++ /dev/null @@ -1,195 +0,0 @@ -import unittest -from 
copy import deepcopy - -from neural_compressor.conf.dotdict import DotDict -from neural_compressor.experimental.strategy.utils.tuning_space import TuningSpace -from neural_compressor.utils import logger - -op_cap = { - # op1 have both weight and activation and support static/dynamic/fp32/b16 - ("op_name1", "op_type1"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int4"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["uint4"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "bf16"}, "weight": {"dtype": "bf16"}}, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - # op2 have both weight and activation and support static/dynamic/fp32 - ("op_name2", "op_type1"): [ - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "dynamic", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], - # op3 have both weight and activation and support int4 - ("op_name3", "op_type3"): [ - { - "activation": { - "dtype": ["int4"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int4"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - { - "activation": { - "dtype": ["int8"], - "quant_mode": "static", - "scheme": ["sym"], - "granularity": ["per_channel", "per_tensor"], - "algorithm": ["minmax", "kl"], - }, - "weight": {"dtype": ["int8"], "scheme": ["sym"], "granularity": ["per_channel", "per_tensor"]}, - }, - {"activation": {"dtype": "fp32"}, "weight": {"dtype": "fp32"}}, - ], -} - - -class TestTuningSpaceV2(unittest.TestCase): - def setUp(self) -> None: - self.capability = {"calib": {"calib_sampling_size": [1, 10, 50]}, "op": deepcopy(op_cap)} - - self.op_wise_user_cfg_for_fallback = { - "op_name1": {"activation": {"dtype": ["fp32"]}, "weight": {"dtype": ["fp32"]}}, - } - - def test_tuning_sampler_int4(self): - # op-wise - conf = {"usr_cfg": {}} - conf = DotDict(conf) - # test space construction - tuning_space = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space.root_item.get_details()) - found_int4_activation = False - found_int4_weight = False - op3_act_item = tuning_space.query_quant_mode_item_by_full_path( - ("op_name3", "op_type3"), ("static", "activation") - ) - for dtype_item in op3_act_item.options: - if dtype_item.name == 
"int4": - found_int4_activation = True - self.assertTrue(found_int4_activation) - op3_weight_item = tuning_space.query_quant_mode_item_by_full_path( - ("op_name3", "op_type3"), ("static", "weight") - ) - for dtype_item in op3_weight_item.options: - if dtype_item.name == "int4": - found_int4_weight = True - self.assertTrue(found_int4_weight) - - def test_sampler_int4(self): - # test sampler - from collections import OrderedDict - - from neural_compressor.strategy.utils.tuning_sampler import OpWiseTuningSampler - from neural_compressor.strategy.utils.tuning_structs import OpTuningConfig - - # op-wise - conf = {"usr_cfg": {}} - conf = DotDict(conf) - # test space construction - tuning_space = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space.root_item.get_details()) - initial_op_tuning_cfg = {} - for item in tuning_space.root_item.options: - if item.item_type == "op": - op_name, op_type = item.name - initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, "fp32", tuning_space) - quant_mode_wise_items = OrderedDict() - from neural_compressor.strategy.utils.constant import auto_query_order as query_order - - pre_items = set() - for quant_mode in query_order: - items = tuning_space.query_items_by_quant_mode(quant_mode) - filtered_items = [item for item in items if item not in pre_items] - pre_items = pre_items.union(set(items)) - quant_mode_wise_items[quant_mode] = filtered_items - - def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict): - for item in items_lst: - op_item_dtype_dict[item.name] = target_quant_mode - - op_item_dtype_dict = OrderedDict() - for quant_mode, quant_mode_items in quant_mode_wise_items.items(): - initial_op_quant_mode(quant_mode_items, quant_mode, op_item_dtype_dict) - - op_wise_tuning_sampler = OpWiseTuningSampler( - deepcopy(tuning_space), [], [], op_item_dtype_dict, initial_op_tuning_cfg - ) - op3 = ("op_name3", "op_type3") - for tune_cfg in op_wise_tuning_sampler: - op_cfg = tune_cfg[op3].get_state() - act_dtype = op_cfg["activation"]["dtype"] - weight_dtype = op_cfg["weight"]["dtype"] - self.assertTrue(act_dtype == weight_dtype == "int4") - - def test_tuning_space_merge_op_wise(self): - # op-wise - conf = { - "usr_cfg": { - "quantization": { - "op_wise": self.op_wise_user_cfg_for_fallback, - } - } - } - conf = DotDict(conf) - # test fallback - tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf)) - logger.debug(tuning_space2.root_item.get_details()) - op_name1_only_fp32 = True - for quant_mode in ["static", "dynamic"]: - for item in tuning_space2.query_items_by_quant_mode(quant_mode): - if item.name[0] == "op_name1": - op_name1_only_fp32 = False - self.assertTrue(op_name1_only_fp32) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_bias_correction.py b/test/tfnewapi/test_tensorflow_bias_correction.py deleted file mode 100644 index 45e5523705c..00000000000 --- a/test/tfnewapi/test_tensorflow_bias_correction.py +++ /dev/null @@ -1,220 +0,0 @@ -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel -from neural_compressor.adaptor.tf_utils.quantize_graph_common import QuantizeGraphHelper -from neural_compressor.adaptor.tf_utils.transform_graph.bias_correction import BiasCorrection -from 
neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestBiasCorrectionNewApi(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_bias_correction_new_api(self): - tf.compat.v1.disable_eager_execution() - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - if tf.version.VERSION <= "2.1.0": - x = tf.nn.relu(x) - conv1_weights = tf.compat.v1.get_variable( - "weights1", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x, conv1_weights, strides=[1, 1, 1, 1], padding="SAME") - normed = tf.nn.bias_add( - conv1, - tf.constant( - [ - 3.0, - 1.2, - 1.0, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 0, - 1, - 4.0, - 5.2, - 8.1, - 2, - 4, - 5, - 8, - 9, - 10, - 12, - 11, - 2, - 5.0, - 7.2, - 3.2, - 3, - 4, - 5, - 7, - 8, - ] - ), - ) - relu1 = tf.nn.relu(normed, name="Relu_1") - op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(__file__), "../../neural_compressor/adaptor/tensorflow.yaml") - ).get_eightbit_patterns() - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu1.name.split(":")[0]] - ) - output_graph_def = QuantizeGraphHelper.remove_training_nodes( - output_graph_def, protected_nodes=[relu1.name.split(":")[0]] - ) - inputs = [x.name.split(":")[0]] - outputs = [relu1.name.split(":")[0]] - op_wise_config = { - "Conv2D": (False, "minmax", False, 7.0), - } - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - int8_output_graph = quantizer.fit() - - correct_graph_def = BiasCorrection( - int8_output_graph.graph_def, output_graph_def, "weight_empirical", True - ).do_transformation() - - self.assertEqual(len(correct_graph_def.node), len(int8_output_graph.graph_def.node)) - - -class TestBiasCorrectionOldApi(unittest.TestCase): - @disable_random() - def test_bias_correction_old_api(self): - tf.compat.v1.disable_eager_execution() - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - if tf.version.VERSION <= "2.1.0": - x = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weights", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - normed = tf.nn.bias_add( - conv, - tf.constant( - [ - 3.0, - 1.2, - 1.0, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 0, - 1, - 4.0, - 5.2, - 8.1, - 2, - 4, - 5, - 8, - 9, - 10, - 12, 
- 11, - 2, - 5.0, - 7.2, - 3.2, - 3, - 4, - 5, - 7, - 8, - ] - ), - ) - relu = tf.nn.relu(normed, name="Relu_0") - op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(__file__), "../../neural_compressor/adaptor/tensorflow.yaml") - ).get_eightbit_patterns() - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - output_graph_def = QuantizeGraphHelper.remove_training_nodes( - output_graph_def, protected_nodes=[relu.name.split(":")[0]] - ) - inputs = [x.name.split(":")[0]] - outputs = [relu.name.split(":")[0]] - op_wise_config = { - "Conv2D": (False, "minmax", False, 7.0), - } - - int8_graph_def, _, _ = QuantizeGraphForIntel( - output_graph_def, inputs, outputs, op_wise_config, op_wise_sequences, "cpu" - ).do_transform() - - correct_graph_def = BiasCorrection(int8_graph_def, output_graph_def).do_transformation() - self.assertEqual(len(correct_graph_def.node), len(int8_graph_def.node)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_fuse_reshape_transpose.py b/test/tfnewapi/test_tensorflow_fuse_reshape_transpose.py deleted file mode 100644 index b80dc785d97..00000000000 --- a/test/tfnewapi/test_tensorflow_fuse_reshape_transpose.py +++ /dev/null @@ -1,139 +0,0 @@ -import imp -import os -import unittest - -import numpy as np -import tensorflow.compat.v1 as tf -import yaml -from numpy.core.fromnumeric import squeeze -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestFuseReshapeTransposeOptimizer(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_fuse_enter_reshape_transpose(self): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - enter = tf.raw_ops.Enter(data=y, frame_name="test") - enter_perm = tf.raw_ops.Enter(data=[1, 0], frame_name="test", is_constant=True) - transpose = tf.transpose(enter, perm=enter_perm) - enter_reshape = tf.raw_ops.Enter(data=[2, 2], frame_name="test", is_constant=True) - reshape = tf.reshape(transpose, enter_reshape) - x_enter = tf.raw_ops.Enter(data=x, frame_name="test") - z = tf.raw_ops.MatMul(a=x_enter, b=reshape, name="matmul_1") - z = tf.raw_ops.Exit(data=z) - found_quantized_matmul = True - found_transpose = False - found_reshape = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - 
quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - if i.op == "Transpose": - found_transpose = True - if i.op == "Reshape": - found_reshape = True - self.assertEqual(found_quantized_matmul, True) - self.assertEqual(found_transpose, False) - self.assertEqual(found_reshape, False) - - @disable_random() - def test_fuse_reshape_transpose(self): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.raw_ops.MatMul(a=x, b=reshape, name="matmul_2") - z = tf.nn.bias_add(z, [1, 2], name="op_to_store") - found_quantized_matmul = True - found_transpose = False - found_reshape = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - if i.op == "Transpose": - found_transpose = True - if i.op == "Reshape": - found_reshape = True - self.assertEqual(found_quantized_matmul, True) - self.assertEqual(found_transpose, False) - self.assertEqual(found_reshape, False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_biasadd_add_fusion.py b/test/tfnewapi/test_tensorflow_graph_biasadd_add_fusion.py deleted file mode 100644 index ef6b7745271..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_biasadd_add_fusion.py +++ /dev/null @@ -1,135 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestConvBiasAddAddFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_conv_biasadd_add_relu_fusion(self): - x = 
tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="SAME") - normed = tf.nn.bias_add(conv, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - add = normed + tf.constant([3.0]) - relu = tf.nn.relu6(add) - mul1 = tf.math.multiply(relu, tf.constant([0.1])) - mul2 = tf.math.multiply(mul1, tf.constant([0.8]), name="op_to_store") - - out_name = mul2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add import ( - FuseBiasAddAndAddOptimizer, - ) - - output_graph_def = FuseBiasAddAndAddOptimizer(output_graph_def).do_transformation() - - found_addv2 = False - - for i in output_graph_def.node: - if i.op.find("AddV2") != -1: - found_addv2 = True - break - - self.assertEqual(found_addv2, False) - - def test_conv_biasadd_add_relu_no_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.nn.bias_add(conv2, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - add_y = tf.compat.v1.get_variable("add_y", [16], initializer=tf.compat.v1.random_normal_initializer()) - add = normed2 + add_y - relu = tf.nn.relu6(add) - mul1 = tf.math.multiply(relu, tf.constant([0.1])) - mul2 = tf.math.multiply(mul1, tf.constant([0.8]), name="op_to_store") - - out_name = mul2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add import ( - FuseBiasAddAndAddOptimizer, - ) - - output_graph_def = FuseBiasAddAndAddOptimizer(output_graph_def).do_transformation() - - found_addv2 = False - - for i in output_graph_def.node: - if i.op.find("AddV2") != -1: - found_addv2 = True - break - - self.assertEqual(found_addv2, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_conv_fusion.py b/test/tfnewapi/test_tensorflow_graph_conv_fusion.py deleted file mode 100644 index 19e38a7a8f8..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_conv_fusion.py +++ /dev/null @@ -1,817 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from pkg_resources import parse_version -from tensorflow.compat.v1 import graph_util -from tensorflow.python.framework import function - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm import FoldBatchNormNodesOptimizer -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes import StripUnusedNodesOptimizer -from 
neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq import OptimizeQDQGraph -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestConvBiasAddAddReluFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_conv_single_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv1_weights = tf.compat.v1.get_variable( - "weight_conv1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x_pad, conv1_weights, strides=[1, 2, 2, 1], padding="VALID") - matmul_weights = tf.compat.v1.get_variable( - "weight_matmul", [1, 28, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - matmul = tf.linalg.matmul(conv1, matmul_weights) - conv2_weights = tf.compat.v1.get_variable( - "weight_conv2", [7, 7, 32, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(matmul, conv2_weights, strides=[1, 2, 2, 1], padding="VALID") - leaky_relu = tf.nn.leaky_relu(conv2, name="op_to_store") - - out_name = leaky_relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - find_single_qconv = [] - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D": - find_single_qconv.append(i.attr["fused_ops"].list.s == [b"Requantize"]) - - self.assertEqual(find_single_qconv, [False, False]) - - @disable_random() - def test_spacetobatchnd_conv2d_batchtospacend_fusion(self): - i = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - x = tf.space_to_batch_nd(i, block_shape=[2, 2], paddings=[[0, 0], [0, 0]]) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - y = tf.compat.v1.batch_to_space_nd(conv, block_shape=[2, 2], crops=[[0, 0], [0, 0]]) - out = tf.identity(y, name="op_to_store") - out_name = out.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - 
sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_op = False - - for i in output_graph.graph_def.node: - if i.op == "SpaceToBatchND" or i.op == "BatchToSpaceND": - found_op = True - break - - self.assertEqual(found_op, False) - - @disable_random() - def test_conv_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_conv_fusion = False - break - - self.assertEqual(found_conv_fusion, False) - - @disable_random() - def test_conv_biasadd_relu6_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu6 = tf.nn.relu6(normed, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu6": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_swishf32_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = 
tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - @function.Defun(tf.float32, func_name="swish_f32") - def swish_f32(x): - return tf.nn.silu(x, beta=1.0) - - swish = swish_f32(normed, name="swish_f32_output_node") - - out_name = swish.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "swish_f32": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_addv2_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv1_weights = tf.compat.v1.get_variable( - "weight_conv1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x, conv1_weights, strides=[1, 2, 2, 1], padding="SAME") - conv2_weights = tf.compat.v1.get_variable( - "weight_conv2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(x, conv2_weights, strides=[1, 2, 2, 1], padding="SAME") - sumadd = tf.raw_ops.AddV2(x=conv1, y=conv2, name="addv2") - - out_name = sumadd.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op.find("QuantizedConv2D") != -1: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_add_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.nn.bias_add(conv2, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - relu = tf.nn.relu(normed2 + tf.constant([3.0])) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - 
output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("QuantizedConv2D") != -1: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_addv2_relu_fallback_fusion_1(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.leaky_relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.AddV2(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sum", - b"Relu", - b"Requantize", - ]: - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_addv2_relu_fallback_fusion_2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = 
tf.nn.relu(normed2) - add = tf.raw_ops.AddV2(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Requantize"]: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_fusion_with_last_matmul(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - # paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - # x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - reshape = tf.reshape(pooling, [-1, 3136]) - - y_data = np.random.random([3136, 1]) - - y = tf.constant(y_data, dtype=tf.float32, shape=[3136, 1]) - z = tf.matmul(reshape, y) - relu1 = tf.nn.relu(z) - y_data_1 = np.random.random([1, 1]) - y_1 = tf.constant(y_data_1, dtype=tf.float32, shape=[1, 1]) - - z_2nd_matmul = tf.matmul(relu1, y_1) - relu6 = tf.nn.relu6(z_2nd_matmul, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantize_v2_count = 0 - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - break - - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_conv_fusion_with_last_conv(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_weights_2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 
16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(pooling, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID") - conv_weights_3 = tf.compat.v1.get_variable( - "weight3", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - relu2 = tf.nn.relu(conv2) - conv3 = tf.nn.conv2d(relu2, conv_weights_3, strides=[1, 2, 2, 1], padding="VALID") - - relu3 = tf.nn.relu(conv3) - relu6 = tf.nn.relu6(relu3, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantize_v2_count = 0 - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - break - - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_conv_fusion_with_max_pooling(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - - relu = tf.nn.relu(x) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(pooling, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - biasadd = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = biasadd.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantized_pool_data_type = None - quantized_conv_data_type = None - for i in output_graph.graph_def.node: - if i.op.find("QuantizedMaxPool") != -1: - quantized_pool_data_type = i.attr["T"].type - if i.op.find("QuantizedConv2D") != -1: - quantized_conv_data_type = i.attr["Tinput"].type - - self.assertNotEqual(quantized_pool_data_type, None) - self.assertEqual(quantized_pool_data_type, quantized_conv_data_type) - - @disable_random() - def test_conv3d_addv2_relu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - top_relu = tf.nn.relu(x) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight_conv3d_1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2") - relu = tf.nn.relu(add) - conv3d_2_weights = tf.compat.v1.get_variable( - "weight_conv3d_2", [3, 3, 3, 32, 1], 
initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - - out_name = conv3d_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if b"Sum" in i.attr["fused_ops"].list.s: - found_conv_sumadd_fusion = True - if i.attr["fused_ops"].list.s == [b"BiasAdd", b"Relu", b"Requantize"]: - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, False) - self.assertEqual(found_conv_biasadd_fusion, True) - - # conv2d + dummybiasadd + addv2 fusion - @disable_random() - def test_conv_add_addn_non_const_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv2d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2d_1 = tf.nn.conv2d(top_relu, conv2d_1_weights, strides=[1, 2, 2, 1], padding="SAME") - conv2d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2d_2 = tf.nn.conv2d(top_relu, conv2d_2_weights, strides=[1, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv2d_1, y=conv2d_2, name="addv2_1") - conv2d_3_weights = tf.compat.v1.get_variable( - "weight3", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2d_3 = tf.nn.conv2d(top_relu, conv2d_3_weights, strides=[1, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv2d_3, name="addv2_2") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sum", - b"Requantize", - ]: - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - @unittest.skipIf( - tf.__version__ not in ["2.11.0202242", "2.11.0202250", "2.11.0202317", "2.11.0202323"], - "deconv2d quantization only support 2.11", - ) - def test_deconv2d_biasadd_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 2, 2, 
1], name="input") - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - - conv2 = tf.nn.conv2d_transpose( - x, conv_weights2, output_shape=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME" - ) - - normed2 = tf.nn.bias_add(conv2, tf.constant([3.0])) - out = tf.identity(normed2) - - out_name = out.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 2, 2, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_deconv2d_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("_FusedQuantizedDeconv2D") != -1: - found_deconv2d_fusion = True - break - - self.assertEqual(found_deconv2d_fusion, True) - - @disable_random() - @unittest.skipIf( - tf.__version__ not in ["2.11.0202242", "2.11.0202250", "2.11.0202317", "2.11.0202323"], - "deconv2d quantization only support 2.11", - ) - def test_single_deconv2d_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 2, 2, 1], name="input") - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - - conv2 = tf.nn.conv2d_transpose( - x, conv_weights2, output_shape=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="SAME" - ) - - out = tf.identity(conv2) - - out_name = out.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 2, 2, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_deconv2d_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("_FusedQuantizedDeconv2D") != -1: - found_deconv2d_fusion = True - break - - self.assertEqual(found_deconv2d_fusion, True) - - @disable_random() - @unittest.skipIf( - tf.__version__ not in ["2.11.0202242", "2.11.0202250", "2.11.0202317", "2.11.0202323"], - "deconv2d quantization only support 2.11", - ) - def test_deconv3d_biasadd_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 2, 2, 2, 1], name="input") - conv3d_weights = tf.compat.v1.get_variable( - "weight_conv3d_1", [3, 3, 3, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d = tf.nn.conv3d_transpose( - x, conv3d_weights, output_shape=[1, 2, 2, 2, 1], strides=[1, 1, 1, 1, 1], padding="SAME" - ) - - normed2 = tf.nn.bias_add(conv3d, tf.constant([3.0])) - out = tf.identity(normed2) - - out_name = out.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from 
neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 2, 2, 2, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_deconv3d_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("_FusedQuantizedDeconv3D") != -1: - found_deconv3d_fusion = True - break - - self.assertEqual(found_deconv3d_fusion, True) - - @disable_random() - @unittest.skipIf( - tf.__version__ not in ["2.11.0202242", "2.11.0202250", "2.11.0202317", "2.11.0202323"], - "deconv2d quantization only support 2.11", - ) - def test_single_deconv3d_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 2, 2, 2, 1], name="input") - conv3d_weights = tf.compat.v1.get_variable( - "weight_conv3d_1", [3, 3, 3, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d = tf.nn.conv3d_transpose( - x, conv3d_weights, output_shape=[1, 2, 2, 2, 1], strides=[1, 1, 1, 1, 1], padding="SAME" - ) - - out = tf.identity(conv3d) - - out_name = out.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 2, 2, 2, 1), label=True) - # quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_deconv3d_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("_FusedQuantizedDeconv3D") != -1: - found_deconv3d_fusion = True - break - - self.assertEqual(found_deconv3d_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_conv_requantize_fusion.py b/test/tfnewapi/test_tensorflow_graph_conv_requantize_fusion.py deleted file mode 100644 index 55021a94eaa..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_conv_requantize_fusion.py +++ /dev/null @@ -1,954 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import logging -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_tensorflow_yaml(): - fake_yaml = """ - model: - name: tensorflow_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("tensorflow_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestConvRequantizedFusionNewAPI(unittest.TestCase): - @classmethod - def setUpClass(self): - build_tensorflow_yaml() - - @classmethod - def tearDownClass(self): - os.remove("tensorflow_yaml.yaml") - - @disable_random() - def 
test_conv_biasadd_relu6_fusion(self): - logging.getLogger().info("test_conv_biasadd_relu6_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight0", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu6 = tf.nn.relu6(normed, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu6": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_single_conv3d_fusion(self): - logging.getLogger().info("test_single_conv3d_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight1", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID", name="op_to_store") - - out_name = conv.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_biasadd_fusion(self): - logging.getLogger().info("test_conv3d_biasadd_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight2", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - relu6 = tf.nn.relu6(conv, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = 
graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight6", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1_weights = tf.compat.v1.get_variable( - "weight7", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - conv1 = tf.nn.conv3d(x, conv1_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - add = conv + conv1 - relu = tf.nn.relu(add) - - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Dequantize"]: - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv2d_biasadd_elu_fusion(self): - input = tf.compat.v1.placeholder(tf.float32, shape=(1, 3, 3, 1), name="input") - weight = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)).astype(np.float32), name="weight") - bias = tf.constant(np.random.random((1)), name="bias", dtype=tf.float32) - conv = tf.nn.conv2d(input=input, filters=weight, strides=[1, 1, 1, 1], padding="VALID", name="conv") - bias_add = tf.nn.bias_add(conv, bias, name="bias_add") - res = tf.nn.elu(bias_add, name="res") - output = tf.nn.softmax(res, name="op_to_store") - - out_name = output.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 3, 3, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - self.assertNotEqual(output_graph, None) - elu_fused = 
False - for node in output_graph.graph_def.node: - if node.name == "conv_eightbit_requantize_dequantize": - if b"Elu" in node.attr["fused_ops"].list.s: - elu_fused = True - self.assertEqual(elu_fused, True) - - @disable_random() - def test_conv3d_add_const_fusion(self): - logging.getLogger().info("test_conv3d_add_const_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight11", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - add = conv + tf.constant( - [ - [ - [ - [ - [ - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - ] - ] - ] - ] - ] - ) - - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "AddV2": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_add_add_fusion(self): - logging.getLogger().info("test_conv_add_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - 
conv_weights = tf.compat.v1.get_variable( - "weight12", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - add = normed + tf.constant(np.random.randn(16), dtype=tf.float32) - relu6 = tf.nn.relu6(add, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Add": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_single_conv2d_fusion(self): - logging.getLogger().info("test_single_conv2d_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight13", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - out_name = conv.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_addn_const_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_const_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - top_relu = tf.nn.relu(x) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight14", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2") - var = tf.compat.v1.get_variable( - "add_y", [1, 64, 32, 32, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - add = tf.raw_ops.AddV2(x=add_1, y=var, name="addv2_1") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - 
sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if str(b"Sum") in str(i.attr["fused_ops"].list.s): - found_conv_sumadd_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Sum", b"Relu"]): - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, False) - self.assertEqual(found_conv_biasadd_fusion, False) - - @disable_random() - def test_conv3d_add_const_addn_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_const_addn_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight15", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2_2") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight16", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_3") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if str(b"Sum") in str(i.attr["fused_ops"].list.s): - found_conv_sumadd_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Sum", b"Relu"]): - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, True) - self.assertEqual(found_conv_biasadd_fusion, False) - - @disable_random() - def test_conv3d_add_addn_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight15", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = 
tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2_4") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight16", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_5") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_addn_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight17", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2_weights = tf.compat.v1.get_variable( - "weight18", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name="addv2_6") - conv3d_3_weights = tf.compat.v1.get_variable( - "weight19", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_3_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_7") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_relu_fusion = False - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_relu_fusion = True - self.assertEqual(found_relu_fusion, True) - - @disable_random() - def test_conv3d_relu_fusion(self): - logging.getLogger().info("test_conv3d_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = 
tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight20", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - relu = tf.nn.leaky_relu(conv) - - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_fusion(self): - logging.getLogger().info("test_conv3d_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight21", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight22", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name="addv2_8") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_addn_relu_requantize_fusion(self): - logging.getLogger().info("test_conv3d_add_const_addn_relu_requantize_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight23", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 
1, 1, 32), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_9") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight24", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_10") - relu = tf.nn.relu(add_2) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if str(b"Sum") in str(i.attr["fused_ops"].list.s): - found_conv_sumadd_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Sum", b"Relu", b"Requantize"]): - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, True) - self.assertEqual(found_conv_biasadd_fusion, True) - - @disable_random() - def test_conv3d_add_const_addn_fusion(self): - logging.getLogger().info("test_conv3d_add_const_addn_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight25", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_11") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight26", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_12") - out_name = add_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_no_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_no_relu_fusion") - x = 
tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight27", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_13") - pooling = tf.nn.max_pool(add, ksize=1, strides=[1, 2, 2, 2, 1], padding="SAME") - out_name = pooling.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_const_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight28", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_10") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv2d_add_const_leakyrelu_add_fusion(self): - logging.getLogger().info("test_conv2d_add_const_leakyrelu_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv2d_1_weights = tf.compat.v1.get_variable( - "weight29", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - 
conv2d_1 = tf.nn.conv2d(top_relu, conv2d_1_weights, strides=[1, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(16), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv2d_1, y=y_const, name="addv2_11") - relu = tf.nn.leaky_relu(add_1) - conv2d_2_weights = tf.compat.v1.get_variable( - "weight30", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2d_2 = tf.nn.conv2d(top_relu, conv2d_2_weights, strides=[1, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=relu, y=conv2d_2, name="addv2_12") - out_name = add_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_leakyrelu_add_fusion(self): - logging.getLogger().info("test_conv3d_add_const_leakyrelu_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight31", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_13") - relu = tf.nn.leaky_relu(add_1) - conv3d_2_weights = tf.compat.v1.get_variable( - "weight32", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=relu, y=conv3d_2, name="addv2_14") - out_name = add_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_addn_non_const_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_non_const_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 
1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight33", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight34", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name="addv2_15") - conv3d_3_weights = tf.compat.v1.get_variable( - "weight35", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_3 = tf.nn.conv3d(top_relu, conv3d_3_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_3, name="addv2_16") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_elu_add_fusion(self): - logging.getLogger().info("test_conv3d_add_const_elufusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d = tf.nn.conv3d(top_relu, conv3d_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add = tf.raw_ops.AddV2(x=conv3d, y=y_const, name="addv2") - elu = tf.nn.elu(add) - output = tf.nn.softmax(elu, name="op_to_store") - out_name = output.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("tensorflow_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_depthwiseconv_fusion.py 
b/test/tfnewapi/test_tensorflow_graph_depthwiseconv_fusion.py deleted file mode 100644 index 2eaf6efe3b1..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_depthwiseconv_fusion.py +++ /dev/null @@ -1,271 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 -from tensorflow.python.framework import dtypes, tensor_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_Conv2dBiasAddAddRelu6MulMul(): - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "conv1_bias_add" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - - offset_node = node_def_pb2.NodeDef() - offset_node.name = "offset" - offset_node.op = "Const" - offset_value = np.float32(np.abs(np.random.randn(1))) - 
offset_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - offset_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(offset_value, offset_value.dtype.type, offset_value.shape) - ) - ) - - add_node = node_def_pb2.NodeDef() - add_node.op = "Add" - add_node.name = "add/hard_swish" - add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - add_node.input.extend([bias_add_node.name, offset_node.name]) - - relu_node = node_def_pb2.NodeDef() - relu_node.op = "Relu6" - relu_node.name = "relu6/hard_swish" - relu_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node.input.extend([add_node.name]) - - mul_node = node_def_pb2.NodeDef() - mul_node.op = "Mul" - mul_node.name = "mul/hard_swish" - mul_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - mul_node.input.extend([bias_add_node.name, relu_node.name]) - - offset1_node = node_def_pb2.NodeDef() - offset1_node.name = "mul1_offset" - offset1_node.op = "Const" - offset1_value = np.float32(np.abs(np.random.randn(1))) - offset1_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - offset1_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(offset1_value, offset1_value.dtype.type, offset1_value.shape) - ) - ) - - mul1_node = node_def_pb2.NodeDef() - mul1_node.op = "Mul" - mul1_node.name = "mul1/hard_swish" - mul1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - mul1_node.input.extend([mul_node.name, offset1_node.name]) - - test_graph = graph_pb2.GraphDef() - - test_graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - add_node, - relu_node, - offset_node, - offset1_node, - mul_node, - mul1_node, - ] - ) - return test_graph - - -class TestConvBiasAddAddReluFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_depthwiseconv_biasadd_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = normed.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedDepthwiseConv2D": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseConv2dNative_BiasAddAddRelu6MulMul_fusion(self): - 
output_graph_def = build_Conv2dBiasAddAddRelu6MulMul() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv_biasadd_leakyrelu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - - leakyrelu = tf.nn.leaky_relu(normed) - out_name = leakyrelu.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedDepthwiseConv2D": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_dequantize_cast_optimizer_newapi.py b/test/tfnewapi/test_tensorflow_graph_dequantize_cast_optimizer_newapi.py deleted file mode 100644 index 376e0e6dc0b..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_dequantize_cast_optimizer_newapi.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.python.framework import dtypes - -from neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer import DequantizeCastOptimizer -from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_graphdef(set_min_first=False, dq_multi_outputs=False): - tf.compat.v1.disable_eager_execution() - - input = tf.compat.v1.placeholder(tf.float32, shape=(32, 224, 224, 3), name="input") - graph_def = tf.compat.v1.get_default_graph().as_graph_def(add_shapes=True) - - min_input = Helper.create_constant_node("test_min", value=0.0, dtype=dtypes.float32) - - max_input = Helper.create_constant_node("test_max", value=[1], dtype=dtypes.float32) - - quant_v2_node = Helper.create_node("QuantizeV2", "test_quantize", [input.name, min_input.name, max_input.name]) - - dequantize_node = Helper.create_node( - "Dequantize", "test_dequantize", [quant_v2_node.name, quant_v2_node.name + ":1", quant_v2_node.name + ":2"] - ) - if set_min_first: - Helper.set_attr_string(dequantize_node, 
"mode", b"MIN_FIRST") - - cast_node = Helper.create_node("Cast", "test_cast", [dequantize_node.name]) - Helper.set_attr_dtype(cast_node, "DstT", dtypes.bfloat16) - Helper.set_attr_dtype(cast_node, "SrcT", dtypes.float32) - Helper.set_attr_bool(cast_node, "Truncate", False) - - dentity_node = Helper.create_node("Identity", "output", [cast_node.name]) - Helper.set_attr_dtype(dentity_node, "T", dtypes.bfloat16) - - graph_def.node.extend( - [ - min_input, - max_input, - quant_v2_node, - dequantize_node, - cast_node, - dentity_node, - ] - ) - - if dq_multi_outputs: - dentity_node_2 = Helper.create_node("Identity", "id_1", [dequantize_node.name]) - Helper.set_attr_dtype(dentity_node_2, "T", dtypes.float32) - graph_def.node.extend([dentity_node_2]) - - return graph_def - - -class TestDequantizeCastOptimizer(unittest.TestCase): - @disable_random() - def test_dequantize_cast_normal(self): - graph_def = build_fake_graphdef() - converted_graph_def = DequantizeCastOptimizer(graph_def).do_transformation() - for i in converted_graph_def.node: - self.assertNotEqual(i.op, "Cast") - - @disable_random() - def test_dequantize_cast_min_first(self): - graph_def = build_fake_graphdef(set_min_first=True) - converted_graph_def = DequantizeCastOptimizer(graph_def).do_transformation() - hasCast = False - # Remove MIN_FIRST limitation for spr-base, so the "Cast" will be removed now - for i in converted_graph_def.node: - if i.op == "Cast": - hasCast = True - break - self.assertEqual(hasCast, False) - - @disable_random() - def test_dequantize_cast_multiple_outputs(self): - graph_def = build_fake_graphdef(dq_multi_outputs=True) - converted_graph_def = DequantizeCastOptimizer(graph_def).do_transformation() - hasCast = False - for i in converted_graph_def.node: - if i.op == "Cast": - hasCast = True - break - self.assertEqual(hasCast, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_dq_cast_fusion.py b/test/tfnewapi/test_tensorflow_graph_dq_cast_fusion.py deleted file mode 100644 index 85e9fb9aed7..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_dq_cast_fusion.py +++ /dev/null @@ -1,89 +0,0 @@ -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestDqCastFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - os.environ["FORCE_BF16"] = "1" - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_dq_all_outputs_bf16(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.constant(np.random.random((1, 3, 16, 16)).astype(np.float32), name="y") - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - conv_reshape1 = tf.reshape(conv, [1, 28, 27, 16]) - conv_reshape2 = 
tf.reshape(conv, [1, 28, 27, 16]) - out = tf.math.add(conv_reshape1, conv_reshape2, name="op_to_store") - out_name = out.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16)) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_cast = False - for node in output_graph.graph_def.node: - if node.op == "Cast": - found_cast = True - break - self.assertEqual(found_cast, False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_fuse_gelu_newapi.py b/test/tfnewapi/test_tensorflow_graph_fuse_gelu_newapi.py deleted file mode 100644 index f14f7d70fbe..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_fuse_gelu_newapi.py +++ /dev/null @@ -1,413 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import unittest - -import tensorflow as tf -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu import FuseGeluOptimizer -from neural_compressor.adaptor.tf_utils.util import disable_random - - -class TestGeluFusion(unittest.TestCase): - def gelu(self, input_tensor, mul_value=0.5, addv2_value=1.0, sqrt_value=2.0): - cdf = mul_value * (addv2_value + tf.math.erf(input_tensor / tf.sqrt(sqrt_value))) - return input_tensor * cdf - - def gelu_enable_approximation( - self, - input_tensor, - another_mul_value=0.5, - mul1_value=0.044715, - addv2_value=1.0, - mul2_value=0.7978845608028654, - pow_value=3, - ): - coeff = tf.cast(mul1_value, input_tensor.dtype) - return ( - another_mul_value - * input_tensor - * (addv2_value + tf.tanh(mul2_value * (input_tensor + coeff * tf.pow(input_tensor, pow_value)))) - ) - - def gelu_enable_approximation_varaint( - self, - input_tensor, - another_mul_value=0.5, - mul1_value=0.044715, - addv2_value=1.0, - mul2_value=0.7978845608028654, - pow_value=3, - ): - coeff = tf.cast(mul1_value, input_tensor.dtype) - cdf = another_mul_value * ( - addv2_value + tf.tanh(mul2_value * (input_tensor + coeff * tf.pow(input_tensor, pow_value))) - ) - - return input_tensor * cdf - - def gelu_disable_approximation( - self, - input_tensor, - another_add_value=0.5, - mul1_value=0.044715, - addv2_value=1.0, - mul2_value=0.7978845608028654, - pow_value=3, - ): - coeff = tf.cast(mul1_value, input_tensor.dtype) - return (another_add_value + input_tensor) * ( - addv2_value + tf.tanh(mul2_value * (input_tensor + coeff * tf.pow(input_tensor, pow_value))) - ) - - @disable_random() - def test_gelu_disable_approximation_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_disable_approximation(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - 
sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, True) - - @disable_random() - def test_gelu_approximation_fusion_varaint(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation_varaint(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, True) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_pow_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, pow_value=1.0) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in 
output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_mul2_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, mul2_value=1.0) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_addv2_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, addv2_value=12.0) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_mul1_value(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, mul1_value=1.0) - relu = tf.nn.relu(gelu) - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_approximation_fusion_with_invalid_another_mul(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 
224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu_enable_approximation(conv_bias, another_mul_value=1.0) - relu = tf.nn.relu(gelu) - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion_with_invalid_sqrt(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias, sqrt_value=1.0) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[gelu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion_with_invalid_addv2(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias, addv2_value=10.0) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[gelu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion_with_invalid_mul(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = 
self.gelu(conv_bias, mul_value=1.0) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[gelu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, False) - - @disable_random() - def test_gelu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name="input") - - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [32], initializer=tf.compat.v1.random_normal_initializer()) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="SAME") - conv_bias = tf.math.add(conv1, conv_bias) - - gelu = self.gelu(conv_bias) - relu = tf.nn.relu(gelu) - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(":")[0]] - ) - - output_graph_def = FuseGeluOptimizer(output_graph_def).do_transformation() - - found_gelu = False - for i in output_graph_def.node: - if i.op == "Gelu": - found_gelu = True - break - - self.assertEqual(found_gelu, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_fuse_pad_conv_fp32.py b/test/tfnewapi/test_tensorflow_graph_fuse_pad_conv_fp32.py deleted file mode 100644 index 6b1ac19ba02..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_fuse_pad_conv_fp32.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: op_to_store - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestFoldPadConv(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_fold_pad_conv(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed, name="op_to_store") - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = 
graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_pad = False - - for i in output_graph.graph_def.node: - if i.op == "Pad": - found_pad = True - break - self.assertEqual(found_pad, False) - - @disable_random() - def test_fold_non_const_pad_conv(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - vec = tf.raw_ops.DataFormatVecPermute(x=paddings, src_format="NHWC", dst_format="NHWC") - x_pad = tf.pad(x, vec, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed, name="op_to_store") - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_pad = False - - for i in output_graph.graph_def.node: - if i.op == "Pad": - found_pad = True - break - self.assertEqual(found_pad, False) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_bn_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_bn_fusion.py deleted file mode 100644 index 02ee5d61bf8..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_bn_fusion.py +++ /dev/null @@ -1,417 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import logging -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.core.framework import attr_value_pb2 -from tensorflow.python.framework import dtypes - -from neural_compressor.adaptor.tf_utils.util import disable_random -from neural_compressor.experimental import Quantization, common -from neural_compressor.utils import logger -from neural_compressor.utils.utility import CpuInfo - - -def build_fake_yaml_1(): - fake_yaml_1 = """ - model: - name: fake_yaml_1 - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml_1, Loader=yaml.SafeLoader) - with open("fake_yaml_1.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_fake_yaml_2(): - fake_yaml_2 = """ - model: 
- name: fake_yaml_2 - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - workspace: - path: saved - """ - y = yaml.load(fake_yaml_2, Loader=yaml.SafeLoader) - with open("fake_yaml_2.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestTensorflowQdqConvFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml_1() - build_fake_yaml_2() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml_1.yaml") - os.remove("fake_yaml_2.yaml") - - @disable_random() - def test_bn_relu_depthwiseconv_biasadd_relu6_fusion(self): - logger.info("test_bn_relu_depthwiseconv_biasadd_relu6_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - normed_0 = tf.compat.v1.layers.batch_normalization(x) - relu = tf.nn.relu(normed_0, name="op_to_store_0") - conv = tf.compat.v1.nn.depthwise_conv2d_native(relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed_1 = tf.compat.v1.layers.batch_normalization(conv) - relu6 = tf.nn.relu6(normed_1, name="op_to_store_1") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - conv_input_type = True - found_fusion = True - qbn_num = 0 - dq_num = 0 - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedDepthwiseConv2D" and i.attr["Thost_inputs"].list.type != [ - 11, - 11, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - ]: - conv_input_type = False - break - if i.op in ["Relu", "Relu6", "FusedBatchNormV3"]: - found_fusion = False - break - if i.op == "_QuantizedFusedBatchNorm": - qbn_num += 1 - if i.op == "Dequantize": - dq_num += 1 - self.assertEqual(conv_input_type, True) - self.assertEqual(found_fusion, True) - self.assertEqual(qbn_num, 1) - self.assertEqual(dq_num, 0) - - @disable_random() - def test_training_bn_relu_depthwiseconv_biasadd_relu6_fusion(self): - logger.info("test_training_bn_relu_depthwiseconv_biasadd_relu6_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - normed_0 = tf.compat.v1.layers.batch_normalization(x, training=True) - relu = tf.nn.relu(normed_0, name="op_to_store_0") - conv = tf.compat.v1.nn.depthwise_conv2d_native(relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed_1 = tf.compat.v1.layers.batch_normalization(conv) - relu6 = tf.nn.relu6(normed_1, name="op_to_store_1") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, 
input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - bn_num, bf16_bn_num, qbn_num, dq_num = 0, 0, 0, 0 - for i in output_graph.graph_def.node: - if i.op == "FusedBatchNormV3": - bn_num += 1 - if i.attr["T"].type == dtypes.bfloat16.as_datatype_enum: - bf16_bn_num += 1 - if i.op == "_QuantizedFusedBatchNorm": - qbn_num += 1 - if i.op == "Dequantize": - dq_num += 1 - self.assertEqual(bn_num, 1) - self.assertEqual(qbn_num, 0) - self.assertEqual(dq_num, 0) - bf16_enabled = bool(CpuInfo().bf16 or os.getenv("FORCE_BF16") == "1") - if bf16_enabled: - self.assertEqual(bf16_bn_num, 1) - - @disable_random() - def test_bn_leakyrelu_conv_biasadd_relu(self): - logger.info("test_bn_leakyrelu_conv_biasadd_relu") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - normed_0 = tf.compat.v1.layers.batch_normalization(x) - leaky_relu = tf.nn.leaky_relu(normed_0, alpha=0.3, name="op_to_store_0") - conv = tf.nn.conv2d(leaky_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed_1 = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed_1, name="op_to_store_1") - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - - quantizer.model = output_graph_def - output_graph = quantizer.fit() - conv_input_type = True - found_fusion = True - qbn_num = 0 - dq_num = 0 - qbn_output_max_name = "batch_normalization/FusedBatchNormV3_eightbit_quantized_bn/frozen_bn_output_max" - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["Thost_inputs"].list.type != [11, 11, 1, 1, 1, 1, 1, 1, 1]: - conv_input_type = False - break - if i.op in ["Relu", "LeakyRelu", "FusedBatchNormV3"]: - found_fusion = False - break - if i.op == "_QuantizedFusedBatchNorm": - is_offset_const = i.attr["is_offset_const"].b - is_mean_const = i.attr["is_mean_const"].b - qbn_alpha = i.attr["alpha"].f - frozen_qbn_output_max = i.input[8] - qbn_num += 1 - if i.name == qbn_output_max_name: - frozen_qbn_output_max_value = i.attr["value"].tensor.float_val[0] - if i.op == "Dequantize": - dq_num += 1 - self.assertEqual(conv_input_type, True) - self.assertEqual(found_fusion, True) - self.assertEqual(qbn_num, 1) - self.assertEqual(dq_num, 0) - self.assertEqual(is_offset_const, True) - self.assertEqual(is_mean_const, True) - self.assertEqual(round(qbn_alpha, 7), 0.3) - self.assertEqual(frozen_qbn_output_max, qbn_output_max_name) - self.assertGreater(frozen_qbn_output_max_value, 126) - - @disable_random() - def test_bn_relu_conv_biasadd_relu(self): - logger.info("test_bn_relu_conv_biasadd_relu") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = 
tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - normed_0 = tf.compat.v1.layers.batch_normalization(x) - relu_0 = tf.nn.relu(normed_0, name="op_to_store_0") - conv = tf.nn.conv2d(relu_0, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed_1 = tf.compat.v1.layers.batch_normalization(conv) - relu_1 = tf.nn.relu(normed_1, name="op_to_store_1") - out_name = relu_1.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_1.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - - quantizer.model = output_graph_def - output_graph = quantizer.fit() - conv_input_type = True - found_fusion = True - qbn_num = 0 - dq_num = 0 - qbn_output_max_name = "batch_normalization/FusedBatchNormV3_eightbit_quantized_bn/frozen_bn_output_max" - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["Thost_inputs"].list.type != [11, 11, 1, 1, 1, 1, 1, 1, 1]: - conv_input_type = False - break - if i.op in ["Relu", "FusedBatchNormV3"]: - found_fusion = False - break - if i.op == "_QuantizedFusedBatchNorm": - is_offset_const = i.attr["is_offset_const"].b - is_mean_const = i.attr["is_mean_const"].b - frozen_qbn_output_max = i.input[8] - qbn_num += 1 - if i.name == qbn_output_max_name: - frozen_qbn_output_max_value = i.attr["value"].tensor.float_val[0] - if i.op == "Dequantize": - dq_num += 1 - self.assertEqual(conv_input_type, True) - self.assertEqual(found_fusion, True) - self.assertEqual(qbn_num, 1) - self.assertEqual(dq_num, 0) - self.assertEqual(is_offset_const, True) - self.assertEqual(is_mean_const, True) - self.assertEqual(frozen_qbn_output_max, qbn_output_max_name) - self.assertGreater(frozen_qbn_output_max_value, 126) - - @disable_random() - def test_bn_performance_only_false(self): - logger.info("test_bn_performance_only_false") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - normed_0 = tf.compat.v1.layers.batch_normalization(x) - relu_0 = tf.nn.relu(normed_0, name="op_to_store_0") - conv = tf.nn.conv2d(relu_0, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed_1 = tf.compat.v1.layers.batch_normalization(conv) - relu_1 = tf.nn.relu6(normed_1, name="op_to_store_1") - out_name = relu_1.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - quantizer = Quantization("fake_yaml_2.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_fusion = True - qconv_num = 0 - qbn_num = 0 - dq_num = 0 - for i in output_graph.graph_def.node: - if i.op in ["Relu6"]: - found_fusion = False - break - if i.op == "_FusedQuantizedConv2D": - qconv_num += 1 - if i.op == 
"_QuantizedFusedBatchNorm": - qbn_num += 1 - if i.op == "Dequantize": - dq_num += 1 - self.assertEqual(found_fusion, True) - self.assertEqual(qconv_num, 1) - self.assertEqual(qbn_num, 0) - self.assertEqual(dq_num, 1) - - @disable_random() - def test_bnex_performance_only_false(self): - logger.info("test_bnex_performance_only_false") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights_0 = tf.compat.v1.get_variable( - "weight_0", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - normed_0 = tf.compat.v1.layers.batch_normalization(x) - relu_0 = tf.nn.relu(normed_0, name="op_to_store_0") - conv_0 = tf.nn.conv2d(relu_0, conv_weights_0, strides=[1, 2, 2, 1], padding="VALID") - normed_1 = tf.compat.v1.layers.batch_normalization(conv_0) - conv_weights_1 = tf.compat.v1.get_variable( - "weight_1", [5, 5, 16, 2], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_1 = tf.nn.conv2d(normed_1, conv_weights_1, strides=[1, 3, 3, 1], padding="VALID") - relu_1 = tf.nn.relu6(conv_1, name="op_to_store_1") - out_name = relu_1.name.split(":")[0] - """graph_def = tf.compat.v1.get_default_graph().as_graph_def() - for node in graph_def.node: - - if node.name == "batch_normalization_1/FusedBatchNormV3": - node.op = "_FusedBatchNormEx" - with tf.Graph().as_default() as graph: - tf.import_graph_def(graph_def, name='') - """ - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - for node in output_graph_def.node: - if node.name == "batch_normalization_1/FusedBatchNormV3": - node.op = "_FusedBatchNormEx" - node.attr["activation_mode"].CopyFrom(attr_value_pb2.AttrValue(s=b"Relu")) - - quantizer = Quantization("fake_yaml_2.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_fusion = True - qconv_num = 0 - qbn_num = 0 - dq_num = 0 - for i in output_graph.graph_def.node: - if i.op in ["Relu6", "_FusedBatchNormEx"]: - found_fusion = False - break - if i.op == "_FusedQuantizedConv2D": - qconv_num += 1 - if i.op == "_QuantizedFusedBatchNorm": - qbn_num += 1 - if i.op == "Dequantize": - dq_num += 1 - self.assertEqual(found_fusion, True) - self.assertEqual(qconv_num, 2) - self.assertEqual(qbn_num, 0) - self.assertEqual(dq_num, 1) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_concat_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_concat_fusion.py deleted file mode 100644 index 93d252e793a..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_concat_fusion.py +++ /dev/null @@ -1,224 +0,0 @@ -# -# -# -*- coding: utf-8 -*- -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel -from neural_compressor.adaptor.tf_utils.util import disable_random, read_graph - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - outputs: predict - device: cpu - quantization: - model_wise: - weight: - granularity: 
per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: mse - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestTensorflowQdqConcatFusion(unittest.TestCase): - mb_model_url = ( - "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/inceptionv3_fp32_pretrained_model.pb" - ) - pb_path = "/tmp/.neural_compressor/inceptionv3_fp32.pb" - - @classmethod - def setUpClass(self): - if not os.path.exists(self.pb_path): - os.system("mkdir -p /tmp/.neural_compressor && wget {} -O {} ".format(self.mb_model_url, self.pb_path)) - self.op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(__file__), "../../neural_compressor/adaptor/tensorflow.yaml") - ).get_eightbit_patterns() - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - def test_tensorflow_concat_quantization(self): - output_graph_def = read_graph(self.pb_path) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 299, 299, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_quantized_concat_node = False - - target_concat_node_name = "v0/cg/incept_v3_a0/concat_eightbit_quantized_concatv2" - from neural_compressor.adaptor.tf_utils.graph_util import GraphAnalyzer - - cur_graph = GraphAnalyzer() - cur_graph.graph = output_graph.graph_def - graph_info = cur_graph.parse_graph() - found_quantized_concat_node = target_concat_node_name in graph_info - - self.assertEqual(found_quantized_concat_node, True) - min_out, max_out = [], [] - for input_conv_name in graph_info[target_concat_node_name].node.input[:4]: - # print (input_conv_name, graph_info[input_conv_name].node.input) - min_freezed_out_name = graph_info[input_conv_name].node.input[-2] - max_freezed_out_name = graph_info[input_conv_name].node.input[-1] - min_freezed_out_value = (graph_info[min_freezed_out_name].node.attr["value"].tensor.float_val)[0] - max_freezed_out_value = (graph_info[max_freezed_out_name].node.attr["value"].tensor.float_val)[0] - min_out.append(min_freezed_out_value) - max_out.append(max_freezed_out_value) - - self.assertEqual(len(set(min_out)), 1) - self.assertEqual(len(set(max_out)), 1) - - @disable_random() - def test_concat_with_different_input_type(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 128, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - x = tf.nn.relu(x) - sqrt = tf.math.sqrt(x) - relu_sqrt = tf.nn.relu(sqrt) - conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - concat = tf.concat([relu, conv_bias], 1) - pool = tf.nn.avg_pool(concat, 
ksize=1, strides=[1, 2, 2, 1], name="avgpool", padding="SAME") - final_node = tf.nn.relu(pool, name="op_to_store") - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 128, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantized_concat = False - for i in output_graph.graph_def.node: - if i.op == "QuantizedConcatV2": - quantized_concat = True - self.assertEqual(quantized_concat, False) - - @disable_random() - def test_concat_with_same_input_type(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 128, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - conv1_bias = tf.compat.v1.get_variable("bias1", [16], initializer=tf.compat.v1.random_normal_initializer()) - x = tf.nn.relu(x) - sqrt = tf.math.sqrt(x) - relu_sqrt = tf.nn.relu(sqrt) - conv = tf.nn.conv2d(relu_sqrt, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv, conv_bias) - relu1 = tf.nn.relu(conv_bias) - - conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv1_bias = tf.nn.bias_add(conv1, conv1_bias) - relu2 = tf.nn.relu(conv1_bias) - concat = tf.concat([relu1, relu2], 1) - pool = tf.nn.avg_pool(concat, ksize=1, strides=[1, 2, 2, 1], name="avgpool", padding="SAME") - final_node = tf.nn.relu(pool, name="op_to_store") - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 128, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantized_concat = False - for i in output_graph.graph_def.node: - if i.op == "QuantizedConcatV2": - quantized_concat = True - self.assertEqual(quantized_concat, True) - - @disable_random() - def test_concat_with_qint8_and_fp32_input_type(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 128, 16], name="input") - bias = tf.compat.v1.get_variable("bias", [16], initializer=tf.compat.v1.random_normal_initializer()) - - bias_add = tf.nn.bias_add(x, bias) - - pool = tf.nn.avg_pool(x, ksize=1, strides=[1, 1, 1, 1], name="avgpool", padding="SAME") - concat = tf.concat([bias_add, pool], 1) - final_node = tf.nn.relu(concat, name="op_to_store") - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = 
graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 128, 16), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - dtype = None - quantized_concat = False - from tensorflow.python.framework import dtypes - - for i in output_graph.graph_def.node: - if i.op == "QuantizedConcatV2": - dtype = dtypes.DType(i.attr["T"].type) - quantized_concat = True - self.assertEqual(quantized_concat, True) - self.assertEqual(dtype, dtypes.qint8) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_conv3d_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_conv3d_fusion.py deleted file mode 100644 index cbcb63e95e2..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_conv3d_fusion.py +++ /dev/null @@ -1,854 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import logging -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from pkg_resources import parse_version -from tensorflow.compat.v1 import graph_util -from tensorflow.python.framework import function - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm import FoldBatchNormNodesOptimizer -from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes import StripUnusedNodesOptimizer -from neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - - f.close() - - -class TestTensorflowQdqConvFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_conv3d_addv2_relu_fusion(self): - logging.getLogger().info("test_conv3d_addv2_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - top_relu = tf.nn.relu(x) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight_conv3d_1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2") - relu = tf.nn.relu(add) - conv3d_2_weights = tf.compat.v1.get_variable( - "weight_conv3d_2", [3, 3, 3, 32, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - - out_name = conv3d_2.name.split(":")[0] - with 
tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if b"Sum" in i.attr["fused_ops"].list.s: - found_conv_sumadd_fusion = True - if i.attr["fused_ops"].list.s == [b"BiasAdd", b"Relu", b"Requantize"]: - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, False) - self.assertEqual(found_conv_biasadd_fusion, True) - - @disable_random() - def test_single_conv3d_fusion(self): - logging.getLogger().info("test_single_conv3d_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - mul = tf.multiply(conv, 2.0, name="op_to_store") - out_name = mul.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - found_dequantize_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"Dequantize"]): - found_dequantize_fusion = True - self.assertEqual(found_conv_fusion, True) - self.assertEqual(found_dequantize_fusion, True) - - @disable_random() - def test_conv3d_biasadd_fusion(self): - logging.getLogger().info("test_conv3d_biasadd_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 64), dtype=tf.float32) - add = tf.raw_ops.AddV2(x=conv, y=y_const, name="addv2") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) 
- from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - found_dequantize_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Dequantize"]): - found_dequantize_fusion = True - self.assertEqual(found_conv_fusion, True) - self.assertEqual(found_dequantize_fusion, True) - - def test_conv3d_relu6_fusion(self): - logging.getLogger().info("test_conv3d_biasadd_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - relu6 = tf.nn.relu6(conv, name="op_to_store") - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - found_requantize_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Relu", b"Dequantize"]): - found_requantize_fusion = True - self.assertEqual(found_conv_fusion, True) - self.assertEqual(found_requantize_fusion, True) - - @disable_random() - def test_conv3d_add_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight1", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1_weights = tf.compat.v1.get_variable( - "weight2", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - conv1 = tf.nn.conv3d(x, conv1_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - add = conv + conv1 - relu = tf.nn.relu(add) - - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = 
common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_fusion(self): - logging.getLogger().info("test_conv3d_add_const_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - add = conv + tf.constant( - [ - [ - [ - [ - [ - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - 0.000015179151887423359, - 0.000022200847524800338, - -0.000009995766049541999, - -0.0000022956028260523453, - 0.000008830029400996864, - 0.0000017190360495078494, - 0.000019561824956326745, - 0.00014721050683874637, - -0.000005871841494808905, - 0.000004377178811409976, - -0.000006191140982991783, - 0.000009258330464945175, - -0.000009839599442784674, - 0.000008547322067897767, - 0.000004629391241905978, - 2.345327061448188e-7, - ] - ] - ] - ] - ] - ) - - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "AddV2": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_addn_const_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_const_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - 
top_relu = tf.nn.relu(x) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2_1") - var = tf.compat.v1.get_variable( - "add_y", [1, 64, 32, 32, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - add_2 = tf.raw_ops.AddV2(x=add_1, y=var, name="addv2_2") - relu = tf.nn.relu(add_2) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if str(b"Sum") in str(i.attr["fused_ops"].list.s): - found_conv_sumadd_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Sum", b"Relu"]): - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, False) - self.assertEqual(found_conv_biasadd_fusion, False) - - @disable_random() - def test_conv3d_add_const_addn_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_const_addn_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2_1") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_2") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if str(b"Sum") in 
str(i.attr["fused_ops"].list.s): - found_conv_sumadd_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Sum", b"Relu"]): - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, True) - - @disable_random() - def test_conv3d_add_addn_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=tf.constant(np.random.randn(32), dtype=tf.float32), name="addv2_4") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sum", - b"Requantize", - ]: - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_addn_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name="addv2_1") - conv3d_3_weights = tf.compat.v1.get_variable( - "weight3", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_3_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_2") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, 
output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_relu_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sum", - b"Relu", - b"Requantize", - ]: - found_relu_fusion = True - self.assertEqual(found_relu_fusion, True) - - @disable_random() - def test_conv3d_leakyrelu_fusion(self): - logging.getLogger().info("test_conv3d_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 64, 64, 64, 1], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [4, 4, 4, 1, 64], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv3d(x_pad, conv_weights, strides=[1, 2, 2, 2, 1], padding="VALID") - relu = tf.nn.leaky_relu(conv) - - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 64, 64, 64, 1), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"LeakyRelu", - b"Dequantize", - ]: - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_fusion(self): - logging.getLogger().info("test_conv3d_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name="addv2") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - 
quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sum", - b"Requantize", - ]: - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_addn_relu_requantize_fusion(self): - logging.getLogger().info("test_conv3d_add_const_addn_relu_requantize_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_1") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_2") - relu = tf.nn.relu(add_2) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_sumadd_fusion = False - found_conv_biasadd_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - if str(b"Sum") in str(i.attr["fused_ops"].list.s): - found_conv_sumadd_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Sum", b"Relu", b"Requantize"]): - found_conv_biasadd_fusion = True - self.assertEqual(found_conv_sumadd_fusion, True) - self.assertEqual(found_conv_biasadd_fusion, True) - - @disable_random() - def test_conv3d_add_const_addn_fusion(self): - logging.getLogger().info("test_conv3d_add_const_addn_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_1") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_2 = 
tf.raw_ops.AddV2(x=add_1, y=conv3d_2, name="addv2_2") - out_name = add_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_no_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_no_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2") - pooling = tf.nn.max_pool(add, ksize=1, strides=[1, 2, 2, 2, 1], padding="SAME") - out_name = pooling.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_relu_fusion(self): - logging.getLogger().info("test_conv3d_add_const_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2") - relu = tf.nn.relu(add) - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental 
import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_const_leakyrelu_add_fusion(self): - logging.getLogger().info("test_conv3d_add_const_leakyrelu_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(1, 1, 1, 1, 32), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=y_const, name="addv2_1") - relu = tf.nn.leaky_relu(add_1) - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=relu, y=conv3d_2, name="addv2_2") - out_name = add_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv3d_add_addn_non_const_fusion(self): - logging.getLogger().info("test_conv3d_add_addn_non_const_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 128, 64, 64, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv3d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_1 = tf.nn.conv3d(top_relu, conv3d_1_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - conv3d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_2 = tf.nn.conv3d(top_relu, conv3d_2_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add_1 = tf.raw_ops.AddV2(x=conv3d_1, y=conv3d_2, name="addv2_1") - conv3d_3_weights = tf.compat.v1.get_variable( - "weight3", [3, 3, 3, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - conv3d_3 = tf.nn.conv3d(top_relu, conv3d_3_weights, strides=[1, 2, 2, 2, 1], padding="SAME") - add = 
tf.raw_ops.AddV2(x=add_1, y=conv3d_3, name="addv2_2") - out_name = add.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 128, 64, 64, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv3D" and str(i.attr["fused_ops"].list.s) == str( - [b"BiasAdd", b"Sum", b"Requantize"] - ): - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_conv_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_conv_fusion.py deleted file mode 100644 index f10209114c6..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_conv_fusion.py +++ /dev/null @@ -1,922 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import logging -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.python.framework import function - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - - f.close() - - -class TestTensorflowQdqConvFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_fold_pad_conv(self): - logging.getLogger().info("test_fold_pad_conv") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - relu = tf.nn.relu(normed, name="op_to_store") - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - 
quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_pad = False - - for i in output_graph.graph_def.node: - if i.op == "Pad": - found_pad = True - break - self.assertEqual(found_pad, False) - - @disable_random() - def test_conv_relu_fusion(self): - logging.getLogger().info("test_conv_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - relu = tf.nn.relu(conv) - - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_conv_fusion = False - break - - self.assertEqual(found_conv_fusion, False) - - @disable_random() - def test_conv_biasadd_relu6_fusion(self): - logging.getLogger().info("test_conv_biasadd_relu6_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu6 = tf.nn.relu6(normed, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu6": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_swishf32_fusion(self): - logging.getLogger().info("test_conv_biasadd_swishf32_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, 
conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - @function.Defun(tf.float32, func_name="swish_f32") - def swish_f32(x): - return tf.nn.silu(x, beta=1.0) - - swish = swish_f32(normed, name="swish_f32_output_node") - - out_name = swish.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "swish_f32": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_addv2_fusion(self): - logging.getLogger().info("test_conv_addv2_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv1_weights = tf.compat.v1.get_variable( - "weight_conv1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x, conv1_weights, strides=[1, 2, 2, 1], padding="SAME") - conv2_weights = tf.compat.v1.get_variable( - "weight_conv2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(x, conv2_weights, strides=[1, 2, 2, 1], padding="SAME") - sumadd = tf.raw_ops.AddV2(x=conv1, y=conv2, name="addv2") - - out_name = sumadd.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op.find("QuantizedConv2D") != -1: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_add_relu_fusion(self): - logging.getLogger().info("test_conv_biasadd_add_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.nn.bias_add(conv2, tf.constant([3.0, 1.2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 12, 2, 3, 4])) - relu = tf.nn.relu(normed2 + tf.constant([3.0])) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - 
from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op.find("QuantizedConv2D") != -1: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_addv2_relu_fallback_fusion_1(self): - logging.getLogger().info("test_conv_biasadd_addv2_relu_fallback_fusion_1") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.leaky_relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.AddV2(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sum", - b"Relu", - b"Requantize", - ]: - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_fusion_with_last_conv(self): - logging.getLogger().info("test_conv_fusion_with_last_conv") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_weights_2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(pooling, conv_weights_2, strides=[1, 2, 2, 1], padding="VALID") - conv_weights_3 = tf.compat.v1.get_variable( - "weight3", [3, 3, 16, 16], 
initializer=tf.compat.v1.random_normal_initializer() - ) - relu2 = tf.nn.relu(conv2) - conv3 = tf.nn.conv2d(relu2, conv_weights_3, strides=[1, 2, 2, 1], padding="VALID") - - relu3 = tf.nn.relu(conv3) - relu6 = tf.nn.relu6(relu3, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantize_v2_count = 0 - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - break - - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_conv_fusion_with_max_pooling(self): - logging.getLogger().info("test_conv_fusion_with_max_pooling") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - - relu = tf.nn.relu(x) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - conv_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(pooling, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - biasadd = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = biasadd.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantized_pool_data_type = None - quantized_conv_data_type = None - for i in output_graph.graph_def.node: - if i.op.find("QuantizedMaxPool") != -1: - quantized_pool_data_type = i.attr["T"].type - if i.op.find("QuantizedConv2D") != -1: - quantized_conv_data_type = i.attr["Tinput"].type - - self.assertNotEqual(quantized_pool_data_type, None) - self.assertEqual(quantized_pool_data_type, quantized_conv_data_type) - - @disable_random() - def test_conv_biasadd_fusion(self): - logging.getLogger().info("test_conv_biasadd_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - - out_name = normed.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = 
graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "batch_normalization/FusedBatchNormV3": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv_biasadd_fusion(self): - logging.getLogger().info("test_depthwiseconv_biasadd_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(top_relu, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - - out_name = normed.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "batch_normalization/FusedBatchNormV3": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_relu_fusion(self): - logging.getLogger().info("test_conv_biasadd_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed, name="op_to_store") - - out_name = relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_leakyrelu_fusion(self): - 
logging.getLogger().info("test_conv_biasadd_leakyrelu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - leaky_relu = tf.nn.leaky_relu(normed, name="op_to_store") - - out_name = leaky_relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Leaky_Relu": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv_biasadd_relu6_fusion(self): - logging.getLogger().info("test_depthwiseconv_biasadd_relu6_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.compat.v1.nn.depthwise_conv2d_native(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu6 = tf.nn.relu6(normed, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu6": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv_biasadd_relu_fusion(self): - logging.getLogger().info("test_depthwiseconv_biasadd_relu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.compat.v1.nn.depthwise_conv2d_native(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu6 = tf.nn.relu6(normed, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental 
import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_single_fusion(self): - logging.getLogger().info("test_conv_single_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv1_weights = tf.compat.v1.get_variable( - "weight_conv1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x_pad, conv1_weights, strides=[1, 2, 2, 1], padding="VALID") - matmul_weights = tf.compat.v1.get_variable( - "weight_matmul", [1, 28, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - matmul = tf.matmul(conv1, matmul_weights) - conv2_weights = tf.compat.v1.get_variable( - "weight_conv2", [7, 7, 32, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(matmul, conv2_weights, strides=[1, 2, 2, 1], padding="VALID") - leaky_relu = tf.nn.leaky_relu(conv2, name="op_to_store") - - out_name = leaky_relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - find_single_qconv = [] - for i in output_graph.graph_def.node: - # BatchMatMul Quantization disabled - if i.op == "_FusedQuantizedConv2D": - find_single_qconv.append(i.attr["fused_ops"].list.s == [b"Requantize"]) - - self.assertEqual(find_single_qconv, [False, False]) - - @disable_random() - def test_conv_fusion_with_last_matmul(self): - logging.getLogger().info("test_conv_fusion_with_last_matmul") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - # paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - # x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(top_relu, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - pooling = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME") - reshape = tf.reshape(pooling, [-1, 3136]) - - y_data = np.random.random([3136, 1]) - - y = tf.constant(y_data, dtype=tf.float32, shape=[3136, 1]) - z = tf.raw_ops.MatMul(a=reshape, b=y, name="matmul_1") - relu1 = tf.nn.relu(z) - y_data_1 = np.random.random([1, 1]) - y_1 = tf.constant(y_data_1, dtype=tf.float32, shape=[1, 1]) - - z_2nd_matmul = tf.raw_ops.MatMul(a=relu1, b=y_1, 
name="matmul_2") - relu6 = tf.nn.relu6(z_2nd_matmul, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - quantize_v2_count = 0 - for i in output_graph.graph_def.node: - if i.op == "QuantizeV2": - quantize_v2_count += 1 - break - - self.assertEqual(quantize_v2_count, 1) - - @disable_random() - def test_conv2d_add_const_leakyrelu_add_fusion(self): - logging.getLogger().info("test_conv2d_add_const_leakyrelu_add_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(x, paddings, "CONSTANT") - top_relu = tf.nn.relu(x_pad) - conv2d_1_weights = tf.compat.v1.get_variable( - "weight1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2d_1 = tf.nn.conv2d(top_relu, conv2d_1_weights, strides=[1, 2, 2, 1], padding="SAME") - y_const = tf.constant(np.random.randn(16), dtype=tf.float32) - add_1 = tf.raw_ops.AddV2(x=conv2d_1, y=y_const, name="addv2_1") - relu = tf.nn.leaky_relu(add_1) - conv2d_2_weights = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2d_2 = tf.nn.conv2d(top_relu, conv2d_2_weights, strides=[1, 2, 2, 1], padding="SAME") - add_2 = tf.raw_ops.AddV2(x=relu, y=conv2d_2, name="addv2_2") - out_name = add_2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D": - found_conv_fusion = True - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv_biasadd_leakyrelu_fusion(self): - logging.getLogger().info("test_depthwiseconv_biasadd_leakyrelu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.compat.v1.nn.depthwise_conv2d_native(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - leaky_relu = tf.nn.leaky_relu(normed, name="op_to_store") - - out_name = leaky_relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, 
input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Relu": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_conv_biasadd_addv2_relu_fallback_fusion_2(self): - logging.getLogger().info("test_conv_biasadd_addv2_relu_fallback_fusion_2") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - # relu = tf.nn.relu(normed) - - conv_weights2 = tf.compat.v1.get_variable( - "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME") - normed2 = tf.compat.v1.layers.batch_normalization(conv2) - # relu2 = tf.nn.relu(normed2) - add = tf.raw_ops.AddV2(x=normed, y=normed2, name="addv2") - relu = tf.nn.relu(add) - relu6 = tf.nn.relu6(relu, name="op_to_store") - - out_name = relu6.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Requantize"]: - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - # fuse conv + biasadd + elu - @disable_random() - def test_conv_biasadd_elu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - elu = tf.nn.elu(normed, name="op_to_store") - - out_name = elu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 
16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Elu": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - # fuse conv + biasadd + sigmoid - @disable_random() - def test_conv_biasadd_sigmoid_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="VALID") - normed = tf.compat.v1.layers.batch_normalization(conv) - - sigmoid = tf.math.sigmoid(normed, name="op_to_store") - - out_name = sigmoid.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "Sigmoid": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_depthwiseconv_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_depthwiseconv_fusion.py deleted file mode 100644 index 32f1650068e..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_depthwiseconv_fusion.py +++ /dev/null @@ -1,306 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.core.framework import attr_value_pb2, graph_pb2, node_def_pb2 -from tensorflow.python.framework import dtypes, tensor_util - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -def build_conv2d_biasadd_add_relu6_mul_mul(): - input_node = node_def_pb2.NodeDef() - input_node.name = "input" - input_node.op = "Placeholder" - input_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - - conv1_weight_node = node_def_pb2.NodeDef() - conv1_weight_node.name = "conv1_weights" - conv1_weight_node.op = "Const" - conv1_weight_value = np.float32(np.abs(np.random.randn(3, 3, 3, 32))) - conv1_weight_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - 
conv1_weight_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto( - conv1_weight_value, conv1_weight_value.dtype.type, conv1_weight_value.shape - ) - ) - ) - - conv1_node = node_def_pb2.NodeDef() - conv1_node.name = "conv1" - conv1_node.op = "Conv2D" - conv1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - conv1_node.input.extend([input_node.name, conv1_weight_node.name]) - conv1_node.attr["strides"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["dilations"].CopyFrom( - attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(i=[1, 1, 1, 1])) - ) - conv1_node.attr["padding"].CopyFrom(attr_value_pb2.AttrValue(s=b"SAME")) - conv1_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - - bias_node = node_def_pb2.NodeDef() - bias_node.name = "conv1_bias" - bias_node.op = "Const" - bias_value = np.float32(np.abs(np.random.randn(32))) - bias_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(bias_value, bias_value.dtype.type, bias_value.shape) - ) - ) - - bias_add_node = node_def_pb2.NodeDef() - bias_add_node.name = "conv1_bias_add" - bias_add_node.op = "BiasAdd" - bias_add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - bias_add_node.attr["data_format"].CopyFrom(attr_value_pb2.AttrValue(s=b"NHWC")) - bias_add_node.input.extend([conv1_node.name, bias_node.name]) - - offset_node = node_def_pb2.NodeDef() - offset_node.name = "offset" - offset_node.op = "Const" - offset_value = np.float32(np.abs(np.random.randn(1))) - offset_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - offset_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(offset_value, offset_value.dtype.type, offset_value.shape) - ) - ) - - add_node = node_def_pb2.NodeDef() - add_node.op = "Add" - add_node.name = "add/hard_swish" - add_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - add_node.input.extend([bias_add_node.name, offset_node.name]) - - relu_node = node_def_pb2.NodeDef() - relu_node.op = "Relu6" - relu_node.name = "relu6/hard_swish" - relu_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - relu_node.input.extend([add_node.name]) - - mul_node = node_def_pb2.NodeDef() - mul_node.op = "Mul" - mul_node.name = "mul/hard_swish" - mul_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - mul_node.input.extend([bias_add_node.name, relu_node.name]) - - offset1_node = node_def_pb2.NodeDef() - offset1_node.name = "mul1_offset" - offset1_node.op = "Const" - offset1_value = np.float32(np.abs(np.random.randn(1))) - offset1_node.attr["dtype"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - offset1_node.attr["value"].CopyFrom( - attr_value_pb2.AttrValue( - tensor=tensor_util.make_tensor_proto(offset1_value, offset1_value.dtype.type, offset1_value.shape) - ) - ) - - mul1_node = node_def_pb2.NodeDef() - mul1_node.op = "Mul" - mul1_node.name = "mul1/hard_swish" - mul1_node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum)) - mul1_node.input.extend([mul_node.name, offset1_node.name]) - - test_graph = graph_pb2.GraphDef() - - 
test_graph.node.extend( - [ - input_node, - conv1_weight_node, - conv1_node, - bias_node, - bias_add_node, - add_node, - relu_node, - offset_node, - offset1_node, - mul_node, - mul1_node, - ] - ) - return test_graph - - -class TestConv2DBiasAddAddReluFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_single_depthwiseconv2d_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - out_name = conv.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - found_dequantize_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedDepthwiseConv2D": - found_conv_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"Dequantize"]): - found_dequantize_fusion = True - self.assertEqual(found_conv_fusion, True) - self.assertEqual(found_dequantize_fusion, True) - - @disable_random() - def test_depthwiseconv2d_biasadd_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - out_name = normed.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - found_dequantize_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedDepthwiseConv2D": - found_conv_fusion = True - if str(i.attr["fused_ops"].list.s) == str([b"BiasAdd", b"Dequantize"]): - found_dequantize_fusion = True - self.assertEqual(found_conv_fusion, True) - self.assertEqual(found_dequantize_fusion, True) - - @disable_random() - def test_depthwiseconv2dnative_biasadd_add_relu6_mul_mul_fusion(self): - output_graph_def = build_conv2d_biasadd_add_relu6_mul_mul() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = 
quantizer.dataset("dummy", shape=(100, 224, 224, 3), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedConv2D": - found_conv_fusion = True - break - self.assertEqual(found_conv_fusion, True) - - @disable_random() - def test_depthwiseconv2d_biasadd_leakyrelu_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.depthwise_conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding="VALID") - - normed = tf.compat.v1.layers.batch_normalization(conv, name="op_to_store") - - leakyrelu = tf.nn.leaky_relu(normed) - out_name = leakyrelu.name.split(":")[0] - - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = False - - for i in output_graph.graph_def.node: - if i.op == "_FusedQuantizedDepthwiseConv2D": - found_conv_fusion = True - break - - self.assertEqual(found_conv_fusion, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_matmul_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_matmul_fusion.py deleted file mode 100644 index 0040c4d53a5..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_matmul_fusion.py +++ /dev/null @@ -1,1258 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow.compat.v1 as tf -import yaml -from tensorflow.python.framework import dtypes - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestGraphMatMulFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - self.op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(__file__), "../../neural_compressor/adaptor/tensorflow.yaml") - ).get_eightbit_patterns(True) - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_matmul_biasadd_relu_requantize_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 
0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.relu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Relu", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_first_matmul_biasadd_relu_fusion(self): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.relu(z, name="op_to_store") - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - found_quantized_matmul = False - for i in output_graph.graph_def.node: - if ( - i.op == "QuantizeV2" - and i.name == "MatMul_eightbit_quantize_x" - and i.attr["T"].type == dtypes.quint8 - ): - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_requantize_dequantize_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.identity(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Dequantize"]: - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def 
test_matmul_biasadd_requantize_dequantize_last_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2], name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if ( - i.op == "_QuantizedMatMul" - and i.name == "op_to_store" - and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Dequantize"] - ): - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_fusion_with_transpose_b_true(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="no_quant_matmul", transpose_b=True) - z = tf.nn.relu6(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummybiasadd_relu6_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="quant_matmul") - z = tf.nn.relu6(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.name == "op_to_store": - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def 
test_matmul_with_reshape_transpose(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.matmul(x, reshape, name="no_quant_matmul") - z = tf.nn.bias_add(z, [1, 2], name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_with_add(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.matmul(x, reshape, name="no_quant_matmul") - z = tf.math.add(z, [1, 2], name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_requantize_dequantize_fusion_with_softmax(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - biasadd = tf.nn.bias_add(z, [1, 2]) - biasadd1 = tf.nn.bias_add(biasadd, [1, 1]) - - y1 = tf.constant(x_data, dtype=tf.float32, shape=[2, 2]) - matmul1 = tf.matmul(biasadd1, y1) - - biasadd2 = tf.nn.bias_add(matmul1, [1, 1]) - - z = tf.nn.softmax(biasadd2, name="op_to_store") - found_quantized_matmul = False - if tf.version.VERSION < "2.2.0": - found_quantized_matmul = False - else: - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - 
quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - count = 0 - for i in output_graph.model.as_graph_def().node: - if i.op == "_QuantizedMatMul": - count += 1 - found_quantized_matmul = bool(count > 1) - self.assertEqual(found_quantized_matmul, False) - - def test_matmul_biasadd_relu_non_const_weight(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.matmul(x, x, name="quant_matmul_non_const_weight") - biasadd = tf.nn.bias_add(y, [1, 2]) - z = tf.nn.relu(biasadd) - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - def test_matmul_biasadd_non_const_weight(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.matmul(x, x, name="quant_matmul_non_const_weight") - z = tf.nn.bias_add(y, [1, 2]) - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_with_dummy_biasadd(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="no_quant_matmul") - z = tf.identity(z, name="op_to_store") - found_quantized_matmul = True - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "MatMul": - found_quantized_matmul = False - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_first_matmul_addv2_relu_fusion(self): - x_data = np.array([[0.1, 
0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - a = tf.matmul(x, y) - b = tf.matmul(x, y) - c = tf.nn.relu(b) - add = tf.raw_ops.AddV2(x=a, y=c, name="addv2") - z = tf.nn.relu(add, name="op_to_store") - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - found_quantized_matmul = False - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul": - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - - # batchmatmul quantization disabled temporarily for its bad performance - """ - @disable_random() - def test_batchmatmulv2_dequantize_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.5, 0.6]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name='x') - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.raw_ops.BatchMatMulV2(x=x, y=y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.relu(z, name='op_to_store') - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - quantizer = Quantization('fake_yaml.yaml') - dataset = quantizer.dataset('dummy', shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - for i in output_graph.graph_def.node: - if i.op == '_QuantizedBatchMatMul': - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_batchmatmulv2_mul_dequantize_fusion(self): - np_input = np.random.randn(1, 2, 4, 6).astype(np.float32) - np_filter = np.random.randn(1, 2, 6, 5).astype(np.float32) - input = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 2, 4, 6)) - filter = tf.constant(np_filter) - mul = tf.constant(0.2) - z = tf.raw_ops.BatchMatMulV2(x=input, y=filter) - z = tf.raw_ops.Mul(x=z, y=mul) - z = tf.nn.relu(z, name='op_to_store') - - with tf.Session() as sess: - sess.run(z, feed_dict={input: np_input, filter: np_filter}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - quantizer = Quantization('fake_yaml.yaml') - dataset = quantizer.dataset('dummy', shape=(1, 2, 4, 6), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - found_quantized_matmul = False - for i in output_graph.graph_def.node: - if i.op == '_QuantizedBatchMatMul': - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def 
test_batchmatmulv2_add_dequantize_fusion(self): - np_input = np.random.randn(1, 2, 4, 6).astype(np.float32) - np_filter = np.random.randn(1, 2, 6, 5).astype(np.float32) - np_add = np.random.randn(1, 2, 4, 5).astype(np.float32) - input = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 2, 4, 6)) - filter = tf.constant(np_filter) - add = tf.constant(np_add) - z = tf.raw_ops.BatchMatMulV2(x=input, y=filter) - z = tf.raw_ops.Add(x=z, y=add) - z = tf.nn.relu(z, name='op_to_store') - - with tf.Session() as sess: - sess.run(z, feed_dict={input: np_input, filter: np_filter}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - quantizer = Quantization('fake_yaml.yaml') - dataset = quantizer.dataset('dummy', shape=(1, 2, 4, 6), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - found_quantized_matmul = False - for i in output_graph.graph_def.node: - if i.op == '_QuantizedBatchMatMul': - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_batchmatmulv2_mul_add_dequantize_fusion(self): - np_input = np.random.randn(1, 2, 4, 6).astype(np.float32) - np_filter = np.random.randn(1, 2, 6, 5).astype(np.float32) - np_add = np.random.randn(1, 2, 4, 5).astype(np.float32) - input = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 2, 4, 6)) - filter = tf.constant(np_filter) - mul = tf.constant(0.2) - add = tf.constant(np_add) - z = tf.raw_ops.BatchMatMulV2(x=input, y=filter) - z = tf.raw_ops.Mul(x=z, y=mul) - z = tf.raw_ops.Add(x=z, y=add) - z = tf.nn.relu(z, name='op_to_store') - - with tf.Session() as sess: - sess.run(z, feed_dict={input: np_input, filter: np_filter}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - quantizer = Quantization('fake_yaml.yaml') - dataset = quantizer.dataset('dummy', shape=(1, 2, 4, 6), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - found_quantized_matmul = False - for i in output_graph.graph_def.node: - if i.op == '_QuantizedBatchMatMul': - found_quantized_matmul = True - break - - self.assertEqual(found_quantized_matmul, True) - """ - - @disable_random() - def test_matmul_biasadd_relu6_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.relu6(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if 
i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Relu6", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_leakyrelu_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.leaky_relu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"LeakyRelu", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_geluapproximate_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.gelu(z, approximate=True, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"GeluApproximate", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_geluexact_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.gelu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = 
common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"GeluExact", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_elu_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.nn.elu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Elu", b"Dequantize"]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_tanh_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.math.tanh(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Tanh", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_sigmoid_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.math.sigmoid(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - 
quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sigmoid", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_relu_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y, name="quant_matmul") - z = tf.nn.relu(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Relu", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_relu6_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.relu6(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Relu6", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_leakyrelu_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.leaky_relu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = 
quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"LeakyRelu", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_geluapproximate_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.gelu(z, approximate=True, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"GeluApproximate", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_geluexact_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.gelu(z, approximate=False, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"GeluExact", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_elu_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.elu(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import 
Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Elu", b"Dequantize"]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_tanh_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.math.tanh(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Tanh", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_dummy_biasadd_sigmoid_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.math.sigmoid(z, name="op_to_store") - found_quantized_matmul = False - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [ - b"BiasAdd", - b"Sigmoid", - b"Dequantize", - ]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_add_const_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.matmul(x, reshape, name="quant_matmul") - z = tf.math.add(z, [1, 2], name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, 
feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Dequantize"]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_add_non_const_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - transpose = tf.transpose(y, perm=[1, 0]) - reshape = tf.reshape(transpose, [2, 2]) - z = tf.matmul(x, reshape, name="quant_matmul") - z = tf.math.add(z, x, name="addv2") - z = tf.nn.relu(z, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"Dequantize"]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_add_const_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.math.add(z, [1, 2], name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Dequantize"]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - @disable_random() - def test_matmul_biasadd_add_non_const_fusion(self): - g = tf.Graph() - with g.as_default(): - x_data = np.array([[0.1, 0.2], [0.2, 0.3]]) - y_data = np.array([[1, 2], [3, 4]], dtype=np.float32) - x = tf.placeholder(tf.float32, shape=[2, 2], name="x") - y = tf.constant(y_data, dtype=tf.float32, shape=[2, 2]) - z = 
tf.matmul(x, y) - z = tf.nn.bias_add(z, [1, 2]) - z = tf.math.add(z, x, name="op_to_store") - found_quantized_matmul = False - - with tf.Session() as sess: - sess.run(z, feed_dict={x: x_data, y: y_data}) - float_graph_def = sess.graph.as_graph_def() - - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(2, 2), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.eval_dataloader = common.DataLoader(dataset, batch_size=2) - quantizer.model = float_graph_def - output_graph = quantizer.fit() - - for i in output_graph.graph_def.node: - if i.op == "_QuantizedMatMul" and i.attr["fused_ops"].list.s == [b"BiasAdd", b"Dequantize"]: - found_quantized_matmul = True - break - self.assertEqual(found_quantized_matmul, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_new_conv_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_new_conv_fusion.py deleted file mode 100644 index 806171b7c56..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_new_conv_fusion.py +++ /dev/null @@ -1,146 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import logging -import os -import unittest - -import tensorflow as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.python.framework import function - -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: input - device: cpu - use_bf16: True - quantization: - model_wise: - weight: - granularity: per_tensor - scheme: sym - dtype: int8 - algorithm: minmax - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.1 - exit_policy: - performance_only: True - workspace: - path: saved - """ - - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - - f.close() - - -class TestTensorflowNewQdqConvFusion(unittest.TestCase): - @classmethod - def setUpClass(self): - build_fake_yaml() - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_conv_biasadd_add_leakyrelu_fusion(self): - logging.getLogger().info("test_conv_biasadd_add_leakyrelu_fusion") - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME") - normed = tf.compat.v1.layers.batch_normalization(conv) - conv2_weights = tf.compat.v1.get_variable( - "weight_conv2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(x, conv2_weights, strides=[1, 2, 2, 1], padding="SAME") - sumadd = tf.raw_ops.AddV2(x=normed, y=conv2, name="addv2") - leaky_relu = tf.nn.leaky_relu(sumadd, name="op_to_store") - - out_name = leaky_relu.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), 
label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - found_conv_fusion = True - - for i in output_graph.graph_def.node: - if i.op == "LeakyRelu": - found_conv_fusion = False - break - self.assertEqual(found_conv_fusion, False) - - @disable_random() - def test_resizebilinear_bf16_input(self): - os.environ["FORCE_BF16"] = "1" - x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input") - top_relu = tf.nn.relu(x) - paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]]) - x_pad = tf.pad(top_relu, paddings, "CONSTANT") - conv1_weights = tf.compat.v1.get_variable( - "weight_conv1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer() - ) - conv1 = tf.nn.conv2d(x_pad, conv1_weights, strides=[1, 2, 2, 1], padding="VALID") - matmul_weights = tf.compat.v1.get_variable( - "weight_matmul", [1, 28, 16, 32], initializer=tf.compat.v1.random_normal_initializer() - ) - matmul = tf.linalg.matmul(conv1, matmul_weights) - conv2_weights = tf.compat.v1.get_variable( - "weight_conv2", [7, 7, 32, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv2 = tf.nn.conv2d(matmul, conv2_weights, strides=[1, 2, 2, 1], padding="VALID") - leaky_relu = tf.nn.leaky_relu(conv2, name="op_to_store") - resize_bili1 = tf.raw_ops.ResizeBilinear(images=leaky_relu, size=(28, 28)) - resize_bili2 = tf.raw_ops.ResizeBilinear(images=resize_bili1, size=(14, 14)) - out_name = resize_bili2.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 56, 56, 16), label=True) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - cast_counter = 0 - - for i in output_graph.graph_def.node: - if i.op == "Cast": - cast_counter += 1 - self.assertEqual(cast_counter, 2) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tensorflow_graph_qdq_pooling_fusion.py b/test/tfnewapi/test_tensorflow_graph_qdq_pooling_fusion.py deleted file mode 100644 index e50e0e0e651..00000000000 --- a/test/tfnewapi/test_tensorflow_graph_qdq_pooling_fusion.py +++ /dev/null @@ -1,141 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -import os -import unittest - -import numpy as np -import tensorflow.compat.v1 as tf -import yaml -from tensorflow.compat.v1 import graph_util -from tensorflow.python.framework import dtypes - -from neural_compressor.adaptor.tensorflow import TensorflowQuery -from neural_compressor.adaptor.tf_utils.util import disable_random - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: fake_yaml - framework: tensorflow - inputs: x - outputs: op_to_store - device: cpu - evaluation: - accuracy: - metric: - topk: 1 - tuning: - strategy: - name: basic - accuracy_criterion: - relative: 0.01 - exit_policy: - performance_only: True - workspace: - path: saved - """ - y = yaml.load(fake_yaml, Loader=yaml.SafeLoader) - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - yaml.dump(y, f) - f.close() - - -class TestGraphQDQPoolingFusion(unittest.TestCase): - 
@classmethod - def setUpClass(self): - build_fake_yaml() - self.op_wise_sequences = TensorflowQuery( - local_config_file=os.path.join(os.path.dirname(__file__), "../../neural_compressor/adaptor/tensorflow.yaml") - ).get_eightbit_patterns(True) - - @classmethod - def tearDownClass(self): - os.remove("fake_yaml.yaml") - - @disable_random() - def test_qdq_maxpool_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [1], initializer=tf.compat.v1.random_normal_initializer()) - x = tf.nn.relu(x) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - relu2 = tf.nn.relu(relu) - pool = tf.nn.max_pool(relu2, ksize=1, strides=[1, 2, 2, 1], name="maxpool", padding="SAME") - conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - x = tf.nn.relu(conv_bias) - final_node = tf.nn.relu(x, name="op_to_store") - - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 30, 30, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_quantized_maxpool = False - for i in output_graph.graph_def.node: - if i.op == "QuantizedMaxPool": - found_quantized_maxpool = True - break - self.assertEqual(found_quantized_maxpool, True) - - @disable_random() - def test_qdq_avgpool_fusion(self): - x = tf.compat.v1.placeholder(tf.float32, [1, 30, 30, 1], name="input") - conv_weights = tf.compat.v1.get_variable( - "weight", [2, 2, 1, 1], initializer=tf.compat.v1.random_normal_initializer() - ) - conv_bias = tf.compat.v1.get_variable("bias", [1], initializer=tf.compat.v1.random_normal_initializer()) - x = tf.nn.relu(x) - conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - normed = tf.compat.v1.layers.batch_normalization(conv) - - relu = tf.nn.relu(normed) - relu2 = tf.nn.relu(relu) - pool = tf.nn.avg_pool(relu2, ksize=1, strides=[1, 2, 2, 1], name="avgpool", padding="SAME") - conv1 = tf.nn.conv2d(pool, conv_weights, strides=[1, 2, 2, 1], padding="SAME", name="last") - conv_bias = tf.nn.bias_add(conv1, conv_bias) - x = tf.nn.relu(conv_bias) - final_node = tf.nn.relu(x, name="op_to_store") - - out_name = final_node.name.split(":")[0] - with tf.compat.v1.Session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_graph_def = graph_util.convert_variables_to_constants( - sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name] - ) - from neural_compressor.experimental import Quantization, common - - quantizer = Quantization("fake_yaml.yaml") - dataset = quantizer.dataset("dummy", shape=(100, 30, 30, 1), label=True) - quantizer.calib_dataloader = common.DataLoader(dataset) - quantizer.eval_dataloader = 
common.DataLoader(dataset) - quantizer.model = output_graph_def - output_graph = quantizer.fit() - - found_quantized_avgpool = False - for i in output_graph.graph_def.node: - if i.op == "QuantizedAvgPool": - found_quantized_avgpool = True - break - self.assertEqual(found_quantized_avgpool, True) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tf_spr_base_distributed_metrics.py b/test/tfnewapi/test_tf_spr_base_distributed_metrics.py deleted file mode 100644 index 000c3175e0d..00000000000 --- a/test/tfnewapi/test_tf_spr_base_distributed_metrics.py +++ /dev/null @@ -1,975 +0,0 @@ -"""Tests for the distributed metrics.""" - -import os -import re -import shutil -import signal -import subprocess -import unittest - -import tensorflow as tf - -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 - - -def build_fake_ut(): - fake_ut = """ -import numpy as np -import unittest -from neural_compressor.metric import METRICS -from neural_compressor.metric.f1 import evaluate -from neural_compressor.metric.evaluate_squad import evaluate as evaluate_squad -from neural_compressor.metric import bleu -import horovod.tensorflow as hvd -import os -import json -import tensorflow as tf - -tf.compat.v1.enable_eager_execution() - -class TestMetrics(unittest.TestCase): - - @classmethod - def setUpClass(cls): - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - hvd.init() - if hvd.rank() == 0: - if os.path.exists('anno_0.yaml'): - os.remove('anno_0.yaml') - if os.path.exists('anno_1.yaml'): - os.remove('anno_1.yaml') - if os.path.exists('anno_2.yaml'): - os.remove('anno_2.yaml') - while hvd.rank() == 1: - if not os.path.exists('anno_0.yaml') \\ - and not os.path.exists('anno_1.yaml') \\ - and not os.path.exists('anno_2.yaml'): - break - - @classmethod - def tearDownClass(cls): - if hvd.rank() == 1: - if os.path.exists('anno_0.yaml'): - os.remove('anno_0.yaml') - if os.path.exists('anno_1.yaml'): - os.remove('anno_1.yaml') - if os.path.exists('anno_2.yaml'): - os.remove('anno_2.yaml') - - def test_mIOU(self): - metrics = METRICS('tensorflow') - miou = metrics['mIOU']() - miou.hvd = hvd - if hvd.rank() == 0: - preds = np.array([0]) - labels = np.array([0]) - else: - preds = np.array([0, 1, 1]) - labels = np.array([1, 0, 1]) - miou.update(preds, labels) - self.assertAlmostEqual(miou.result(), 0.33333334) - - miou.reset() - if hvd.rank() == 0: - preds = np.array([0, 0]) - labels = np.array([0, 1]) - else: - preds = np.array([1, 1]) - labels = np.array([1, 1]) - miou.update(preds, labels) - self.assertAlmostEqual(miou.result(), 0.58333333) - - def test_onnxrt_GLUE(self): - metrics = METRICS('onnxrt_qlinearops') - glue = metrics['GLUE']('mrpc') - glue.hvd = hvd - preds = [np.array( - [[-3.2443411, 3.0909934], - [2.0500996, -2.3100944], - [1.870293 , -2.0741048], - [-2.8377204, 2.617834], - [2.008347 , -2.0215416], - [-2.9693947, 2.7782154], - [-2.9949608, 2.7887983], - [-3.0623112, 2.8748074]]) - ] - labels = [np.array([1, 0, 0, 1, 0, 1, 0, 1])] - self.assertRaises(NotImplementedError, glue.update, preds, labels) - preds_2 = [np.array( - [[-3.1296735, 2.8356276], - [-3.172515 , 2.9173899], - [-3.220131 , 3.0916846], - [2.1452675, -1.9398905], - [1.5475761, -1.9101546], - [-2.9797182, 2.721741], - [-3.2052834, 2.9934788], - [-2.7451005, 2.622343]]) - ] - labels_2 = [np.array([1, 1, 1, 0, 0, 1, 1, 1])] - self.assertRaises(NotImplementedError, glue.update, preds_2, labels_2) - glue.reset() - self.assertRaises(NotImplementedError, 
glue.update, preds, labels) - - def test_tensorflow_F1(self): - metrics = METRICS('tensorflow') - F1 = metrics['F1']() - F1.hvd = hvd - if hvd.rank() == 0: - preds = [1, 1, 1, 1] - labels = [0, 1, 1, 1] - else: - preds = [1, 1, 1, 1, 1, 1] - labels = [1, 1, 1, 1, 1, 1] - - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.9) - - def test_squad_evaluate(self): - evaluate.hvd = hvd - label = [{'paragraphs':\\ - [{'qas':[{'answers': [{'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}], \\ - 'question': 'Which NFL team represented the AFC at Super Bowl 50?', \\ - 'id': '56be4db0acb8001400a502ec'}]}]}] - preds = {'56be4db0acb8001400a502ec': 'Denver Broncos'} - f1 = evaluate(preds, label) - self.assertEqual(f1, 100.) - dataset = [{'paragraphs':\\ - [{'qas':[{'answers': [{'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}], \\ - 'question': 'Which NFL team represented the AFC at Super Bowl 50?', \\ - 'id': '56be4db0acb8001400a502ec'}]}]}] - predictions = {'56be4db0acb8001400a502ec': 'Denver Broncos'} - f1_squad = evaluate_squad(dataset,predictions) - self.assertEqual(f1_squad['f1'], 100.) - self.assertEqual(f1_squad['exact_match'], 100.) - - def test_pytorch_F1(self): - metrics = METRICS('pytorch') - F1 = metrics['F1']() - F1.hvd = hvd - F1.reset() - if hvd.rank() == 0: - preds = [1] - labels = [2] - else: - preds = [1] - labels = [1, 1] - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.8) - - def test_tensorflow_topk(self): - metrics = METRICS('tensorflow') - top1 = metrics['topk']() - top1.reset() - self.assertEqual(top1.result(), 0) - top2 = metrics['topk'](k=2) - top3 = metrics['topk'](k=3) - top1.hvd = hvd - top2.hvd = hvd - top3.hvd = hvd - - if hvd.rank() == 0: - predicts = [[0, 0.2, 0.9, 0.3]] - labels = [[0, 1, 0, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - sparse_labels = [2] - single_label = 2 - else: - predicts = [[0, 0.9, 0.8, 0]] - labels = [[0, 0, 1, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - sparse_labels = [2] - single_label = 2 - - # test functionality of one-hot label - top1.update(predicts, labels) - top2.update(predicts, labels) - top3.update(predicts, labels) - self.assertEqual(top1.result(), 0.0) - self.assertEqual(top2.result(), 0.5) - self.assertEqual(top3.result(), 1) - - # test functionality of sparse label - top1.reset() - top2.reset() - top3.reset() - top1.update(predicts, sparse_labels) - top2.update(predicts, sparse_labels) - top3.update(predicts, sparse_labels) - self.assertEqual(top1.result(), 0.5) - self.assertEqual(top2.result(), 1) - self.assertEqual(top3.result(), 1) - - # test functionality of single label - top1.reset() - top2.reset() - top3.reset() - top1.update(single_predict, single_label) - top2.update(single_predict, single_label) - top3.update(single_predict, single_label) - self.assertEqual(top1.result(), 1) - self.assertEqual(top2.result(), 1) - self.assertEqual(top3.result(), 1) - - def test_tensorflow_mAP(self): - metrics = METRICS('tensorflow') - fake_dict = 'dog: 1' - if hvd.rank() == 0: - with open('anno_0.yaml', 'w', encoding = "utf-8") as f: - f.write(fake_dict) - while True: - file_exists = hvd.allgather_object(os.path.exists('anno_0.yaml')) - if file_exists == [True, True]: - break - mAP = metrics['mAP']('anno_0.yaml') - mAP.hvd = hvd - self.assertEqual(mAP.category_map_reverse['dog'], 1) - detection = [ - 
np.array([[5]]), - np.array([[5]]), - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([['a', 'b']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 1.]]) - ] - ground_truth = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[b'dog', b'dog']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - mAP.result() - self.assertEqual(format(mAP.result(), '.5f'), - '0.00000') - - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - detection_2 = [ - np.array([[8]]), - np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ], - [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ], - [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ], - [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753], - [0.809729 , 0.5947042 , 0.8539927 , 0.62916476], - [0.7258591 , 0.08907133, 1. , 0.86224866], - [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ], - [0.32005906, 0.84334356, 1. , 1. ]]]), - np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\\ - 0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]), - np.array([[55., 55., 79., 55., 55., 67., 79., 82.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.56262296, 0.0015625 , 1. , 0.5431719 ], - [0.16374707, 0.60728127, 0.813911 , 0.77823436], - [0.5841452 , 0.21182813, 0.65156907, 0.24670312], - [0.8056206 , 0.048875 , 0.90124124, 0.1553125 ], - [0.6729742 , 0.09317187, 0.7696956 , 0.21203125], - [0.3848478 , 0.002125 , 0.61522245, 0.303 ], - [0.61548007, 0. , 0.7015925 , 0.097125 ], - [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375], - [0.6274239 , 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375 ], - [0.2673302 , 0.245625 , 0.3043794 , 0.27353126], - [0.7137705 , 0.15429688, 0.726815 , 0.17114063], - [0.6003747 , 0.25942189, 0.6438876 , 0.27320313], - [0.68845433, 0.13501562, 0.714637 , 0.17245312], - [0.69358313, 0.10959375, 0.7043091 , 0.12409375], - [0.493911 , 0. 
, 0.72571427, 0.299 ], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\\ - 56, 50, 56, 56, 79, 57, 81]]), - np.array([b'000000397133.jpg']) - ] - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.9358696 , 0.07528409, 0.99891305, 0.25 ], - [0.8242174 , 0.3309659 , 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ], - [0.32582608, 0.8575 , 0.98426086, 0.9984659 ], - [0.77795655, 0.6268466 , 0.89930433, 0.73434657], - [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ], - [0.58473915, 0.75661933, 0.5998261 , 0.83579546], - [0.80391306, 0.6129829 , 0.8733478 , 0.66201705], - [0.8737391 , 0.6579546 , 0.943 , 0.7053693 ], - [0.775 , 0.6549716 , 0.8227391 , 0.6882955 ], - [0.8130869 , 0.58292615, 0.90526086, 0.62551135], - [0.7844348 , 0.68735796, 0.98182607, 0.83329546], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b'000000037777.jpg']) - ] - - mAP = metrics['mAP']() - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), '.5f'), - '0.20347') - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b'000000037777.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64]]), - np.array([b'000000037700.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000011.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000012.jpg']) - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. 
, 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test_tensorflow_VOCmAP(self): - metrics = METRICS('tensorflow') - fake_dict = 'dog: 1' - if hvd.rank() == 0: - with open('anno_1.yaml', 'w', encoding = "utf-8") as f: - f.write(fake_dict) - while True: - file_exists = hvd.allgather_object(os.path.exists('anno_1.yaml')) - if file_exists == [True, True]: - break - mAP = metrics['VOCmAP']('anno_1.yaml') - mAP.hvd = hvd - self.assertEqual(mAP.iou_thrs, 0.5) - self.assertEqual(mAP.map_points, 0) - self.assertEqual(mAP.category_map_reverse['dog'], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([['a', 'b']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - - mAP = metrics['VOCmAP']() - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - detection_2 = [ - np.array([[8]]), - np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ], - [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ], - [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ], - [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753], - [0.809729 , 0.5947042 , 0.8539927 , 0.62916476], - [0.7258591 , 0.08907133, 1. , 0.86224866], - [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ], - [0.32005906, 0.84334356, 1. , 1. ]]]), - np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\\ - 0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]), - np.array([[55., 55., 79., 55., 55., 67., 79., 82.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.56262296, 0.0015625 , 1. , 0.5431719 ], - [0.16374707, 0.60728127, 0.813911 , 0.77823436], - [0.5841452 , 0.21182813, 0.65156907, 0.24670312], - [0.8056206 , 0.048875 , 0.90124124, 0.1553125 ], - [0.6729742 , 0.09317187, 0.7696956 , 0.21203125], - [0.3848478 , 0.002125 , 0.61522245, 0.303 ], - [0.61548007, 0. , 0.7015925 , 0.097125 ], - [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375], - [0.6274239 , 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375 ], - [0.2673302 , 0.245625 , 0.3043794 , 0.27353126], - [0.7137705 , 0.15429688, 0.726815 , 0.17114063], - [0.6003747 , 0.25942189, 0.6438876 , 0.27320313], - [0.68845433, 0.13501562, 0.714637 , 0.17245312], - [0.69358313, 0.10959375, 0.7043091 , 0.12409375], - [0.493911 , 0. 
, 0.72571427, 0.299 ], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\\ - 56, 50, 56, 56, 79, 57, 81]]), - np.array([b'000000397133.jpg']) - ] - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.9358696 , 0.07528409, 0.99891305, 0.25 ], - [0.8242174 , 0.3309659 , 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ], - [0.32582608, 0.8575 , 0.98426086, 0.9984659 ], - [0.77795655, 0.6268466 , 0.89930433, 0.73434657], - [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ], - [0.58473915, 0.75661933, 0.5998261 , 0.83579546], - [0.80391306, 0.6129829 , 0.8733478 , 0.66201705], - [0.8737391 , 0.6579546 , 0.943 , 0.7053693 ], - [0.775 , 0.6549716 , 0.8227391 , 0.6882955 ], - [0.8130869 , 0.58292615, 0.90526086, 0.62551135], - [0.7844348 , 0.68735796, 0.98182607, 0.83329546], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b'000000037777.jpg']) - ] - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), '.5f'), - '0.20347') - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b'000000037777.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64]]), - np.array([b'000000037700.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000011.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000012.jpg']) - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. 
, 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test_tensorflow_COCOmAP(self): - metrics = METRICS('tensorflow') - fake_dict = 'dog: 1' - if hvd.rank() == 0: - with open('anno_2.yaml', 'w', encoding = "utf-8") as f: - f.write(fake_dict) - while True: - file_exists = hvd.allgather_object(os.path.exists('anno_2.yaml')) - if file_exists == [True, True]: - break - mAP = metrics['COCOmAP']('anno_2.yaml') - mAP.hvd = hvd - self.assertEqual(mAP.category_map_reverse['dog'], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([['a', 'b']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - mAP = metrics['COCOmAP']() - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - detection_2 = [ - np.array([[8]]), - np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ], - [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ], - [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ], - [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753], - [0.809729 , 0.5947042 , 0.8539927 , 0.62916476], - [0.7258591 , 0.08907133, 1. , 0.86224866], - [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ], - [0.32005906, 0.84334356, 1. , 1. ]]]), - np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\\ - 0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]), - np.array([[55., 55., 79., 55., 55., 67., 79., 82.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.56262296, 0.0015625 , 1. , 0.5431719 ], - [0.16374707, 0.60728127, 0.813911 , 0.77823436], - [0.5841452 , 0.21182813, 0.65156907, 0.24670312], - [0.8056206 , 0.048875 , 0.90124124, 0.1553125 ], - [0.6729742 , 0.09317187, 0.7696956 , 0.21203125], - [0.3848478 , 0.002125 , 0.61522245, 0.303 ], - [0.61548007, 0. , 0.7015925 , 0.097125 ], - [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375], - [0.6274239 , 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375 ], - [0.2673302 , 0.245625 , 0.3043794 , 0.27353126], - [0.7137705 , 0.15429688, 0.726815 , 0.17114063], - [0.6003747 , 0.25942189, 0.6438876 , 0.27320313], - [0.68845433, 0.13501562, 0.714637 , 0.17245312], - [0.69358313, 0.10959375, 0.7043091 , 0.12409375], - [0.493911 , 0. 
, 0.72571427, 0.299 ], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\\ - 56, 50, 56, 56, 79, 57, 81]]), - np.array([b'000000397133.jpg']) - ] - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.9358696 , 0.07528409, 0.99891305, 0.25 ], - [0.8242174 , 0.3309659 , 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ], - [0.32582608, 0.8575 , 0.98426086, 0.9984659 ], - [0.77795655, 0.6268466 , 0.89930433, 0.73434657], - [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ], - [0.58473915, 0.75661933, 0.5998261 , 0.83579546], - [0.80391306, 0.6129829 , 0.8733478 , 0.66201705], - [0.8737391 , 0.6579546 , 0.943 , 0.7053693 ], - [0.775 , 0.6549716 , 0.8227391 , 0.6882955 ], - [0.8130869 , 0.58292615, 0.90526086, 0.62551135], - [0.7844348 , 0.68735796, 0.98182607, 0.83329546], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b'000000037777.jpg']) - ] - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.14149') - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), '.5f'), - '0.13366') - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.14149') - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b'000000037777.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64]]), - np.array([b'000000037700.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000011.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000012.jpg']) - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. 
, 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test__accuracy(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [[0, 0]] - labels2 = [[0, 1]] - predicts3 = [[[0, 1], [0, 0], [0, 1]]] - labels3 = [[[0, 1], [0, 1], [1, 0]]] - predicts4 = [[0.2, 0.8]] - labels4 = [0] - else: - predicts1 = [0, 1, 1] - labels1 = [1, 1, 1] - predicts2 = [[0, 0]] - labels2 = [[1, 1]] - predicts3 = [[[0, 1], [0, 1], [0, 1]]] - labels3 = [[[1, 0], [1, 0], [1, 0]]] - predicts4 = [[0.1, 0.9], [0.3, 0.7], [0.4, 0.6]] - labels4 = [1, 0, 0] - - metrics = METRICS('tensorflow') - acc = metrics['Accuracy']() - acc.hvd = hvd - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(1, 1) - self.assertEqual(acc.result(), 1.0) - - wrong_predictions = [1, 0, 0] - wrong_labels = [[0, 1, 1]] - self.assertRaises(ValueError, acc.update, wrong_predictions, wrong_labels) - - metrics = METRICS('pytorch') - acc = metrics['Accuracy']() - acc.hvd = hvd - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - - def test_mse(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [1, 1] - labels2 = [0, 1] - else: - predicts1 = [0, 0, 1] - labels1 = [1, 0, 0] - predicts2 = [1, 1] - labels2 = [1, 0] - - metrics = METRICS('tensorflow') - mse = metrics['MSE'](compare_label=False) - mse.hvd = hvd - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - metrics = METRICS('pytorch') - mse = metrics['MSE']() - mse.hvd = hvd - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - def test_mae(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [1, 1] - labels2 = [1, 1] - else: - predicts1 = [0, 0, 1] - labels1 = [1, 0, 0] - predicts2 = [1, 1] - labels2 = [1, 0] - - metrics = METRICS('tensorflow') - mae = metrics['MAE']() - mae.hvd = hvd - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - if hvd.rank() == 1: - mae.update(0, 1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.8) - mae.reset() - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.25) - - metrics = METRICS('pytorch') - mae = metrics['MAE']() - mae.hvd = hvd - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.5) - - self.assertRaises(AssertionError, mae.update, [1], [1, 2]) - self.assertRaises(AssertionError, mae.update, 1, [1,2]) - 
self.assertRaises(AssertionError, mae.update, [1, 2], [1]) - self.assertRaises(AssertionError, mae.update, 1, np.array([1,2])) - - def test_rmse(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [1] - predicts2 = [1, 1] - labels2 = [1, 0] - else: - predicts1 = [0, 0, 1] - labels1 = [0, 0, 0] - predicts2 = [1, 1] - labels2 = [0, 0] - - metrics = METRICS('tensorflow') - rmse = metrics['RMSE']() - rmse.hvd = hvd - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.reset() - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.75)) - - metrics = METRICS('pytorch') - rmse = metrics['RMSE']() - rmse.hvd = hvd - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.5)) - - def test_loss(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [1, 0, 1] - labels2 = [1, 0, 0] - predicts3 = [1, 0] - labels3 = [0, 1] - else: - predicts1 = [0, 0, 1] - labels1 = [1, 0, 0] - predicts2 = [1] - labels2 = [0] - predicts3 = [0, 1] - labels3 = [0, 0] - - metrics = METRICS('tensorflow') - loss = metrics['Loss']() - loss.hvd = hvd - loss.update(predicts1, labels1) - loss_result = loss.result() - self.assertEqual(loss_result, 0.5) - loss.update(predicts2, labels2) - loss_result = loss.result() - self.assertEqual(loss_result, 0.625) - loss.reset() - loss.update(predicts3, labels3) - self.assertEqual(loss.result(), 0.5) - - metrics = METRICS('pytorch') - loss = metrics['Loss']() - loss.hvd = hvd - loss.update(predicts1, labels1) - loss_result = loss.result() - self.assertEqual(loss_result, 0.5) - loss.update(predicts2, labels2) - loss_result = loss.result() - self.assertEqual(loss_result, 0.625) - loss.reset() - loss.update(predicts3, labels3) - self.assertEqual(loss.result(), 0.5) - - -if __name__ == "__main__": - unittest.main() - """ - - with open("fake_ut.py", "w", encoding="utf-8") as f: - f.write(fake_ut) - - -class TestDistributed(unittest.TestCase): - @classmethod - def setUpClass(cls): - build_fake_ut() - - @classmethod - def tearDownClass(cls): - os.remove("fake_ut.py") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") - def test_distributed(self): - distributed_cmd = "horovodrun -np 2 python fake_ut.py" - p = subprocess.Popen( - distributed_cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True - ) # nosec - try: - out, error = p.communicate() - matches = re.findall(r"FAILED", error.decode("utf-8")) - self.assertEqual(matches, []) - - matches = re.findall(r"OK", error.decode("utf-8")) - self.assertTrue(len(matches) > 0) - - except KeyboardInterrupt: - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - assert 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tf_spr_base_distributed_pruning.py b/test/tfnewapi/test_tf_spr_base_distributed_pruning.py deleted file mode 100644 index 9a52f3bc5e5..00000000000 --- a/test/tfnewapi/test_tf_spr_base_distributed_pruning.py +++ /dev/null @@ -1,464 +0,0 @@ -"""Tests for the TensorFlow pruning with distributed training and inference.""" - -import hashlib -import os -import re -import shutil -import signal -import subprocess -import sys -import time -import unittest -from 
platform import platform, system - -import cpuinfo -import tensorflow as tf - -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.utils import logger - - -def build_fake_ut(): - fake_ut = ''' -from __future__ import print_function -import tensorflow -from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation -from tensorflow.keras.layers import AveragePooling2D, Input, Flatten -from tensorflow.keras.callbacks import LearningRateScheduler -from tensorflow.keras.callbacks import ReduceLROnPlateau -from tensorflow.keras.regularizers import l2 -from tensorflow.keras.models import Model -from tensorflow.keras.datasets import cifar10 -import numpy as np -import os -import sys -import cpuinfo -import shutil -import unittest -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.utils import logger -from neural_compressor.utils.utility import CpuInfo - -def lr_schedule(epoch): - """Learning Rate Schedule - Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs. - Called automatically every epoch as part of callbacks during training. - # Arguments - epoch (int): The number of epochs - # Returns - lr (float32): learning rate - """ - lr = 1e-3 - if epoch > 180: - lr *= 0.5e-3 - elif epoch > 160: - lr *= 1e-3 - elif epoch > 120: - lr *= 1e-2 - elif epoch > 80: - lr *= 1e-1 - print('Learning rate: ', lr) - return lr - -def resnet_layer(inputs, - num_filters=8, - kernel_size=3, - strides=1, - activation='relu', - batch_normalization=True, - conv_first=True): - """2D Convolution-Batch Normalization-Activation stack builder - # Arguments - inputs (tensor): input tensor from input image or previous layer - num_filters (int): Conv2D number of filters - kernel_size (int): Conv2D square kernel dimensions - strides (int): Conv2D square stride dimensions - activation (string): activation name - batch_normalization (bool): whether to include batch normalization - conv_first (bool): conv-bn-activation (True) or - bn-activation-conv (False) - # Returns - x (tensor): tensor as input to the next layer - """ - conv = Conv2D(num_filters, - kernel_size=kernel_size, - strides=strides, - padding='same', - use_bias=True, - kernel_initializer='he_normal', - kernel_regularizer=l2(1e-4)) - - x = inputs - if conv_first: - x = conv(x) - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = Activation(activation)(x) - else: - # if batch_normalization: - # x = BatchNormalization()(x) - if activation is not None: - x = Activation(activation)(x) - x = conv(x) - return x - -def resnet_v2(input_shape, depth, num_classes=10): - """ResNet Version 2 Model builder [b] - Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as - bottleneck layer - First shortcut connection per layer is 1 x 1 Conv2D. - Second and onwards shortcut connection is identity. - At the beginning of each stage, the feature map size is halved (downsampled) - by a convolutional layer with strides=2, while the number of filter maps is - doubled. Within each stage, the layers have the same number filters and the - same filter map sizes. 
- Features maps sizes: - conv1 : 32x32, 16 - stage 0: 32x32, 64 - stage 1: 16x16, 128 - stage 2: 8x8, 256 - # Arguments - input_shape (tensor): shape of input image tensor - depth (int): number of core convolutional layers - num_classes (int): number of classes (CIFAR10 has 10) - # Returns - model (Model): Keras model instance - """ - if (depth - 2) % 9 != 0: - raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])') - # Start model definition. - num_filters_in = 4 - num_res_blocks = int((depth - 2) / 9) - - inputs = Input(shape=input_shape) - # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths - x = resnet_layer(inputs=inputs, - num_filters=num_filters_in, - conv_first=True) - - # Instantiate the stack of residual units - for stage in range(1): - for res_block in range(num_res_blocks): - activation = 'relu' - batch_normalization = True - strides = 1 - if stage == 0: - num_filters_out = num_filters_in * 4 - if res_block == 0: # first layer and first stage - activation = None - batch_normalization = False - else: - num_filters_out = num_filters_in * 2 - if res_block == 0: # first layer but not first stage - strides = 2 # downsample - - # bottleneck residual unit - y = resnet_layer(inputs=x, - num_filters=num_filters_in, - kernel_size=1, - strides=strides, - activation=activation, - batch_normalization=batch_normalization, - conv_first=False) - y = resnet_layer(inputs=y, - num_filters=num_filters_in, - conv_first=False) - - y = resnet_layer(inputs=y, - num_filters=num_filters_out, - kernel_size=1, - conv_first=False) - if res_block == 0: - # linear projection residual shortcut connection to match - # changed dims - x = resnet_layer(inputs=x, - num_filters=num_filters_out, - kernel_size=1, - strides=strides, - activation=None, - batch_normalization=False) - x = tensorflow.keras.layers.add([x, y]) - - num_filters_in = num_filters_out - - # Add classifier on top. - # v2 has BN-ReLU before Pooling - # x = BatchNormalization()(x) - x = Activation('relu')(x) - x = AveragePooling2D(pool_size=8)(x) - y = Flatten()(x) - outputs = Dense(num_classes, - activation='softmax', - kernel_initializer='he_normal')(y) - - # Instantiate model. - model = Model(inputs=inputs, outputs=outputs) - return model - -# Training parameters -batch_size = 128 # orig paper trained all networks with batch_size=128 -epochs = 1 -num_classes = 10 - -# Subtracting pixel mean improves accuracy -subtract_pixel_mean = True - -n = 1 -depth = n * 9 + 2 - -def train(): - # Load the CIFAR10 data. - (x_train, y_train), (x_test, y_test) = cifar10.load_data() - - # Input image dimensions. - input_shape = x_train.shape[1:] - # Normalize data. - x_train = x_train.astype('float32') / 255 - x_test = x_test.astype('float32') / 255 - - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) - y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) - - model = resnet_v2(input_shape=input_shape, depth=depth) - - model.compile(loss='categorical_crossentropy', - optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.01), - metrics=['accuracy']) - model.summary() - - lr_scheduler = LearningRateScheduler(lr_schedule) - - lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), - cooldown=0, - patience=5, - min_lr=0.5e-6) - - callbacks = [lr_reducer, lr_scheduler] - - # Run training, with or without data augmentation. 
- model.fit(x_train, y_train, - batch_size=batch_size, - epochs=epochs, - validation_data=(x_test, y_test), - shuffle=True, - callbacks=callbacks) - - # Score trained model. - scores = model.evaluate(x_test, y_test, verbose=1) - print('Test loss:', scores[0]) - print('Test accuracy:', scores[1]) - model.save("baseline_model") - -class TrainDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = cifar10.load_data() - x_train, y_train = x_train[:100], y_train[:100] - x_train = x_train.astype('float32') / 255 - x_test = x_test.astype('float32') / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) - y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - self.train_images = x_train - self.train_labels = y_train - - def __len__(self): - return len(self.train_images) - - def __getitem__(self, idx): - return self.train_images[idx], self.train_labels[idx] - -class EvalDataset(object): - def __init__(self): - (x_train, y_train), (x_test, y_test) = cifar10.load_data() - - x_train = x_train.astype('float32') / 255 - x_test = x_test.astype('float32') / 255 - - # If subtract pixel mean is enabled - x_train_mean = np.mean(x_train, axis=0) - x_train -= x_train_mean - x_test -= x_train_mean - - # Convert class vectors to binary class matrices. - y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) - y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) - self.test_images = x_test - self.test_labels = y_test - - def __len__(self): - return len(self.test_images) - - def __getitem__(self, idx): - return self.test_images[idx], self.test_labels[idx] - -class TestTensorflowPruning(unittest.TestCase): - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\\n") - - def test_tensorflow_pruning(self): - from neural_compressor.experimental import Pruning, common - from neural_compressor.utils import logger - prune = Pruning("./fake_yaml.yaml") - prune.train_distributed = True - prune.evaluation_distributed = True - prune.train_dataloader = common.DataLoader(TrainDataset(), batch_size=16) - prune.eval_dataloader = common.DataLoader(EvalDataset(), batch_size=32) - prune.model = './baseline_model' - pruned_model = prune() - stats, sparsity = pruned_model.report_sparsity() - logger.info(stats) - logger.info(sparsity) - self.assertGreater(sparsity, 20) - self.assertGreater(prune.baseline_score, 0.729) - if bool(CpuInfo().bf16): - self.assertGreater(prune.last_score, 0.742) - else: - self.assertGreater(prune.last_score, 0.743) - - -if __name__ == '__main__': - unittest.main() - ''' - with open("fake_ut.py", "w", encoding="utf-8") as f: - f.write(fake_ut) - build_fake_yaml() - - -def build_fake_yaml(): - fake_yaml = """ - model: - name: resnet_v2_prune - framework: tensorflow - pruning: - train: - epoch: 4 - optimizer: - SGD: - learning_rate: 0.001 - momentum: 0.1 - nesterov: True - weight_decay: 0.1 - criterion: - CrossEntropyLoss: - reduction: sum - approach: - weight_compression: - initial_sparsity: 0.0 - target_sparsity: 0.2 - start_epoch: 0 - end_epoch: 4 - pruners: - - !Pruner - 
start_epoch: 1 - end_epoch: 3 - prune_type: basic_magnitude - evaluation: - accuracy: - metric: - topk: 1 - """ - with open("fake_yaml.yaml", "w", encoding="utf-8") as f: - f.write(fake_yaml) - - -def dir_md5_check(dir): - files_list = [] - md5_list = [] - - def get_files_list(path, list_name): - for file in sorted(os.listdir(path)): - file_path = os.path.join(path, file) - if os.path.isdir(file_path): - get_files_list(file_path, list_name) - else: - list_name.append(file_path) - - get_files_list(dir, files_list) - for file_path in files_list: - with open(file_path, "rb") as fp: - data = fp.read() - file_md5 = hashlib.md5(data).hexdigest() - md5_list.append(file_md5) - return md5_list - - -class TestDistributed(unittest.TestCase): - dst_path = "./baseline_model" - - @classmethod - def setUpClass(cls): - build_fake_ut() - build_fake_yaml() - if system().lower() == "windows": - src_path = "C:\\tmp\\.neural_compressor\\inc_ut\\resnet_v2\\" - elif system().lower() == "linux": - src_path = "/tmp/.neural_compressor/inc_ut/resnet_v2/" - if os.path.exists(src_path): - shutil.copytree(src_path, os.getcwd(), dirs_exist_ok=True) - if not os.path.exists(cls.dst_path): - raise FileNotFoundError(f"'{cls.dst_path}' doesn't exist.") - elif dir_md5_check(cls.dst_path) != [ - "65625fef42f44e6853d4d6d5e4188a49", - "a783396652bf62db3db4c9f647953175", - "c7259753419d9fc053df5b2059aef8c0", - "77f2a1045cffee9f6a43f2594a5627ba", - ]: - logger.warning("resnet_v2 baseline_model md5 verification failed.") - raise ValueError(f"'{cls.dst_path}' md5 verification failed.") - else: - logger.info("resnet_v2 baseline_model for TF-spr-base distributed pruning md5 verification succeeded.") - - @classmethod - def tearDownClass(cls): - os.remove("fake_ut.py") - os.remove("fake_yaml.yaml") - shutil.rmtree("nc_workspace", ignore_errors=True) - shutil.rmtree("baseline_model", ignore_errors=True) - - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") - def test_tf_distributed_pruning(self): - distributed_cmd = "horovodrun -np 2 python fake_ut.py" - p = subprocess.Popen( - distributed_cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True - ) - try: - out, _ = p.communicate() - for line in out.splitlines(): - print(line.decode().strip()) - matches = re.findall(r"FAILED", out.decode("utf-8")) - self.assertEqual(matches, []) - - matches = re.findall(r"OK", out.decode("utf-8")) - self.assertTrue(len(matches) > 0) - except KeyboardInterrupt: - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - assert 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tfnewapi/test_tf_spr_base_distributed_tf_dataloader.py b/test/tfnewapi/test_tf_spr_base_distributed_tf_dataloader.py deleted file mode 100644 index 5003b876c0c..00000000000 --- a/test/tfnewapi/test_tf_spr_base_distributed_tf_dataloader.py +++ /dev/null @@ -1,667 +0,0 @@ -"""Tests for Distributed TensorFlow Dataloader.""" - -import collections -import json -import os -import shutil -import sys -import unittest - -import cpuinfo -import numpy as np -import tensorflow as tf - -from neural_compressor import data -from neural_compressor.adaptor.tf_utils.util import version1_lt_version2 -from neural_compressor.data import DATALOADERS, 
TRANSFORMS, Datasets -from neural_compressor.data.dataloaders.dataloader import DataLoader -from neural_compressor.utils import logger -from neural_compressor.utils.create_obj_from_config import create_dataloader, create_dataset - - -@unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") -class TestDistributedTFDataDataloader(unittest.TestCase): - def setUp(self): - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - tf.compat.v1.enable_eager_execution() - self.dataset = tf.data.Dataset.from_tensors((tf.ones([3, 224, 224]), tf.ones([1000]))).repeat(600) - self.count = 0 - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - def check_tf_dataset_with_batch_raise(self, batch_size, last_batch, distributed): - dataset_with_batch = ( - tf.data.Dataset.from_tensors((tf.ones([3, 224, 224]), tf.ones([1000]))).repeat(600).batch(2) - ) - dataloader = DATALOADERS["tensorflow"]( - dataset_with_batch, batch_size=batch_size, last_batch=last_batch, distributed=distributed - ) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def check_distributed_raise(self, batch_size, last_batch, distributed): - dataloader = DATALOADERS["tensorflow"]( - self.dataset, batch_size=batch_size, last_batch=last_batch, distributed=distributed - ) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_dis_tf_data_dataloader_1(self): - self.assertRaises(TypeError, self.check_tf_dataset_with_batch_raise, 32, "rollover", True) - - def test_dis_tf_data_dataloader_2(self): - self.assertRaises(TypeError, self.check_tf_dataset_with_batch_raise, 32, "no_rollover", True) - - def test_dis_tf_data_dataloader_3(self): - self.assertRaises(TypeError, self.check_tf_dataset_with_batch_raise, 1, "rollover", True) - - def test_dis_tf_data_dataloader_4(self): - self.assertRaises(TypeError, self.check_tf_dataset_with_batch_raise, 1, "no_rollover", True) - - def test_dis_tf_data_dataloader_5(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 32, "rollover", True) - - def test_dis_tf_data_dataloader_6(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 32, "no_rollover", True) - - def test_dis_tf_data_dataloader_7(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 1, "rollover", True) - - def test_dis_tf_data_dataloader_8(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 1, "no_rollover", True) - - def test_dis_tf_data_dataloader_9(self): - batch_size = 32 - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch="rollover") - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - 
self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_dis_tf_data_dataloader_10(self): - batch_size = 32 - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch="no_rollover") - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_dis_tf_data_dataloader_11(self): - batch_size = 1 - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch="rollover") - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_dis_tf_data_dataloader_12(self): - batch_size = 1 - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch="no_rollover") - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_dis_tf_data_dataloader_13(self): - batch_size = 600 - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch="rollover") - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_dis_tf_data_dataloader_14(self): - batch_size = 600 - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch="no_rollover") - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (3, 224, 224)) - self.assertEqual(x[-1].shape, (3, 224, 224)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - -@unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") -class TestDefaultDataLoaderSequentialSampler(unittest.TestCase): - @classmethod - def tearDownClass(cls): - if os.path.exists("minist"): - shutil.rmtree("minist") - - def setUp(self): - self.count = 0 - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - def 
check_get_len_raise(self, batch_size, last_batch, distributed): - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": last_batch, - "distributed": distributed, - } - dataloader = create_dataloader("tensorflow", dataloader_args) - len_dataloader = len(dataloader) - - def check_distributed_raise(self, batch_size, last_batch, distributed): - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": last_batch, - "distributed": distributed, - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_sequential_sampler1(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 32, "rollover", True) - - def test_sequential_sampler2(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 32, "no_rollover", True) - - def test_sequential_sampler3(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 1, "rollover", True) - - def test_sequential_sampler4(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 1, "no_rollover", True) - - def test_sequential_sampler5(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 32, "rollover", True) - - def test_sequential_sampler6(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 32, "no_rollover", True) - - def test_sequential_sampler7(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 1, "rollover", True) - - def test_sequential_sampler8(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 1, "no_rollover", True) - - def test_sequential_sampler9(self): - batch_size = 3332 - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": "rollover", - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_sequential_sampler10(self): - batch_size = 3332 - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": "no_rollover", - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 
24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_sequential_sampler11(self): - batch_size = 1 - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": "rollover", - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_sequential_sampler12(self): - batch_size = 1 - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": "no_rollover", - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_sequential_sampler13(self): - batch_size = 10000 - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": "rollover", - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - def test_sequential_sampler14(self): - batch_size = 10000 - dataloader_args = { - "batch_size": batch_size, - "dataset": {"MNIST": {"root": "./minist", "train": False, "download": True}}, - "transform": {"Resize": {"size": 24}}, - "filter": None, - "last_batch": "no_rollover", - } - dataloader = create_dataloader("tensorflow", dataloader_args) - for batch in dataloader: - x, y = batch - if self.count < len(dataloader) - 1: - self.assertEqual(len(x), batch_size) - else: - self.assertTrue(len(x) == dataloader.dis_dataset_len % batch_size or len(x) == batch_size) - self.assertEqual(x[0].shape, (24, 24)) - self.assertEqual(x[-1].shape, (24, 24)) - self.assertIsInstance(x, np.ndarray) - self.count += 1 - - -@unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") -class TestDefaultDataLoaderIterableSampler(unittest.TestCase): - class iter_dataset(object): - def __iter__(self): - sample_size = 250 - for i in range(1, sample_size + 1): - yield np.array([i]) - - def setUp(self): - self.rank = 0 - self.size = 1 - self.count = 1 - self.dataset = self.iter_dataset() - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - def 
check_get_len_raise(self, batch_size, last_batch, distributed): - dataloader = DATALOADERS["tensorflow"]( - self.dataset, batch_size=batch_size, last_batch=last_batch, distributed=distributed - ) - len_dataloader = len(dataloader) - - def check_distributed_raise(self, batch_size, last_batch, distributed): - dataloader = DATALOADERS["tensorflow"]( - self.dataset, batch_size=batch_size, last_batch=last_batch, distributed=distributed - ) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - def test_iterable_sampler1(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 32, "rollover", True) - - def test_iterable_sampler2(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 32, "no_rollover", True) - - def test_iterable_sampler3(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 1, "rollover", True) - - def test_iterable_sampler4(self): - self.assertRaises(EnvironmentError, self.check_get_len_raise, 1, "no_rollover", True) - - def test_iterable_sampler5(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 32, "rollover", True) - - def test_iterable_sampler6(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 32, "no_rollover", True) - - def test_iterable_sampler7(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 1, "rollover", True) - - def test_iterable_sampler8(self): - self.assertRaises(EnvironmentError, self.check_distributed_raise, 1, "no_rollover", True) - - def test_iterable_sampler9(self): - batch_size = 128 - last_batch = "rollover" - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch=last_batch) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - def test_iterable_sampler10(self): - batch_size = 128 - last_batch = "no_rollover" - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch=last_batch) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - def test_iterable_sampler11(self): - batch_size = 1 - last_batch = "rollover" - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch=last_batch) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + 
self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - def test_iterable_sampler12(self): - batch_size = 1 - last_batch = "no_rollover" - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch=last_batch) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - def test_iterable_sampler13(self): - batch_size = 1000 - last_batch = "rollover" - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch=last_batch) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - def test_iterable_sampler14(self): - batch_size = 1000 - last_batch = "no_rollover" - dataloader = DATALOADERS["tensorflow"](self.dataset, batch_size=batch_size, last_batch=last_batch) - for batch in dataloader: - if self.count < len(dataloader): - self.assertEqual(len(batch), batch_size) - self.assertEqual(self.count * batch_size * self.size - self.size + self.rank + 1, batch[-1][0]) - else: - self.assertTrue(len(batch) == dataloader.dis_dataset_len % batch_size or len(batch) == batch_size) - self.assertEqual( - ((self.count - 1) * batch_size + len(batch) - 1) * self.size + self.rank + 1, batch[-1][0] - ) - break - self.count += 1 - - -@unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.10.0"), "Only test TF 2.10.0 or above") -class TestTensorflowBertDataLoader(unittest.TestCase): - label = [ - { - "paragraphs0": [ - { - "context": "Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season.", - "qas": [ - { - "answers": [ - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - {"answer_start": 177, "text": "Denver Broncos"}, - ], - "question": "Which NFL team represented the AFC at Super Bowl 50?", - "id": "56be4db0acb8001400a502ec", - } - ], - } - ] - } - ] - unique_id = 1000000000 - input_ids = [ - 101, - 2029, - 5088, - 2136, - 3421, - 1996, - 10511, - 2012, - 3565, - 4605, - 2753, - 1029, - 102, - 3565, - 4605, - 2753, - 1007, - 2005, - 1996, - 2325, - 2161, - 1012, - 1996, - 2137, - 2374, - 3034, - 1006, - ] - input_mask = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - segment_ids = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - fake_json = json.dumps({"data": label, "version": "1.1"}) - with open("dev.json", "w") as f: - f.write(fake_json) - - @classmethod - def tearDownClass(cls): - os.remove("test.record") - os.remove("dev.json") - - def 
setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - def check_not_implement(self, batch_size, distributed): - with tf.io.TFRecordWriter("./test.record") as writer: - features = collections.OrderedDict() - features["unique_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list([self.unique_id]))) - features["input_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.input_ids))) - features["input_mask"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.input_mask))) - features["segment_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.segment_ids))) - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(tf_example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", {"bert": {"root": "test.record", "label_file": "./dev.json"}}, None, None - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=batch_size, distributed=distributed) - self.assertNotEqual(dataloader, None) - - def test_tf_bert_dataloader_1(self): - self.assertRaises(NotImplementedError, self.check_not_implement, 32, True) - - def test_tf_bert_dataloader_2(self): - batch_size = 128 - with tf.io.TFRecordWriter("./test.record") as writer: - features = collections.OrderedDict() - features["unique_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list([self.unique_id]))) - features["input_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.input_ids))) - features["input_mask"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.input_mask))) - features["segment_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.segment_ids))) - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(tf_example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", {"bert": {"root": "test.record", "label_file": "./dev.json"}}, None, None - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=batch_size) - for inputs, labels in dataloader: - self.assertEqual(inputs[0], "test.record") - self.assertEqual(inputs[1], batch_size) - self.assertEqual(len(labels), 1) - - def test_tf_bert_dataloader_3(self): - batch_size = 1 - with tf.io.TFRecordWriter("./test.record") as writer: - features = collections.OrderedDict() - features["unique_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list([self.unique_id]))) - features["input_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.input_ids))) - features["input_mask"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.input_mask))) - features["segment_ids"] = tf.train.Feature(int64_list=tf.train.Int64List(value=list(self.segment_ids))) - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(tf_example.SerializeToString()) - eval_dataset = create_dataset( - "tensorflow", {"bert": {"root": "test.record", "label_file": "./dev.json"}}, None, None - ) - dataloader = DATALOADERS["tensorflow"](dataset=eval_dataset, batch_size=batch_size) - for inputs, labels in dataloader: - self.assertEqual(inputs[0], "test.record") - self.assertEqual(inputs[1], batch_size) - self.assertEqual(len(labels), 1) - - -if __name__ == "__main__": - unittest.main()
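
For reference, the distributed tests removed in this patch all drive the same launcher pattern: the host test writes a self-contained fake_ut.py, spawns it under horovodrun -np 2, and greps the combined output for FAILED/OK. A minimal sketch of that pattern follows, assuming Horovod is installed and a fake_ut.py already exists in the working directory (both are assumptions for illustration, not part of this diff):

import os
import re
import signal
import subprocess
import unittest


class TestDistributedLauncher(unittest.TestCase):
    # Minimal sketch of the horovodrun launcher pattern used by the removed tests.
    # Assumes Horovod is installed and a self-contained fake_ut.py is present
    # in the working directory (assumptions for illustration only).

    def test_distributed(self):
        cmd = "horovodrun -np 2 python fake_ut.py"
        p = subprocess.Popen(
            cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True
        )  # nosec
        try:
            out, _ = p.communicate()
            text = out.decode("utf-8")
            # The run passes only if no rank printed FAILED and at least one printed OK.
            self.assertEqual(re.findall(r"FAILED", text), [])
            self.assertTrue(len(re.findall(r"OK", text)) > 0)
        except KeyboardInterrupt:
            # Clean up the whole process group if the run is interrupted.
            os.killpg(os.getpgid(p.pid), signal.SIGKILL)
            raise


if __name__ == "__main__":
    unittest.main()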