From 26a5c1e6690686456189080b825cc06766894f06 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 27 Jul 2023 00:04:17 +0100 Subject: [PATCH 1/3] Update README_reference.md --- docs/mlperf/inference/bert/README_reference.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/mlperf/inference/bert/README_reference.md b/docs/mlperf/inference/bert/README_reference.md index e7c22bf780..aeef049a07 100644 --- a/docs/mlperf/inference/bert/README_reference.md +++ b/docs/mlperf/inference/bert/README_reference.md @@ -14,7 +14,7 @@ cm run script --tags=generate-run-cmds,inference,_find-performance,_all-scenario * Use `--device=cuda` to run the inference on Nvidia GPU * Use `--division=closed` to run all scenarios for the closed division (compliance tests are skipped for `_find-performance` mode) * Use `--category=datacenter` to run datacenter scenarios -* Use `--backend=pytorch` and `--backend=tf` to use the pytorch and tensorflow backends respectively +* Use `--backend=pytorch` and `--backend=tf` to use the pytorch and tensorflow backends respectively. `--backend=deepsparse` will run the sparse int8 model using deepsparse backend (not allowed to be submitted under closed division). * Use `--model=bert-99.9` to run the high accuracy constraint bert-99 model. 
But since we are running the fp32 model, this is redundant and instead, we can reuse the results of bert-99 for bert-99.9 From 0a3f6d02f9604a8fd3b4da3ad4c6bb167d4e7682 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 27 Jul 2023 00:09:35 +0100 Subject: [PATCH 2/3] Update run-cpu-bert-99-deepsparse.md --- .../run-cpu-bert-99-deepsparse.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/cm-mlops/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md b/cm-mlops/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md index 35660820ed..aeab2fc8a3 100644 --- a/cm-mlops/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md +++ b/cm-mlops/challenge/run-mlperf@home-v3.1-cpu/run-cpu-bert-99-deepsparse.md @@ -61,5 +61,24 @@ CM will install a new Python virtual environment in CM cache and will install al cm show cache ``` +### Do a test run to detect and record the system performance + +```bash +cm run script --tags=generate-run-cmds,inference,_find-performance \ +--model=bert-99 --implementation=reference --device=cpu --backend=deepsparse \ +--category=edge --division=open --quiet --scenario=Offline +``` + +### Do full accuracy and performance run + +``` +cm run script --tags=generate-run-cmds,inference,_submission --model=bert-99 \ +--device=cpu --implementation=reference --backend=deepsparse \ +--execution-mode=valid --results_dir=$HOME/results_dir \ +--category=edge --division=open --quiet --scenario=Offline +``` +### Generate and upload MLPerf submission + +Follow [this guide](https://github.com/mlcommons/ck/blob/master/docs/mlperf/inference/Submission.md) to generate the submission tree and upload your results. 
From 1872bb02f52af417f083161e176903b8427ba514 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Thu, 27 Jul 2023 08:34:54 +0100
Subject: [PATCH 3/3] Fix issue with get-dataset-openimages

---
 cm-mlops/script/get-dataset-openimages/run.sh | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/cm-mlops/script/get-dataset-openimages/run.sh b/cm-mlops/script/get-dataset-openimages/run.sh
index 841e8ac46e..94841fbf4e 100644
--- a/cm-mlops/script/get-dataset-openimages/run.sh
+++ b/cm-mlops/script/get-dataset-openimages/run.sh
@@ -27,5 +27,8 @@ else
   test $? -eq 0 || exit 1
 fi
 cd ${INSTALL_DIR}
-ln -s ../ open-images-v6-mlperf
+if [[ ! -e "open-images-v6-mlperf" ]]; then
+  ln -s ../ open-images-v6-mlperf
+fi
+
 test $? -eq 0 || exit 1