diff --git a/docs/mlperf/inference/bert/run_sparse_models.sh b/docs/mlperf/inference/bert/run_sparse_models.sh
index a842ef50cb..1b9db28cca 100644
--- a/docs/mlperf/inference/bert/run_sparse_models.sh
+++ b/docs/mlperf/inference/bert/run_sparse_models.sh
@@ -5,11 +5,13 @@ zoo_stub_list=( \
 "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none" \
 "zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none" \
 "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none" \
-"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none" \
-"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none" \
 "zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni" \
 )
 
+#the below ones are segfaulting
+#"zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none" \
+#"zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none" \
+
 for stub in ${zoo_stub_list[@]}; do
 cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \
 --adr.python.version_min=3.8 \
@@ -23,7 +25,8 @@ cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performan
  --test_query_count=15000 \
  --adr.mlperf-inference-implementation.max_batchsize=384 \
  --results_dir=$HOME/results_dir \
- --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub"
+ --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \
+ --quiet"
 
 echo ${cmd}
 eval ${cmd}
@@ -39,8 +42,10 @@ cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performan
  --mode=performance \
  --execution_mode=valid \
  --adr.mlperf-inference-implementation.max_batchsize=384 \
+ --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 \
  --results_dir=$HOME/results_dir \
- --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub"
+ --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \
+ --quiet"
 echo ${cmd}
 eval ${cmd}
 done