From 9671c3f389d66e50a22e108341aae6698679b295 Mon Sep 17 00:00:00 2001
From: Arjun Suresh
Date: Sat, 29 Jul 2023 09:42:47 +0100
Subject: [PATCH] Added an option to avoid max_duration - needed for very slow
 runs

---
 .../script/generate-mlperf-inference-user-conf/customize.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/cm-mlops/script/generate-mlperf-inference-user-conf/customize.py b/cm-mlops/script/generate-mlperf-inference-user-conf/customize.py
index a5b226768c..73b4276daf 100644
--- a/cm-mlops/script/generate-mlperf-inference-user-conf/customize.py
+++ b/cm-mlops/script/generate-mlperf-inference-user-conf/customize.py
@@ -51,6 +51,7 @@ def preprocess(i):
         test_list.remove("TEST04")
     if "gpt-" in env['CM_MODEL']:
         test_list.remove("TEST05")
+        test_list.remove("TEST04")
 
     scenario = env['CM_MLPERF_LOADGEN_SCENARIO']
     state['RUN'][scenario] = {}
@@ -184,7 +185,8 @@ def preprocess(i):
 
         else:
             if scenario == "MultiStream" or scenario == "SingleStream":
-                user_conf += ml_model_name + "." + scenario + ".max_duration = 660000 \n"
+                if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() not in [ "no", "false" ]:
+                    user_conf += ml_model_name + "." + scenario + ".max_duration = 660000 \n"
             if scenario == "MultiStream":
                 user_conf += ml_model_name + "." + scenario + ".min_query_count = 662" + "\n"
             if short_ranging: