Skip to content

Commit e051a9f

Browse files
Authored commit: Allow lowercase 'interactive' as scenario name (#2315)
1 parent: 62bebd7 · commit: e051a9f

File tree

1 file changed

+4
-2
lines changed

1 file changed

+4
-2
lines changed

tools/submission/submission_checker.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -726,6 +726,7 @@
726726
"multistream": "MultiStream",
727727
"server": "Server",
728728
"offline": "Offline",
729+
"interactive": "Interactive",
729730
}
730731

731732
RESULT_FIELD = {
@@ -1487,7 +1488,8 @@ def check_accuracy_dir(config, model, path, verbose):
14871488
def extra_check_llm(mlperf_log, scenario, model):
14881489
if mlperf_log["requested_use_token_latencies"]:
14891490
if scenario not in ["Server", "Interactive"]:
1490-
# For offline, singlestream and multistream no further checks are necessary
1491+
# For offline, singlestream and multistream no further checks are
1492+
# necessary
14911493
return True
14921494
else:
14931495
limits = LLM_LATENCY_LIMITS[model][scenario]
@@ -1887,7 +1889,7 @@ def get_power_metric(config, scenario_fixed, log_path, is_valid, res):
18871889
samples_per_query = 8
18881890

18891891
if (scenario_fixed in ["MultiStream"]
1890-
) and scenario in ["SingleStream"]:
1892+
) and scenario in ["SingleStream"]:
18911893
power_metric = (
18921894
avg_power * power_duration * samples_per_query * 1000 / num_queries
18931895
)

0 commit comments

Comments (0)