Skip to content

[Nightly] Add op performance regression check #1622

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
153 changes: 153 additions & 0 deletions .github/scripts/op_calculate_best_perf.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
"""
Update the op performance baseline, keeping the better (lower) time for each case.
# usage
python op_calculate_best_perf.py --xpu /path/to/xpu/performance/result/dir/forward.csv --baseline /path/to/baseline/dir/new_baseline.csv -r

"""

import csv
import argparse
from pathlib import Path

# Module-level accumulators for the change report; reset at the start of
# every update_baseline() call so repeated calls do not mix results.
updated_rows = []
added_cases = []
updated_cases = []
removed_cases = []


def _print_params(row):
    """Print the non-timing parameters of one case, then a separator line."""
    print("Parameters:")
    for k, v in row.items():
        if k != 'time(us)':
            print(f" {k}: {v}")
    print("-" * 60)


def update_baseline(xpu_file, baseline_file, remove_missing=False):
    """Merge new XPU results into the baseline, keeping the best (lowest) time.

    For every case present in both files the smaller ``time(us)`` wins; cases
    only present in the XPU file are added. With ``remove_missing`` the cases
    absent from the XPU file are dropped. The original baseline is preserved
    as ``*_backup.csv`` and a change report is printed to stdout.

    Args:
        xpu_file: path to the new XPU performance CSV (';'-separated).
        baseline_file: path to the baseline CSV, rewritten in place.
        remove_missing: drop baseline cases the XPU file no longer has.
    """
    # Reset the shared accumulators so a second call starts clean.
    updated_rows.clear()
    added_cases.clear()
    updated_cases.clear()
    removed_cases.clear()

    with open(xpu_file, newline='') as f:
        xpu_reader = csv.DictReader(f, delimiter=';')
        xpu_rows = list(xpu_reader)
        xpu_fieldnames = xpu_reader.fieldnames  # Keep original field order

    with open(baseline_file, newline='') as f:
        baseline_reader = csv.DictReader(f, delimiter=';')
        baseline_rows = list(baseline_reader)
        baseline_fieldnames = baseline_reader.fieldnames

    # Union of both headers so new parameters of new ops enter the baseline.
    all_fieldnames = xpu_fieldnames + [f for f in baseline_fieldnames if f not in xpu_fieldnames]
    fieldnames = [f for f in all_fieldnames if f != 'time(us)']

    # BUGFIX: key the XPU rows with the *merged* field list. The original
    # built xpu_data with the xpu-only header before merging, so whenever the
    # baseline carried an extra column the key tuples had different lengths,
    # never matched, and every case was treated as added/removed instead of
    # being compared.
    xpu_data = {make_key(row, fieldnames): (float(row['time(us)']), row) for row in xpu_rows}

    baseline_keys = {make_key(row, fieldnames) for row in baseline_rows}
    xpu_keys = set(xpu_data.keys())

    # Resolve existing cases: keep whichever time is lower.
    for row in baseline_rows:
        key = make_key(row, fieldnames)
        if key in xpu_data:
            xpu_time, xpu_row = xpu_data[key]
            baseline_time = float(row['time(us)'])

            if xpu_time < baseline_time:
                # Prefer XPU values, fall back to baseline for missing fields.
                updated_row = {field: xpu_row.get(field, row.get(field, '')) for field in all_fieldnames}
                updated_row['time(us)'] = str(xpu_time)
                updated_cases.append((key, baseline_time, xpu_time, updated_row))
                updated_rows.append(updated_row)
            else:
                updated_rows.append({field: row.get(field, '') for field in all_fieldnames})
        elif not remove_missing:
            # Case vanished from the XPU run but we were asked to keep it.
            updated_rows.append({field: row.get(field, '') for field in all_fieldnames})

    # Add cases that only exist in the XPU results.
    for key in xpu_keys - baseline_keys:
        xpu_time, xpu_row = xpu_data[key]
        new_row = {field: xpu_row.get(field, '') for field in all_fieldnames}
        new_row['time(us)'] = str(xpu_time)
        updated_rows.append(new_row)
        added_cases.append((key, xpu_time, new_row))

    # Record the cases dropped by --remove-missing (for the report below).
    if remove_missing:
        for key in baseline_keys - xpu_keys:
            removed_case = next(row for row in baseline_rows if make_key(row, fieldnames) == key)
            removed_cases.append((key, float(removed_case['time(us)']), removed_case))

    if added_cases:
        print(f"\nAdded {len(added_cases)} new case(s):")
        for key, time, row in added_cases:
            print(f"\n[New Case] {format_case(key)}")
            print(f"Time: {time} us")
            _print_params(row)

    if updated_cases:
        print(f"\nUpdated {len(updated_cases)} case(s):")
        for key, old_time, new_time, row in updated_cases:
            print(f"\n[Updated] {format_case(key)}")
            print(f"Time: {old_time} us → {new_time} us")
            _print_params(row)

    if remove_missing and removed_cases:
        print(f"\nRemoved {len(removed_cases)} case(s):")
        for key, time, row in removed_cases:
            print(f"\n[Removed] {format_case(key)}")
            print(f"Time: {time} us")
            _print_params(row)

    if not (added_cases or updated_cases or (remove_missing and removed_cases)):
        print("\nNo changes detected between files.")

    # Keep the previous baseline around before overwriting it.
    backup_file = baseline_file.replace('.csv', '_backup.csv')
    Path(baseline_file).rename(backup_file)

    with open(baseline_file, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=all_fieldnames, delimiter=';')
        writer.writeheader()
        writer.writerows(updated_rows)

    print("\n" + "-" * 80)
    print(f"Update complete! Total cases in new baseline: {len(updated_rows)}")
    print(f"Updated baseline saved to {baseline_file}")
    print(f"Original backup created at {backup_file}")


def make_key(row, fieldnames):
    """Build the identity tuple of a case: every given field, stringified."""
    return tuple(str(row.get(field, '')) for field in fieldnames)


def format_case(key):
    """Render a case key for the report.

    NOTE(review): assumes the key holds at least four fields — true for the
    op summary CSVs this script is run on; verify if the schema changes.
    """
    return f"{key[0]} | {key[1]} | {key[2]} (shape: {key[3]})"

def main():
    """CLI entry point: parse arguments, validate input paths, run the update."""
    arg_parser = argparse.ArgumentParser(
        description='Compare and synchronize operation performance data')
    arg_parser.add_argument('-x', '--xpu', required=True,
                            help='Path to xpu_op_summary.csv')
    arg_parser.add_argument('-b', '--baseline', required=True,
                            help='Path to baseline_op_summary.csv')
    arg_parser.add_argument('-r', '--remove-missing', action='store_true',
                            help='Remove cases not present in xpu file')
    opts = arg_parser.parse_args()

    # Bail out early (XPU first, then baseline) if either input is missing.
    for label, candidate in (('XPU', opts.xpu), ('Baseline', opts.baseline)):
        if not Path(candidate).exists():
            print(f"Error: {label} file not found at {candidate}")
            return

    update_baseline(opts.xpu, opts.baseline, opts.remove_missing)


if __name__ == "__main__":
    main()
196 changes: 196 additions & 0 deletions .github/scripts/op_perf_comparison.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,196 @@
"""
To compare the op perf diff
# usage
python op_perf_comparison.py --xpu_file /path/to/xpu/performance/result/dir/forward.csv --baseline_file /path/to/baseline/dir/baseline.csv

"""

import pandas as pd
import argparse
import os
from ast import literal_eval
from tabulate import tabulate

def preprocess_row(row):
    """Normalize one CSV row for comparison.

    NaN cells become the sentinel string "NULL"; cells that look like Python
    literals (numbers, tuples, lists, ...) are parsed back into objects; any
    other value is kept unchanged.
    """
    def _convert(value):
        if pd.isna(value):
            return "NULL"
        try:
            return literal_eval(str(value))
        except (ValueError, SyntaxError):
            return value

    return {column: _convert(value) for column, value in row.items()}

def display_row(record):
    """Return a copy of *record* with container values stringified for display."""
    rendered = {}
    for field, value in record.items():
        if isinstance(value, (list, tuple, dict)):
            # Containers must be flattened to text before tabulation.
            rendered[field] = str(value)
        elif value == "NULL":
            rendered[field] = "NULL"
        else:
            rendered[field] = value
    return rendered

def write_to_github_summary(content):
    """Append *content* plus a newline to the GitHub Actions step summary.

    Silently does nothing when GITHUB_STEP_SUMMARY is unset (e.g. local runs).
    """
    summary_path = os.getenv('GITHUB_STEP_SUMMARY')
    if not summary_path:
        return
    with open(summary_path, 'a') as sink:
        sink.write(f"{content}\n")

def _format_records(frame):
    """Re-shape a regression/improvement frame for tabulate.

    Parameter columns come first; the raw timing columns are renamed to the
    display headers and the internal 'change' marker is dropped.
    """
    records = []
    for _, row in frame.iterrows():
        record = display_row(row)
        records.append({
            **{k: v for k, v in record.items() if k not in ['time_xpu_file', 'time_baseline_file', 'difference', 'change']},
            'Current Time(us)': record['time_xpu_file'],
            'Baseline Time(us)': record['time_baseline_file'],
            'Difference': record['difference']
        })
    return records


def display_comparison(results, threshold):
    """Report outliers to stdout and mirror the tables into the GH summary.

    Args:
        results: DataFrame of outlier cases (with 'change' of '↑'/'↓'),
            possibly empty.
        threshold: relative-change threshold, used only for the
            "no outliers" message.

    The same record-reshaping was originally copy-pasted four times; it now
    lives in _format_records().
    """
    if results.empty:
        print(f"\n No outlier exceeding ({threshold:.0%})")
        write_to_github_summary(f"## No outlier exceeding ({threshold:.0%})")
        return

    # '↓' marks cases where the current run got slower than baseline.
    regression = results[results['change'] == '↓']
    improvement = results[results['change'] == '↑']

    if not regression.empty:
        print("\n🔴 Regression:")
        print(tabulate(
            _format_records(regression),
            headers="keys",
            tablefmt='grid',
            showindex=False,
            floatfmt=".2f"
        ))

    if not improvement.empty:
        print("\n🟢 Improvement:")
        print(tabulate(
            _format_records(improvement),
            headers="keys",
            tablefmt='grid',
            showindex=False,
            floatfmt=".2f"
        ))

    # Print Summary on Github Action Summary (markdown-flavoured tables).
    summary_output = "## Performance Comparison Results\n"
    if not regression.empty:
        summary_output += "\n### 🔴 Regression\n"
        summary_output += tabulate(
            _format_records(regression),
            headers="keys",
            tablefmt='github',
            showindex=False,
            floatfmt=".2f"
        ) + "\n"

    if not improvement.empty:
        summary_output += "\n### 🟢 Improvement\n"
        summary_output += tabulate(
            _format_records(improvement),
            headers="keys",
            tablefmt='github',
            showindex=False,
            floatfmt=".2f"
        ) + "\n"

    write_to_github_summary(summary_output)

def compare_op_time_values(xpu_file, baseline_file, threshold=0.05, output_file=None):
    """Compare per-case op times between two ';'-separated CSVs.

    A case is identified by every column except 'time(us)'. The relative
    difference is (baseline - xpu) / xpu, so positive means the current run
    is faster ('↑', improvement) and negative means a regression ('↓').
    Cases whose |difference| exceeds *threshold* are passed to
    display_comparison() for reporting.

    Args:
        xpu_file: current XPU results CSV.
        baseline_file: baseline results CSV.
        threshold: relative-change threshold (0.05 == 5%).
        output_file: unused; kept for backward compatibility with callers.
    """
    df_xpu = pd.read_csv(xpu_file, sep=';')
    df_baseline = pd.read_csv(baseline_file, sep=';')

    records_xpu = [preprocess_row(row) for _, row in df_xpu.iterrows()]
    records_baseline = [preprocess_row(row) for _, row in df_baseline.iterrows()]

    # Key each record by its (column, value) pairs, excluding the timing.
    dict_xpu = {
        tuple((k, str(v)) for k, v in record.items() if k != 'time(us)'):
        record['time(us)']
        for record in records_xpu
    }
    dict_baseline = {
        tuple((k, str(v)) for k, v in record.items() if k != 'time(us)'):
        record['time(us)']
        for record in records_baseline
    }
    # Only cases present in both files can be compared.
    common_keys = set(dict_xpu.keys()) & set(dict_baseline.keys())
    results = []

    for key in common_keys:
        time_xpu = dict_xpu[key]
        time_baseline = dict_baseline[key]
        diff = (time_baseline - time_xpu) / time_xpu
        # Compare Time, Lower is better
        if abs(diff) > threshold:
            record = dict(key)
            # (Removed a leftover debug print(record) that dumped raw dicts
            # into the middle of the formatted report.)
            record.update({
                'time_xpu_file': time_xpu,
                'time_baseline_file': time_baseline,
                'difference': f"{diff:.2%}",
                'change': "↑" if diff > 0 else "↓"
            })
            results.append(record)

    result_df = pd.DataFrame(results) if results else pd.DataFrame()
    display_comparison(result_df, threshold)


def main():
    """CLI entry point: parse arguments, announce the run, do the comparison."""
    cli = argparse.ArgumentParser(description='Compare time values between two CSV files')
    cli.add_argument('-x', '--xpu_file', required=True,
                     help='XPU OP performance result csv files dir')
    cli.add_argument('-b', '--baseline_file', required=True,
                     help="XPU OP baseline result csv files dir")
    cli.add_argument('-t', '--threshold', type=float, default=0.05,
                     help='Threshold for time difference (default: 0.05 for 5%)')
    opts = cli.parse_args()

    # Echo the configuration both to the console and to the GH step summary.
    print(f" Compared file: {opts.xpu_file} 和 {opts.baseline_file}")
    print(f" Threshold: {opts.threshold:.0%}")
    write_to_github_summary("## Performance Comparison Set")
    write_to_github_summary(f"- Threshold: {opts.threshold:.0%}")

    compare_op_time_values(
        xpu_file=opts.xpu_file,
        baseline_file=opts.baseline_file,
        threshold=opts.threshold,
    )


if __name__ == "__main__":
    main()
Loading
Loading