From 1670234de2461942a7b74606b302a51ba1295780 Mon Sep 17 00:00:00 2001 From: "Zhong, Ruijie" Date: Fri, 25 Apr 2025 01:29:02 -0700 Subject: [PATCH 01/13] [CI] Enhance UT summary --- .github/scripts/check-ut.py | 296 ++++++++++++++++++++++++-------- .github/workflows/_linux_ut.yml | 12 ++ 2 files changed, 232 insertions(+), 76 deletions(-) diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py index 8cd490bc8..5758c4e6d 100644 --- a/.github/scripts/check-ut.py +++ b/.github/scripts/check-ut.py @@ -1,22 +1,47 @@ import argparse import sys import os +import re from junitparser import JUnitXml, Error, Failure, Skipped -parser = argparse.ArgumentParser() -parser.add_argument('junitxml', nargs='+') +parser = argparse.ArgumentParser(description='Test results analyzer') +parser.add_argument('input_files', nargs='+', help='JUnit XML files or log files') args = parser.parse_args() failures = [] -suites = [] +summaries = [] + +error_types = [ + "RuntimeError", + "ValueError", + "TypeError", + "AttributeError", + "KeyError", + "IndexError", + "ImportError", + "AssertionError", + "Exception", + "OSError", + "Failed", + "TimeoutError", + "asyncio.TimeoutError", + "FileNotFoundError", + "PermissionError", + "NotImplementedError", +] def get_classname(case): - return ' '.join(case.classname.split()) + return ' '.join(case.classname.split()) if hasattr(case, 'classname') else case.get('classname', '') def get_name(case): + if isinstance(case, dict): + return case.get('name', '') return ' '.join(case.name.split()) def get_result(case): + if isinstance(case, dict): + return case.get('status', 'failed') + result = "passed" if case.result: if isinstance(case.result[0], Error): @@ -28,88 +53,207 @@ def get_result(case): return result def get_message(case): + if isinstance(case, dict): + return case.get('error', '') + if not case.result: return "" - return f"{case.result[0].message.splitlines()[0]}" + full_text = case.result[0].text if hasattr(case.result[0], 'text') else 
case.result[0].message + if not full_text: + return "" + + error_messages = [] + capture_next_lines = False + indent_level = 0 + + for line in full_text.splitlines(): + stripped_line = line.strip() + if not stripped_line: + continue + + for error_type in error_types: + if stripped_line.startswith(error_type + ": "): + error_msg = stripped_line[len(error_type)+2:] + error_messages.append(f"{error_type}: {error_msg}") + capture_next_lines = True + indent_level = 0 + break + elif f"{error_type}:" in stripped_line and "Traceback" not in stripped_line: + error_msg = stripped_line.split(f'{error_type}:')[-1].strip() + error_messages.append(f"{error_type}: {error_msg}") + capture_next_lines = True + indent_level = 0 + break + + return " ; ".join(error_messages) if error_messages else f"{case.result[0].message.splitlines()[0]}" -def print_md_row(row, print_header): +def print_md_row(row, print_header=False): if print_header: - header = " | ".join([f"{key}" for key, _ in row.items()]) + header = " | ".join([f"{key}" for key in row.keys()]) print(f"| {header} |") - header = " | ".join(["-"*len(key) for key, _ in row.items()]) + header = " | ".join(["---"] * len(row)) print(f"| {header} |") - row = " | ".join([f"{value}" for _, value in row.items()]) - print(f"| {row} |") + row_values = " | ".join([f"{value}" for value in row.values()]) + print(f"| {row_values} |") -def print_cases(cases): +def print_failures(): + if not failures: + return + + print("### Test Failures") print_header = True - for case in cases: - classname = get_classname(case) - name = get_name(case) - result = get_result(case) - message = get_message(case) - row = { - 'Class name': classname, - 'Test name': name, - 'Status': result, - 'Message': message, - } - print_md_row(row, print_header) + for case in failures: + print_md_row({ + 'Class name': get_classname(case), + 'Test name': get_name(case), + 'Status': get_result(case), + 'Message': get_message(case), + 'Source': case['source'] if isinstance(case, 
dict) else 'XML' + }, print_header) print_header = False -def print_suite(suite): +def parse_log_file(log_file): + with open(log_file, encoding='utf-8') as f: + content = f.read() + + ut_name = os.path.splitext(os.path.basename(log_file))[0] + summary = { + 'Category': determine_category(ut_name), + 'UT': ut_name, + 'Test cases': 0, + 'Passed': 0, + 'Skipped': 0, + 'Failures': 0, + 'Errors': 0, + 'Source': 'Log' + } + + # Extract test counts + test_run_match = re.search(r"Ran (\d+) tests in [\d.]+s", content) + if test_run_match: + summary['Test cases'] = int(test_run_match.group(1)) + + # Extract skipped case number + skipped_match = re.search(r"skipped[ =](\d+)", content, re.IGNORECASE) + if skipped_match: + summary['Skipped'] = int(skipped_match.group(1)) + else: + skipped_match = re.search(r"skipped (\d+) cases?", content, re.IGNORECASE) + if skipped_match: + summary['Skipped'] = int(skipped_match.group(1)) + + # Extract failures + failure_blocks = re.findall(r"(FAIL:.*?)(?:\n\n|\n=+\n|\Z)", content, re.DOTALL) + exist_test_names = set() + failures_number = 0 + + for block in failure_blocks: + case_match = re.match(r"FAIL: (\w+) \(__mp_main__\.(\w+)\)", block) + if not case_match: + continue + + test_name = case_match.group(1) + if test_name in exist_test_names: + continue + exist_test_names.add(test_name) + + error_msg = [] + error_pattern = r"(" + "|".join(error_types) + r"):.*?(?=\n\S|\n\n|\n=+\n|\Z)" + error_matches = re.finditer(error_pattern, block, re.DOTALL) + if not error_matches and "Traceback" in block: + error_msg.append("Unknown error (see traceback)") + else: + for match in error_matches: + error_msg.append(match.group(0).strip()) + + failures.append({ + 'classname': ut_name, + 'name': f"{case_match.group(2)}:{test_name}", + 'error': " ".join(error_msg), + 'status': 'failed', + 'source': 'Log' + }) + failures_number += 1 + + if failures_number > summary['Failures']: + summary['Failures'] = failures_number + summary['Passed'] = summary['Test 
cases'] - summary['Failures'] - summary['Skipped'] + + return summary + +def determine_category(ut): + if ut == 'op_regression': + return 'op_regression' + elif ut == 'op_regression_dev1': + return 'op_regression_dev1' + elif ut == 'op_extended': + return 'op_extended' + elif 'op_ut' in ut: + return 'op_ut' + else: + return 'unknown' + +def process_log_file(log_file): + try: + summary = parse_log_file(log_file) + summaries.append(summary) + except Exception as e: + print(f"Error processing {log_file}: {e}", file=sys.stderr) + +def process_xml_file(xml_file): + try: + xml = JUnitXml.fromfile(xml_file) + ut = os.path.basename(xml_file).split('.')[0] + category = determine_category(ut) + + for suite in xml: + suite_summary = { + 'Category': category, + 'UT': ut, + 'Test cases': suite.tests, + 'Passed': suite.tests - suite.skipped - suite.failures - suite.errors, + 'Skipped': suite.skipped, + 'Failures': suite.failures, + 'Errors': suite.errors, + 'Source': 'XML' + } + summaries.append(suite_summary) + + for case in suite: + if get_result(case) not in ["passed", "skipped"]: + failures.append(case) + except Exception as e: + print(f"Error processing {xml_file}: {e}", file=sys.stderr) + +def print_summary(): + print("### Results Summary") print_header = True - for suite in suites: - ut = args.junitxml[0] - del(args.junitxml[0]) - ut = os.path.basename(ut).split('.')[0] - tests = suite.tests - skipped = suite.skipped - failures = suite.failures - errors = suite.errors - if ut == 'op_regression': - category = 'op_regression' - elif ut == 'op_regression_dev1': - category = 'op_regression_dev1' - elif ut == 'op_extended': - category = 'op_extended' - elif 'op_ut' in ut: - category = 'op_ut' - row = { - 'Category': category, - 'UT': ut, - 'Test cases': tests, - 'Passed': tests-skipped-failures-errors, - 'Skipped': skipped, - 'Failures': failures, - 'Errors': errors, - } - print_md_row(row, print_header) + + for summary in summaries: + print_md_row({ + 'Category': 
summary['Category'], + 'UT': summary['UT'], + 'Test cases': summary['Test cases'], + 'Passed': summary['Passed'], + 'Skipped': summary['Skipped'], + 'Failures': summary['Failures'], + 'Errors': summary['Errors'], + 'Source': summary['Source'] + }, print_header) print_header = False -xmls = [ JUnitXml.fromfile(f) for f in args.junitxml ] -for idx, xml in enumerate(xmls): - for suite in xml: - suites.append(suite) - for case in suite: - classname = get_classname(case) - name = get_name(case) - result = get_result(case) - if result not in ["passed", "skipped"]: - failures.append(case) - -printed = False -def print_break(needed): - if needed: - print("") - -if failures: - print_break(printed) - print("### Failures") - print_cases(failures) - printed = True - -print("### Results Summary") -print_suite(suites) - -sys.exit(0) +def main(): + for input_file in args.input_files: + if input_file.endswith('.log'): + process_log_file(input_file) + elif input_file.endswith('.xml'): + process_xml_file(input_file) + else: + print(f"Skipping unknown file type: {input_file}", file=sys.stderr) + + print_failures() + print_summary() + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index f0f8ea42f..cb39bb7cc 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -175,6 +175,18 @@ jobs: cd ../pytorch/third_party/torch-xpu-ops/test/xpu timeout 10000 python run_test_with_skip.py 2>${{ github.workspace }}/ut_log/op_ut/op_ut_with_skip_test_error.log | tee ${{ github.workspace }}/ut_log/op_ut/op_ut_with_skip_test.log cp *.xml ${{ github.workspace }}/ut_log + find op_ut_with_skip_nn op_ut_with_skip_quantization/core -type f -exec sh -c ' + dir_path=$(dirname "$1"); + case "$dir_path" in + *"op_ut_with_skip_quantization/core"*) + dir_name="op_ut_with_skip_quantization_core";; + *) + dir_name=$(basename "$dir_path");; + esac; + mv "$1" "$dir_path/${dir_name}_$(basename "$1")" + ' _ {} \; + cp 
op_ut_with_skip_nn/*.xml ${{ github.workspace }}/ut_log + cp op_ut_with_skip_quantization/core/*.xml ${{ github.workspace }}/ut_log # Cases run with a on-demand white list, since some suites are too # slow to go through all operators on CPU. So add cases on-demand # when XPU implementatoin is done. From cffc0a574c0a8acfd984edbf4e26bf0d7d727a7b Mon Sep 17 00:00:00 2001 From: "Zhong, Ruijie" Date: Mon, 28 Apr 2025 20:15:25 -0700 Subject: [PATCH 02/13] Add known issue filter function --- .github/scripts/ut_result_check.sh | 60 ++++++++++++++++++++++++++++-- .github/workflows/_linux_ut.yml | 8 ++++ 2 files changed, 64 insertions(+), 4 deletions(-) diff --git a/.github/scripts/ut_result_check.sh b/.github/scripts/ut_result_check.sh index 3fb1a1997..9cbdd0649 100644 --- a/.github/scripts/ut_result_check.sh +++ b/.github/scripts/ut_result_check.sh @@ -1,10 +1,59 @@ #!/bin/bash ut_suite="${1:-op_regression}" # op_regression / op_extended / op_ut / torch_xpu +# usage +# compare_and_filter_logs [output.log] + +compare_and_filter_logs() { + local file_UT="$1" + local file_known_issue="$2" + local output_file="${3:-${file_UT%.*}_filtered.log}" + local filtered_content="${file_UT%.*}_removed.log" + + if [[ $# -lt 2 ]]; then + echo "[ERROR] Need 2 files to compare" + return 1 + fi + + # Check whether UT's failed log contains the case of the known issue'log + echo "Checking whether $file_UT contains $file_known_issue" + if grep -qFf "$file_known_issue" "$file_UT"; then + echo "$file_UT contains $file_known_issue" + else + echo "$file_UT does not contain $file_known_issue" + return 1 + fi + + # Filter the same content from file_UT as file_known_issue + echo "Filtering $file_known_issue for $file_UT" + grep -vFf "$file_known_issue" "$file_UT" > "$output_file" + + # Keep the filtered UT cases + grep -nFf "$file_known_issue" "$file_UT" > "$filtered_content" + + local original_lines=$(wc -l < "$file_UT") + local filtered_lines=$(wc -l < "$output_file") + local 
filtered_lines=$((original_lines - filtered_lines)) + echo "Filtered lines: $filtered_lines" + echo "Filtered cases file: $filtered_content" + if [[ -s "$filtered_content" ]]; then + echo -e "\n\033[1;31m[Filtered Cases]\033[0m" + awk -F':' '{ + line_number = $1 + $1 = "" + gsub(/^ /, "", $0) + printf "\033[33m%3d\033[0m: %s\n", line_number, $0 + }' "$filtered_content" + else + echo -e "\n\033[1;32mNo Filtered Cases\033[0m" + fi +} + if [[ "${ut_suite}" == 'op_regression' || "${ut_suite}" == 'op_regression_dev1' || "${ut_suite}" == 'op_extended' ]]; then grep -E "^FAILED|have failures" "${ut_suite}"_test.log | awk '{print $2}' > ./"${ut_suite}"_failed.log grep "PASSED" "${ut_suite}"_test.log | awk '{print $1}' > ./"${ut_suite}"_passed.log - num_failed=$(wc -l < "./${ut_suite}_failed.log") + compare_and_filter_logs "${ut_suite}"_failed.log Known_issue.log + num_failed=$(wc -l < "./${ut_suite}_failed_filtered.log") num_passed=$(wc -l < "./${ut_suite}_passed.log") echo -e "=========================================================================" echo -e "Show Failed cases in ${ut_suite}" @@ -20,8 +69,10 @@ fi if [[ "${ut_suite}" == 'op_ut' ]]; then grep -E "^FAILED|have failures" op_ut_with_skip_test.log | awk '{print $2}' > ./"${ut_suite}"_with_skip_test_failed.log grep -E "^FAILED|have failures" op_ut_with_only_test.log | awk '{print $2}' > ./"${ut_suite}"_with_only_test_failed.log - num_failed_with_skip=$(wc -l < "./${ut_suite}_with_skip_test_failed.log") - num_failed_with_only=$(wc -l < "./${ut_suite}_with_only_test_failed.log") + compare_and_filter_logs "${ut_suite}"_with_skip_test_failed.log Known_issue.log + num_failed_with_skip=$(wc -l < "./${ut_suite}_with_skip_test_failed_filtered.log") + compare_and_filter_logs "${ut_suite}"_with_only_test_failed.log Known_issue.log + num_failed_with_only=$(wc -l < "./${ut_suite}_with_only_test_failed_filtered.log") echo -e "=========================================================================" echo -e "Show Failed 
cases in ${ut_suite} with skip" echo -e "=========================================================================" @@ -74,7 +125,8 @@ if [[ "${ut_suite}" == 'torch_xpu' ]]; then fi if [[ "${ut_suite}" == 'xpu_distributed' ]]; then grep -E "^FAILED|have failures" xpu_distributed_test.log | awk '{print $2}' > ./"${ut_suite}"_xpu_distributed_test_failed.log - num_failed_xpu_distributed=$(wc -l < "./${ut_suite}_xpu_distributed_test_failed.log") + compare_and_filter_logs "${ut_suite}"_xpu_distributed_test_failed.log Known_issue.log + num_failed_xpu_distributed=$(wc -l < "./${ut_suite}_xpu_distributed_test_failed_filtered.log") echo -e "=========================================================================" echo -e "Show Failed cases in ${ut_suite} xpu distributed" echo -e "=========================================================================" diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index cb39bb7cc..f1525d2c6 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -49,6 +49,7 @@ jobs: env: NEOReadDebugKeys: ${{ inputs.driver == 'rolling' && '1' || '0' }} DisableScratchPages: ${{ inputs.driver == 'rolling' && '1' || '0' }} + commit_issue: 1624 steps: - name: Checkout torch-xpu-ops uses: actions/checkout@v4 @@ -260,6 +261,8 @@ jobs: - name: UT Test Results Check shell: bash run: | + test_url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + repo="${{ github.repository }}" function contains() { contains_status="echo 'Start $2 ...'" { @@ -276,6 +279,7 @@ jobs: contains "op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu" $ut_suite $contains_status cd ${{ github.workspace }}/ut_log/${ut_suite} + gh --repo $repo issue view $commit_issue --json body -q .body | sed '/^$/d' > Known_issue.log cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./ bash ut_result_check.sh ${ut_suite} done @@ -293,6 +297,7 @@ jobs: env: NEOReadDebugKeys: ${{ 
inputs.driver == 'rolling' && '1' || '0' }} DisableScratchPages: ${{ inputs.driver == 'rolling' && '1' || '0' }} + commit_issue: 1624 steps: - name: Checkout torch-xpu-ops uses: actions/checkout@v4 @@ -395,6 +400,8 @@ jobs: - name: UT Test Results Check shell: bash run: | + test_url="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + repo="${{ github.repository }}" function contains() { contains_status="echo 'Start $2 ...'" { @@ -407,6 +414,7 @@ jobs: set -xe echo "UT_NAME=$(echo ${{ inputs.ut }} |sed 's/,/-/g')" |tee -a "${GITHUB_OUTPUT}" >> "${GITHUB_ENV}" cd ${{ github.workspace }}/ut_log/xpu_distributed + gh --repo $repo issue view $commit_issue --json body -q .body | sed '/^$/d' > Known_issue.log cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./ bash ut_result_check.sh 'xpu_distributed' - name: Upload Inductor XPU UT Log From ab0b8175309e541bcd0a28f07b3a1b7e6aa2ad07 Mon Sep 17 00:00:00 2001 From: "Zhong, Ruijie" Date: Mon, 28 Apr 2025 20:21:03 -0700 Subject: [PATCH 03/13] align the lint check --- .github/scripts/ut_result_check.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/scripts/ut_result_check.sh b/.github/scripts/ut_result_check.sh index 9cbdd0649..fbe696a21 100644 --- a/.github/scripts/ut_result_check.sh +++ b/.github/scripts/ut_result_check.sh @@ -30,11 +30,6 @@ compare_and_filter_logs() { # Keep the filtered UT cases grep -nFf "$file_known_issue" "$file_UT" > "$filtered_content" - - local original_lines=$(wc -l < "$file_UT") - local filtered_lines=$(wc -l < "$output_file") - local filtered_lines=$((original_lines - filtered_lines)) - echo "Filtered lines: $filtered_lines" echo "Filtered cases file: $filtered_content" if [[ -s "$filtered_content" ]]; then echo -e "\n\033[1;31m[Filtered Cases]\033[0m" From 1abb95f8efda18ba91903b1e319879dccdf621f7 Mon Sep 17 00:00:00 2001 From: "Zhong, Ruijie" Date: Mon, 28 Apr 2025 20:38:07 -0700 Subject: [PATCH 04/13] fix the grep issue --- 
.github/scripts/ut_result_check.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/scripts/ut_result_check.sh b/.github/scripts/ut_result_check.sh index fbe696a21..eb62a2da0 100644 --- a/.github/scripts/ut_result_check.sh +++ b/.github/scripts/ut_result_check.sh @@ -45,7 +45,8 @@ compare_and_filter_logs() { } if [[ "${ut_suite}" == 'op_regression' || "${ut_suite}" == 'op_regression_dev1' || "${ut_suite}" == 'op_extended' ]]; then - grep -E "^FAILED|have failures" "${ut_suite}"_test.log | awk '{print $2}' > ./"${ut_suite}"_failed.log + grep -E "^FAILED" "${ut_suite}"_test.log | awk '{print $2}' > ./"${ut_suite}"_failed.log + grep -E "have failures" "${ut_suite}"_test.log | awk '{print $1}' >> ./"${ut_suite}"_failed.log grep "PASSED" "${ut_suite}"_test.log | awk '{print $1}' > ./"${ut_suite}"_passed.log compare_and_filter_logs "${ut_suite}"_failed.log Known_issue.log num_failed=$(wc -l < "./${ut_suite}_failed_filtered.log") @@ -62,8 +63,10 @@ if [[ "${ut_suite}" == 'op_regression' || "${ut_suite}" == 'op_regression_dev1' fi fi if [[ "${ut_suite}" == 'op_ut' ]]; then - grep -E "^FAILED|have failures" op_ut_with_skip_test.log | awk '{print $2}' > ./"${ut_suite}"_with_skip_test_failed.log - grep -E "^FAILED|have failures" op_ut_with_only_test.log | awk '{print $2}' > ./"${ut_suite}"_with_only_test_failed.log + grep -E "^FAILED" op_ut_with_skip_test.log | awk '{print $2}' > ./"${ut_suite}"_with_skip_test_failed.log + grep -E "have failures" op_ut_with_skip_test.log | awk '{print $1}' >> ./"${ut_suite}"_with_skip_test_failed.log + grep -E "^FAILED" op_ut_with_only_test.log | awk '{print $2}' > ./"${ut_suite}"_with_only_test_failed.log + grep -E "have failures" op_ut_with_only_test.log | awk '{print $1}' >> ./"${ut_suite}"_with_only_test_failed.log compare_and_filter_logs "${ut_suite}"_with_skip_test_failed.log Known_issue.log num_failed_with_skip=$(wc -l < "./${ut_suite}_with_skip_test_failed_filtered.log") compare_and_filter_logs 
"${ut_suite}"_with_only_test_failed.log Known_issue.log @@ -119,7 +122,8 @@ if [[ "${ut_suite}" == 'torch_xpu' ]]; then fi fi if [[ "${ut_suite}" == 'xpu_distributed' ]]; then - grep -E "^FAILED|have failures" xpu_distributed_test.log | awk '{print $2}' > ./"${ut_suite}"_xpu_distributed_test_failed.log + grep -E "^FAILED" xpu_distributed_test.log | awk '{print $2}' > ./"${ut_suite}"_xpu_distributed_test_failed.log + grep -E "have failures" xpu_distributed_test.log | awk '{print $1}' >> ./"${ut_suite}"_xpu_distributed_test_failed.log compare_and_filter_logs "${ut_suite}"_xpu_distributed_test_failed.log Known_issue.log num_failed_xpu_distributed=$(wc -l < "./${ut_suite}_xpu_distributed_test_failed_filtered.log") echo -e "=========================================================================" From 658a62d77def44d9600242d5a03b7a72caab6d9e Mon Sep 17 00:00:00 2001 From: "Zhong, Ruijie" Date: Mon, 28 Apr 2025 22:15:15 -0700 Subject: [PATCH 05/13] Add GH_TOKEN set --- .github/workflows/_linux_ut.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index f1525d2c6..090d90466 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -50,6 +50,7 @@ jobs: NEOReadDebugKeys: ${{ inputs.driver == 'rolling' && '1' || '0' }} DisableScratchPages: ${{ inputs.driver == 'rolling' && '1' || '0' }} commit_issue: 1624 + GH_TOKEN: ${{ github.token }} steps: - name: Checkout torch-xpu-ops uses: actions/checkout@v4 @@ -298,6 +299,7 @@ jobs: NEOReadDebugKeys: ${{ inputs.driver == 'rolling' && '1' || '0' }} DisableScratchPages: ${{ inputs.driver == 'rolling' && '1' || '0' }} commit_issue: 1624 + GH_TOKEN: ${{ github.token }} steps: - name: Checkout torch-xpu-ops uses: actions/checkout@v4 From 167537100ae6549fb41a7fc28a29b2177e82e4a3 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Tue, 29 Apr 2025 23:38:03 -0700 Subject: [PATCH 06/13] remove Exception to avoid duplicated error message --- 
.github/scripts/check-ut.py | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py index 5758c4e6d..aeb820aaf 100644 --- a/.github/scripts/check-ut.py +++ b/.github/scripts/check-ut.py @@ -20,7 +20,6 @@ "IndexError", "ImportError", "AssertionError", - "Exception", "OSError", "Failed", "TimeoutError", From e3f9ccff76660e8b8e9b3a5e2e5541a11039ef77 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Wed, 30 Apr 2025 00:19:57 -0700 Subject: [PATCH 07/13] upload ut_failure_list.csv as artifacts --- .github/scripts/check-ut.py | 32 ++++++++++++++++++++------------ .github/workflows/_linux_ut.yml | 6 ++++++ 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py index aeb820aaf..cf1d577cb 100644 --- a/.github/scripts/check-ut.py +++ b/.github/scripts/check-ut.py @@ -86,7 +86,7 @@ def get_message(case): return " ; ".join(error_messages) if error_messages else f"{case.result[0].message.splitlines()[0]}" -def print_md_row(row, print_header=False): +def print_md_row(row, print_header=False, fail_list): if print_header: header = " | ".join([f"{key}" for key in row.keys()]) print(f"| {header} |") @@ -95,21 +95,29 @@ def print_md_row(row, print_header=False): row_values = " | ".join([f"{value}" for value in row.values()]) print(f"| {row_values} |") + if fail_list != None: + fail_list.write(f"| {row_values} |\n") + + + def print_failures(): if not failures: return - print("### Test Failures") - print_header = True - for case in failures: - print_md_row({ - 'Class name': get_classname(case), - 'Test name': get_name(case), - 'Status': get_result(case), - 'Message': get_message(case), - 'Source': case['source'] if isinstance(case, dict) else 'XML' - }, print_header) - print_header = False + with open("ut_failure_list.csv", "w") as fail_list: + fail_list.write("sep=\'|\''.\n") + + print("### Test Failures") + print_header = True + for case in failures: + print_md_row({ + 
'Class name': get_classname(case), + 'Test name': get_name(case), + 'Status': get_result(case), + 'Message': get_message(case), + 'Source': case['source'] if isinstance(case, dict) else 'XML' + }, print_header, fail_list) + print_header = False def parse_log_file(log_file): with open(log_file, encoding='utf-8') as f: diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index 090d90466..e4b755cec 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -425,3 +425,9 @@ jobs: with: name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-xpu_distributed path: ${{ github.workspace }}/ut_log + - name: Upload XPU UT Log + if: ${{ ! cancelled() }} + uses: actions/upload-artifact@v4 + with: + name: XPU-UT-Failure-List-${{ github.event.pull_request.number || github.sha }}-${{ inputs.abi }}-pytorch_distributed + path: ${{ github.workspace }}/ut_log/ut_failure_list.csv From ec3bc57278a98de684b43476dbc7d4cb22eaaaa0 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Wed, 30 Apr 2025 06:55:54 -0700 Subject: [PATCH 08/13] fix lint issue --- .github/scripts/check-ut.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py index cf1d577cb..3003b1b72 100644 --- a/.github/scripts/check-ut.py +++ b/.github/scripts/check-ut.py @@ -86,7 +86,7 @@ def get_message(case): return " ; ".join(error_messages) if error_messages else f"{case.result[0].message.splitlines()[0]}" -def print_md_row(row, print_header=False, fail_list): +def print_md_row(row, print_header=False, fail_list=None): if print_header: header = " | ".join([f"{key}" for key in row.keys()]) print(f"| {header} |") @@ -95,7 +95,7 @@ def print_md_row(row, print_header=False, fail_list): row_values = " | ".join([f"{value}" for value in row.values()]) print(f"| {row_values} |") - if fail_list != None: + if fail_list is not None: fail_list.write(f"| {row_values} |\n") From 
22b0d7ea4920c1826f99eb3607c4d46975858391 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Fri, 2 May 2025 06:44:02 -0700 Subject: [PATCH 09/13] cp ut_failure_list.csv to ut_log folder --- .github/workflows/_linux_ut.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index e4b755cec..b2e50f2a7 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -259,6 +259,7 @@ jobs: source activate xpu_op_${ZE_AFFINITY_MASK} pip install junitparser python .github/scripts/check-ut.py ${{ github.workspace }}/ut_log/*.xml >> $GITHUB_STEP_SUMMARY || true + cp ut_failure_list.csv ${{ github.workspace }}/ut_log/. - name: UT Test Results Check shell: bash run: | From 651edb19f86308d98e730f2e4b5d12b643b28c70 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Sat, 3 May 2025 02:07:02 -0700 Subject: [PATCH 10/13] disable test for validation purpose --- .github/workflows/_linux_ut.yml | 28 +- test/xpu/skip_list_common.py | 3140 +------------------------------ 2 files changed, 16 insertions(+), 3152 deletions(-) diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index b2e50f2a7..ba1165a8b 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -6,7 +6,7 @@ on: pytorch: required: false type: string - default: 'main' + default: 'nightly_wheel' description: Pytorch branch/commit keep_torch_xpu_ops: required: false @@ -22,7 +22,8 @@ on: required: true type: string default: '' - description: UT scope. `op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu` Delimiter is comma + #description: UT scope. `op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu` Delimiter is comma + description: UT scope. 
`op_ut,` Delimiter is comma python: required: false type: string @@ -285,13 +286,19 @@ jobs: cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./ bash ut_result_check.sh ${ut_suite} done - - name: Upload Inductor XPU UT Log - if: always() + #- name: Upload Inductor XPU UT Log + # if: always() + # uses: actions/upload-artifact@v4 + # with: + # name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-${{ env.UT_NAME }} + # path: ${{ github.workspace }}/ut_log + - name: Upload XPU UT Log + if: ${{ ! cancelled() }} uses: actions/upload-artifact@v4 with: - name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-${{ env.UT_NAME }} - path: ${{ github.workspace }}/ut_log - + name: XPU-UT-Failure-List-${{ github.event.pull_request.number || github.sha }}-${{ inputs.abi }}-pytorch_distributed + path: ${{ github.workspace }}/ut_log/ut_failure_list.csv + distributed_ut_test: runs-on: pvc_e2e if: contains(inputs.ut, 'xpu_distributed') @@ -426,9 +433,4 @@ jobs: with: name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-xpu_distributed path: ${{ github.workspace }}/ut_log - - name: Upload XPU UT Log - if: ${{ ! 
cancelled() }} - uses: actions/upload-artifact@v4 - with: - name: XPU-UT-Failure-List-${{ github.event.pull_request.number || github.sha }}-${{ inputs.abi }}-pytorch_distributed - path: ${{ github.workspace }}/ut_log/ut_failure_list.csv + diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py index 12d85c833..2b3665cfc 100644 --- a/test/xpu/skip_list_common.py +++ b/test/xpu/skip_list_common.py @@ -1,3141 +1,3 @@ skip_dict = { - "test_ops_xpu.py": ( - # Skip list of base line - # XPU implementation doesn't claimn FP8 now - # https://github.com/intel/torch-xpu-ops/issues/461 - "float8", - # workarounds for the following tests - # https://github.com/intel/torch-xpu-ops/issues/1214 - "test_python_ref__refs_exp_xpu_complex128", - "test_python_ref__refs_sigmoid_xpu_complex128", - "test_python_ref_executor__refs_log2_executor_aten_xpu_complex128", - "test_python_ref_executor__refs_exp_executor_aten_xpu_complex128", - "test_python_ref_torch_fallback__refs_log2_xpu_complex128", - "test_python_ref_torch_fallback__refs_log10_xpu_complex128", - "test_python_ref_torch_fallback__refs_sigmoid_xpu_complex128", - "test_python_ref_executor__refs_log10_executor_aten_xpu_complex128", - "test_noncontiguous_samples_histogram_xpu_float32", - "test_python_ref_executor__refs_sigmoid_executor_aten_xpu_complex128", - # TODO: Fix the following tests - "test_out_warning_torch__scaled_mm_xpu", - # To be removed from this file. - # CUDA and XPU both XFAIL now. - "test_out_narrow_copy_xpu_float32", - # This case is marked as skip but XPU failed. However, CUDA and XPU throw the same runtime error. - "test_out_histc_xpu_float32", - # Data type is not supported in oneDNN! 
- "test_dtypes_nn_functional_conv1d_xpu", - "test_dtypes_nn_functional_conv2d_xpu", - "test_dtypes_nn_functional_conv3d_xpu", - "test_dtypes_nn_functional_conv_transpose1d_xpu", - "test_dtypes_nn_functional_conv_transpose2d_xpu", - "test_dtypes_nn_functional_conv_transpose3d_xpu", - # AssertionError: The supported dtypes for nn.functional.softsign on device type xpu are incorrect! - "test_dtypes_nn_functional_softsign_xpu", - # AssertionError: The supported dtypes for sparse.sampled_addmm on device type xpu are incorrect! - OPs not supported - "test_dtypes_sparse_sampled_addmm_xpu", - # OPs not supported - "test_errors_dot_xpu", - "test_errors_vdot_xpu", - # Linalg OPs not supported - "test_noncontiguous_samples_linalg_det_xpu_float32", - "test_noncontiguous_samples_linalg_slogdet_xpu_float32", - "test_noncontiguous_samples_linalg_solve_ex_xpu_float32", - "test_noncontiguous_samples_linalg_solve_xpu_float32", - "test_noncontiguous_samples_linalg_tensorsolve_xpu_float32", - "test_noncontiguous_samples_logdet_xpu_float32", - # Sparse CSR OPs not supported - # RuntimeError: device type of values (xpu) must be CPU or CUDA or Meta - # https://github.com/intel/torch-xpu-ops/issues/357 - "test_compare_cpu_sparse_sampled_addmm_xpu_float32", - "test_out_requires_grad_error_sparse_sampled_addmm_xpu_complex64", - "test_out_requires_grad_error_sparse_sampled_addmm_xpu_float32", - # OneDNN issues, https://github.com/intel/torch-xpu-ops/issues/253 - # RuntimeError: Long is not supported in oneDNN! 
- # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_noncontiguous_samples_nn_functional_conv3d_xpu_int64", - "test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64", - "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_complex64", - "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_float32", - "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_int64", - "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_complex64", - "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_float32", - "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_int64", - "test_noncontiguous_samples_nn_functional_conv1d_xpu_int64", - "test_noncontiguous_samples_nn_functional_conv2d_xpu_int64", - # Linalg OPs not supported - # RuntimeError: mode only supports CPU AND CUDA device type, got: xpu - # Issue https://github.com/intel/torch-xpu-ops/issues/327 - "test_numpy_ref_linalg_tensorinv_xpu_float64", - # RuntimeError: could not create a primitive descriptor for a deconvolution - # https://github.com/intel/torch-xpu-ops/issues/253 - "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_complex64", - "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_float32", - "test_variant_consistency_eager_nn_functional_conv_transpose3d_xpu_complex64", - "test_variant_consistency_eager_nn_functional_conv_transpose3d_xpu_float32", - # Linalg OPs not supported - "test_compare_cpu_linalg_lu_factor_ex_xpu_float32", - "test_compare_cpu_linalg_lu_factor_xpu_float32", - "test_compare_cpu_linalg_lu_xpu_float32", - # XPU hang. CUDA hang as well. 
- # https://github.com/pytorch/pytorch/issues/79528 - "test_compare_cpu_special_hermite_polynomial_h_xpu_float32", - # XFAIL of CUDA and XPU, unexpected success in fallback - # Linalg OPs not supported - "test_out_cholesky_inverse_xpu_float32", - "test_out_geqrf_xpu_float32", - "test_out_ormqr_xpu_float32", - # XFAIL of CUDA, XPU got unexpected success - "test_python_ref__refs_div_no_rounding_mode_xpu_complex32", - "test_python_ref__refs_pow_xpu_complex32", - "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32", - "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32", - "test_python_ref__refs_pow_xpu_complex32", - "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32", - "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32", - "test_python_ref_torch_fallback__refs_pow_xpu_complex32", - # unexpected success because of cpu fallback - # Linalg OPs not supported - "test_out_triangular_solve_xpu_float32", - # Newly added: - # Cuda skipped it - "test_non_standard_bool_values_sort_xpu_bool", # The implementation aligns with CUDA, RuntimeError: "sort" not implemented for 'Bool'. - # Cuda XFAIL (stock pytorch commit: e7cf7d0) - "test_non_standard_bool_values_argsort_xpu_bool", - # Unexpected success - "test_python_ref_executor__refs_pow_executor_aten_xpu_complex32", # Didn't align with CUDA, Unexpected success - # Unexpected success - # "test_errors_histogramdd_xpu", #XFAIL now - # Jiterator is only supported on CUDA and ROCm GPUs, none are available. 
- # https://github.com/intel/torch-xpu-ops/issues/584 - "_jiterator_", - # https://github.com/intel/torch-xpu-ops/issues/157 - # Segfault: - "test_dtypes_nn_functional_multi_head_attention_forward_xpu", # https://github.com/intel/torch-xpu-ops/issues/157 - # Linalg OPs not supported - "test_dtypes_pca_lowrank_xpu", # https://github.com/intel/torch-xpu-ops/issues/157 - "test_dtypes_svd_lowrank_xpu", # https://github.com/intel/torch-xpu-ops/issues/157 - # RuntimeError: Long is not supported in oneDNN! - "test_noncontiguous_samples_nn_functional_linear_xpu_int64", # https://github.com/intel/torch-xpu-ops/issues/157 - # https://github.com/intel/torch-xpu-ops/issues/157 - # Datatype not supported in oneDNN - "test_dtypes_addmm_decomposed_xpu", - "test_dtypes_addmm_xpu", - "test_dtypes_addmv_xpu", - "test_dtypes_addr_xpu", - "test_dtypes_baddbmm_xpu", - "test_dtypes_cholesky_inverse_xpu", - "test_dtypes_cholesky_solve_xpu", - "test_dtypes_cholesky_xpu", - "test_dtypes_corrcoef_xpu", - "test_dtypes_cov_xpu", - "test_dtypes_linalg_cholesky_ex_xpu", - "test_dtypes_linalg_cholesky_xpu", - "test_dtypes_linalg_cond_xpu", - "test_dtypes_linalg_det_singular_xpu", - "test_dtypes_linalg_det_xpu", - "test_dtypes_linalg_eig_xpu", - "test_dtypes_linalg_eigh_xpu", - "test_dtypes_linalg_eigvals_xpu", - "test_dtypes_linalg_eigvalsh_xpu", - "test_dtypes_linalg_inv_ex_xpu", - "test_dtypes_linalg_inv_xpu", - "test_dtypes_linalg_ldl_factor_ex_xpu", - "test_dtypes_linalg_ldl_factor_xpu", - "test_dtypes_linalg_ldl_solve_xpu", - "test_dtypes_linalg_lstsq_grad_oriented_xpu", - "test_dtypes_linalg_lstsq_xpu", - "test_dtypes_linalg_lu_factor_ex_xpu", - "test_dtypes_linalg_lu_factor_xpu", - "test_dtypes_linalg_lu_solve_xpu", - "test_dtypes_linalg_lu_xpu", - "test_dtypes_linalg_matrix_power_xpu", - "test_dtypes_linalg_matrix_rank_hermitian_xpu", - "test_dtypes_linalg_matrix_rank_xpu", - "test_dtypes_linalg_pinv_hermitian_xpu", - "test_dtypes_linalg_pinv_xpu", - "test_dtypes_linalg_qr_xpu", - 
"test_dtypes_linalg_slogdet_xpu", - "test_dtypes_linalg_solve_ex_xpu", - "test_dtypes_linalg_solve_xpu", - "test_dtypes_linalg_svd_xpu", - "test_dtypes_linalg_tensorinv_xpu", - "test_dtypes_linalg_tensorsolve_xpu", - "test_dtypes_logdet_xpu", - "test_dtypes_lu_solve_xpu", - "test_dtypes_lu_xpu", - "test_dtypes_mv_xpu", - "test_dtypes_nn_functional_scaled_dot_product_attention_xpu", - "test_dtypes_norm_nuc_xpu", - "test_dtypes_pinverse_xpu", - "test_dtypes_qr_xpu", - "test_dtypes_svd_xpu", - "test_dtypes_tensordot_xpu", - "test_dtypes_triangular_solve_xpu", - "test_noncontiguous_samples___rmatmul___xpu_complex64", - "test_noncontiguous_samples___rmatmul___xpu_int64", - "test_noncontiguous_samples_addbmm_xpu_complex64", - "test_noncontiguous_samples_addbmm_xpu_float32", - "test_noncontiguous_samples_addbmm_xpu_int64", - "test_noncontiguous_samples_addmm_decomposed_xpu_complex64", - "test_noncontiguous_samples_addmm_decomposed_xpu_int64", - "test_noncontiguous_samples_addmm_xpu_complex64", - "test_noncontiguous_samples_addmm_xpu_float32", - "test_noncontiguous_samples_addmm_xpu_int64", - "test_noncontiguous_samples_addmv_xpu_complex64", - "test_noncontiguous_samples_addmv_xpu_float32", - "test_noncontiguous_samples_addmv_xpu_int64", - "test_noncontiguous_samples_addr_xpu_complex64", - "test_noncontiguous_samples_baddbmm_xpu_complex64", - "test_noncontiguous_samples_baddbmm_xpu_int64", - "test_noncontiguous_samples_bmm_xpu_complex64", - "test_noncontiguous_samples_bmm_xpu_int64", - "test_noncontiguous_samples_cholesky_inverse_xpu_complex64", - "test_noncontiguous_samples_cholesky_solve_xpu_complex64", - "test_noncontiguous_samples_cholesky_xpu_complex64", - "test_noncontiguous_samples_corrcoef_xpu_complex64", - "test_noncontiguous_samples_cov_xpu_complex64", - "test_noncontiguous_samples_einsum_xpu_complex64", - "test_noncontiguous_samples_einsum_xpu_int64", - "test_noncontiguous_samples_geqrf_xpu_complex64", - "test_noncontiguous_samples_inner_xpu_complex64", - 
"test_noncontiguous_samples_inner_xpu_int64", - "test_noncontiguous_samples_linalg_cholesky_ex_xpu_complex64", - "test_noncontiguous_samples_linalg_cholesky_xpu_complex64", - "test_noncontiguous_samples_linalg_cond_xpu_complex64", - "test_noncontiguous_samples_linalg_det_xpu_complex64", - "test_noncontiguous_samples_linalg_eig_xpu_complex64", - "test_noncontiguous_samples_linalg_eig_xpu_float32", - "test_noncontiguous_samples_linalg_eigh_xpu_complex64", - "test_noncontiguous_samples_linalg_eigvals_xpu_complex64", - "test_noncontiguous_samples_linalg_eigvalsh_xpu_complex64", - "test_noncontiguous_samples_linalg_householder_product_xpu_complex64", - "test_noncontiguous_samples_linalg_inv_ex_xpu_complex64", - "test_noncontiguous_samples_linalg_inv_xpu_complex64", - "test_noncontiguous_samples_linalg_ldl_factor_ex_xpu_complex64", - "test_noncontiguous_samples_linalg_ldl_factor_xpu_complex64", - "test_noncontiguous_samples_linalg_ldl_solve_xpu_complex64", - "test_noncontiguous_samples_linalg_lstsq_grad_oriented_xpu_complex64", - "test_noncontiguous_samples_linalg_lstsq_xpu_complex64", - "test_noncontiguous_samples_linalg_lu_factor_ex_xpu_complex64", - "test_noncontiguous_samples_linalg_lu_factor_xpu_complex64", - "test_noncontiguous_samples_linalg_lu_solve_xpu_complex64", - "test_noncontiguous_samples_linalg_lu_xpu_complex64", - "test_noncontiguous_samples_linalg_matrix_norm_xpu_complex64", - "test_noncontiguous_samples_linalg_matrix_power_xpu_complex64", - "test_noncontiguous_samples_linalg_matrix_rank_hermitian_xpu_complex64", - "test_noncontiguous_samples_linalg_matrix_rank_xpu_complex64", - "test_noncontiguous_samples_linalg_norm_subgradients_at_zero_xpu_complex64", - "test_noncontiguous_samples_linalg_norm_xpu_complex64", - "test_noncontiguous_samples_linalg_pinv_hermitian_xpu_complex64", - "test_noncontiguous_samples_linalg_pinv_singular_xpu_complex64", - "test_noncontiguous_samples_linalg_pinv_xpu_complex64", - 
"test_noncontiguous_samples_linalg_qr_xpu_complex64", - "test_noncontiguous_samples_linalg_slogdet_xpu_complex64", - "test_noncontiguous_samples_linalg_solve_ex_xpu_complex64", - "test_noncontiguous_samples_linalg_solve_triangular_xpu_complex64", - "test_noncontiguous_samples_linalg_solve_xpu_complex64", - "test_noncontiguous_samples_linalg_svd_xpu_complex64", - "test_noncontiguous_samples_linalg_svdvals_xpu_complex64", - "test_noncontiguous_samples_linalg_tensorinv_xpu_complex64", - "test_noncontiguous_samples_linalg_tensorsolve_xpu_complex64", - "test_noncontiguous_samples_logdet_xpu_complex64", - "test_noncontiguous_samples_lu_solve_xpu_complex64", - "test_noncontiguous_samples_lu_xpu_complex64", - "test_noncontiguous_samples_matmul_xpu_complex64", - "test_noncontiguous_samples_matmul_xpu_int64", - "test_noncontiguous_samples_mm_xpu_complex64", - "test_noncontiguous_samples_mm_xpu_int64", - "test_noncontiguous_samples_mv_xpu_complex64", - "test_noncontiguous_samples_mv_xpu_int64", - "test_noncontiguous_samples_nn_functional_bilinear_xpu_int64", - "test_noncontiguous_samples_nn_functional_linear_xpu_complex64", - "test_noncontiguous_samples_norm_nuc_xpu_complex64", - "test_noncontiguous_samples_ormqr_xpu_complex64", - "test_noncontiguous_samples_pinverse_xpu_complex64", - "test_noncontiguous_samples_qr_xpu_complex64", - "test_noncontiguous_samples_svd_xpu_complex64", - "test_noncontiguous_samples_tensordot_xpu_complex64", - "test_noncontiguous_samples_tensordot_xpu_int64", - "test_noncontiguous_samples_triangular_solve_xpu_complex64", - "test_numpy_ref_addbmm_xpu_complex128", - "test_numpy_ref_addbmm_xpu_float64", - "test_numpy_ref_addbmm_xpu_int64", - "test_numpy_ref_linalg_tensorinv_xpu_complex128", - "test_out_addbmm_xpu_float32", - "test_out_addmm_xpu_float32", - "test_out_addmv_xpu_float32", - "test_out_baddbmm_xpu_float32", - "test_out_mm_xpu_float32", - "test_out_mv_xpu_float32", - "test_out_requires_grad_error_addbmm_xpu_complex64", - 
"test_out_requires_grad_error_addmm_decomposed_xpu_complex64", - "test_out_requires_grad_error_addmm_xpu_complex64", - "test_out_requires_grad_error_addmv_xpu_complex64", - "test_out_requires_grad_error_baddbmm_xpu_complex64", - "test_out_requires_grad_error_bmm_xpu_complex64", - "test_out_requires_grad_error_cholesky_inverse_xpu_complex64", - "test_out_requires_grad_error_cholesky_solve_xpu_complex64", - "test_out_requires_grad_error_cholesky_xpu_complex64", - "test_out_requires_grad_error_inner_xpu_complex64", - "test_out_requires_grad_error_linalg_cholesky_ex_xpu_complex64", - "test_out_requires_grad_error_linalg_cholesky_xpu_complex64", - "test_out_requires_grad_error_linalg_det_singular_xpu_complex64", - "test_out_requires_grad_error_linalg_eig_xpu_complex64", - "test_out_requires_grad_error_linalg_eigh_xpu_complex64", - "test_out_requires_grad_error_linalg_eigvals_xpu_complex64", - "test_out_requires_grad_error_linalg_eigvalsh_xpu_complex64", - "test_out_requires_grad_error_linalg_inv_ex_xpu_complex64", - "test_out_requires_grad_error_linalg_inv_xpu_complex64", - "test_out_requires_grad_error_linalg_lstsq_xpu_complex64", - "test_out_requires_grad_error_linalg_lu_factor_xpu_complex64", - "test_out_requires_grad_error_linalg_lu_solve_xpu_complex64", - "test_out_requires_grad_error_linalg_multi_dot_xpu_complex64", - "test_out_requires_grad_error_linalg_pinv_hermitian_xpu_complex64", - "test_out_requires_grad_error_linalg_pinv_xpu_complex64", - "test_out_requires_grad_error_linalg_qr_xpu_complex64", - "test_out_requires_grad_error_linalg_solve_ex_xpu_complex64", - "test_out_requires_grad_error_linalg_solve_xpu_complex64", - "test_out_requires_grad_error_linalg_tensorinv_xpu_complex64", - "test_out_requires_grad_error_lu_solve_xpu_complex64", - "test_out_requires_grad_error_lu_xpu_complex64", - "test_out_requires_grad_error_mm_xpu_complex64", - "test_out_requires_grad_error_mv_xpu_complex64", - "test_out_requires_grad_error_nn_functional_linear_xpu_complex64", - 
"test_out_requires_grad_error_qr_xpu_complex64", - "test_out_requires_grad_error_tensordot_xpu_complex64", - "test_out_requires_grad_error_triangular_solve_xpu_complex64", - "test_out_warning_addmm_decomposed_xpu", - "test_out_warning_addmm_xpu", - "test_out_warning_addmv_xpu", - "test_out_warning_baddbmm_xpu", - "test_out_warning_bmm_xpu", - "test_out_warning_matmul_xpu", - "test_out_warning_mm_xpu", - "test_out_warning_mv_xpu", - "test_out_warning_nn_functional_linear_xpu", - "test_python_ref__refs_linalg_svd_xpu_complex128", - "test_python_ref__refs_linalg_svd_xpu_complex64", - "test_python_ref__refs_linalg_svd_xpu_float64", - "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex128", - "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex64", - "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_float64", - "test_python_ref_executor__refs_nn_functional_pdist_executor_aten_xpu_float64", - "test_python_ref_meta__refs_linalg_svd_xpu_complex128", - "test_python_ref_meta__refs_linalg_svd_xpu_complex64", - "test_python_ref_meta__refs_linalg_svd_xpu_float64", - "test_python_ref_meta__refs_nn_functional_pdist_xpu_float64", - "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex128", - "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex64", - "test_python_ref_torch_fallback__refs_linalg_svd_xpu_float64", - "test_python_ref_torch_fallback__refs_nn_functional_pdist_xpu_float64", - "test_variant_consistency_eager___rmatmul___xpu_complex64", - "test_variant_consistency_eager_addmm_decomposed_xpu_complex64", - "test_variant_consistency_eager_addmm_xpu_complex64", - "test_variant_consistency_eager_addmm_xpu_float32", - "test_variant_consistency_eager_addmv_xpu_complex64", - "test_variant_consistency_eager_addmv_xpu_float32", - "test_variant_consistency_eager_baddbmm_xpu_complex64", - "test_variant_consistency_eager_baddbmm_xpu_float32", - "test_variant_consistency_eager_bmm_xpu_complex64", - 
"test_variant_consistency_eager_cholesky_inverse_xpu_complex64", - "test_variant_consistency_eager_cholesky_solve_xpu_complex64", - "test_variant_consistency_eager_cholesky_xpu_complex64", - "test_variant_consistency_eager_corrcoef_xpu_complex64", - "test_variant_consistency_eager_cov_xpu_complex64", - "test_variant_consistency_eager_einsum_xpu_complex64", - "test_variant_consistency_eager_geqrf_xpu_complex64", - "test_variant_consistency_eager_inner_xpu_complex64", - "test_variant_consistency_eager_linalg_cholesky_ex_xpu_complex64", - "test_variant_consistency_eager_linalg_cholesky_xpu_complex64", - "test_variant_consistency_eager_linalg_cond_xpu_complex64", - "test_variant_consistency_eager_linalg_det_singular_xpu_complex64", - "test_variant_consistency_eager_linalg_det_xpu_complex64", - "test_variant_consistency_eager_linalg_eig_xpu_complex64", - "test_variant_consistency_eager_linalg_eigh_xpu_complex64", - "test_variant_consistency_eager_linalg_eigvals_xpu_complex64", - "test_variant_consistency_eager_linalg_eigvalsh_xpu_complex64", - "test_variant_consistency_eager_linalg_householder_product_xpu_complex64", - "test_variant_consistency_eager_linalg_inv_ex_xpu_complex64", - "test_variant_consistency_eager_linalg_inv_xpu_complex64", - "test_variant_consistency_eager_linalg_ldl_factor_ex_xpu_complex64", - "test_variant_consistency_eager_linalg_ldl_factor_xpu_complex64", - "test_variant_consistency_eager_linalg_ldl_solve_xpu_complex64", - "test_variant_consistency_eager_linalg_lstsq_grad_oriented_xpu_complex64", - "test_variant_consistency_eager_linalg_lstsq_xpu_complex64", - "test_variant_consistency_eager_linalg_lu_factor_xpu_complex64", - "test_variant_consistency_eager_linalg_lu_solve_xpu_complex64", - "test_variant_consistency_eager_linalg_matrix_norm_xpu_complex64", - "test_variant_consistency_eager_linalg_matrix_power_xpu_complex64", - "test_variant_consistency_eager_linalg_matrix_rank_hermitian_xpu_complex64", - 
"test_variant_consistency_eager_linalg_matrix_rank_xpu_complex64", - "test_variant_consistency_eager_linalg_multi_dot_xpu_complex64", - "test_variant_consistency_eager_linalg_norm_subgradients_at_zero_xpu_complex64", - "test_variant_consistency_eager_linalg_norm_xpu_complex64", - "test_variant_consistency_eager_linalg_pinv_hermitian_xpu_complex64", - "test_variant_consistency_eager_linalg_pinv_singular_xpu_complex64", - "test_variant_consistency_eager_linalg_pinv_xpu_complex64", - "test_variant_consistency_eager_linalg_qr_xpu_complex64", - "test_variant_consistency_eager_linalg_slogdet_xpu_complex64", - "test_variant_consistency_eager_linalg_solve_ex_xpu_complex64", - "test_variant_consistency_eager_linalg_solve_triangular_xpu_complex64", - "test_variant_consistency_eager_linalg_solve_xpu_complex64", - "test_variant_consistency_eager_linalg_svd_xpu_complex64", - "test_variant_consistency_eager_linalg_svdvals_xpu_complex64", - "test_variant_consistency_eager_linalg_tensorinv_xpu_complex64", - "test_variant_consistency_eager_linalg_tensorsolve_xpu_complex64", - "test_variant_consistency_eager_logdet_xpu_complex64", - "test_variant_consistency_eager_lu_solve_xpu_complex64", - "test_variant_consistency_eager_lu_xpu_complex64", - "test_variant_consistency_eager_matmul_xpu_complex64", - "test_variant_consistency_eager_mm_xpu_complex64", - "test_variant_consistency_eager_mv_xpu_complex64", - "test_variant_consistency_eager_nn_functional_linear_xpu_complex64", - "test_variant_consistency_eager_norm_nuc_xpu_complex64", - "test_variant_consistency_eager_ormqr_xpu_complex64", - "test_variant_consistency_eager_pinverse_xpu_complex64", - "test_variant_consistency_eager_qr_xpu_complex64", - "test_variant_consistency_eager_svd_xpu_complex64", - "test_variant_consistency_eager_tensordot_xpu_complex64", - "test_variant_consistency_eager_triangular_solve_xpu_complex64", - # oneDNN issues - # RuntimeError: value cannot be converted to type float without overflow - # 
https://github.com/intel/torch-xpu-ops/issues/683 - "test_conj_view_addbmm_xpu_complex64", - "test_neg_conj_view_addbmm_xpu_complex128", - ### Error #0 in TestMathBitsXPU , RuntimeError: Double and complex datatype matmul is not supported in oneDNN - # https://github.com/intel/torch-xpu-ops/issues/254 - "test_conj_view___rmatmul___xpu_complex64", - "test_conj_view__refs_linalg_svd_xpu_complex64", - "test_conj_view_addmm_decomposed_xpu_complex64", - "test_conj_view_addmm_xpu_complex64", - "test_conj_view_addmv_xpu_complex64", - "test_conj_view_addr_xpu_complex64", - "test_conj_view_baddbmm_xpu_complex64", - "test_conj_view_bmm_xpu_complex64", - "test_conj_view_cholesky_inverse_xpu_complex64", - "test_conj_view_cholesky_solve_xpu_complex64", - "test_conj_view_cholesky_xpu_complex64", - "test_conj_view_corrcoef_xpu_complex64", - "test_conj_view_cov_xpu_complex64", - "test_conj_view_einsum_xpu_complex64", - "test_conj_view_geqrf_xpu_complex64", - "test_conj_view_inner_xpu_complex64", - "test_conj_view_linalg_cholesky_ex_xpu_complex64", - "test_conj_view_linalg_cholesky_xpu_complex64", - "test_conj_view_linalg_cond_xpu_complex64", - "test_conj_view_linalg_det_singular_xpu_complex64", - "test_conj_view_linalg_det_xpu_complex64", - "test_conj_view_linalg_eig_xpu_complex64", - "test_conj_view_linalg_eigh_xpu_complex64", - "test_conj_view_linalg_eigvals_xpu_complex64", - "test_conj_view_linalg_eigvalsh_xpu_complex64", - "test_conj_view_linalg_householder_product_xpu_complex64", - "test_conj_view_linalg_inv_ex_xpu_complex64", - "test_conj_view_linalg_inv_xpu_complex64", - "test_conj_view_linalg_ldl_factor_ex_xpu_complex64", - "test_conj_view_linalg_ldl_factor_xpu_complex64", - "test_conj_view_linalg_ldl_solve_xpu_complex64", - "test_conj_view_linalg_lstsq_grad_oriented_xpu_complex64", - "test_conj_view_linalg_lstsq_xpu_complex64", - "test_conj_view_linalg_lu_factor_xpu_complex64", - "test_conj_view_linalg_lu_solve_xpu_complex64", - 
"test_conj_view_linalg_matrix_norm_xpu_complex64", - "test_conj_view_linalg_matrix_power_xpu_complex64", - "test_conj_view_linalg_matrix_rank_hermitian_xpu_complex64", - "test_conj_view_linalg_matrix_rank_xpu_complex64", - "test_conj_view_linalg_multi_dot_xpu_complex64", - "test_conj_view_linalg_norm_subgradients_at_zero_xpu_complex64", - "test_conj_view_linalg_norm_xpu_complex64", - "test_conj_view_linalg_pinv_hermitian_xpu_complex64", - "test_conj_view_linalg_pinv_singular_xpu_complex64", - "test_conj_view_linalg_pinv_xpu_complex64", - "test_conj_view_linalg_qr_xpu_complex64", - "test_conj_view_linalg_slogdet_xpu_complex64", - "test_conj_view_linalg_solve_ex_xpu_complex64", - "test_conj_view_linalg_solve_triangular_xpu_complex64", - "test_conj_view_linalg_solve_xpu_complex64", - "test_conj_view_linalg_svd_xpu_complex64", - "test_conj_view_linalg_svdvals_xpu_complex64", - "test_conj_view_linalg_tensorinv_xpu_complex64", - "test_conj_view_linalg_tensorsolve_xpu_complex64", - "test_conj_view_logdet_xpu_complex64", - "test_conj_view_lu_solve_xpu_complex64", - "test_conj_view_lu_xpu_complex64", - "test_conj_view_matmul_xpu_complex64", - "test_conj_view_mm_xpu_complex64", - "test_conj_view_mv_xpu_complex64", - "test_conj_view_nn_functional_linear_xpu_complex64", - "test_conj_view_norm_nuc_xpu_complex64", - "test_conj_view_ormqr_xpu_complex64", - "test_conj_view_pinverse_xpu_complex64", - "test_conj_view_qr_xpu_complex64", - "test_conj_view_svd_xpu_complex64", - "test_conj_view_tensordot_xpu_complex64", - "test_conj_view_triangular_solve_xpu_complex64", - "test_neg_conj_view_addmm_decomposed_xpu_complex128", - "test_neg_conj_view_addmm_xpu_complex128", - "test_neg_conj_view_addmv_xpu_complex128", - "test_neg_conj_view_addr_xpu_complex128", - "test_neg_conj_view_baddbmm_xpu_complex128", - "test_neg_conj_view_bmm_xpu_complex128", - "test_neg_conj_view_cholesky_inverse_xpu_complex128", - "test_neg_conj_view_cholesky_solve_xpu_complex128", - 
"test_neg_conj_view_cholesky_xpu_complex128", - "test_neg_conj_view_corrcoef_xpu_complex128", - "test_neg_conj_view_cov_xpu_complex128", - "test_neg_conj_view_geqrf_xpu_complex128", - "test_neg_conj_view_inner_xpu_complex128", - "test_neg_conj_view_linalg_cholesky_ex_xpu_complex128", - "test_neg_conj_view_linalg_cholesky_xpu_complex128", - "test_neg_conj_view_linalg_cond_xpu_complex128", - "test_neg_conj_view_linalg_det_singular_xpu_complex128", - "test_neg_conj_view_linalg_eig_xpu_complex128", - "test_neg_conj_view_linalg_eigh_xpu_complex128", - "test_neg_conj_view_linalg_eigvals_xpu_complex128", - "test_neg_conj_view_linalg_eigvalsh_xpu_complex128", - "test_neg_conj_view_linalg_householder_product_xpu_complex128", - "test_neg_conj_view_linalg_inv_ex_xpu_complex128", - "test_neg_conj_view_linalg_inv_xpu_complex128", - "test_neg_conj_view_linalg_ldl_factor_ex_xpu_complex128", - "test_neg_conj_view_linalg_ldl_factor_xpu_complex128", - "test_neg_conj_view_linalg_ldl_solve_xpu_complex128", - "test_neg_conj_view_linalg_lstsq_grad_oriented_xpu_complex128", - "test_neg_conj_view_linalg_lstsq_xpu_complex128", - "test_neg_conj_view_linalg_lu_factor_xpu_complex128", - "test_neg_conj_view_linalg_lu_solve_xpu_complex128", - "test_neg_conj_view_linalg_matrix_rank_hermitian_xpu_complex128", - "test_neg_conj_view_linalg_matrix_rank_xpu_complex128", - "test_neg_conj_view_linalg_multi_dot_xpu_complex128", - "test_neg_conj_view_linalg_pinv_hermitian_xpu_complex128", - "test_neg_conj_view_linalg_pinv_singular_xpu_complex128", - "test_neg_conj_view_linalg_pinv_xpu_complex128", - "test_neg_conj_view_linalg_qr_xpu_complex128", - "test_neg_conj_view_linalg_solve_ex_xpu_complex128", - "test_neg_conj_view_linalg_solve_triangular_xpu_complex128", - "test_neg_conj_view_linalg_solve_xpu_complex128", - "test_neg_conj_view_linalg_svdvals_xpu_complex128", - "test_neg_conj_view_linalg_tensorinv_xpu_complex128", - "test_neg_conj_view_linalg_tensorsolve_xpu_complex128", - 
"test_neg_conj_view_lu_solve_xpu_complex128", - "test_neg_conj_view_lu_xpu_complex128", - "test_neg_conj_view_mm_xpu_complex128", - "test_neg_conj_view_mv_xpu_complex128", - "test_neg_conj_view_nn_functional_linear_xpu_complex128", - "test_neg_conj_view_norm_nuc_xpu_complex128", - "test_neg_conj_view_ormqr_xpu_complex128", - "test_neg_conj_view_pinverse_xpu_complex128", - "test_neg_conj_view_qr_xpu_complex128", - "test_neg_conj_view_tensordot_xpu_complex128", - "test_neg_conj_view_triangular_solve_xpu_complex128", - "test_neg_view___rmatmul___xpu_float64", - "test_neg_view__refs_linalg_svd_xpu_float64", - "test_neg_view__refs_nn_functional_pdist_xpu_float64", - "test_neg_view_addbmm_xpu_float64", - "test_neg_view_addmm_decomposed_xpu_float64", - "test_neg_view_addmm_xpu_float64", - "test_neg_view_addmv_xpu_float64", - "test_neg_view_addr_xpu_float64", - "test_neg_view_baddbmm_xpu_float64", - "test_neg_view_bmm_xpu_float64", - "test_neg_view_cdist_xpu_float64", - "test_neg_view_cholesky_inverse_xpu_float64", - "test_neg_view_cholesky_solve_xpu_float64", - "test_neg_view_cholesky_xpu_float64", - "test_neg_view_corrcoef_xpu_float64", - "test_neg_view_cov_xpu_float64", - "test_neg_view_einsum_xpu_float64", - "test_neg_view_geqrf_xpu_float64", - "test_neg_view_inner_xpu_float64", - "test_neg_view_linalg_cholesky_ex_xpu_float64", - "test_neg_view_linalg_cholesky_xpu_float64", - "test_neg_view_linalg_cond_xpu_float64", - "test_neg_view_linalg_det_singular_xpu_float64", - "test_neg_view_linalg_det_xpu_float64", - "test_neg_view_linalg_eig_xpu_float64", - "test_neg_view_linalg_eigh_xpu_float64", - "test_neg_view_linalg_eigvals_xpu_float64", - "test_neg_view_linalg_eigvalsh_xpu_float64", - "test_neg_view_linalg_householder_product_xpu_float64", - "test_neg_view_linalg_inv_ex_xpu_float64", - "test_neg_view_linalg_inv_xpu_float64", - "test_neg_view_linalg_ldl_factor_ex_xpu_float64", - "test_neg_view_linalg_ldl_factor_xpu_float64", - 
"test_neg_view_linalg_ldl_solve_xpu_float64", - "test_neg_view_linalg_lstsq_grad_oriented_xpu_float64", - "test_neg_view_linalg_lstsq_xpu_float64", - "test_neg_view_linalg_lu_factor_xpu_float64", - "test_neg_view_linalg_lu_solve_xpu_float64", - "test_neg_view_linalg_matrix_norm_xpu_float64", - "test_neg_view_linalg_matrix_power_xpu_float64", - "test_neg_view_linalg_matrix_rank_hermitian_xpu_float64", - "test_neg_view_linalg_matrix_rank_xpu_float64", - "test_neg_view_linalg_multi_dot_xpu_float64", - "test_neg_view_linalg_norm_subgradients_at_zero_xpu_float64", - "test_neg_view_linalg_norm_xpu_float64", - "test_neg_view_linalg_pinv_hermitian_xpu_float64", - "test_neg_view_linalg_pinv_singular_xpu_float64", - "test_neg_view_linalg_pinv_xpu_float64", - "test_neg_view_linalg_qr_xpu_float64", - "test_neg_view_linalg_slogdet_xpu_float64", - "test_neg_view_linalg_solve_ex_xpu_float64", - "test_neg_view_linalg_solve_triangular_xpu_float64", - "test_neg_view_linalg_solve_xpu_float64", - "test_neg_view_linalg_svd_xpu_float64", - "test_neg_view_linalg_svdvals_xpu_float64", - "test_neg_view_linalg_tensorinv_xpu_float64", - "test_neg_view_linalg_tensorsolve_xpu_float64", - "test_neg_view_logdet_xpu_float64", - "test_neg_view_lu_solve_xpu_float64", - "test_neg_view_lu_xpu_float64", - "test_neg_view_matmul_xpu_float64", - "test_neg_view_mm_xpu_float64", - "test_neg_view_mv_xpu_float64", - "test_neg_view_nn_functional_bilinear_xpu_float64", - "test_neg_view_nn_functional_linear_xpu_float64", - "test_neg_view_nn_functional_multi_head_attention_forward_xpu_float64", - "test_neg_view_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_neg_view_norm_nuc_xpu_float64", - "test_neg_view_ormqr_xpu_float64", - "test_neg_view_pca_lowrank_xpu_float64", - "test_neg_view_pinverse_xpu_float64", - "test_neg_view_qr_xpu_float64", - "test_neg_view_svd_lowrank_xpu_float64", - "test_neg_view_svd_xpu_float64", - "test_neg_view_tensordot_xpu_float64", - 
"test_neg_view_triangular_solve_xpu_float64", - "test_noncontiguous_samples_pca_lowrank_xpu_complex64", - "test_noncontiguous_samples_svd_lowrank_xpu_complex64", - "test_variant_consistency_eager_pca_lowrank_xpu_complex64", - "test_variant_consistency_eager_svd_lowrank_xpu_complex64", - "test_conj_view_pca_lowrank_xpu_complex64", - "test_conj_view_svd_lowrank_xpu_complex64", - "test_neg_conj_view_pca_lowrank_xpu_complex128", - "test_neg_conj_view_svd_lowrank_xpu_complex128", - # oneDNN issues - ### Error #1 in TestMathBitsXPU , RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive - # https://github.com/intel/torch-xpu-ops/issues/253 - "test_conj_view_nn_functional_conv_transpose2d_xpu_complex64", - "test_conj_view_nn_functional_conv_transpose3d_xpu_complex64", - "test_neg_view_nn_functional_conv_transpose2d_xpu_float64", - "test_neg_view_nn_functional_conv_transpose3d_xpu_float64", - # implemented aten::histogram to align MPS operators coverage, CUDA doesn't support - # but test_dtypes infrastructure leverage CUDA supported datatypes - "test_dtypes_histogram_xpu", - # Unexpected success, CUDA got XFAIL because CUDA does not have historgramadd supported - "test_errors_histogramdd_xpu", - # 2025 bundle std::pow complex result is different on host and device - "test_python_ref__refs_square_xpu_complex64", - "test_python_ref_torch_fallback__refs_square_xpu_complex64", - "test_python_ref_torch_fallback__refs_exp_xpu_complex128", - # Failed on rolling driver, passed on preci - "test_python_ref__refs_div_trunc_rounding_xpu_float64", - "test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64", - "test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64", - # TODO: passed from source code building version, investigate - "test_python_ref__refs_log2_xpu_complex128", - # The following dtypes did not work in backward but are listed by the OpInfo: {torch.bfloat16}. 
- "test_dtypes_fft_fft2_xpu", - "test_dtypes_fft_fft_xpu", - "test_dtypes_fft_fftn_xpu", - "test_dtypes_fft_hfft2_xpu", - "test_dtypes_fft_hfft_xpu", - "test_dtypes_fft_hfftn_xpu", - "test_dtypes_fft_ifft2_xpu", - "test_dtypes_fft_ifft_xpu", - "test_dtypes_fft_ifftn_xpu", - "test_dtypes_fft_ihfft2_xpu", - "test_dtypes_fft_ihfft_xpu", - "test_dtypes_fft_ihfftn_xpu", - "test_dtypes_fft_irfft2_xpu", - "test_dtypes_fft_irfft_xpu", - "test_dtypes_fft_irfftn_xpu", - "test_dtypes_fft_rfft2_xpu", - "test_dtypes_fft_rfft_xpu", - "test_dtypes_fft_rfftn_xpu", - ), - "test_binary_ufuncs_xpu.py": ( - "test_fmod_remainder_by_zero_integral_xpu_int64", # zero division is an undefined behavior: different handles on different backends - "test_div_rounding_numpy_xpu_float16", # Calculation error. XPU implementation uses opmath type. - # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available. - "_jiterator_", - # nextafter: Numeric error due to `std::nextafter` difference between CPU (GCC) and XPU (SYCL) - # https://github.com/intel/torch-xpu-ops/issues/623 - # AssertionError: Scalars are not equal! - # Expected 9.183549615799121e-41 but got 0.0. - # Absolute difference: 9.183549615799121e-41 - # Relative difference: 1.0 - "test_nextafter_bfloat16_xpu_bfloat16", - ), - "test_scatter_gather_ops_xpu.py": ( - # AssertionError: Tensor-likes are not equal! - # Mismatched elements: 2 / 1870 (0.1%) - # Greatest absolute difference: 2.220446049250313e-16 at index (14, 9, 4) - # Greatest relative difference: 1.7039539596977877e-16 at index (15, 7, 6) - "test_scatter_reduce_mean_xpu_float64", - ), - "test_autograd_fallback_xpu.py": None, - "test_sort_and_select_xpu.py": ( - "test_sort_large_slice_xpu", - ), # Hard code CUDA, UT has already been rewritten to test/regressions/test_sort.py. - "nn/test_embedding_xpu.py": ( - # NotImplementedError: Could not run 'aten::_indices' with arguments from the 'SparseXPU' backend. 
- "test_embedding_bag_device_xpu_int32_int32_float16", - "test_embedding_bag_device_xpu_int32_int32_float32", - "test_embedding_bag_device_xpu_int32_int32_float64", - "test_embedding_bag_device_xpu_int32_int64_float16", - "test_embedding_bag_device_xpu_int32_int64_float32", - "test_embedding_bag_device_xpu_int32_int64_float64", - "test_embedding_bag_device_xpu_int64_int32_float16", - "test_embedding_bag_device_xpu_int64_int32_float32", - "test_embedding_bag_device_xpu_int64_int32_float64", - "test_embedding_bag_device_xpu_int64_int64_float16", - "test_embedding_bag_device_xpu_int64_int64_float32", - "test_embedding_bag_device_xpu_int64_int64_float64", - # CUDA implementation has no such functionality due to performance consideration. - # skipped by CUDA for performance - # @skipCUDAIf(True, "no out-of-bounds check on CUDA for perf.") - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float32_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float32_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float64_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float64_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float32_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float32_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float64_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float64_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float32_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float32_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float64_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float64_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float32_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float32_int64", 
- "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float64_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float64_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float32_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float32_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float64_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float64_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float32_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float32_int64", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float64_int32", - "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float64_int64", - ), - "test_transformers_xpu.py": ( - # https://github.com/intel/torch-xpu-ops/issues/761 - # AssertionError: False is not true - # CPU fallback failure. To support aten::transformer_encoder_layer_forward with proper priority. - "test_disable_fastpath_xpu", - # We have no mechanism to handle SDPBackend::ERROR so far. Will give a fully support when we support all SDPBackends. 
- "test_dispatch_fails_no_backend_xpu", - # NestedTensorXPU not supported - # Could not run 'aten::_to_copy' with arguments from the 'NestedTensorXPU' backend - "test_with_nested_tensor_input_xpu", - # oneDNN issues - # Double and complex datatype matmul is not supported in oneDNN - # https://github.com/intel/torch-xpu-ops/issues/253 - "test_sdp_math_gradcheck_contiguous_inputs_False_xpu", - "test_sdp_math_gradcheck_contiguous_inputs_True_xpu", - "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_False_xpu", - "test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_False_xpu", - "test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_False_xpu", - "test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_True_xpu", - "test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_False_xpu", - "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_0_xpu", - 
"test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_5_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu", - "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu", - # https://github.com/intel/torch-xpu-ops/issues/1432 - "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu", - 
"test_multiheadattention_fastpath_attn_mask_attn_mask_dim_3_key_padding_mask_dim_2_bool_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_False_d_model_12_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_True_d_model_12_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_False_d_model_12_xpu", - "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_True_use_autocast_True_d_model_12_xpu", - ), - "test_complex_xpu.py": None, - "test_modules_xpu.py": ( - # oneDNN issues - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_cpu_gpu_parity_nn_Bilinear_xpu_float64", - "test_cpu_gpu_parity_nn_GRUCell_xpu_float64", - "test_cpu_gpu_parity_nn_GRU_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_GRU_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_LSTMCell_xpu_float64", - "test_cpu_gpu_parity_nn_LSTM_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_LSTM_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_Linear_xpu_float64", - "test_cpu_gpu_parity_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_MultiheadAttention_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_RNNCell_xpu_float64", - "test_cpu_gpu_parity_nn_RNN_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_RNN_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerDecoderLayer_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_cpu_gpu_parity_nn_TransformerEncoder_train_mode_xpu_float64", - "test_cpu_gpu_parity_nn_Transformer_xpu_float64", - "test_forward_nn_Bilinear_xpu_float64", - "test_forward_nn_GRUCell_xpu_float64", - "test_forward_nn_GRU_eval_mode_xpu_float64", - 
"test_forward_nn_GRU_train_mode_xpu_float64", - "test_forward_nn_LSTMCell_xpu_float64", - "test_forward_nn_LSTM_eval_mode_xpu_float64", - "test_forward_nn_LSTM_train_mode_xpu_float64", - "test_forward_nn_Linear_xpu_float64", - "test_forward_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_forward_nn_MultiheadAttention_train_mode_xpu_float64", - "test_forward_nn_RNNCell_xpu_float64", - "test_forward_nn_RNN_eval_mode_xpu_float64", - "test_forward_nn_RNN_train_mode_xpu_float64", - "test_forward_nn_TransformerDecoderLayer_xpu_float64", - "test_forward_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_forward_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_forward_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_forward_nn_TransformerEncoder_train_mode_xpu_float64", - "test_forward_nn_Transformer_xpu_float64", - "test_grad_nn_Bilinear_xpu_float64", - "test_grad_nn_GRUCell_xpu_float64", - "test_grad_nn_GRU_eval_mode_xpu_float64", - "test_grad_nn_GRU_train_mode_xpu_float64", - "test_grad_nn_LSTMCell_xpu_float64", - "test_grad_nn_LSTM_eval_mode_xpu_float64", - "test_grad_nn_LSTM_train_mode_xpu_float64", - "test_grad_nn_Linear_xpu_float64", - "test_grad_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_grad_nn_MultiheadAttention_train_mode_xpu_float64", - "test_grad_nn_RNNCell_xpu_float64", - "test_grad_nn_RNN_eval_mode_xpu_float64", - "test_grad_nn_RNN_train_mode_xpu_float64", - "test_grad_nn_TransformerDecoderLayer_xpu_float64", - "test_grad_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_grad_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_grad_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_grad_nn_TransformerEncoder_train_mode_xpu_float64", - "test_grad_nn_Transformer_xpu_float64", - "test_gradgrad_nn_Bilinear_xpu_float64", - "test_gradgrad_nn_GRUCell_xpu_float64", - "test_gradgrad_nn_GRU_eval_mode_xpu_float64", - "test_gradgrad_nn_GRU_train_mode_xpu_float64", - "test_gradgrad_nn_LSTMCell_xpu_float64", - 
"test_gradgrad_nn_LSTM_eval_mode_xpu_float64", - "test_gradgrad_nn_LSTM_train_mode_xpu_float64", - "test_gradgrad_nn_Linear_xpu_float64", - "test_gradgrad_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_gradgrad_nn_MultiheadAttention_train_mode_xpu_float64", - "test_gradgrad_nn_RNNCell_xpu_float64", - "test_gradgrad_nn_RNN_eval_mode_xpu_float64", - "test_gradgrad_nn_RNN_train_mode_xpu_float64", - "test_gradgrad_nn_TransformerDecoderLayer_xpu_float64", - "test_gradgrad_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_gradgrad_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_gradgrad_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_gradgrad_nn_TransformerEncoder_train_mode_xpu_float64", - "test_gradgrad_nn_Transformer_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_Bilinear_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_GRUCell_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_LSTMCell_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_Linear_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_RNNCell_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_TransformerDecoderLayer_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_TransformerEncoderLayer_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_TransformerEncoder_xpu_float64", - "test_if_train_and_eval_modes_differ_nn_Transformer_xpu_float64", - "test_memory_format_nn_GRUCell_xpu_float64", - "test_memory_format_nn_GRU_eval_mode_xpu_float64", - "test_memory_format_nn_GRU_train_mode_xpu_float64", - "test_memory_format_nn_LSTMCell_xpu_float64", - "test_memory_format_nn_LSTM_eval_mode_xpu_float64", - "test_memory_format_nn_LSTM_train_mode_xpu_float64", - "test_memory_format_nn_RNNCell_xpu_float64", - "test_memory_format_nn_RNN_eval_mode_xpu_float64", - "test_memory_format_nn_RNN_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_Bilinear_xpu_float64", - "test_multiple_device_transfer_nn_GRUCell_xpu_float64", - 
"test_multiple_device_transfer_nn_GRU_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_GRU_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_LSTMCell_xpu_float64", - "test_multiple_device_transfer_nn_LSTM_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_LSTM_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_Linear_xpu_float64", - "test_multiple_device_transfer_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_MultiheadAttention_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_RNNCell_xpu_float64", - "test_multiple_device_transfer_nn_RNN_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_RNN_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerDecoderLayer_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_multiple_device_transfer_nn_TransformerEncoder_train_mode_xpu_float64", - "test_multiple_device_transfer_nn_Transformer_xpu_float64", - "test_non_contiguous_tensors_nn_Bilinear_xpu_float64", - "test_non_contiguous_tensors_nn_GRUCell_xpu_float64", - "test_non_contiguous_tensors_nn_GRU_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_GRU_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_LSTMCell_xpu_float64", - "test_non_contiguous_tensors_nn_LSTM_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_LSTM_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_Linear_xpu_float64", - "test_non_contiguous_tensors_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_MultiheadAttention_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_RNNCell_xpu_float64", - "test_non_contiguous_tensors_nn_RNN_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_RNN_train_mode_xpu_float64", - 
"test_non_contiguous_tensors_nn_TransformerDecoderLayer_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_non_contiguous_tensors_nn_TransformerEncoder_train_mode_xpu_float64", - "test_non_contiguous_tensors_nn_Transformer_xpu_float64", - "test_save_load_nn_Bilinear_xpu_float64", - "test_save_load_nn_GRUCell_xpu_float64", - "test_save_load_nn_GRU_eval_mode_xpu_float64", - "test_save_load_nn_GRU_train_mode_xpu_float64", - "test_save_load_nn_LSTMCell_xpu_float64", - "test_save_load_nn_LSTM_eval_mode_xpu_float64", - "test_save_load_nn_LSTM_train_mode_xpu_float64", - "test_save_load_nn_Linear_xpu_float64", - "test_save_load_nn_MultiheadAttention_eval_mode_xpu_float64", - "test_save_load_nn_MultiheadAttention_train_mode_xpu_float64", - "test_save_load_nn_RNNCell_xpu_float64", - "test_save_load_nn_RNN_eval_mode_xpu_float64", - "test_save_load_nn_RNN_train_mode_xpu_float64", - "test_save_load_nn_TransformerDecoderLayer_xpu_float64", - "test_save_load_nn_TransformerEncoderLayer_eval_mode_xpu_float64", - "test_save_load_nn_TransformerEncoderLayer_train_mode_xpu_float64", - "test_save_load_nn_TransformerEncoder_eval_mode_xpu_float64", - "test_save_load_nn_TransformerEncoder_train_mode_xpu_float64", - "test_save_load_nn_Transformer_xpu_float64", - # Unexpected success: - "test_cpu_gpu_parity_nn_ConvTranspose1d_xpu_complex32", - "test_cpu_gpu_parity_nn_ConvTranspose2d_xpu_complex32", - # CPU fallback fails - # RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. 
- # AssertionError: False is not true - "test_to_nn_BatchNorm1d_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_BatchNorm1d_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_BatchNorm2d_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_BatchNorm2d_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_BatchNorm3d_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_BatchNorm3d_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Bilinear_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Conv1d_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Conv2d_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Conv3d_swap_True_set_grad_True_xpu_float32", - "test_to_nn_ConvTranspose1d_swap_True_set_grad_True_xpu_float32", - "test_to_nn_ConvTranspose2d_swap_True_set_grad_True_xpu_float32", - "test_to_nn_ConvTranspose3d_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Embedding_swap_True_set_grad_True_xpu_float32", - "test_to_nn_GRUCell_swap_True_set_grad_True_xpu_float32", - "test_to_nn_GRU_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_GRU_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_GroupNorm_swap_True_set_grad_True_xpu_float32", - "test_to_nn_LSTMCell_swap_True_set_grad_True_xpu_float32", - "test_to_nn_LSTM_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_LSTM_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_LayerNorm_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Linear_swap_True_set_grad_True_xpu_float32", - "test_to_nn_MultiheadAttention_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_MultiheadAttention_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_PReLU_swap_True_set_grad_True_xpu_float32", - "test_to_nn_RMSNorm_swap_True_set_grad_True_xpu_float32", - "test_to_nn_RNNCell_swap_True_set_grad_True_xpu_float32", - "test_to_nn_RNN_eval_mode_swap_True_set_grad_True_xpu_float32", - 
"test_to_nn_RNN_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_TransformerDecoderLayer_swap_True_set_grad_True_xpu_float32", - "test_to_nn_TransformerEncoderLayer_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_TransformerEncoderLayer_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_TransformerEncoder_eval_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_TransformerEncoder_train_mode_swap_True_set_grad_True_xpu_float32", - "test_to_nn_Transformer_swap_True_set_grad_True_xpu_float32", - # Unexpected succuss - "test_memory_format_nn_Conv2d_xpu_float64", - "test_memory_format_nn_ConvTranspose2d_xpu_float64", - "test_memory_format_nn_LazyConv2d_xpu_float64", - "test_memory_format_nn_LazyConvTranspose2d_xpu_float64", - ), - "test_nn_xpu.py": ( - # AttributeError: module 'torch.xpu' has no attribute 'FloatTensor' - "test_type", - # rnn fallback to cpu - "test_cudnn_weight_format", - # oneDNN issues - # AssertionError: MultiheadAttention does not support NestedTensor outside of its fast path. 
The fast path was not hit because some Tensor argument's device is neither one of cpu, cuda or privateuseone - "test_TransformerEncoderLayer_empty_xpu", - "test_transformerencoderlayer_xpu_float16", - "test_transformerencoderlayer_xpu_float32", - # oneDNN issues - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_TransformerDecoderLayer_empty_xpu", - "test_TransformerDecoder_empty_xpu", - "test_TransformerEncoder_empty_xpu", - "test_Transformer_empty_xpu", - "test_affine_grid", - "test_affine_grid_3d", - "test_RNN_cpu_vs_cudnn_no_dropout", - "test_RNN_cpu_vs_cudnn_with_dropout", - "test_GRU_grad_and_gradgrad_xpu_float64", - "test_LSTM_grad_and_gradgrad_xpu_float64", - "test_lstmcell_backward_only_one_output_grad_xpu_float64", - "test_module_to_empty_xpu_float64", - "test_RNN_change_dropout", - "test_RNN_dropout", - "test_rnn_fused_xpu_float64", - "test_rnn_retain_variables_xpu_float64", - "test_transformerencoderlayer_xpu_float64", - "test_variable_sequence_xpu_float64", - # Unexpected success: CUDA only test case, launch grid_y == 2**16 (larger than CUDA maximum y-dimension limit 65535) and expect fail. - # SYCL don't have this limitation and hence can pass. - "test_upsamplingNearest2d_launch_fail_xpu", - # Could not run 'aten::_thnn_fused_lstm_cell' with arguments from the 'CPU' backend. - "test_RNN_cudnn_weight_norm", - "test_partial_flat_weights", - "test_variable_sequence_xpu_float16", - "test_variable_sequence_xpu_float32", - # CPU fallback could not cover - # NotImplementedError: Could not run 'aten::_thnn_fused_gru_cell' with arguments from the 'CPU' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build pro... 
- "test_cudnn_weight_tying", - "test_RNN_input_size_zero", - "test_rnn_fused_xpu_float32", - "test_rnn_retain_variables_xpu_float16", - "test_rnn_retain_variables_xpu_float32", - # AssertionError: False is not true - "test_ctc_loss_cudnn_xpu", # want "xpu" in function name - "test_ctc_loss_cudnn_tensor", # want "xpu" in function name - # RuntimeError: reflection_pad2d_backward_xpu does not have a deterministic implementation, but you set 'torch.use_deterministic_algorithms(True)'. - "test_ReflectionPad2d_large_deterministic_xpu", - # Case updated in pytorch commit 97272e4 - "test_hardswish_grad_corner_xpu_bfloat16", - "test_hardswish_grad_corner_xpu_float16", - "test_hardswish_grad_corner_xpu_float32", - # x_cuda = x.clone().detach().to("cuda").requires_grad_(): Torch not compiled with CUDA enabled - "test_layer_norm_backwards_eps", - ), - "test_indexing_xpu.py": ( - # XPU implementation doesn't claimn FP8 now - # https://github.com/intel/torch-xpu-ops/issues/461 - "test_index_put_src_datatype_xpu_float8_e5m2", - "test_index_put_src_datatype_xpu_float8_e4m3fn", - ), - "nn/test_pooling_xpu.py": None, - "nn/test_dropout_xpu.py": None, - "test_dataloader_xpu.py": ( - # Skip for XPU didn't support - # https://github.com/intel/torch-xpu-ops/issues/613 - "test_nested_tensor_multiprocessing_context_forkserver_xpu", - "test_nested_tensor_multiprocessing_context_spawn_xpu", - # pinned memory issue - # https://github.com/intel/torch-xpu-ops/issues/296 - "test_custom_batch_pin", - "test_sequential_pin_memory", - "test_shuffle_pin_memory", - "test_pin_memory", - # failed in preci - # https://github.com/intel/torch-xpu-ops/issues/928 - "test_segfault", - ), - "test_tensor_creation_ops_xpu.py": ( - # CPU only (vs Numpy). CUDA skips these cases since non-deterministic results are outputed for inf and nan. - "test_float_to_int_conversion_finite_xpu_int8", - "test_float_to_int_conversion_finite_xpu_int16", - # Dispatch issue. It is a composite operator. 
But it is implemented by - # DispatchStub. XPU doesn't support DispatchStub. - "test_kaiser_window_xpu", - ), - "test_autocast_xpu.py": None, - "test_autograd_xpu.py": ( - # https://github.com/intel/torch-xpu-ops/issues/618 - # c10::NotImplementedError - "test_autograd_composite_implicit_and_dispatch_registration_xpu", - "test_autograd_multiple_dispatch_registrations_xpu", - # AttributeError: module 'torch.xpu' has no attribute - "test_profiler_emit_nvtx_xpu", - # Double and complex datatype matmul is not supported in oneDNN - "test_mv_grad_stride_0_xpu", - # module 'torch._C' has no attribute '_scatter' - "test_checkpointing_without_reentrant_dataparallel", - "test_dataparallel_saved_tensors_hooks", - # Runtime error after enabling PTI - # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200 - # https://github.com/intel/torch-xpu-ops/issues/731 - "test_profiler", - "test_record_function", - # Sometimes, will raise AssertionError: "Simulate error" does not match "grad can be implicitly created only for scalar outputs" - # https://github.com/intel/torch-xpu-ops/issues/1071 - "test_reentrant_parent_error_on_cpu_xpu", - ), - "test_reductions_xpu.py": ( - # Accumulate error due to different accumulation order. - "test_logcumsumexp_complex_xpu_complex64", - ), - "test_unary_ufuncs_xpu.py": ( - # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available. 
- "_jiterator_", - # For extreme value processing, Numpy and XPU results are inconsistent - # std operations get different behavior on std::complex operarands for extremal cases - "test_reference_numerics_extremal__refs_log_xpu_complex64", - "test_reference_numerics_extremal_log_xpu_complex64", - "test_reference_numerics_extremal__refs_tanh_xpu_complex128", - "test_reference_numerics_extremal__refs_tanh_xpu_complex64", - "test_reference_numerics_extremal_tanh_xpu_complex128", - "test_reference_numerics_extremal_tanh_xpu_complex64", - "test_reference_numerics_extremal__refs_acos_xpu_complex64", - "test_reference_numerics_extremal__refs_acosh_xpu_complex64", - "test_reference_numerics_extremal_acos_xpu_complex64", - "test_reference_numerics_extremal_acosh_xpu_complex64", - "test_reference_numerics_extremal__refs_asinh_xpu_complex64", - "test_reference_numerics_extremal_asinh_xpu_complex64", - "test_reference_numerics_extremal__refs_asin_xpu_complex64", - "test_reference_numerics_extremal_asin_xpu_complex64", - "test_reference_numerics_large__refs_acosh_xpu_complex64", - "test_reference_numerics_large_acosh_xpu_complex64", - "test_reference_numerics_extremal__refs_log10_xpu_complex64", - "test_reference_numerics_extremal__refs_log1p_xpu_complex64", - "test_reference_numerics_extremal_log10_xpu_complex64", - "test_reference_numerics_extremal_log1p_xpu_complex64", - "test_reference_numerics_extremal__refs_tan_xpu_complex128", - "test_reference_numerics_extremal__refs_tan_xpu_complex64", - "test_reference_numerics_extremal_tan_xpu_complex128", - "test_reference_numerics_extremal_tan_xpu_complex64", - "test_reference_numerics_large__refs_tan_xpu_complex32", - "test_reference_numerics_large_tan_xpu_complex32", - "test_reference_numerics_large__refs_asinh_xpu_complex128", - "test_reference_numerics_large__refs_asinh_xpu_complex64", - "test_reference_numerics_large__refs_asinh_xpu_complex32", - "test_reference_numerics_large_asinh_xpu_complex128", - 
"test_reference_numerics_large_asinh_xpu_complex64", - "test_reference_numerics_large_asinh_xpu_complex32", - "test_reference_numerics_normal_exp_xpu_complex128", - # AssertionError: Tensor-likes are not close! - # exceeded maximum allowed difference - # Greatest absolute difference: 6.266784475883469e-05 at index (463, 204) (up to 1e-05 allowed) - # Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed) - "test_reference_numerics_normal__refs_asinh_xpu_complex64", - "test_reference_numerics_normal_asinh_xpu_complex64", - "test_batch_vs_slicing__refs_sigmoid_xpu_complex128", - # Unexpected success: CUDA uses thrust::sqrt and has accuracy issue. XPU use std::sqrt and has no issue. - "test_reference_numerics_large_rsqrt_xpu_complex32", - # Numeric difference - # https://github.com/intel/torch-xpu-ops/issues/544 - # Expected 0.00497517 but got 0.00497520063072443. - # Absolute difference: 3.063072442997111e-08 (up to 0.0 allowed) - # Relative difference: 6.156719153309558e-06 (up to 1e-06 allowed) - "test_log1p_complex_xpu_complex64", - # Issue: https://github.com/intel/torch-xpu-ops/issues/622 - # Mismatched elements: 8 / 943593 (0.0%) - # Greatest absolute difference: inf at index (9, 860) (up to 0.001 allowed) - # Greatest relative difference: inf at index (9, 860) (up to 0.0012 allowed) - "test_reference_numerics_normal_polygamma_polygamma_n_1_xpu_float16", - "test_reference_numerics_normal_polygamma_polygamma_n_2_xpu_float16", - "test_reference_numerics_normal_polygamma_polygamma_n_3_xpu_float16", - "test_reference_numerics_normal_polygamma_polygamma_n_4_xpu_float16", - # CUDA XFAIL - "test_reference_numerics_large__refs_rsqrt_xpu_complex32", - # 2025 bundle std::pow complex result is different on host and device - "test_exp_xpu_complex64", - "test_reference_numerics_extremal__refs_exp2_xpu_complex64", - "test_reference_numerics_extremal__refs_exp_xpu_complex64", - "test_reference_numerics_extremal_exp2_xpu_complex64", - 
"test_reference_numerics_extremal_exp_xpu_complex64", - "test_reference_numerics_large__refs_exp_xpu_complex32", - "test_reference_numerics_large_exp_xpu_complex32", - ), - "test_masked_xpu.py": ( - # Summary: Sparse CSR for XPU is not supported - # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend. - # https://github.com/intel/torch-xpu-ops/issues/357 - "test_mask_layout_sparse_coo_masked_amax_xpu_bfloat16", - "test_mask_layout_sparse_coo_masked_amax_xpu_float16", - "test_mask_layout_sparse_coo_masked_amax_xpu_float32", - "test_mask_layout_sparse_coo_masked_amax_xpu_float64", - "test_mask_layout_sparse_coo_masked_amin_xpu_bfloat16", - "test_mask_layout_sparse_coo_masked_amin_xpu_float16", - "test_mask_layout_sparse_coo_masked_amin_xpu_float32", - "test_mask_layout_sparse_coo_masked_amin_xpu_float64", - "test_mask_layout_sparse_coo_masked_prod_xpu_bfloat16", - "test_mask_layout_sparse_coo_masked_prod_xpu_bool", - "test_mask_layout_sparse_coo_masked_prod_xpu_complex128", - "test_mask_layout_sparse_coo_masked_prod_xpu_complex64", - "test_mask_layout_sparse_coo_masked_prod_xpu_float16", - "test_mask_layout_sparse_coo_masked_prod_xpu_float32", - "test_mask_layout_sparse_coo_masked_prod_xpu_float64", - "test_mask_layout_sparse_coo_masked_prod_xpu_int16", - "test_mask_layout_sparse_coo_masked_prod_xpu_int32", - "test_mask_layout_sparse_coo_masked_prod_xpu_int64", - "test_mask_layout_sparse_coo_masked_prod_xpu_int8", - "test_mask_layout_sparse_coo_masked_prod_xpu_uint8", - # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend. 
- "test_mask_layout_sparse_coo_masked_sum_xpu_bfloat16", - "test_mask_layout_sparse_coo_masked_sum_xpu_bool", - "test_mask_layout_sparse_coo_masked_sum_xpu_complex128", - "test_mask_layout_sparse_coo_masked_sum_xpu_complex64", - "test_mask_layout_sparse_coo_masked_sum_xpu_float16", - "test_mask_layout_sparse_coo_masked_sum_xpu_float32", - "test_mask_layout_sparse_coo_masked_sum_xpu_float64", - "test_mask_layout_sparse_coo_masked_sum_xpu_int16", - "test_mask_layout_sparse_coo_masked_sum_xpu_int32", - "test_mask_layout_sparse_coo_masked_sum_xpu_int64", - "test_mask_layout_sparse_coo_masked_sum_xpu_int8", - "test_mask_layout_sparse_coo_masked_sum_xpu_uint8", - ), - "test_view_ops_xpu.py": ( - # Need quantization support, NotImplementedError: Could not run 'aten::_empty_affine_quantized' with arguments from the 'QuantizedXPU' backend. - "test_flatten_xpu", - "test_ravel_xpu", - ), - "test_shape_ops_xpu.py": ( - # Need quantization support. - # https://github.com/intel/torch-xpu-ops/issues/275 - # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend. - "test_flip_xpu_float32", - ), - "test_content_store_xpu.py": None, - "test_native_functions_xpu.py": None, - "nn/test_init_xpu.py": None, - "test_namedtensor_xpu.py": None, - "nn/test_lazy_modules_xpu.py": None, - "test_linalg_xpu.py": ( - # Summary: - # All linear algebra related ops are not supported for XPU. 
- # _convert_weight_to_int4pack not support - "_int4_mm_m_", - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_tensordot_out_kernel_errors_with_autograd_xpu_complex64", - "test_tensordot_out_kernel_errors_with_autograd_xpu_float32", - "test_1_sized_with_0_strided_xpu_float64", - "test_addbmm_xpu_complex128", - "test_addbmm_xpu_complex64", - "test_addbmm_xpu_float64", - "test_addmm_gelu_xpu_float64", - "test_addmm_relu_xpu_float64", - "test_addmm_sizes_xpu_float64", - "test_addmm_xpu_complex128", - "test_addmm_xpu_complex64", - "test_addmm_xpu_float64", - "test_addmv_rowmajor_colmajor_incx_incy_lda_xpu_float64", - "test_addmv_xpu_complex128", - "test_addmv_xpu_complex64", - "test_addmv_xpu_float64", - "test_baddbmm_xpu_complex128", - "test_baddbmm_xpu_complex64", - "test_baddbmm_xpu_float64", - "test_bmm_xpu_complex128", - "test_bmm_xpu_complex64", - "test_bmm_xpu_float64", - "test_blas_alpha_beta_empty_xpu_float64", - "test_cholesky_errors_and_warnings_xpu_complex128", - "test_cholesky_errors_and_warnings_xpu_complex64", - "test_cholesky_errors_and_warnings_xpu_float64", - "test_cholesky_ex_xpu_complex128", - "test_cholesky_ex_xpu_complex64", - "test_cholesky_ex_xpu_float64", - "test_cholesky_inverse_xpu_complex128", - "test_cholesky_inverse_xpu_complex64", - "test_cholesky_inverse_xpu_float64", - "test_cholesky_solve_backward_xpu_float64", - "test_cholesky_solve_batched_many_batches_xpu_complex128", - "test_cholesky_solve_batched_many_batches_xpu_complex64", - "test_cholesky_solve_batched_many_batches_xpu_float64", - "test_cholesky_solve_batched_xpu_complex128", - "test_cholesky_solve_batched_xpu_complex64", - "test_cholesky_solve_batched_xpu_float64", - "test_cholesky_solve_xpu_complex128", - "test_cholesky_solve_xpu_complex64", - "test_cholesky_solve_xpu_float64", - "test_cholesky_xpu_complex128", - "test_cholesky_xpu_complex64", - "test_cholesky_xpu_float64", - "test_corner_cases_of_cublasltmatmul_xpu_complex128", - 
"test_corner_cases_of_cublasltmatmul_xpu_complex64", - "test_corner_cases_of_cublasltmatmul_xpu_float64", - "test_det_logdet_slogdet_batched_xpu_float64", - "test_det_logdet_slogdet_xpu_float64", - "test_eig_check_magma_xpu_float32", - "test_einsum_random_xpu_complex128", - "test_einsum_random_xpu_float64", - "test_einsum_sublist_format_xpu_complex128", - "test_einsum_sublist_format_xpu_float64", - "test_einsum_xpu_complex128", - "test_einsum_xpu_float64", - "test_inner_xpu_complex64", - "test_invariance_error_spectral_decompositions_xpu_complex128", - "test_inverse_many_batches_xpu_complex128", - "test_inverse_many_batches_xpu_complex64", - "test_inverse_many_batches_xpu_float64", - "test_inverse_xpu_complex128", - "test_inverse_xpu_complex64", - "test_inverse_xpu_float64", - "test_ldl_factor_xpu_complex128", - "test_ldl_factor_xpu_complex64", - "test_ldl_factor_xpu_float64", - "test_ldl_solve_xpu_complex128", - "test_ldl_solve_xpu_complex64", - "test_ldl_solve_xpu_float64", - "test_linalg_lstsq_batch_broadcasting_xpu_complex128", - "test_linalg_lstsq_batch_broadcasting_xpu_complex64", - "test_linalg_lstsq_batch_broadcasting_xpu_float64", - "test_linalg_lstsq_xpu_complex128", - "test_linalg_lstsq_xpu_complex64", - "test_linalg_lstsq_xpu_float64", - "test_linalg_lu_family_xpu_complex128", - "test_linalg_lu_family_xpu_complex64", - "test_linalg_lu_family_xpu_float64", - "test_linalg_lu_solve_xpu_complex128", - "test_linalg_lu_solve_xpu_complex64", - "test_linalg_lu_solve_xpu_float64", - "test_linalg_solve_triangular_broadcasting_xpu_complex128", - "test_linalg_solve_triangular_broadcasting_xpu_complex64", - "test_linalg_solve_triangular_broadcasting_xpu_float64", - "test_linalg_solve_triangular_large_xpu_complex128", - "test_linalg_solve_triangular_large_xpu_complex64", - "test_linalg_solve_triangular_large_xpu_float64", - "test_linalg_solve_triangular_xpu_complex128", - "test_linalg_solve_triangular_xpu_complex64", - "test_linalg_solve_triangular_xpu_float64", - 
"test_lobpcg_basic_xpu_float64", - "test_lobpcg_ortho_xpu_float64", - "test_lu_solve_batched_broadcasting_xpu_complex128", - "test_lu_solve_batched_broadcasting_xpu_complex64", - "test_lu_solve_batched_broadcasting_xpu_float64", - "test_lu_solve_batched_many_batches_xpu_complex128", - "test_lu_solve_batched_many_batches_xpu_complex64", - "test_lu_solve_batched_many_batches_xpu_float64", - "test_lu_solve_batched_xpu_complex128", - "test_lu_solve_batched_xpu_complex64", - "test_lu_solve_batched_xpu_float64", - "test_lu_solve_large_matrices_xpu_complex128", - "test_lu_solve_large_matrices_xpu_complex64", - "test_lu_solve_large_matrices_xpu_float64", - "test_lu_solve_xpu_complex128", - "test_lu_solve_xpu_complex64", - "test_lu_solve_xpu_float64", - "test_matmul_out_kernel_errors_with_autograd_xpu_complex64", - "test_matmul_small_brute_force_1d_Nd_xpu_complex64", - "test_matmul_small_brute_force_2d_Nd_xpu_complex64", - "test_matmul_small_brute_force_3d_Nd_xpu_complex64", - "test_matrix_power_negative_xpu_complex128", - "test_matrix_power_negative_xpu_float64", - "test_matrix_power_non_negative_xpu_complex128", - "test_matrix_power_non_negative_xpu_float64", - "test_matrix_rank_atol_rtol_xpu_float64", - "test_matrix_rank_xpu_complex128", - "test_matrix_rank_xpu_complex64", - "test_matrix_rank_xpu_float64", - "test_mm_bmm_non_memory_dense_xpu", - "test_mm_conjtranspose_xpu", - "test_mm_xpu_complex128", - "test_mm_xpu_complex64", - "test_mm_xpu_float64", - "test_multi_dot_xpu_complex128", - "test_multi_dot_xpu_float64", - "test_old_cholesky_batched_many_batches_xpu_float64", - "test_old_cholesky_batched_upper_xpu_complex128", - "test_old_cholesky_batched_upper_xpu_complex64", - "test_old_cholesky_batched_upper_xpu_float64", - "test_old_cholesky_batched_xpu_complex128", - "test_old_cholesky_batched_xpu_complex64", - "test_old_cholesky_batched_xpu_float64", - "test_old_cholesky_xpu_complex128", - "test_old_cholesky_xpu_complex64", - "test_old_cholesky_xpu_float64", - 
"test_ormqr_xpu_complex128", - "test_ormqr_xpu_complex64", - "test_ormqr_xpu_float64", - "test_pca_lowrank_xpu", - "test_pinv_errors_and_warnings_xpu_complex128", - "test_pinv_errors_and_warnings_xpu_complex64", - "test_pinv_errors_and_warnings_xpu_float64", - "test_pinv_xpu_complex128", - "test_pinv_xpu_complex64", - "test_pinv_xpu_float64", - "test_pinverse_xpu_complex128", - "test_pinverse_xpu_complex64", - "test_pinverse_xpu_float64", - "test_slogdet_xpu_complex128", - "test_slogdet_xpu_complex64", - "test_slogdet_xpu_float64", - "test_solve_batched_broadcasting_xpu_complex128", - "test_solve_batched_broadcasting_xpu_complex64", - "test_solve_batched_broadcasting_xpu_float64", - "test_solve_xpu_complex128", - "test_solve_xpu_complex64", - "test_solve_xpu_float64", - "test_strided_mm_bmm_xpu_float64", - "test_svd_lowrank_xpu_complex128", - "test_svd_lowrank_xpu_float64", - "test_svd_xpu_complex128", - "test_svd_xpu_complex64", - "test_svd_xpu_float64", - "test_triangular_solve_batched_broadcasting_xpu_complex128", - "test_triangular_solve_batched_broadcasting_xpu_complex64", - "test_triangular_solve_batched_broadcasting_xpu_float64", - "test_triangular_solve_batched_many_batches_xpu_complex128", - "test_triangular_solve_batched_many_batches_xpu_complex64", - "test_triangular_solve_batched_many_batches_xpu_float64", - "test_triangular_solve_batched_xpu_complex128", - "test_triangular_solve_batched_xpu_complex64", - "test_triangular_solve_batched_xpu_float64", - "test_triangular_solve_xpu_complex128", - "test_triangular_solve_xpu_complex64", - "test_triangular_solve_xpu_float64", - # https://github.com/intel/torch-xpu-ops/issues/821 - # addmm.out, addmv.out, linalg_lstsq, vdot&dot, _int_mm lack XPU support and fallback to CPU - "test_addmm_sizes_xpu_complex128", - "test_addmm_sizes_xpu_complex64", - "test_blas_alpha_beta_empty_xpu_complex128", - "test_blas_alpha_beta_empty_xpu_complex64", - "test_linalg_lstsq_input_checks_xpu_complex128", - 
"test_linalg_lstsq_input_checks_xpu_complex64", - "test_linalg_lstsq_input_checks_xpu_float32", - "test_linalg_lstsq_input_checks_xpu_float64", - "test_dot_invalid_args_xpu", - "test_vdot_invalid_args_xpu", - "test__int_mm_errors_xpu", - # https://github.com/intel/torch-xpu-ops/issues/821 - # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200 - "test_norm_fused_type_promotion_xpu_bfloat16", - # AssertionError: True is not false - "test_norm_fused_type_promotion_xpu_float16", - # https://github.com/intel/torch-xpu-ops/issues/814 - # xpu does not have '_cuda_tunableop_is_enabled' API - "_tunableop_", - "test_matmul_small_brute_force_tunableop_xpu_float16", - "test_matmul_small_brute_force_tunableop_xpu_float32", - "test_matmul_small_brute_force_tunableop_xpu_float64", - "test_matmul_offline_tunableop_xpu_float16", - # XPU does not support tunable. - "test_bmm_tunableop_rocm_xpu_float32", - "test_numeric_check_leak_tunableop_rocm_xpu_float32", - "test_dump_results_on_exit_tunableop_xpu_float32", - "test_rotating_buffer_tunableop_xpu_float32", - "test_gemm_bias_tunableop_xpu_bfloat16", - "test_scaled_gemm_tunableop_xpu_float8_e4m3fnuz", - "test_scaled_gemm_tunableop_xpu_float8_e5m2fnuz", - # CUDA bias cases added in latest PyTorch - # AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable' - "test_matmul_check_entries_tunableop_xpu_float16", - "test_minimum_tuning_iteration_tunableop_xpu_float16", - "test_validator_tunableop_rocm_xpu_float32", - "test_addmm_relu_tunableop_rocm_xpu_float32", - "test_addmm_relu_tunableop_rocm_xpu_float64", - "_tuning_tunableop_", - # TODO: align input data type for convert_weight_to_int4pack with CUDA - # XPU expects weight to be kInt, while CUDA expects kByte - "test__int4_mm_m_32_k_32_n_48_xpu", - "test__int4_mm_m_32_k_32_n_64_xpu", - "test__int4_mm_m_32_k_64_n_48_xpu", - "test__int4_mm_m_32_k_64_n_64_xpu", - "test__int4_mm_m_64_k_32_n_48_xpu", - "test__int4_mm_m_64_k_32_n_64_xpu", - 
"test__int4_mm_m_64_k_64_n_48_xpu", - "test__int4_mm_m_64_k_64_n_64_xpu", - "test_compile_int4_mm_m_32_k_32_n_48_xpu", - "test_compile_int4_mm_m_32_k_32_n_64_xpu", - "test_compile_int4_mm_m_32_k_64_n_48_xpu", - "test_compile_int4_mm_m_32_k_64_n_64_xpu", - "test_compile_int4_mm_m_64_k_32_n_48_xpu", - "test_compile_int4_mm_m_64_k_32_n_64_xpu", - "test_compile_int4_mm_m_64_k_64_n_48_xpu", - "test_compile_int4_mm_m_64_k_64_n_64_xpu", - "test__int4_mm_m_32_k_32_n_48_xpu", - "test__int4_mm_m_32_k_32_n_64_xpu", - "test__int4_mm_m_32_k_64_n_48_xpu", - "test__int4_mm_m_32_k_64_n_64_xpu", - "test__int4_mm_m_64_k_32_n_48_xpu", - "test__int4_mm_m_64_k_32_n_64_xpu", - "test__int4_mm_m_64_k_64_n_48_xpu", - "test__int4_mm_m_64_k_64_n_64_xpu", - "test_compile_int4_mm_m_32_k_32_n_48_xpu", - "test_compile_int4_mm_m_32_k_32_n_64_xpu", - "test_compile_int4_mm_m_32_k_64_n_48_xpu", - "test_compile_int4_mm_m_32_k_64_n_64_xpu", - "test_compile_int4_mm_m_64_k_32_n_48_xpu", - "test_compile_int4_mm_m_64_k_32_n_64_xpu", - "test_compile_int4_mm_m_64_k_64_n_48_xpu", - "test_compile_int4_mm_m_64_k_64_n_64_xpu", - # float8 is not supported - "test_matmul_scaled_gemm_offline_tunableop_xpu_float8_e4m3fnuz", - "test_matmul_scaled_gemm_offline_tunableop_xpu_float8_e5m2fnuz", - "test_scaled_gemm_offline_tunableop_xpu_float8_e4m3fnuz", - "test_scaled_gemm_offline_tunableop_xpu_float8_e5m2fnuz", - # case need to port for xpu - "test_gemm_bias_offline_tunableop_xpu_bfloat16", - ), - "test_ops_fwd_gradients_xpu.py": ( - # All of the followings are oneDNN issues - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_fn_fwgrad_bwgrad___rmatmul___xpu_complex128", - "test_fn_fwgrad_bwgrad___rmatmul___xpu_float64", - "test_fn_fwgrad_bwgrad_addbmm_xpu_float64", - "test_fn_fwgrad_bwgrad_addmm_decomposed_xpu_complex128", - "test_fn_fwgrad_bwgrad_addmm_decomposed_xpu_float64", - "test_fn_fwgrad_bwgrad_addmm_xpu_complex128", - "test_fn_fwgrad_bwgrad_addmm_xpu_float64", - 
"test_fn_fwgrad_bwgrad_addmv_xpu_complex128", - "test_fn_fwgrad_bwgrad_addmv_xpu_float64", - "test_fn_fwgrad_bwgrad_addr_xpu_complex128", - "test_fn_fwgrad_bwgrad_addr_xpu_float64", - "test_fn_fwgrad_bwgrad_baddbmm_xpu_complex128", - "test_fn_fwgrad_bwgrad_baddbmm_xpu_float64", - "test_fn_fwgrad_bwgrad_bmm_xpu_complex128", - "test_fn_fwgrad_bwgrad_bmm_xpu_float64", - "test_fn_fwgrad_bwgrad_cholesky_inverse_xpu_complex128", - "test_fn_fwgrad_bwgrad_cholesky_inverse_xpu_float64", - "test_fn_fwgrad_bwgrad_cholesky_solve_xpu_complex128", - "test_fn_fwgrad_bwgrad_cholesky_solve_xpu_float64", - "test_fn_fwgrad_bwgrad_cholesky_xpu_complex128", - "test_fn_fwgrad_bwgrad_cholesky_xpu_float64", - "test_fn_fwgrad_bwgrad_corrcoef_xpu_complex128", - "test_fn_fwgrad_bwgrad_corrcoef_xpu_float64", - "test_fn_fwgrad_bwgrad_einsum_xpu_complex128", - "test_fn_fwgrad_bwgrad_einsum_xpu_float64", - "test_fn_fwgrad_bwgrad_inner_xpu_complex128", - "test_fn_fwgrad_bwgrad_inner_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_cholesky_ex_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_cholesky_ex_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_cholesky_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_cholesky_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_cond_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_cond_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_det_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_det_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_eig_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_eig_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_eigh_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_eigh_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_eigvals_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_eigvals_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_eigvalsh_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_eigvalsh_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_householder_product_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_householder_product_xpu_float64", - 
"test_fn_fwgrad_bwgrad_linalg_inv_ex_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_inv_ex_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_inv_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_inv_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_lstsq_grad_oriented_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_lstsq_grad_oriented_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_lu_factor_ex_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_lu_factor_ex_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_lu_factor_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_lu_factor_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_lu_solve_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_lu_solve_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_lu_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_lu_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_matrix_norm_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_matrix_norm_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_matrix_power_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_matrix_power_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_multi_dot_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_multi_dot_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_norm_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_pinv_hermitian_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_pinv_hermitian_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_pinv_singular_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_pinv_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_pinv_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_qr_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_qr_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_slogdet_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_slogdet_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_solve_ex_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_solve_ex_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_solve_triangular_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_solve_triangular_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_solve_xpu_complex128", 
- "test_fn_fwgrad_bwgrad_linalg_solve_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_svd_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_svd_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_svdvals_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_svdvals_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_tensorinv_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_tensorinv_xpu_float64", - "test_fn_fwgrad_bwgrad_linalg_tensorsolve_xpu_complex128", - "test_fn_fwgrad_bwgrad_linalg_tensorsolve_xpu_float64", - "test_fn_fwgrad_bwgrad_logdet_xpu_complex128", - "test_fn_fwgrad_bwgrad_logdet_xpu_float64", - "test_fn_fwgrad_bwgrad_lu_solve_xpu_complex128", - "test_fn_fwgrad_bwgrad_lu_solve_xpu_float64", - "test_fn_fwgrad_bwgrad_lu_xpu_complex128", - "test_fn_fwgrad_bwgrad_lu_xpu_float64", - "test_fn_fwgrad_bwgrad_matmul_xpu_complex128", - "test_fn_fwgrad_bwgrad_matmul_xpu_float64", - "test_fn_fwgrad_bwgrad_mm_xpu_complex128", - "test_fn_fwgrad_bwgrad_mm_xpu_float64", - "test_fn_fwgrad_bwgrad_mv_xpu_complex128", - "test_fn_fwgrad_bwgrad_mv_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_bilinear_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_linear_xpu_complex128", - "test_fn_fwgrad_bwgrad_nn_functional_linear_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_multi_head_attention_forward_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_fn_fwgrad_bwgrad_norm_nuc_xpu_complex128", - "test_fn_fwgrad_bwgrad_norm_nuc_xpu_float64", - "test_fn_fwgrad_bwgrad_ormqr_xpu_complex128", - "test_fn_fwgrad_bwgrad_ormqr_xpu_float64", - "test_fn_fwgrad_bwgrad_pca_lowrank_xpu_float64", - "test_fn_fwgrad_bwgrad_pinverse_xpu_complex128", - "test_fn_fwgrad_bwgrad_pinverse_xpu_float64", - "test_fn_fwgrad_bwgrad_qr_xpu_complex128", - "test_fn_fwgrad_bwgrad_qr_xpu_float64", - "test_fn_fwgrad_bwgrad_svd_lowrank_xpu_float64", - "test_fn_fwgrad_bwgrad_svd_xpu_complex128", - "test_fn_fwgrad_bwgrad_svd_xpu_float64", - 
"test_fn_fwgrad_bwgrad_tensordot_xpu_complex128", - "test_fn_fwgrad_bwgrad_tensordot_xpu_float64", - "test_forward_mode_AD___rmatmul___xpu_complex128", - "test_forward_mode_AD___rmatmul___xpu_float64", - "test_forward_mode_AD_addbmm_xpu_float64", - "test_forward_mode_AD_addmm_decomposed_xpu_complex128", - "test_forward_mode_AD_addmm_decomposed_xpu_float64", - "test_forward_mode_AD_addmm_xpu_complex128", - "test_forward_mode_AD_addmm_xpu_float64", - "test_forward_mode_AD_addmv_xpu_complex128", - "test_forward_mode_AD_addmv_xpu_float64", - "test_forward_mode_AD_baddbmm_xpu_complex128", - "test_forward_mode_AD_baddbmm_xpu_float64", - "test_forward_mode_AD_bmm_xpu_complex128", - "test_forward_mode_AD_bmm_xpu_float64", - "test_forward_mode_AD_cholesky_inverse_xpu_complex128", - "test_forward_mode_AD_cholesky_inverse_xpu_float64", - "test_forward_mode_AD_cholesky_solve_xpu_complex128", - "test_forward_mode_AD_cholesky_solve_xpu_float64", - "test_forward_mode_AD_cholesky_xpu_complex128", - "test_forward_mode_AD_cholesky_xpu_float64", - "test_forward_mode_AD_corrcoef_xpu_complex128", - "test_forward_mode_AD_corrcoef_xpu_float64", - "test_forward_mode_AD_dot_xpu_complex128", - "test_forward_mode_AD_dot_xpu_float64", - "test_forward_mode_AD_einsum_xpu_complex128", - "test_forward_mode_AD_einsum_xpu_float64", - "test_forward_mode_AD_inner_xpu_complex128", - "test_forward_mode_AD_inner_xpu_float64", - "test_forward_mode_AD_linalg_cholesky_ex_xpu_complex128", - "test_forward_mode_AD_linalg_cholesky_ex_xpu_float64", - "test_forward_mode_AD_linalg_cholesky_xpu_complex128", - "test_forward_mode_AD_linalg_cholesky_xpu_float64", - "test_forward_mode_AD_linalg_cond_xpu_complex128", - "test_forward_mode_AD_linalg_cond_xpu_float64", - "test_forward_mode_AD_linalg_det_singular_xpu_complex128", - "test_forward_mode_AD_linalg_det_singular_xpu_float64", - "test_forward_mode_AD_linalg_det_xpu_complex128", - "test_forward_mode_AD_linalg_det_xpu_float64", - 
"test_forward_mode_AD_linalg_eig_xpu_complex128", - "test_forward_mode_AD_linalg_eig_xpu_float64", - "test_forward_mode_AD_linalg_eigh_xpu_complex128", - "test_forward_mode_AD_linalg_eigh_xpu_float64", - "test_forward_mode_AD_linalg_eigvals_xpu_complex128", - "test_forward_mode_AD_linalg_eigvals_xpu_float64", - "test_forward_mode_AD_linalg_eigvalsh_xpu_complex128", - "test_forward_mode_AD_linalg_eigvalsh_xpu_float64", - "test_forward_mode_AD_linalg_householder_product_xpu_complex128", - "test_forward_mode_AD_linalg_householder_product_xpu_float64", - "test_forward_mode_AD_linalg_inv_ex_xpu_complex128", - "test_forward_mode_AD_linalg_inv_ex_xpu_float64", - "test_forward_mode_AD_linalg_inv_xpu_complex128", - "test_forward_mode_AD_linalg_inv_xpu_float64", - "test_forward_mode_AD_linalg_lstsq_grad_oriented_xpu_complex128", - "test_forward_mode_AD_linalg_lstsq_grad_oriented_xpu_float64", - "test_forward_mode_AD_linalg_lu_factor_ex_xpu_complex128", - "test_forward_mode_AD_linalg_lu_factor_ex_xpu_float64", - "test_forward_mode_AD_linalg_lu_factor_xpu_complex128", - "test_forward_mode_AD_linalg_lu_factor_xpu_float64", - "test_forward_mode_AD_linalg_lu_solve_xpu_complex128", - "test_forward_mode_AD_linalg_lu_solve_xpu_float64", - "test_forward_mode_AD_linalg_lu_xpu_complex128", - "test_forward_mode_AD_linalg_lu_xpu_float64", - "test_forward_mode_AD_linalg_matrix_norm_xpu_complex128", - "test_forward_mode_AD_linalg_matrix_norm_xpu_float64", - "test_forward_mode_AD_linalg_matrix_power_xpu_complex128", - "test_forward_mode_AD_linalg_matrix_power_xpu_float64", - "test_forward_mode_AD_linalg_multi_dot_xpu_complex128", - "test_forward_mode_AD_linalg_multi_dot_xpu_float64", - "test_forward_mode_AD_linalg_norm_xpu_float64", - "test_forward_mode_AD_linalg_pinv_hermitian_xpu_complex128", - "test_forward_mode_AD_linalg_pinv_hermitian_xpu_float64", - "test_forward_mode_AD_linalg_pinv_singular_xpu_complex128", - "test_forward_mode_AD_linalg_pinv_singular_xpu_float64", - 
"test_forward_mode_AD_linalg_pinv_xpu_complex128", - "test_forward_mode_AD_linalg_pinv_xpu_float64", - "test_forward_mode_AD_linalg_qr_xpu_complex128", - "test_forward_mode_AD_linalg_qr_xpu_float64", - "test_forward_mode_AD_linalg_slogdet_xpu_complex128", - "test_forward_mode_AD_linalg_slogdet_xpu_float64", - "test_forward_mode_AD_linalg_solve_ex_xpu_complex128", - "test_forward_mode_AD_linalg_solve_ex_xpu_float64", - "test_forward_mode_AD_linalg_solve_triangular_xpu_complex128", - "test_forward_mode_AD_linalg_solve_triangular_xpu_float64", - "test_forward_mode_AD_linalg_solve_xpu_complex128", - "test_forward_mode_AD_linalg_solve_xpu_float64", - "test_forward_mode_AD_linalg_svd_xpu_complex128", - "test_forward_mode_AD_linalg_svd_xpu_float64", - "test_forward_mode_AD_linalg_svdvals_xpu_complex128", - "test_forward_mode_AD_linalg_svdvals_xpu_float64", - "test_forward_mode_AD_linalg_tensorinv_xpu_complex128", - "test_forward_mode_AD_linalg_tensorinv_xpu_float64", - "test_forward_mode_AD_linalg_tensorsolve_xpu_complex128", - "test_forward_mode_AD_linalg_tensorsolve_xpu_float64", - "test_forward_mode_AD_logdet_xpu_complex128", - "test_forward_mode_AD_logdet_xpu_float64", - "test_forward_mode_AD_lu_solve_xpu_complex128", - "test_forward_mode_AD_lu_solve_xpu_float64", - "test_forward_mode_AD_lu_xpu_complex128", - "test_forward_mode_AD_lu_xpu_float64", - "test_forward_mode_AD_matmul_xpu_complex128", - "test_forward_mode_AD_matmul_xpu_float64", - "test_forward_mode_AD_mm_xpu_complex128", - "test_forward_mode_AD_mm_xpu_float64", - "test_forward_mode_AD_mv_xpu_complex128", - "test_forward_mode_AD_mv_xpu_float64", - "test_forward_mode_AD_nn_functional_bilinear_xpu_float64", - "test_forward_mode_AD_nn_functional_linear_xpu_complex128", - "test_forward_mode_AD_nn_functional_linear_xpu_float64", - "test_forward_mode_AD_norm_nuc_xpu_complex128", - "test_forward_mode_AD_norm_nuc_xpu_float64", - "test_forward_mode_AD_pca_lowrank_xpu_float64", - 
"test_forward_mode_AD_pinverse_xpu_complex128", - "test_forward_mode_AD_pinverse_xpu_float64", - "test_forward_mode_AD_qr_xpu_complex128", - "test_forward_mode_AD_qr_xpu_float64", - "test_forward_mode_AD_svd_lowrank_xpu_float64", - "test_forward_mode_AD_svd_xpu_complex128", - "test_forward_mode_AD_svd_xpu_float64", - "test_forward_mode_AD_tensordot_xpu_complex128", - "test_forward_mode_AD_tensordot_xpu_float64", - "test_forward_mode_AD_triangular_solve_xpu_complex128", - "test_forward_mode_AD_triangular_solve_xpu_float64", - "test_inplace_forward_mode_AD_addbmm_xpu_float64", - "test_inplace_forward_mode_AD_addmm_decomposed_xpu_complex128", - "test_inplace_forward_mode_AD_addmm_decomposed_xpu_float64", - "test_inplace_forward_mode_AD_addmm_xpu_complex128", - "test_inplace_forward_mode_AD_addmm_xpu_float64", - "test_inplace_forward_mode_AD_addmv_xpu_complex128", - "test_inplace_forward_mode_AD_addmv_xpu_float64", - "test_inplace_forward_mode_AD_baddbmm_xpu_complex128", - "test_inplace_forward_mode_AD_baddbmm_xpu_float64", - "test_forward_mode_AD_pca_lowrank_xpu_complex128", - "test_forward_mode_AD_svd_lowrank_xpu_complex128", - # RuntimeError: value cannot be converted to type float without overflow - "test_fn_fwgrad_bwgrad_addbmm_xpu_complex128", - "test_forward_mode_AD_addbmm_xpu_complex128", - "test_inplace_forward_mode_AD_addbmm_xpu_complex128", - # torch.autograd.gradcheck.GradcheckError: While considering the real part of complex inputs only, Jacobian computed with forward mode mismatch for output 0 with respect to input 0, - "test_fn_fwgrad_bwgrad_linalg_norm_xpu_complex128", - # torch.autograd.gradcheck.GradcheckError: While considering the imaginary part of complex inputs only, Jacobian computed with forward mode mismatch for output 0 with respect to input 0, - "test_forward_mode_AD_linalg_norm_xpu_complex128", - # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive - 
"test_fn_fwgrad_bwgrad_nn_functional_conv_transpose2d_xpu_complex128", - "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose2d_xpu_float64", - "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose3d_xpu_complex128", - "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose3d_xpu_float64", - "test_forward_mode_AD_nn_functional_conv_transpose2d_xpu_complex128", - "test_forward_mode_AD_nn_functional_conv_transpose2d_xpu_float64", - "test_forward_mode_AD_nn_functional_conv_transpose3d_xpu_complex128", - "test_forward_mode_AD_nn_functional_conv_transpose3d_xpu_float64", - # issue: https://github.com/intel/torch-xpu-ops/issues/809 - "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128", - "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64", - ), - # "test_matmul_cuda_xpu.py": ( - # # AssertionError: "Bias is not supported when out_dtype is set to Float32" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend. - # "test_float32_output_errors_with_bias_xpu", - # # RuntimeError: "eye" not implemented for 'Float8_e4m3fn' - # "test_float8_basics_xpu", - # # AssertionError: "For row-wise scaling, scale_a must be size 1024 but got 1 and scale_b must be size 2048 but got 2" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend. - # "test_float8_error_messages_xpu", - # # NotImplementedError: Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend. 
- # "test_float8_bias_relu_edgecase_xpu", - # "test_float8_bias_xpu", - # "test_float8_rowwise_scaling_sanity_use_fast_accum_False_xpu", - # "test_float8_rowwise_scaling_sanity_use_fast_accum_True_xpu", - # "test_float8_scale_fast_accum_xpu", - # "test_float8_scale_xpu", - # "test_non_divisible_leading_dim_bias_False_xpu", - # "test_non_divisible_leading_dim_bias_True_xpu", - # "test_scaled_mm_change_stride_bfloat16_xpu", - # "test_scaled_mm_change_stride_float16_xpu", - # "test_scaled_mm_change_stride_float32_xpu", - # "test_scaled_mm_vs_emulated_bfloat16_xpu", - # "test_scaled_mm_vs_emulated_float16_xpu", - # "test_scaled_mm_vs_emulated_float32_xpu", - # "test_scaled_mm_vs_emulated_row_wise_bfloat16_xpu", - # # AssertionError: Torch not compiled with CUDA enabled - # "test_zero_dim_tensorwise_which_dim_zero", - # # New added case in 2.7 - # "test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_bfloat16", - # "test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_float16", - # "test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_bfloat16", - # "test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_float16", - # "test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_bfloat16", - # "test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_float16", - # "test_cublas_and_lt_reduced_precision_fp16_accumulate_xpu", - # ), - "test_maskedtensor_xpu.py": None, - "quantization/core/test_quantized_op_xpu.py": ( - # AssertionError: Torch not compiled with CUDA enabled - "test_qgelu_xpu", - "test_qrelu_xpu", - # AttributeError: 'TestQuantizedOpsXPU' object has no attribute 'test_qsoftmax' - "test_qsoftmax_qnnpack_xpu", - ), - "quantization/core/test_workflow_ops_xpu.py": ( - # AssertionError: Not equal to tolerance rtol=1e-06, atol=1e-06 - # Max absolute difference among violations: 1.731507e+10 - # Max relative difference among violations: 0.01587304 - # ACTUAL: array([-1.108163e+12, 1.108163e+12], 
dtype=float32) - # DESIRED: array([-1.108163e+12, 1.090847e+12], dtype=float32) - "test_fq_module_per_tensor_xpu", - ), - "quantization/core/test_workflow_module_xpu.py": None, - "quantization/core/test_quantized_tensor_xpu.py": ( - # Summary: Quantized OPs are not supported for XPU - # NotImplementedError: Could not run 'aten::dequantize.self' with arguments from the 'QuantizedXPU' backend - "test_compare_per_channel_device_numerics_xpu", - # NotImplementedError: Could not run 'aten::dequantize.self' with arguments from the 'QuantizedXPU' backend. - "test_compare_per_tensor_device_numerics_xpu", - # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend. - "test_cuda_quantization_does_not_pin_memory_xpu", - # NotImplementedError: Could not run 'aten::_empty_per_channel_affine_quantized' with arguments from the 'QuantizedXPU' backend. - "test_per_channel_qtensor_creation_cuda_xpu", - # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend. - "test_per_channel_to_device_xpu", - # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend. - "test_per_tensor_to_device_xpu", - # NotImplementedError: Could not run 'aten::q_scale' with arguments from the 'QuantizedXPU' backend. - "test_qtensor_cuda_xpu", - # NotImplementedError: Could not run 'aten::_index_put_impl_' with arguments from the 'QuantizedXPU' backend. - "test_qtensor_index_put_cuda_xpu", - # NotImplementedError: Could not run 'aten::index_select' with arguments from the 'QuantizedXPU' backend. - "test_qtensor_index_select_cuda_xpu", - # NotImplementedError: Could not run 'aten::_empty_affine_quantized' with arguments from the 'QuantizedXPU' backend. 
- "test_qtensor_masked_fill_cuda_xpu", - ), - "nn/test_packed_sequence_xpu.py": ( - # test case porting issue - "test_to and not test_to_memory and not test_total", - ), - "test_ops_gradients_xpu.py": ( - # All are oneDNN issues - ### Error #0 in TestBwdGradientsXPU , totally 271 , RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_fn_grad_index_reduce_prod_xpu_float64", - "test_inplace_grad_index_reduce_prod_xpu_float64", - "test_fn_grad___rmatmul___xpu_complex128", - "test_fn_grad___rmatmul___xpu_float64", - "test_fn_grad_addbmm_xpu_float64", - "test_fn_grad_addmm_decomposed_xpu_complex128", - "test_fn_grad_addmm_decomposed_xpu_float64", - "test_fn_grad_addmm_xpu_complex128", - "test_fn_grad_addmm_xpu_float64", - "test_fn_grad_addmv_xpu_complex128", - "test_fn_grad_addmv_xpu_float64", - "test_fn_grad_addr_xpu_complex128", - "test_fn_grad_addr_xpu_float64", - "test_fn_grad_baddbmm_xpu_complex128", - "test_fn_grad_baddbmm_xpu_float64", - "test_fn_grad_bmm_xpu_complex128", - "test_fn_grad_bmm_xpu_float64", - "test_fn_grad_cdist_xpu_float64", - "test_fn_grad_cholesky_inverse_xpu_complex128", - "test_fn_grad_cholesky_inverse_xpu_float64", - "test_fn_grad_cholesky_solve_xpu_complex128", - "test_fn_grad_cholesky_solve_xpu_float64", - "test_fn_grad_cholesky_xpu_complex128", - "test_fn_grad_cholesky_xpu_float64", - "test_fn_grad_corrcoef_xpu_complex128", - "test_fn_grad_corrcoef_xpu_float64", - "test_fn_grad_einsum_xpu_complex128", - "test_fn_grad_einsum_xpu_float64", - "test_fn_grad_inner_xpu_complex128", - "test_fn_grad_inner_xpu_float64", - "test_fn_grad_linalg_cholesky_ex_xpu_complex128", - "test_fn_grad_linalg_cholesky_ex_xpu_float64", - "test_fn_grad_linalg_cholesky_xpu_complex128", - "test_fn_grad_linalg_cholesky_xpu_float64", - "test_fn_grad_linalg_cond_xpu_complex128", - "test_fn_grad_linalg_cond_xpu_float64", - "test_fn_grad_linalg_det_singular_xpu_complex128", - "test_fn_grad_linalg_det_singular_xpu_float64", - 
"test_fn_grad_linalg_det_xpu_complex128", - "test_fn_grad_linalg_det_xpu_float64", - "test_fn_grad_linalg_eig_xpu_complex128", - "test_fn_grad_linalg_eig_xpu_float64", - "test_fn_grad_linalg_eigh_xpu_complex128", - "test_fn_grad_linalg_eigh_xpu_float64", - "test_fn_grad_linalg_eigvals_xpu_complex128", - "test_fn_grad_linalg_eigvals_xpu_float64", - "test_fn_grad_linalg_eigvalsh_xpu_complex128", - "test_fn_grad_linalg_eigvalsh_xpu_float64", - "test_fn_grad_linalg_householder_product_xpu_complex128", - "test_fn_grad_linalg_householder_product_xpu_float64", - "test_fn_grad_linalg_inv_ex_xpu_complex128", - "test_fn_grad_linalg_inv_ex_xpu_float64", - "test_fn_grad_linalg_inv_xpu_complex128", - "test_fn_grad_linalg_inv_xpu_float64", - "test_fn_grad_linalg_lstsq_grad_oriented_xpu_complex128", - "test_fn_grad_linalg_lstsq_grad_oriented_xpu_float64", - "test_fn_grad_linalg_lu_factor_ex_xpu_complex128", - "test_fn_grad_linalg_lu_factor_ex_xpu_float64", - "test_fn_grad_linalg_lu_factor_xpu_complex128", - "test_fn_grad_linalg_lu_factor_xpu_float64", - "test_fn_grad_linalg_lu_solve_xpu_complex128", - "test_fn_grad_linalg_lu_solve_xpu_float64", - "test_fn_grad_linalg_lu_xpu_complex128", - "test_fn_grad_linalg_lu_xpu_float64", - "test_fn_grad_linalg_matrix_norm_xpu_complex128", - "test_fn_grad_linalg_matrix_norm_xpu_float64", - "test_fn_grad_linalg_matrix_power_xpu_complex128", - "test_fn_grad_linalg_matrix_power_xpu_float64", - "test_fn_grad_linalg_multi_dot_xpu_complex128", - "test_fn_grad_linalg_multi_dot_xpu_float64", - "test_fn_grad_linalg_norm_xpu_float64", - "test_fn_grad_linalg_pinv_hermitian_xpu_complex128", - "test_fn_grad_linalg_pinv_hermitian_xpu_float64", - "test_fn_grad_linalg_pinv_singular_xpu_complex128", - "test_fn_grad_linalg_pinv_singular_xpu_float64", - "test_fn_grad_linalg_pinv_xpu_complex128", - "test_fn_grad_linalg_pinv_xpu_float64", - "test_fn_grad_linalg_qr_xpu_complex128", - "test_fn_grad_linalg_qr_xpu_float64", - 
"test_fn_grad_linalg_slogdet_xpu_complex128", - "test_fn_grad_linalg_slogdet_xpu_float64", - "test_fn_grad_linalg_solve_ex_xpu_complex128", - "test_fn_grad_linalg_solve_ex_xpu_float64", - "test_fn_grad_linalg_solve_triangular_xpu_complex128", - "test_fn_grad_linalg_solve_triangular_xpu_float64", - "test_fn_grad_linalg_solve_xpu_complex128", - "test_fn_grad_linalg_solve_xpu_float64", - "test_fn_grad_linalg_svd_xpu_complex128", - "test_fn_grad_linalg_svd_xpu_float64", - "test_fn_grad_linalg_svdvals_xpu_complex128", - "test_fn_grad_linalg_svdvals_xpu_float64", - "test_fn_grad_linalg_tensorinv_xpu_complex128", - "test_fn_grad_linalg_tensorinv_xpu_float64", - "test_fn_grad_linalg_tensorsolve_xpu_complex128", - "test_fn_grad_linalg_tensorsolve_xpu_float64", - "test_fn_grad_logdet_xpu_complex128", - "test_fn_grad_logdet_xpu_float64", - "test_fn_grad_lu_solve_xpu_complex128", - "test_fn_grad_lu_solve_xpu_float64", - "test_fn_grad_lu_xpu_complex128", - "test_fn_grad_lu_xpu_float64", - "test_fn_grad_matmul_xpu_complex128", - "test_fn_grad_matmul_xpu_float64", - "test_fn_grad_mm_xpu_complex128", - "test_fn_grad_mm_xpu_float64", - "test_fn_grad_mv_xpu_complex128", - "test_fn_grad_mv_xpu_float64", - "test_fn_grad_nn_functional_bilinear_xpu_float64", - "test_fn_grad_nn_functional_linear_xpu_complex128", - "test_fn_grad_nn_functional_linear_xpu_float64", - "test_fn_grad_nn_functional_multi_head_attention_forward_xpu_float64", - "test_fn_grad_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_fn_grad_norm_nuc_xpu_complex128", - "test_fn_grad_norm_nuc_xpu_float64", - "test_fn_grad_ormqr_xpu_complex128", - "test_fn_grad_ormqr_xpu_float64", - "test_fn_grad_pca_lowrank_xpu_float64", - "test_fn_grad_pinverse_xpu_complex128", - "test_fn_grad_pinverse_xpu_float64", - "test_fn_grad_qr_xpu_complex128", - "test_fn_grad_qr_xpu_float64", - "test_fn_grad_svd_lowrank_xpu_float64", - "test_fn_grad_svd_xpu_complex128", - "test_fn_grad_svd_xpu_float64", - 
"test_fn_grad_tensordot_xpu_complex128", - "test_fn_grad_tensordot_xpu_float64", - "test_fn_grad_triangular_solve_xpu_complex128", - "test_fn_grad_triangular_solve_xpu_float64", - "test_fn_gradgrad___rmatmul___xpu_complex128", - "test_fn_gradgrad___rmatmul___xpu_float64", - "test_fn_gradgrad_addbmm_xpu_float64", - "test_fn_gradgrad_addmm_decomposed_xpu_complex128", - "test_fn_gradgrad_addmm_decomposed_xpu_float64", - "test_fn_gradgrad_addmm_xpu_complex128", - "test_fn_gradgrad_addmm_xpu_float64", - "test_fn_gradgrad_addmv_xpu_complex128", - "test_fn_gradgrad_addmv_xpu_float64", - "test_fn_gradgrad_addr_xpu_complex128", - "test_fn_gradgrad_addr_xpu_float64", - "test_fn_gradgrad_baddbmm_xpu_complex128", - "test_fn_gradgrad_baddbmm_xpu_float64", - "test_fn_gradgrad_bmm_xpu_complex128", - "test_fn_gradgrad_bmm_xpu_float64", - "test_fn_gradgrad_cholesky_inverse_xpu_complex128", - "test_fn_gradgrad_cholesky_inverse_xpu_float64", - "test_fn_gradgrad_cholesky_solve_xpu_complex128", - "test_fn_gradgrad_cholesky_solve_xpu_float64", - "test_fn_gradgrad_cholesky_xpu_complex128", - "test_fn_gradgrad_cholesky_xpu_float64", - "test_fn_gradgrad_corrcoef_xpu_complex128", - "test_fn_gradgrad_corrcoef_xpu_float64", - "test_fn_gradgrad_einsum_xpu_complex128", - "test_fn_gradgrad_einsum_xpu_float64", - "test_fn_gradgrad_inner_xpu_complex128", - "test_fn_gradgrad_inner_xpu_float64", - "test_fn_gradgrad_linalg_cholesky_ex_xpu_complex128", - "test_fn_gradgrad_linalg_cholesky_ex_xpu_float64", - "test_fn_gradgrad_linalg_cholesky_xpu_complex128", - "test_fn_gradgrad_linalg_cholesky_xpu_float64", - "test_fn_gradgrad_linalg_cond_xpu_complex128", - "test_fn_gradgrad_linalg_cond_xpu_float64", - "test_fn_gradgrad_linalg_det_xpu_complex128", - "test_fn_gradgrad_linalg_det_xpu_float64", - "test_fn_gradgrad_linalg_eig_xpu_complex128", - "test_fn_gradgrad_linalg_eig_xpu_float64", - "test_fn_gradgrad_linalg_eigh_xpu_complex128", - "test_fn_gradgrad_linalg_eigh_xpu_float64", - 
"test_fn_gradgrad_linalg_eigvals_xpu_complex128", - "test_fn_gradgrad_linalg_eigvals_xpu_float64", - "test_fn_gradgrad_linalg_eigvalsh_xpu_complex128", - "test_fn_gradgrad_linalg_eigvalsh_xpu_float64", - "test_fn_gradgrad_linalg_householder_product_xpu_complex128", - "test_fn_gradgrad_linalg_householder_product_xpu_float64", - "test_fn_gradgrad_linalg_inv_ex_xpu_complex128", - "test_fn_gradgrad_linalg_inv_ex_xpu_float64", - "test_fn_gradgrad_linalg_inv_xpu_complex128", - "test_fn_gradgrad_linalg_inv_xpu_float64", - "test_fn_gradgrad_linalg_lstsq_grad_oriented_xpu_complex128", - "test_fn_gradgrad_linalg_lstsq_grad_oriented_xpu_float64", - "test_fn_gradgrad_linalg_lu_factor_ex_xpu_complex128", - "test_fn_gradgrad_linalg_lu_factor_ex_xpu_float64", - "test_fn_gradgrad_linalg_lu_factor_xpu_complex128", - "test_fn_gradgrad_linalg_lu_factor_xpu_float64", - "test_fn_gradgrad_linalg_lu_solve_xpu_complex128", - "test_fn_gradgrad_linalg_lu_solve_xpu_float64", - "test_fn_gradgrad_linalg_lu_xpu_complex128", - "test_fn_gradgrad_linalg_lu_xpu_float64", - "test_fn_gradgrad_linalg_matrix_norm_xpu_complex128", - "test_fn_gradgrad_linalg_matrix_norm_xpu_float64", - "test_fn_gradgrad_linalg_matrix_power_xpu_complex128", - "test_fn_gradgrad_linalg_matrix_power_xpu_float64", - "test_fn_gradgrad_linalg_multi_dot_xpu_complex128", - "test_fn_gradgrad_linalg_multi_dot_xpu_float64", - "test_fn_gradgrad_linalg_pinv_hermitian_xpu_complex128", - "test_fn_gradgrad_linalg_pinv_hermitian_xpu_float64", - "test_fn_gradgrad_linalg_pinv_singular_xpu_float64", - "test_fn_gradgrad_linalg_pinv_xpu_complex128", - "test_fn_gradgrad_linalg_pinv_xpu_float64", - "test_fn_gradgrad_linalg_qr_xpu_complex128", - "test_fn_gradgrad_linalg_qr_xpu_float64", - "test_fn_gradgrad_linalg_slogdet_xpu_complex128", - "test_fn_gradgrad_linalg_slogdet_xpu_float64", - "test_fn_gradgrad_linalg_solve_ex_xpu_complex128", - "test_fn_gradgrad_linalg_solve_ex_xpu_float64", - "test_fn_gradgrad_linalg_solve_triangular_xpu_complex128", 
- "test_fn_gradgrad_linalg_solve_triangular_xpu_float64", - "test_fn_gradgrad_linalg_solve_xpu_complex128", - "test_fn_gradgrad_linalg_solve_xpu_float64", - "test_fn_gradgrad_linalg_svd_xpu_complex128", - "test_fn_gradgrad_linalg_svd_xpu_float64", - "test_fn_gradgrad_linalg_svdvals_xpu_complex128", - "test_fn_gradgrad_linalg_svdvals_xpu_float64", - "test_fn_gradgrad_linalg_tensorinv_xpu_complex128", - "test_fn_gradgrad_linalg_tensorinv_xpu_float64", - "test_fn_gradgrad_linalg_tensorsolve_xpu_complex128", - "test_fn_gradgrad_linalg_tensorsolve_xpu_float64", - "test_fn_gradgrad_logdet_xpu_complex128", - "test_fn_gradgrad_logdet_xpu_float64", - "test_fn_gradgrad_lu_solve_xpu_complex128", - "test_fn_gradgrad_lu_solve_xpu_float64", - "test_fn_gradgrad_lu_xpu_complex128", - "test_fn_gradgrad_lu_xpu_float64", - "test_fn_gradgrad_matmul_xpu_complex128", - "test_fn_gradgrad_matmul_xpu_float64", - "test_fn_gradgrad_mm_xpu_complex128", - "test_fn_gradgrad_mm_xpu_float64", - "test_fn_gradgrad_mv_xpu_complex128", - "test_fn_gradgrad_mv_xpu_float64", - "test_fn_gradgrad_nn_functional_bilinear_xpu_float64", - "test_fn_gradgrad_nn_functional_linear_xpu_complex128", - "test_fn_gradgrad_nn_functional_linear_xpu_float64", - "test_fn_gradgrad_nn_functional_multi_head_attention_forward_xpu_float64", - "test_fn_gradgrad_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_fn_gradgrad_norm_nuc_xpu_complex128", - "test_fn_gradgrad_norm_nuc_xpu_float64", - "test_fn_gradgrad_ormqr_xpu_complex128", - "test_fn_gradgrad_ormqr_xpu_float64", - "test_fn_gradgrad_pca_lowrank_xpu_float64", - "test_fn_gradgrad_pinverse_xpu_complex128", - "test_fn_gradgrad_pinverse_xpu_float64", - "test_fn_gradgrad_qr_xpu_complex128", - "test_fn_gradgrad_qr_xpu_float64", - "test_fn_gradgrad_svd_lowrank_xpu_float64", - "test_fn_gradgrad_svd_xpu_complex128", - "test_fn_gradgrad_svd_xpu_float64", - "test_fn_gradgrad_tensordot_xpu_complex128", - "test_fn_gradgrad_tensordot_xpu_float64", - 
"test_fn_gradgrad_triangular_solve_xpu_complex128", - "test_fn_gradgrad_triangular_solve_xpu_float64", - "test_inplace_grad_addbmm_xpu_float64", - "test_inplace_grad_addmm_decomposed_xpu_complex128", - "test_inplace_grad_addmm_decomposed_xpu_float64", - "test_inplace_grad_addmm_xpu_complex128", - "test_inplace_grad_addmm_xpu_float64", - "test_inplace_grad_addmv_xpu_complex128", - "test_inplace_grad_addmv_xpu_float64", - "test_inplace_grad_addr_xpu_complex128", - "test_inplace_grad_addr_xpu_float64", - "test_inplace_grad_baddbmm_xpu_complex128", - "test_inplace_grad_baddbmm_xpu_float64", - "test_inplace_gradgrad_addbmm_xpu_float64", - "test_inplace_gradgrad_addmm_decomposed_xpu_complex128", - "test_inplace_gradgrad_addmm_decomposed_xpu_float64", - "test_inplace_gradgrad_addmm_xpu_complex128", - "test_inplace_gradgrad_addmm_xpu_float64", - "test_inplace_gradgrad_addmv_xpu_complex128", - "test_inplace_gradgrad_addmv_xpu_float64", - "test_inplace_gradgrad_addr_xpu_complex128", - "test_inplace_gradgrad_addr_xpu_float64", - "test_inplace_gradgrad_baddbmm_xpu_complex128", - "test_inplace_gradgrad_baddbmm_xpu_float64", - "test_fn_grad_pca_lowrank_xpu_complex128", - "test_fn_grad_svd_lowrank_xpu_complex128", - "test_fn_gradgrad_pca_lowrank_xpu_complex128", - "test_fn_gradgrad_svd_lowrank_xpu_complex128", - "test_fn_grad_linalg_norm_xpu_complex128", - ### Error #1 in TestBwdGradientsXPU , totally 4 , RuntimeError: value cannot be converted to type float without overflow - "test_fn_grad_addbmm_xpu_complex128", - "test_fn_gradgrad_addbmm_xpu_complex128", - "test_inplace_grad_addbmm_xpu_complex128", - "test_inplace_gradgrad_addbmm_xpu_complex128", - ### Error #4 in TestBwdGradientsXPU , totally 8 , RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive - "test_fn_grad_nn_functional_conv_transpose2d_xpu_complex128", - "test_fn_grad_nn_functional_conv_transpose2d_xpu_float64", - 
"test_fn_grad_nn_functional_conv_transpose3d_xpu_complex128", - "test_fn_grad_nn_functional_conv_transpose3d_xpu_float64", - "test_fn_gradgrad_nn_functional_conv_transpose2d_xpu_complex128", - "test_fn_gradgrad_nn_functional_conv_transpose2d_xpu_float64", - "test_fn_gradgrad_nn_functional_conv_transpose3d_xpu_complex128", - "test_fn_gradgrad_nn_functional_conv_transpose3d_xpu_float64", - "test_fn_gradgrad_index_reduce_mean_xpu_float64", - "test_fn_gradgrad_index_reduce_prod_xpu_float64", - "test_inplace_gradgrad_index_reduce_mean_xpu_float64", - "test_inplace_gradgrad_index_reduce_prod_xpu_float64", - # issue: https://github.com/intel/torch-xpu-ops/issues/809 - "test_fn_gradgrad_nn_functional_conv3d_xpu_complex128", - "test_fn_gradgrad_nn_functional_conv3d_xpu_float64", - ), - "test_torch_xpu.py": ( - # 'torch.xpu' has no attribute ... - ### Error #1 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'FloatTensor' - "test_grad_scaling_state_dict_xpu", - ### Error #2 in TestTorchDeviceTypeXPU , totally 1 , AttributeError: 'torch.storage.TypedStorage' object has no attribute 'is_xpu' - ### Error #3 in TestTorchDeviceTypeXPU , totally 3 , AttributeError: module 'torch.xpu' has no attribute 'ByteStorage' - "test_storage_setitem_xpu_uint8", - "test_tensor_storage_type_xpu_uint8", - ### Error #4 in TestTorchDeviceTypeXPU , totally 4 , AttributeError: module 'torch.xpu' has no attribute 'FloatStorage' - "test_storage_setitem_xpu_float32", - "test_tensor_storage_type_xpu_float32", - ### Error #7 in TestTorchDeviceTypeXPU , totally 1 , TypeError: map2_ is only implemented on CPU tensors - "test_broadcast_fn_map2_xpu", - ### Error #8 in TestTorchDeviceTypeXPU , totally 1 , TypeError: map_ is only implemented on CPU tensors - "test_broadcast_fn_map_xpu", - ### Error #9 in TestTorchDeviceTypeXPU , totally 1 , RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_corrcoef_xpu_complex64", - ### Error #12 in 
TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'amp' - "test_grad_scaler_pass_itself_xpu", - "test_pickle_gradscaler_xpu", - ### Error #15 in TestTorchDeviceTypeXPU , totally 2 , AssertionError: Tensor-likes are not close! - "test_index_put_non_accumulate_deterministic_xpu", - ### Error #17 in TestTorchDeviceTypeXPU , totally 2 , AssertionError: False is not true - "test_sync_warning_xpu", - ### Error #19 in TestTorchDeviceTypeXPU , totally 1 , RuntimeError: _share_fd_: only available on CPU - "test_module_share_memory_xpu", - # 'torch.xpu' has no attribute ... - ### Error #30 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'BoolStorage' - "test_storage_setitem_xpu_bool", - "test_tensor_storage_type_xpu_bool", - ### Error #31 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'ComplexDoubleStorage' - "test_storage_setitem_xpu_complex128", - "test_tensor_storage_type_xpu_complex128", - ### Error #32 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'ComplexFloatStorage' - "test_storage_setitem_xpu_complex64", - "test_tensor_storage_type_xpu_complex64", - ### Error #33 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'DoubleStorage' - "test_storage_setitem_xpu_float64", - "test_tensor_storage_type_xpu_float64", - ### Error #34 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'ShortStorage' - "test_storage_setitem_xpu_int16", - "test_tensor_storage_type_xpu_int16", - ### Error #35 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'IntStorage' - "test_storage_setitem_xpu_int32", - "test_tensor_storage_type_xpu_int32", - ### Error #36 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'LongStorage' - "test_storage_setitem_xpu_int64", - 
"test_tensor_storage_type_xpu_int64", - ### Error #37 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'CharStorage' - "test_storage_setitem_xpu_int8", - "test_tensor_storage_type_xpu_int8", - ### Error #38 in TestTorchDeviceTypeXPU , totally 1 , AttributeError: module 'torch.xpu' has no attribute 'BFloat16Storage' - "test_tensor_storage_type_xpu_bfloat16", - ### Error #39 in TestTorchDeviceTypeXPU , totally 1 , AttributeError: module 'torch.xpu' has no attribute 'HalfStorage' - "test_tensor_storage_type_xpu_float16", - ### Module 'torch.xpu' has no attribute 'ByteStorage' - "test_tensor_storage_type_xpu_uint8", - # issue 302 , 8 - "test_print", - "test_storage_error", - "test_storage_error_no_attribute", - # issue 302, 6 - "test_storage_error", - "test_typed_storage_deprecation_warning", - "test_typed_storage_internal_no_warning", - # issue 302, 11 - "test_cuda_vitals_gpu_only_xpu", - # torch.utils.swap_tensors AssertionError: RuntimeError not raised - "test_swap_basic", - # internally uses index_put deterministic implementation - # dependent on "test_index_put_non_accumulate_deterministic" - "test_index_copy_deterministic", - # scatter_add needs handle XPU deterministic - # https://github.com/intel/torch-xpu-ops/issues/906 - "test_gather_backward_deterministic_path_xpu", - "test_scatter_add_one_dim_deterministic_xpu", - # Precision error - # Fail occasionally - # Mismatched elements: 1 / 60 (1.7%) - # Greatest absolute difference: 0.0625 at index (2, 1, 4) (up to 1e-05 allowed) - # Greatest relative difference: 0.001125335693359375 at index (2, 1, 4) (up to 0.001 allowed) - "test_index_reduce_reduce_mean_xpu_bfloat16", - "test_index_reduce_reduce_mean_xpu_float16", - "test_index_reduce_reduce_prod_xpu_float16", - ), - "nn/test_multihead_attention_xpu.py": ( - # known oneDNN issue - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_multihead_attention_dtype_batch_first_xpu_float64", - 
"test_multihead_attention_dtype_xpu_float64", - "test_multihead_attn_fast_path_query_and_bias_have_different_dtypes_xpu_float64", - "test_multihead_attn_fast_path_small_test_xpu_float64", - "test_multihead_attn_in_proj_bias_none_xpu_float64", - "test_multihead_attn_in_proj_weight_none_xpu_float64", - ), - "test_native_mha_xpu.py": ( - # NestedTensorXPU related OPs - # NotImplementedError: Could not run 'aten::_native_multi_head_attention' with arguments from the 'NestedTensorXPU' backend. - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - 
"test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - 
"test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float16", - 
"test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float32", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float16", - "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float32", - "test_transform_bias_rescale_qkv_nested_xpu_float32", - ), - "test_comparison_utils_xpu.py": None, - "test_segment_reductions_xpu.py": None, - "nn/test_pruning_xpu.py": None, - "test_foreach_xpu.py": ( - # RuntimeError: Tried to instantiate dummy base class CUDAGraph - "use_cuda_graph_True", - ), - "nn/test_convolution_xpu.py": ( - # Summary: all of them are oneDNN related issues - # XPU unsupport ops, skip. 
- # https://github.com/intel/torch-xpu-ops/issues/348 - "test_cudnn_convolution_relu_xpu_float16", - "test_cudnn_convolution_relu_xpu_float32", - "test_cudnn_convolution_add_relu_xpu_float16", - "test_cudnn_convolution_add_relu_xpu_float32", - # accuracy issue, TODO - "test_Conv2d_naive_groups_xpu_float16", - "test_Conv2d_groups_nobias", - # issue: https://github.com/intel/torch-xpu-ops/issues/809 - "test_thnn_conv_strided_padded_dilated", - ), - "test_dynamic_shapes_xpu.py": None, - "nn/test_load_state_dict_xpu.py": None, - "nn/test_module_hooks_xpu.py": ( - # TypeError: TestStateDictHooks.test_register_state_dict_post_hook() missing 1 required positional argument: 'private' - # https://github.com/intel/torch-xpu-ops/issues/658 - "test_register_state_dict_post_hook", - ), - "nn/test_parametrization_xpu.py": None, - "test_meta_xpu.py": ( - # https://github.com/intel/torch-xpu-ops/issues/774 - "_jiterator_", - # RuntimeError: Short is not supported in oneDNN! Need oneDNN's support, suggest to keep skip. - "test_dispatch_meta_outplace_nn_functional_linear_xpu_int16", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int16", - "test_meta_outplace_nn_functional_linear_xpu_int16", - # RuntimeError: Long is not supported in oneDNN! Need oneDNN's support, suggest to keep skip. 
- "test_dispatch_meta_outplace_nn_functional_linear_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int64", - "test_meta_outplace_nn_functional_linear_xpu_int64", - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_dispatch_meta_inplace_addbmm_xpu_complex", - "test_dispatch_meta_outplace_addbmm_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_complex", - "test_meta_inplace_addbmm_xpu_complex", - "test_meta_outplace_addbmm_xpu_complex", - "test_dispatch_meta_inplace_addbmm_xpu_float64", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_complex", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_float64", - "test_dispatch_meta_inplace_addmm_xpu_complex", - "test_dispatch_meta_inplace_addmm_xpu_float64", - "test_dispatch_meta_inplace_addmv_xpu_complex", - "test_dispatch_meta_inplace_addmv_xpu_float64", - "test_dispatch_meta_inplace_baddbmm_xpu_complex", - "test_dispatch_meta_inplace_baddbmm_xpu_float64", - "test_dispatch_meta_outplace___rmatmul___xpu_complex", - "test_dispatch_meta_outplace___rmatmul___xpu_float64", - "test_dispatch_meta_outplace_addbmm_xpu_float64", - "test_dispatch_meta_outplace_addmm_decomposed_xpu_complex", - "test_dispatch_meta_outplace_addmm_decomposed_xpu_float64", - "test_dispatch_meta_outplace_addmm_xpu_complex", - "test_dispatch_meta_outplace_addmm_xpu_float64", - "test_dispatch_meta_outplace_addmv_xpu_complex", - "test_dispatch_meta_outplace_addmv_xpu_float64", - "test_dispatch_meta_outplace_baddbmm_xpu_complex", - "test_dispatch_meta_outplace_baddbmm_xpu_float64", - "test_dispatch_meta_outplace_bmm_xpu_complex", - "test_dispatch_meta_outplace_bmm_xpu_float64", - "test_dispatch_meta_outplace_cdist_xpu_float64", - "test_dispatch_meta_outplace_cholesky_inverse_xpu_complex", - "test_dispatch_meta_outplace_cholesky_inverse_xpu_float64", - "test_dispatch_meta_outplace_cholesky_solve_xpu_complex", - 
"test_dispatch_meta_outplace_cholesky_solve_xpu_float64", - "test_dispatch_meta_outplace_cholesky_xpu_complex", - "test_dispatch_meta_outplace_cholesky_xpu_float64", - "test_dispatch_meta_outplace_corrcoef_xpu_complex", - "test_dispatch_meta_outplace_corrcoef_xpu_float64", - "test_dispatch_meta_outplace_cov_xpu_complex", - "test_dispatch_meta_outplace_cov_xpu_float64", - "test_dispatch_meta_outplace_einsum_xpu_complex", - "test_dispatch_meta_outplace_einsum_xpu_float64", - "test_dispatch_meta_outplace_geqrf_xpu_complex", - "test_dispatch_meta_outplace_geqrf_xpu_float64", - "test_dispatch_meta_outplace_inner_xpu_complex", - "test_dispatch_meta_outplace_inner_xpu_float64", - "test_dispatch_meta_outplace_linalg_cholesky_ex_xpu_complex", - "test_dispatch_meta_outplace_linalg_cholesky_ex_xpu_float64", - "test_dispatch_meta_outplace_linalg_cholesky_xpu_complex", - "test_dispatch_meta_outplace_linalg_cholesky_xpu_float64", - "test_dispatch_meta_outplace_linalg_det_singular_xpu_complex", - "test_dispatch_meta_outplace_linalg_det_singular_xpu_float64", - "test_dispatch_meta_outplace_linalg_det_xpu_complex", - "test_dispatch_meta_outplace_linalg_det_xpu_float64", - "test_dispatch_meta_outplace_linalg_eig_xpu_complex", - "test_dispatch_meta_outplace_linalg_eig_xpu_float64", - "test_dispatch_meta_outplace_linalg_eigh_xpu_complex", - "test_dispatch_meta_outplace_linalg_eigh_xpu_float64", - "test_dispatch_meta_outplace_linalg_eigvals_xpu_complex", - "test_dispatch_meta_outplace_linalg_eigvals_xpu_float64", - "test_dispatch_meta_outplace_linalg_eigvalsh_xpu_complex", - "test_dispatch_meta_outplace_linalg_eigvalsh_xpu_float64", - "test_dispatch_meta_outplace_linalg_inv_ex_xpu_complex", - "test_dispatch_meta_outplace_linalg_inv_ex_xpu_float64", - "test_dispatch_meta_outplace_linalg_inv_xpu_complex", - "test_dispatch_meta_outplace_linalg_inv_xpu_float64", - "test_dispatch_meta_outplace_linalg_ldl_factor_ex_xpu_complex", - 
"test_dispatch_meta_outplace_linalg_ldl_factor_ex_xpu_float64", - "test_dispatch_meta_outplace_linalg_ldl_factor_xpu_complex", - "test_dispatch_meta_outplace_linalg_ldl_factor_xpu_float64", - "test_dispatch_meta_outplace_linalg_ldl_solve_xpu_complex", - "test_dispatch_meta_outplace_linalg_ldl_solve_xpu_float64", - "test_dispatch_meta_outplace_linalg_lstsq_grad_oriented_xpu_complex", - "test_dispatch_meta_outplace_linalg_lstsq_grad_oriented_xpu_float64", - "test_dispatch_meta_outplace_linalg_lstsq_xpu_complex", - "test_dispatch_meta_outplace_linalg_lstsq_xpu_float64", - "test_dispatch_meta_outplace_linalg_lu_factor_xpu_complex", - "test_dispatch_meta_outplace_linalg_lu_factor_xpu_float64", - "test_dispatch_meta_outplace_linalg_lu_solve_xpu_complex", - "test_dispatch_meta_outplace_linalg_lu_solve_xpu_float64", - "test_dispatch_meta_outplace_linalg_matrix_power_xpu_complex", - "test_dispatch_meta_outplace_linalg_matrix_power_xpu_float64", - "test_dispatch_meta_outplace_linalg_matrix_rank_hermitian_xpu_complex", - "test_dispatch_meta_outplace_linalg_matrix_rank_hermitian_xpu_float64", - "test_dispatch_meta_outplace_linalg_matrix_rank_xpu_complex", - "test_dispatch_meta_outplace_linalg_matrix_rank_xpu_float64", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_complex", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_float64", - "test_dispatch_meta_outplace_linalg_pinv_hermitian_xpu_complex", - "test_dispatch_meta_outplace_linalg_pinv_hermitian_xpu_float64", - "test_dispatch_meta_outplace_linalg_pinv_singular_xpu_complex", - "test_dispatch_meta_outplace_linalg_pinv_singular_xpu_float64", - "test_dispatch_meta_outplace_linalg_pinv_xpu_complex", - "test_dispatch_meta_outplace_linalg_pinv_xpu_float64", - "test_dispatch_meta_outplace_linalg_qr_xpu_complex", - "test_dispatch_meta_outplace_linalg_qr_xpu_float64", - "test_dispatch_meta_outplace_linalg_slogdet_xpu_complex", - "test_dispatch_meta_outplace_linalg_slogdet_xpu_float64", - 
"test_dispatch_meta_outplace_linalg_solve_ex_xpu_complex", - "test_dispatch_meta_outplace_linalg_solve_ex_xpu_float64", - "test_dispatch_meta_outplace_linalg_solve_xpu_complex", - "test_dispatch_meta_outplace_linalg_solve_xpu_float64", - "test_dispatch_meta_outplace_linalg_svd_xpu_complex", - "test_dispatch_meta_outplace_linalg_svd_xpu_float64", - "test_dispatch_meta_outplace_linalg_tensorinv_xpu_complex", - "test_dispatch_meta_outplace_linalg_tensorinv_xpu_float64", - "test_dispatch_meta_outplace_logdet_xpu_complex", - "test_dispatch_meta_outplace_logdet_xpu_float64", - "test_dispatch_meta_outplace_lu_solve_xpu_complex", - "test_dispatch_meta_outplace_lu_solve_xpu_float64", - "test_dispatch_meta_outplace_lu_xpu_complex", - "test_dispatch_meta_outplace_lu_xpu_float64", - "test_dispatch_meta_outplace_matmul_xpu_complex", - "test_dispatch_meta_outplace_matmul_xpu_float64", - "test_dispatch_meta_outplace_mm_xpu_complex", - "test_dispatch_meta_outplace_mm_xpu_float64", - "test_dispatch_meta_outplace_mv_xpu_complex", - "test_dispatch_meta_outplace_mv_xpu_float64", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_float64", - "test_dispatch_meta_outplace_nn_functional_linear_xpu_complex", - "test_dispatch_meta_outplace_nn_functional_linear_xpu_float64", - "test_dispatch_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64", - "test_dispatch_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_dispatch_meta_outplace_pca_lowrank_xpu_complex", - "test_dispatch_meta_outplace_pca_lowrank_xpu_float64", - "test_dispatch_meta_outplace_pinverse_xpu_complex", - "test_dispatch_meta_outplace_pinverse_xpu_float64", - "test_dispatch_meta_outplace_qr_xpu_complex", - "test_dispatch_meta_outplace_qr_xpu_float64", - "test_dispatch_meta_outplace_svd_lowrank_xpu_complex", - "test_dispatch_meta_outplace_svd_lowrank_xpu_float64", - "test_dispatch_meta_outplace_svd_xpu_complex", - "test_dispatch_meta_outplace_svd_xpu_float64", - 
"test_dispatch_meta_outplace_tensordot_xpu_complex", - "test_dispatch_meta_outplace_tensordot_xpu_float64", - "test_dispatch_meta_outplace_triangular_solve_xpu_complex", - "test_dispatch_meta_outplace_triangular_solve_xpu_float64", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_float64", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_float64", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_float64", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_complex", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_float64", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_complex", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_float64", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_complex", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_float64", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float64", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_float64", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_float64", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_float64", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_float64", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_float64", - "test_dispatch_symbolic_meta_outplace_cdist_xpu_float64", - "test_dispatch_symbolic_meta_outplace_cholesky_inverse_xpu_complex", - "test_dispatch_symbolic_meta_outplace_cholesky_inverse_xpu_float64", - "test_dispatch_symbolic_meta_outplace_cholesky_solve_xpu_complex", - "test_dispatch_symbolic_meta_outplace_cholesky_solve_xpu_float64", - 
"test_dispatch_symbolic_meta_outplace_cholesky_xpu_complex", - "test_dispatch_symbolic_meta_outplace_cholesky_xpu_float64", - "test_dispatch_symbolic_meta_outplace_corrcoef_xpu_complex", - "test_dispatch_symbolic_meta_outplace_corrcoef_xpu_float64", - "test_dispatch_symbolic_meta_outplace_cov_xpu_complex", - "test_dispatch_symbolic_meta_outplace_cov_xpu_float64", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_complex", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_float64", - "test_dispatch_symbolic_meta_outplace_geqrf_xpu_complex", - "test_dispatch_symbolic_meta_outplace_geqrf_xpu_float64", - "test_dispatch_symbolic_meta_outplace_inner_xpu_complex", - "test_dispatch_symbolic_meta_outplace_inner_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_cholesky_ex_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_cholesky_ex_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_cholesky_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_cholesky_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_det_singular_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_det_singular_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_det_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_det_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_eig_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_eig_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_eigh_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_eigh_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_eigvals_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_eigvals_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_eigvalsh_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_eigvalsh_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_inv_ex_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_inv_ex_xpu_float64", - 
"test_dispatch_symbolic_meta_outplace_linalg_inv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_inv_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_ex_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_ex_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_ldl_solve_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_ldl_solve_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_lstsq_grad_oriented_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_lstsq_grad_oriented_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_lstsq_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_lstsq_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_lu_factor_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_lu_factor_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_lu_solve_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_lu_solve_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_matrix_power_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_matrix_power_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_hermitian_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_hermitian_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_pinv_hermitian_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_pinv_hermitian_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_pinv_singular_xpu_complex", - 
"test_dispatch_symbolic_meta_outplace_linalg_pinv_singular_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_pinv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_pinv_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_qr_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_qr_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_slogdet_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_slogdet_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_solve_ex_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_solve_ex_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_solve_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_solve_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_svd_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_svd_xpu_float64", - "test_dispatch_symbolic_meta_outplace_linalg_tensorinv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_linalg_tensorinv_xpu_float64", - "test_dispatch_symbolic_meta_outplace_logdet_xpu_complex", - "test_dispatch_symbolic_meta_outplace_logdet_xpu_float64", - "test_dispatch_symbolic_meta_outplace_lu_solve_xpu_complex", - "test_dispatch_symbolic_meta_outplace_lu_solve_xpu_float64", - "test_dispatch_symbolic_meta_outplace_lu_xpu_complex", - "test_dispatch_symbolic_meta_outplace_lu_xpu_float64", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_complex", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_float64", - "test_dispatch_symbolic_meta_outplace_mm_xpu_complex", - "test_dispatch_symbolic_meta_outplace_mm_xpu_float64", - "test_dispatch_symbolic_meta_outplace_mv_xpu_complex", - "test_dispatch_symbolic_meta_outplace_mv_xpu_float64", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_float64", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_complex", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_float64", - 
"test_dispatch_symbolic_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64", - "test_dispatch_symbolic_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_dispatch_symbolic_meta_outplace_pca_lowrank_xpu_complex", - "test_dispatch_symbolic_meta_outplace_pca_lowrank_xpu_float64", - "test_dispatch_symbolic_meta_outplace_pinverse_xpu_complex", - "test_dispatch_symbolic_meta_outplace_pinverse_xpu_float64", - "test_dispatch_symbolic_meta_outplace_qr_xpu_complex", - "test_dispatch_symbolic_meta_outplace_qr_xpu_float64", - "test_dispatch_symbolic_meta_outplace_svd_lowrank_xpu_complex", - "test_dispatch_symbolic_meta_outplace_svd_lowrank_xpu_float64", - "test_dispatch_symbolic_meta_outplace_svd_xpu_complex", - "test_dispatch_symbolic_meta_outplace_svd_xpu_float64", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_complex", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_float64", - "test_dispatch_symbolic_meta_outplace_triangular_solve_xpu_complex", - "test_dispatch_symbolic_meta_outplace_triangular_solve_xpu_float64", - "test_meta_inplace_addbmm_xpu_float64", - "test_meta_inplace_addmm_decomposed_xpu_complex", - "test_meta_inplace_addmm_decomposed_xpu_float64", - "test_meta_inplace_addmm_xpu_complex", - "test_meta_inplace_addmm_xpu_float64", - "test_meta_inplace_addmv_xpu_complex", - "test_meta_inplace_addmv_xpu_float64", - "test_meta_inplace_baddbmm_xpu_complex", - "test_meta_inplace_baddbmm_xpu_float64", - "test_meta_outplace___rmatmul___xpu_complex", - "test_meta_outplace___rmatmul___xpu_float64", - "test_meta_outplace_addbmm_xpu_float64", - "test_meta_outplace_addmm_decomposed_xpu_complex", - "test_meta_outplace_addmm_decomposed_xpu_float64", - "test_meta_outplace_addmm_xpu_complex", - "test_meta_outplace_addmm_xpu_float64", - "test_meta_outplace_addmv_xpu_complex", - "test_meta_outplace_addmv_xpu_float64", - "test_meta_outplace_baddbmm_xpu_complex", - "test_meta_outplace_baddbmm_xpu_float64", - 
"test_meta_outplace_bmm_xpu_complex", - "test_meta_outplace_bmm_xpu_float64", - "test_meta_outplace_cdist_xpu_float64", - "test_meta_outplace_cholesky_inverse_xpu_complex", - "test_meta_outplace_cholesky_inverse_xpu_float64", - "test_meta_outplace_cholesky_solve_xpu_complex", - "test_meta_outplace_cholesky_solve_xpu_float64", - "test_meta_outplace_cholesky_xpu_complex", - "test_meta_outplace_cholesky_xpu_float64", - "test_meta_outplace_corrcoef_xpu_complex", - "test_meta_outplace_corrcoef_xpu_float64", - "test_meta_outplace_cov_xpu_complex", - "test_meta_outplace_cov_xpu_float64", - "test_meta_outplace_einsum_xpu_complex", - "test_meta_outplace_einsum_xpu_float64", - "test_meta_outplace_geqrf_xpu_complex", - "test_meta_outplace_geqrf_xpu_float64", - "test_meta_outplace_inner_xpu_complex", - "test_meta_outplace_inner_xpu_float64", - "test_meta_outplace_linalg_cholesky_ex_xpu_complex", - "test_meta_outplace_linalg_cholesky_ex_xpu_float64", - "test_meta_outplace_linalg_cholesky_xpu_complex", - "test_meta_outplace_linalg_cholesky_xpu_float64", - "test_meta_outplace_linalg_det_singular_xpu_complex", - "test_meta_outplace_linalg_det_singular_xpu_float64", - "test_meta_outplace_linalg_det_xpu_complex", - "test_meta_outplace_linalg_det_xpu_float64", - "test_meta_outplace_linalg_eig_xpu_complex", - "test_meta_outplace_linalg_eig_xpu_float64", - "test_meta_outplace_linalg_eigh_xpu_complex", - "test_meta_outplace_linalg_eigh_xpu_float64", - "test_meta_outplace_linalg_eigvals_xpu_complex", - "test_meta_outplace_linalg_eigvals_xpu_float64", - "test_meta_outplace_linalg_eigvalsh_xpu_complex", - "test_meta_outplace_linalg_eigvalsh_xpu_float64", - "test_meta_outplace_linalg_inv_ex_xpu_complex", - "test_meta_outplace_linalg_inv_ex_xpu_float64", - "test_meta_outplace_linalg_inv_xpu_complex", - "test_meta_outplace_linalg_inv_xpu_float64", - "test_meta_outplace_linalg_ldl_factor_ex_xpu_complex", - "test_meta_outplace_linalg_ldl_factor_ex_xpu_float64", - 
"test_meta_outplace_linalg_ldl_factor_xpu_complex", - "test_meta_outplace_linalg_ldl_factor_xpu_float64", - "test_meta_outplace_linalg_ldl_solve_xpu_complex", - "test_meta_outplace_linalg_ldl_solve_xpu_float64", - "test_meta_outplace_linalg_lstsq_grad_oriented_xpu_complex", - "test_meta_outplace_linalg_lstsq_grad_oriented_xpu_float64", - "test_meta_outplace_linalg_lstsq_xpu_complex", - "test_meta_outplace_linalg_lstsq_xpu_float64", - "test_meta_outplace_linalg_lu_factor_xpu_complex", - "test_meta_outplace_linalg_lu_factor_xpu_float64", - "test_meta_outplace_linalg_lu_solve_xpu_complex", - "test_meta_outplace_linalg_lu_solve_xpu_float64", - "test_meta_outplace_linalg_matrix_power_xpu_complex", - "test_meta_outplace_linalg_matrix_power_xpu_float64", - "test_meta_outplace_linalg_matrix_rank_hermitian_xpu_complex", - "test_meta_outplace_linalg_matrix_rank_hermitian_xpu_float64", - "test_meta_outplace_linalg_matrix_rank_xpu_complex", - "test_meta_outplace_linalg_matrix_rank_xpu_float64", - "test_meta_outplace_linalg_multi_dot_xpu_complex", - "test_meta_outplace_linalg_multi_dot_xpu_float64", - "test_meta_outplace_linalg_pinv_hermitian_xpu_complex", - "test_meta_outplace_linalg_pinv_hermitian_xpu_float64", - "test_meta_outplace_linalg_pinv_singular_xpu_complex", - "test_meta_outplace_linalg_pinv_singular_xpu_float64", - "test_meta_outplace_linalg_pinv_xpu_complex", - "test_meta_outplace_linalg_pinv_xpu_float64", - "test_meta_outplace_linalg_qr_xpu_complex", - "test_meta_outplace_linalg_qr_xpu_float64", - "test_meta_outplace_linalg_slogdet_xpu_complex", - "test_meta_outplace_linalg_slogdet_xpu_float64", - "test_meta_outplace_linalg_solve_ex_xpu_complex", - "test_meta_outplace_linalg_solve_ex_xpu_float64", - "test_meta_outplace_linalg_solve_xpu_complex", - "test_meta_outplace_linalg_solve_xpu_float64", - "test_meta_outplace_linalg_svd_xpu_complex", - "test_meta_outplace_linalg_svd_xpu_float64", - "test_meta_outplace_linalg_tensorinv_xpu_complex", - 
"test_meta_outplace_linalg_tensorinv_xpu_float64", - "test_meta_outplace_logdet_xpu_complex", - "test_meta_outplace_logdet_xpu_float64", - "test_meta_outplace_lu_solve_xpu_complex", - "test_meta_outplace_lu_solve_xpu_float64", - "test_meta_outplace_lu_xpu_complex", - "test_meta_outplace_lu_xpu_float64", - "test_meta_outplace_matmul_xpu_complex", - "test_meta_outplace_matmul_xpu_float64", - "test_meta_outplace_mm_xpu_complex", - "test_meta_outplace_mm_xpu_float64", - "test_meta_outplace_mv_xpu_complex", - "test_meta_outplace_mv_xpu_float64", - "test_meta_outplace_nn_functional_bilinear_xpu_float64", - "test_meta_outplace_nn_functional_linear_xpu_complex", - "test_meta_outplace_nn_functional_linear_xpu_float64", - "test_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64", - "test_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64", - "test_meta_outplace_pca_lowrank_xpu_complex", - "test_meta_outplace_pca_lowrank_xpu_float64", - "test_meta_outplace_pinverse_xpu_complex", - "test_meta_outplace_pinverse_xpu_float64", - "test_meta_outplace_qr_xpu_complex", - "test_meta_outplace_qr_xpu_float64", - "test_meta_outplace_svd_lowrank_xpu_complex", - "test_meta_outplace_svd_lowrank_xpu_float64", - "test_meta_outplace_svd_xpu_complex", - "test_meta_outplace_svd_xpu_float64", - "test_meta_outplace_tensordot_xpu_complex", - "test_meta_outplace_tensordot_xpu_float64", - "test_meta_outplace_triangular_solve_xpu_complex", - "test_meta_outplace_triangular_solve_xpu_float64", - # RuntimeError: Short is not supported in oneDNN! 
- "test_dispatch_meta_inplace_addbmm_xpu_int16", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_int16", - "test_dispatch_meta_inplace_addmm_xpu_int16", - "test_dispatch_meta_inplace_addmv_xpu_int16", - "test_dispatch_meta_inplace_baddbmm_xpu_int16", - "test_dispatch_meta_outplace___rmatmul___xpu_int16", - "test_dispatch_meta_outplace_addbmm_xpu_int16", - "test_dispatch_meta_outplace_addmm_decomposed_xpu_int16", - "test_dispatch_meta_outplace_addmm_xpu_int16", - "test_dispatch_meta_outplace_addmv_xpu_int16", - "test_dispatch_meta_outplace_baddbmm_xpu_int16", - "test_dispatch_meta_outplace_bmm_xpu_int16", - "test_dispatch_meta_outplace_einsum_xpu_int16", - "test_dispatch_meta_outplace_inner_xpu_int16", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int16", - "test_dispatch_meta_outplace_matmul_xpu_int16", - "test_dispatch_meta_outplace_mm_xpu_int16", - "test_dispatch_meta_outplace_mv_xpu_int16", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int16", - "test_dispatch_meta_outplace_tensordot_xpu_int16", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int16", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int16", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_int16", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_int16", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int16", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int16", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int16", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int16", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_int16", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_int16", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int16", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_int16", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_int16", - "test_dispatch_symbolic_meta_outplace_inner_xpu_int16", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int16", - 
"test_dispatch_symbolic_meta_outplace_matmul_xpu_int16", - "test_dispatch_symbolic_meta_outplace_mm_xpu_int16", - "test_dispatch_symbolic_meta_outplace_mv_xpu_int16", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int16", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_int16", - "test_meta_inplace_addbmm_xpu_int16", - "test_meta_inplace_addmm_decomposed_xpu_int16", - "test_meta_inplace_addmm_xpu_int16", - "test_meta_inplace_addmv_xpu_int16", - "test_meta_inplace_baddbmm_xpu_int16", - "test_meta_outplace___rmatmul___xpu_int16", - "test_meta_outplace_addbmm_xpu_int16", - "test_meta_outplace_addmm_decomposed_xpu_int16", - "test_meta_outplace_addmm_xpu_int16", - "test_meta_outplace_addmv_xpu_int16", - "test_meta_outplace_baddbmm_xpu_int16", - "test_meta_outplace_bmm_xpu_int16", - "test_meta_outplace_einsum_xpu_int16", - "test_meta_outplace_inner_xpu_int16", - "test_meta_outplace_linalg_multi_dot_xpu_int16", - "test_meta_outplace_matmul_xpu_int16", - "test_meta_outplace_mm_xpu_int16", - "test_meta_outplace_mv_xpu_int16", - "test_meta_outplace_nn_functional_bilinear_xpu_int16", - "test_meta_outplace_tensordot_xpu_int16", - # RuntimeError: could not create a primitive descriptor for a matmul primitive - "test_dispatch_meta_inplace_addbmm_xpu_int32", - "test_dispatch_meta_inplace_addbmm_xpu_uint8", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_int32", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_uint8", - "test_dispatch_meta_inplace_addmm_xpu_int32", - "test_dispatch_meta_inplace_addmm_xpu_uint8", - "test_dispatch_meta_inplace_addmv_xpu_int32", - "test_dispatch_meta_inplace_addmv_xpu_uint8", - "test_dispatch_meta_inplace_baddbmm_xpu_int32", - "test_dispatch_meta_inplace_baddbmm_xpu_uint8", - "test_dispatch_meta_outplace___rmatmul___xpu_int32", - "test_dispatch_meta_outplace___rmatmul___xpu_uint8", - "test_dispatch_meta_outplace_addbmm_xpu_int32", - "test_dispatch_meta_outplace_addbmm_xpu_uint8", - 
"test_dispatch_meta_outplace_addmm_decomposed_xpu_int32", - "test_dispatch_meta_outplace_addmm_decomposed_xpu_uint8", - "test_dispatch_meta_outplace_addmm_xpu_int32", - "test_dispatch_meta_outplace_addmm_xpu_uint8", - "test_dispatch_meta_outplace_addmv_xpu_int32", - "test_dispatch_meta_outplace_addmv_xpu_uint8", - "test_dispatch_meta_outplace_baddbmm_xpu_int32", - "test_dispatch_meta_outplace_baddbmm_xpu_uint8", - "test_dispatch_meta_outplace_bmm_xpu_int32", - "test_dispatch_meta_outplace_bmm_xpu_uint8", - "test_dispatch_meta_outplace_einsum_xpu_int32", - "test_dispatch_meta_outplace_einsum_xpu_uint8", - "test_dispatch_meta_outplace_inner_xpu_int32", - "test_dispatch_meta_outplace_inner_xpu_uint8", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int32", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_uint8", - "test_dispatch_meta_outplace_matmul_xpu_int32", - "test_dispatch_meta_outplace_matmul_xpu_uint8", - "test_dispatch_meta_outplace_mm_xpu_int32", - "test_dispatch_meta_outplace_mm_xpu_uint8", - "test_dispatch_meta_outplace_mv_xpu_int32", - "test_dispatch_meta_outplace_mv_xpu_uint8", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int32", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_uint8", - "test_dispatch_meta_outplace_nn_functional_linear_xpu_int32", - "test_dispatch_meta_outplace_nn_functional_linear_xpu_uint8", - "test_dispatch_meta_outplace_tensordot_xpu_int32", - "test_dispatch_meta_outplace_tensordot_xpu_uint8", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_uint8", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_uint8", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_uint8", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_int32", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_uint8", - 
"test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int32", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_uint8", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int32", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_uint8", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_int32", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_int32", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_inner_xpu_int32", - "test_dispatch_symbolic_meta_outplace_inner_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int32", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_int32", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_mm_xpu_int32", - "test_dispatch_symbolic_meta_outplace_mm_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_mv_xpu_int32", - "test_dispatch_symbolic_meta_outplace_mv_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int32", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_uint8", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int32", - "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_uint8", - 
"test_dispatch_symbolic_meta_outplace_tensordot_xpu_int32", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_uint8", - "test_meta_inplace_addbmm_xpu_int32", - "test_meta_inplace_addbmm_xpu_uint8", - "test_meta_inplace_addmm_decomposed_xpu_int32", - "test_meta_inplace_addmm_decomposed_xpu_uint8", - "test_meta_inplace_addmm_xpu_int32", - "test_meta_inplace_addmm_xpu_uint8", - "test_meta_inplace_addmv_xpu_int32", - "test_meta_inplace_addmv_xpu_uint8", - "test_meta_inplace_baddbmm_xpu_int32", - "test_meta_inplace_baddbmm_xpu_uint8", - "test_meta_outplace___rmatmul___xpu_int32", - "test_meta_outplace___rmatmul___xpu_uint8", - "test_meta_outplace_addbmm_xpu_int32", - "test_meta_outplace_addbmm_xpu_uint8", - "test_meta_outplace_addmm_decomposed_xpu_int32", - "test_meta_outplace_addmm_decomposed_xpu_uint8", - "test_meta_outplace_addmm_xpu_int32", - "test_meta_outplace_addmm_xpu_uint8", - "test_meta_outplace_addmv_xpu_int32", - "test_meta_outplace_addmv_xpu_uint8", - "test_meta_outplace_baddbmm_xpu_int32", - "test_meta_outplace_baddbmm_xpu_uint8", - "test_meta_outplace_bmm_xpu_int32", - "test_meta_outplace_bmm_xpu_uint8", - "test_meta_outplace_einsum_xpu_int32", - "test_meta_outplace_einsum_xpu_uint8", - "test_meta_outplace_inner_xpu_int32", - "test_meta_outplace_inner_xpu_uint8", - "test_meta_outplace_linalg_multi_dot_xpu_int32", - "test_meta_outplace_linalg_multi_dot_xpu_uint8", - "test_meta_outplace_matmul_xpu_int32", - "test_meta_outplace_matmul_xpu_uint8", - "test_meta_outplace_mm_xpu_int32", - "test_meta_outplace_mm_xpu_uint8", - "test_meta_outplace_mv_xpu_int32", - "test_meta_outplace_mv_xpu_uint8", - "test_meta_outplace_nn_functional_bilinear_xpu_int32", - "test_meta_outplace_nn_functional_bilinear_xpu_uint8", - "test_meta_outplace_nn_functional_linear_xpu_int32", - "test_meta_outplace_nn_functional_linear_xpu_uint8", - "test_meta_outplace_tensordot_xpu_int32", - "test_meta_outplace_tensordot_xpu_uint8", - # RuntimeError: Long is not supported in oneDNN! 
- "test_dispatch_meta_inplace_addbmm_xpu_int64", - "test_dispatch_meta_inplace_addmm_decomposed_xpu_int64", - "test_dispatch_meta_inplace_addmm_xpu_int64", - "test_dispatch_meta_inplace_addmv_xpu_int64", - "test_dispatch_meta_inplace_baddbmm_xpu_int64", - "test_dispatch_meta_outplace___rmatmul___xpu_int64", - "test_dispatch_meta_outplace_addbmm_xpu_int64", - "test_dispatch_meta_outplace_addmm_decomposed_xpu_int64", - "test_dispatch_meta_outplace_addmm_xpu_int64", - "test_dispatch_meta_outplace_addmv_xpu_int64", - "test_dispatch_meta_outplace_baddbmm_xpu_int64", - "test_dispatch_meta_outplace_bmm_xpu_int64", - "test_dispatch_meta_outplace_einsum_xpu_int64", - "test_dispatch_meta_outplace_inner_xpu_int64", - "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int64", - "test_dispatch_meta_outplace_matmul_xpu_int64", - "test_dispatch_meta_outplace_mm_xpu_int64", - "test_dispatch_meta_outplace_mv_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_conv1d_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_conv2d_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_conv3d_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_conv_transpose1d_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_int64", - "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_int64", - "test_dispatch_meta_outplace_tensordot_xpu_int64", - "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int64", - "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int64", - "test_dispatch_symbolic_meta_inplace_addmm_xpu_int64", - "test_dispatch_symbolic_meta_inplace_addmv_xpu_int64", - "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int64", - "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int64", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int64", - "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int64", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_int64", - 
"test_dispatch_symbolic_meta_outplace_addmv_xpu_int64", - "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int64", - "test_dispatch_symbolic_meta_outplace_bmm_xpu_int64", - "test_dispatch_symbolic_meta_outplace_einsum_xpu_int64", - "test_dispatch_symbolic_meta_outplace_inner_xpu_int64", - "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int64", - "test_dispatch_symbolic_meta_outplace_matmul_xpu_int64", - "test_dispatch_symbolic_meta_outplace_mm_xpu_int64", - "test_dispatch_symbolic_meta_outplace_mv_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv1d_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv2d_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv3d_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose1d_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_int64", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_int64", - "test_dispatch_symbolic_meta_outplace_tensordot_xpu_int64", - "test_meta_inplace_addbmm_xpu_int64", - "test_meta_inplace_addmm_decomposed_xpu_int64", - "test_meta_inplace_addmm_xpu_int64", - "test_meta_inplace_addmv_xpu_int64", - "test_meta_inplace_baddbmm_xpu_int64", - "test_meta_outplace___rmatmul___xpu_int64", - "test_meta_outplace_addbmm_xpu_int64", - "test_meta_outplace_addmm_decomposed_xpu_int64", - "test_meta_outplace_addmm_xpu_int64", - "test_meta_outplace_addmv_xpu_int64", - "test_meta_outplace_baddbmm_xpu_int64", - "test_meta_outplace_bmm_xpu_int64", - "test_meta_outplace_einsum_xpu_int64", - "test_meta_outplace_inner_xpu_int64", - "test_meta_outplace_linalg_multi_dot_xpu_int64", - "test_meta_outplace_matmul_xpu_int64", - "test_meta_outplace_mm_xpu_int64", - "test_meta_outplace_mv_xpu_int64", - "test_meta_outplace_nn_functional_bilinear_xpu_int64", - "test_meta_outplace_nn_functional_conv1d_xpu_int64", - 
"test_meta_outplace_nn_functional_conv2d_xpu_int64", - "test_meta_outplace_nn_functional_conv3d_xpu_int64", - "test_meta_outplace_nn_functional_conv_transpose1d_xpu_int64", - "test_meta_outplace_nn_functional_conv_transpose2d_xpu_int64", - "test_meta_outplace_nn_functional_conv_transpose3d_xpu_int64", - "test_meta_outplace_tensordot_xpu_int64", - # RuntimeError: could not create a primitive - "test_dispatch_meta_outplace_addbmm_xpu_bfloat16", - "test_dispatch_meta_outplace_addbmm_xpu_float16", - "test_dispatch_meta_outplace_addbmm_xpu_float32", - "test_dispatch_meta_outplace_addbmm_xpu_int8", - "test_dispatch_meta_outplace_addmm_xpu_bfloat16", - "test_dispatch_meta_outplace_addmm_xpu_float16", - "test_dispatch_meta_outplace_addmm_xpu_float32", - "test_dispatch_meta_outplace_addmm_xpu_int8", - "test_dispatch_meta_outplace_addmv_xpu_bfloat16", - "test_dispatch_meta_outplace_addmv_xpu_float16", - "test_dispatch_meta_outplace_addmv_xpu_float32", - "test_dispatch_meta_outplace_addmv_xpu_int8", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float16", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int8", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_float16", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_addmm_xpu_int8", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_float16", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_float32", - "test_dispatch_symbolic_meta_outplace_addmv_xpu_int8", - "test_dispatch_symbolic_meta_outplace_all_strides_addbmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_all_strides_addmm_xpu_float32", - "test_dispatch_symbolic_meta_outplace_all_strides_addmv_xpu_float32", - "test_meta_outplace_addbmm_xpu_bfloat16", - 
"test_meta_outplace_addbmm_xpu_float16", - "test_meta_outplace_addbmm_xpu_float32", - "test_meta_outplace_addbmm_xpu_int8", - "test_meta_outplace_addmm_xpu_bfloat16", - "test_meta_outplace_addmm_xpu_float16", - "test_meta_outplace_addmm_xpu_float32", - "test_meta_outplace_addmm_xpu_int8", - "test_meta_outplace_addmv_xpu_bfloat16", - "test_meta_outplace_addmv_xpu_float16", - "test_meta_outplace_addmv_xpu_float32", - "test_meta_outplace_addmv_xpu_int8", - # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive - "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16", - "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_complex", - "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_float", - "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_bfloat16", - "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_complex", - "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_float", - "test_dispatch_symbolic_meta_outplace_all_strides_nn_functional_conv_transpose2d_xpu_float32", - "test_dispatch_symbolic_meta_outplace_all_strides_nn_functional_conv_transpose3d_xpu_float32", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_complex", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_float", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_complex", - "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_float", - "test_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16", - "test_meta_outplace_nn_functional_conv_transpose2d_xpu_complex", - "test_meta_outplace_nn_functional_conv_transpose2d_xpu_float", - "test_meta_outplace_nn_functional_conv_transpose3d_xpu_bfloat16", - 
"test_meta_outplace_nn_functional_conv_transpose3d_xpu_complex", - "test_meta_outplace_nn_functional_conv_transpose3d_xpu_float", - # Not implemented, try these cases after implementing vdot - "test_dispatch_meta_outplace_vdot_xpu_complex", - "test_dispatch_symbolic_meta_outplace_vdot_xpu_complex", - "test_meta_outplace_vdot_xpu_complex", - # Unexpected success: - "test_dispatch_symbolic_meta_outplace_all_strides_narrow_copy_xpu_float32", - # New added case in 2.7 - "test_nonzero_xpu", - # https://github.com/intel/torch-xpu-ops/issues/1569 - # RuntimeError: output 0: meta disagrees with real impl - "test_dispatch_meta_outplace_norm_fro_xpu_bfloat16", - "test_dispatch_meta_outplace_norm_fro_xpu_complex128", - "test_dispatch_meta_outplace_norm_fro_xpu_complex64", - "test_dispatch_meta_outplace_norm_fro_xpu_float", - "test_dispatch_symbolic_meta_outplace_all_strides_norm_fro_xpu_float32", - "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_bfloat16", - "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_complex128", - "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_complex64", - "test_dispatch_symbolic_meta_outplace_norm_fro_xpu_float", - ), - "test_type_promotion_xpu.py": None, - "test_distributions_xpu.py": ( - # TODO: Passed on lts driver version, but failed on rolling driver version - "test_gamma_gpu_sample_xpu", - ), - "test_optim_xpu.py": ( - # oneDNN issues - # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_foreach_matches_forloop_ASGD_xpu_float64", - "test_foreach_matches_forloop_Adadelta_xpu_float64", - "test_foreach_matches_forloop_Adafactor_xpu_float64", - "test_foreach_matches_forloop_Adagrad_xpu_float64", - "test_foreach_matches_forloop_AdamW_xpu_float64", - "test_foreach_matches_forloop_Adam_xpu_float64", - "test_foreach_matches_forloop_Adamax_xpu_float64", - "test_foreach_matches_forloop_NAdam_xpu_float64", - "test_foreach_matches_forloop_RAdam_xpu_float64", - "test_foreach_matches_forloop_RMSprop_xpu_float64", - 
"test_foreach_matches_forloop_Rprop_xpu_float64", - "test_foreach_matches_forloop_SGD_xpu_float64", - "test_fused_cpu_matches_cuda_AdamW_xpu_float64", - "test_fused_cpu_matches_cuda_Adam_xpu_float64", - "test_fused_cpu_matches_cuda_SGD_xpu_float64", - "test_fused_matches_forloop_AdamW_xpu_float64", - "test_fused_matches_forloop_Adam_xpu_float64", - "test_fused_matches_forloop_SGD_xpu_float64", - "test_set_default_dtype_works_with_foreach_ASGD_xpu_float64", - "test_set_default_dtype_works_with_foreach_Adadelta_xpu_float64", - "test_set_default_dtype_works_with_foreach_Adafactor_xpu_float64", - "test_set_default_dtype_works_with_foreach_Adagrad_xpu_float64", - "test_set_default_dtype_works_with_foreach_AdamW_xpu_float64", - "test_set_default_dtype_works_with_foreach_Adam_xpu_float64", - "test_set_default_dtype_works_with_foreach_Adamax_xpu_float64", - "test_set_default_dtype_works_with_foreach_NAdam_xpu_float64", - "test_set_default_dtype_works_with_foreach_RAdam_xpu_float64", - "test_set_default_dtype_works_with_foreach_RMSprop_xpu_float64", - "test_set_default_dtype_works_with_foreach_Rprop_xpu_float64", - "test_set_default_dtype_works_with_foreach_SGD_xpu_float64", - ), - "test_spectral_ops_xpu.py": ( - # CUDA specific case - "test_cufft_plan_cache_xpu_float64", - ), - "test_sparse_xpu.py": ( - "test_bmm_deterministic_xpu_float64", # - AssertionError: Torch not compiled with CUDA enabled - "test_bmm_oob_xpu", # - NotImplementedError: Could not run 'aten::bmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was ... - "test_bmm_xpu_float64", # - NotImplementedError: Could not run 'aten::bmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was ... - "test_dsmm_xpu_float64", # - NotImplementedError: Could not run 'aten::mm' with arguments from the 'SparseXPU' backend. 
This could be because the operator doesn't exist for this backend, or was o... - "test_empty_like_xpu_complex128", # - AssertionError: "Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend" does not match "Could not run 'aten::empty_strided' with argu... - "test_empty_like_xpu_float64", # - AssertionError: "Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend" does not match "Could not run 'aten::empty_strided' with argu... - "test_factory_device_type_inference_xpu", # - RuntimeError: PyTorch is not linked with support for cuda devices - "test_hsmm_xpu_float64", # - NotImplementedError: Could not run 'aten::hspmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa... - "test_mv_xpu_float64", # - NotImplementedError: Could not run 'aten::mm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was o... - "test_new_device_single_gpu_xpu", # - RuntimeError: PyTorch was compiled without CUDA support - "test_print_coalesced_xpu_float64", # - RuntimeError: I got this output for TestSparseXPU.test_print_coalesced_xpu_float64: - "test_print_uncoalesced_xpu_float64", # - RuntimeError: I got this output for TestSparseXPU.test_print_uncoalesced_xpu_float64 - "test_sparse_addmm_xpu_bfloat16", # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa... - "test_sparse_addmm_xpu_complex128", # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa... - "test_sparse_addmm_xpu_float16", # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa... 
- "test_sparse_addmm_xpu_float64", # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa... - "test_sparse_matmul_xpu_complex128", # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_sparse_matmul_xpu_complex64", # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_sparse_matmul_xpu_float32", # - NotImplementedError: Could not run 'aten::_sparse_sparse_matmul' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for thi... - "test_sparse_matmul_xpu_float64", # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_sparse_mm_xpu_float64", # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa... - ), + "test_foreach_xpu.py": None, } From ee8213dc09a9f7a4cffd6a48efb98180421f6781 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Sat, 3 May 2025 02:12:49 -0700 Subject: [PATCH 11/13] fix lint issue --- .github/workflows/_linux_ut.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index ba1165a8b..e72892bab 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -433,4 +433,3 @@ jobs: with: name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-xpu_distributed path: ${{ github.workspace }}/ut_log - From ce388322342d2c9f9d61ed0fd43c96a24e898eb8 Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Sat, 3 May 2025 04:28:39 -0700 Subject: [PATCH 12/13] only run op_ut test --- .github/workflows/_linux_ut.yml | 5 ++--- .github/workflows/pull.yml | 6 +++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index 
e72892bab..307736629 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -6,7 +6,7 @@ on: pytorch: required: false type: string - default: 'nightly_wheel' + default: 'main' description: Pytorch branch/commit keep_torch_xpu_ops: required: false @@ -22,8 +22,7 @@ on: required: true type: string default: '' - #description: UT scope. `op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu` Delimiter is comma - description: UT scope. `op_ut,` Delimiter is comma + description: UT scope. `op_regression,op_regression_dev1,op_extended,op_ut,torch_xpu` Delimiter is comma python: required: false type: string diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 7592e24b6..d3cfff752 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -64,8 +64,12 @@ jobs: uses: ./.github/workflows/_linux_ut.yml with: pytorch: ${{ needs.preci-linux-build.outputs.torch_commit_id }} - ut: op_regression,op_regression_dev1,op_extended,op_ut,xpu_distributed + ut: op_ut runner: linux.idc.xpu + #with: + # pytorch: ${{ needs.preci-linux-build.outputs.torch_commit_id }} + # ut: op_regression,op_regression_dev1,op_extended,op_ut,xpu_distributed + # runner: linux.idc.xpu Inductor-XPU-E2E-CI-Tests: name: preci-linux / e2e_test From d805605c633101a4b33edab43af569dea0efe30a Mon Sep 17 00:00:00 2001 From: Daisy Deng Date: Sat, 3 May 2025 05:46:31 -0700 Subject: [PATCH 13/13] update skip_list_common.py to run some failed test --- test/xpu/skip_list_common.py | 657 ++++++++++++++++++++++++++++++++++- 1 file changed, 656 insertions(+), 1 deletion(-) diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py index 2b3665cfc..83eea1324 100644 --- a/test/xpu/skip_list_common.py +++ b/test/xpu/skip_list_common.py @@ -1,3 +1,658 @@ skip_dict = { - "test_foreach_xpu.py": None, + "test_ops_xpu.py": ( + # Skip list of base line + # XPU implementation doesn't claimn FP8 now + # 
https://github.com/intel/torch-xpu-ops/issues/461 + "float8", + # workarounds for the following tests + # https://github.com/intel/torch-xpu-ops/issues/1214 + # "test_python_ref__refs_exp_xpu_complex128", + "test_python_ref__refs_sigmoid_xpu_complex128", + "test_python_ref_executor__refs_log2_executor_aten_xpu_complex128", + "test_python_ref_executor__refs_exp_executor_aten_xpu_complex128", + "test_python_ref_torch_fallback__refs_log2_xpu_complex128", + "test_python_ref_torch_fallback__refs_log10_xpu_complex128", + "test_python_ref_torch_fallback__refs_sigmoid_xpu_complex128", + "test_python_ref_executor__refs_log10_executor_aten_xpu_complex128", + "test_noncontiguous_samples_histogram_xpu_float32", + "test_python_ref_executor__refs_sigmoid_executor_aten_xpu_complex128", + # TODO: Fix the following tests + "test_out_warning_torch__scaled_mm_xpu", + # To be removed from this file. + # CUDA and XPU both XFAIL now. + "test_out_narrow_copy_xpu_float32", + # This case is marked as skip but XPU failed. However, CUDA and XPU throw the same runtime error. + "test_out_histc_xpu_float32", + # Data type is not supported in oneDNN! + "test_dtypes_nn_functional_conv1d_xpu", + "test_dtypes_nn_functional_conv2d_xpu", + "test_dtypes_nn_functional_conv3d_xpu", + "test_dtypes_nn_functional_conv_transpose1d_xpu", + "test_dtypes_nn_functional_conv_transpose2d_xpu", + "test_dtypes_nn_functional_conv_transpose3d_xpu", + # AssertionError: The supported dtypes for nn.functional.softsign on device type xpu are incorrect! + "test_dtypes_nn_functional_softsign_xpu", + # AssertionError: The supported dtypes for sparse.sampled_addmm on device type xpu are incorrect! 
- OPs not supported + "test_dtypes_sparse_sampled_addmm_xpu", + # OPs not supported + # "test_errors_dot_xpu", + "test_errors_vdot_xpu", + # Linalg OPs not supported + "test_noncontiguous_samples_linalg_det_xpu_float32", + "test_noncontiguous_samples_linalg_slogdet_xpu_float32", + "test_noncontiguous_samples_linalg_solve_ex_xpu_float32", + "test_noncontiguous_samples_linalg_solve_xpu_float32", + "test_noncontiguous_samples_linalg_tensorsolve_xpu_float32", + "test_noncontiguous_samples_logdet_xpu_float32", + # Sparse CSR OPs not supported + # RuntimeError: device type of values (xpu) must be CPU or CUDA or Meta + # https://github.com/intel/torch-xpu-ops/issues/357 + # "test_compare_cpu_sparse_sampled_addmm_xpu_float32", + "test_out_requires_grad_error_sparse_sampled_addmm_xpu_complex64", + "test_out_requires_grad_error_sparse_sampled_addmm_xpu_float32", + # OneDNN issues, https://github.com/intel/torch-xpu-ops/issues/253 + # RuntimeError: Long is not supported in oneDNN! + # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive + # RuntimeError: Double and complex datatype matmul is not supported in oneDNN + "test_noncontiguous_samples_nn_functional_conv3d_xpu_int64", + "test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64", + "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_complex64", + "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_float32", + "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_int64", + "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_complex64", + "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_float32", + "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_int64", + "test_noncontiguous_samples_nn_functional_conv1d_xpu_int64", + "test_noncontiguous_samples_nn_functional_conv2d_xpu_int64", + # Linalg OPs not supported + # RuntimeError: mode only supports CPU AND CUDA device type, got: xpu + # Issue 
https://github.com/intel/torch-xpu-ops/issues/327 + "test_numpy_ref_linalg_tensorinv_xpu_float64", + # RuntimeError: could not create a primitive descriptor for a deconvolution + # https://github.com/intel/torch-xpu-ops/issues/253 + "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_complex64", + "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_float32", + "test_variant_consistency_eager_nn_functional_conv_transpose3d_xpu_complex64", + "test_variant_consistency_eager_nn_functional_conv_transpose3d_xpu_float32", + # Linalg OPs not supported + "test_compare_cpu_linalg_lu_factor_ex_xpu_float32", + "test_compare_cpu_linalg_lu_factor_xpu_float32", + "test_compare_cpu_linalg_lu_xpu_float32", + # XPU hang. CUDA hang as well. + # https://github.com/pytorch/pytorch/issues/79528 + "test_compare_cpu_special_hermite_polynomial_h_xpu_float32", + # XFAIL of CUDA and XPU, unexpected success in fallback + # Linalg OPs not supported + "test_out_cholesky_inverse_xpu_float32", + "test_out_geqrf_xpu_float32", + "test_out_ormqr_xpu_float32", + # XFAIL of CUDA, XPU got unexpected success + "test_python_ref__refs_div_no_rounding_mode_xpu_complex32", + "test_python_ref__refs_pow_xpu_complex32", + "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32", + "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32", + "test_python_ref__refs_pow_xpu_complex32", + "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32", + "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32", + "test_python_ref_torch_fallback__refs_pow_xpu_complex32", + # unexpected success because of cpu fallback + # Linalg OPs not supported + "test_out_triangular_solve_xpu_float32", + # Newly added: + # Cuda skipped it + "test_non_standard_bool_values_sort_xpu_bool", # The implementation aligns with CUDA, RuntimeError: "sort" not implemented for 'Bool'. 
+ # Cuda XFAIL (stock pytorch commit: e7cf7d0) + "test_non_standard_bool_values_argsort_xpu_bool", + # Unexpected success + "test_python_ref_executor__refs_pow_executor_aten_xpu_complex32", # Didn't align with CUDA, Unexpected success + # Unexpected success + # "test_errors_histogramdd_xpu", #XFAIL now + # Jiterator is only supported on CUDA and ROCm GPUs, none are available. + # https://github.com/intel/torch-xpu-ops/issues/584 + "_jiterator_", + # https://github.com/intel/torch-xpu-ops/issues/157 + # Segfault: + "test_dtypes_nn_functional_multi_head_attention_forward_xpu", # https://github.com/intel/torch-xpu-ops/issues/157 + # Linalg OPs not supported + "test_dtypes_pca_lowrank_xpu", # https://github.com/intel/torch-xpu-ops/issues/157 + "test_dtypes_svd_lowrank_xpu", # https://github.com/intel/torch-xpu-ops/issues/157 + # RuntimeError: Long is not supported in oneDNN! + "test_noncontiguous_samples_nn_functional_linear_xpu_int64", # https://github.com/intel/torch-xpu-ops/issues/157 + # https://github.com/intel/torch-xpu-ops/issues/157 + # Datatype not supported in oneDNN + "test_dtypes_addmm_decomposed_xpu", + "test_dtypes_addmm_xpu", + "test_dtypes_addmv_xpu", + "test_dtypes_addr_xpu", + "test_dtypes_baddbmm_xpu", + "test_dtypes_cholesky_inverse_xpu", + "test_dtypes_cholesky_solve_xpu", + "test_dtypes_cholesky_xpu", + "test_dtypes_corrcoef_xpu", + "test_dtypes_cov_xpu", + "test_dtypes_linalg_cholesky_ex_xpu", + "test_dtypes_linalg_cholesky_xpu", + "test_dtypes_linalg_cond_xpu", + "test_dtypes_linalg_det_singular_xpu", + "test_dtypes_linalg_det_xpu", + "test_dtypes_linalg_eig_xpu", + "test_dtypes_linalg_eigh_xpu", + "test_dtypes_linalg_eigvals_xpu", + "test_dtypes_linalg_eigvalsh_xpu", + "test_dtypes_linalg_inv_ex_xpu", + "test_dtypes_linalg_inv_xpu", + "test_dtypes_linalg_ldl_factor_ex_xpu", + "test_dtypes_linalg_ldl_factor_xpu", + "test_dtypes_linalg_ldl_solve_xpu", + "test_dtypes_linalg_lstsq_grad_oriented_xpu", + "test_dtypes_linalg_lstsq_xpu", + 
"test_dtypes_linalg_lu_factor_ex_xpu", + "test_dtypes_linalg_lu_factor_xpu", + "test_dtypes_linalg_lu_solve_xpu", + "test_dtypes_linalg_lu_xpu", + "test_dtypes_linalg_matrix_power_xpu", + "test_dtypes_linalg_matrix_rank_hermitian_xpu", + "test_dtypes_linalg_matrix_rank_xpu", + "test_dtypes_linalg_pinv_hermitian_xpu", + "test_dtypes_linalg_pinv_xpu", + "test_dtypes_linalg_qr_xpu", + "test_dtypes_linalg_slogdet_xpu", + "test_dtypes_linalg_solve_ex_xpu", + "test_dtypes_linalg_solve_xpu", + "test_dtypes_linalg_svd_xpu", + "test_dtypes_linalg_tensorinv_xpu", + "test_dtypes_linalg_tensorsolve_xpu", + "test_dtypes_logdet_xpu", + "test_dtypes_lu_solve_xpu", + "test_dtypes_lu_xpu", + "test_dtypes_mv_xpu", + "test_dtypes_nn_functional_scaled_dot_product_attention_xpu", + "test_dtypes_norm_nuc_xpu", + "test_dtypes_pinverse_xpu", + "test_dtypes_qr_xpu", + "test_dtypes_svd_xpu", + "test_dtypes_tensordot_xpu", + "test_dtypes_triangular_solve_xpu", + "test_noncontiguous_samples___rmatmul___xpu_complex64", + "test_noncontiguous_samples___rmatmul___xpu_int64", + "test_noncontiguous_samples_addbmm_xpu_complex64", + "test_noncontiguous_samples_addbmm_xpu_float32", + "test_noncontiguous_samples_addbmm_xpu_int64", + "test_noncontiguous_samples_addmm_decomposed_xpu_complex64", + "test_noncontiguous_samples_addmm_decomposed_xpu_int64", + "test_noncontiguous_samples_addmm_xpu_complex64", + "test_noncontiguous_samples_addmm_xpu_float32", + "test_noncontiguous_samples_addmm_xpu_int64", + "test_noncontiguous_samples_addmv_xpu_complex64", + "test_noncontiguous_samples_addmv_xpu_float32", + "test_noncontiguous_samples_addmv_xpu_int64", + "test_noncontiguous_samples_addr_xpu_complex64", + "test_noncontiguous_samples_baddbmm_xpu_complex64", + "test_noncontiguous_samples_baddbmm_xpu_int64", + "test_noncontiguous_samples_bmm_xpu_complex64", + "test_noncontiguous_samples_bmm_xpu_int64", + "test_noncontiguous_samples_cholesky_inverse_xpu_complex64", + 
"test_noncontiguous_samples_cholesky_solve_xpu_complex64", + "test_noncontiguous_samples_cholesky_xpu_complex64", + "test_noncontiguous_samples_corrcoef_xpu_complex64", + "test_noncontiguous_samples_cov_xpu_complex64", + "test_noncontiguous_samples_einsum_xpu_complex64", + "test_noncontiguous_samples_einsum_xpu_int64", + "test_noncontiguous_samples_geqrf_xpu_complex64", + "test_noncontiguous_samples_inner_xpu_complex64", + "test_noncontiguous_samples_inner_xpu_int64", + "test_noncontiguous_samples_linalg_cholesky_ex_xpu_complex64", + "test_noncontiguous_samples_linalg_cholesky_xpu_complex64", + "test_noncontiguous_samples_linalg_cond_xpu_complex64", + "test_noncontiguous_samples_linalg_det_xpu_complex64", + "test_noncontiguous_samples_linalg_eig_xpu_complex64", + "test_noncontiguous_samples_linalg_eig_xpu_float32", + "test_noncontiguous_samples_linalg_eigh_xpu_complex64", + "test_noncontiguous_samples_linalg_eigvals_xpu_complex64", + "test_noncontiguous_samples_linalg_eigvalsh_xpu_complex64", + "test_noncontiguous_samples_linalg_householder_product_xpu_complex64", + "test_noncontiguous_samples_linalg_inv_ex_xpu_complex64", + "test_noncontiguous_samples_linalg_inv_xpu_complex64", + "test_noncontiguous_samples_linalg_ldl_factor_ex_xpu_complex64", + "test_noncontiguous_samples_linalg_ldl_factor_xpu_complex64", + "test_noncontiguous_samples_linalg_ldl_solve_xpu_complex64", + "test_noncontiguous_samples_linalg_lstsq_grad_oriented_xpu_complex64", + "test_noncontiguous_samples_linalg_lstsq_xpu_complex64", + "test_noncontiguous_samples_linalg_lu_factor_ex_xpu_complex64", + "test_noncontiguous_samples_linalg_lu_factor_xpu_complex64", + "test_noncontiguous_samples_linalg_lu_solve_xpu_complex64", + "test_noncontiguous_samples_linalg_lu_xpu_complex64", + "test_noncontiguous_samples_linalg_matrix_norm_xpu_complex64", + "test_noncontiguous_samples_linalg_matrix_power_xpu_complex64", + "test_noncontiguous_samples_linalg_matrix_rank_hermitian_xpu_complex64", + 
"test_noncontiguous_samples_linalg_matrix_rank_xpu_complex64", + "test_noncontiguous_samples_linalg_norm_subgradients_at_zero_xpu_complex64", + "test_noncontiguous_samples_linalg_norm_xpu_complex64", + "test_noncontiguous_samples_linalg_pinv_hermitian_xpu_complex64", + "test_noncontiguous_samples_linalg_pinv_singular_xpu_complex64", + "test_noncontiguous_samples_linalg_pinv_xpu_complex64", + "test_noncontiguous_samples_linalg_qr_xpu_complex64", + "test_noncontiguous_samples_linalg_slogdet_xpu_complex64", + "test_noncontiguous_samples_linalg_solve_ex_xpu_complex64", + "test_noncontiguous_samples_linalg_solve_triangular_xpu_complex64", + "test_noncontiguous_samples_linalg_solve_xpu_complex64", + "test_noncontiguous_samples_linalg_svd_xpu_complex64", + "test_noncontiguous_samples_linalg_svdvals_xpu_complex64", + "test_noncontiguous_samples_linalg_tensorinv_xpu_complex64", + "test_noncontiguous_samples_linalg_tensorsolve_xpu_complex64", + "test_noncontiguous_samples_logdet_xpu_complex64", + "test_noncontiguous_samples_lu_solve_xpu_complex64", + "test_noncontiguous_samples_lu_xpu_complex64", + "test_noncontiguous_samples_matmul_xpu_complex64", + "test_noncontiguous_samples_matmul_xpu_int64", + "test_noncontiguous_samples_mm_xpu_complex64", + "test_noncontiguous_samples_mm_xpu_int64", + "test_noncontiguous_samples_mv_xpu_complex64", + "test_noncontiguous_samples_mv_xpu_int64", + "test_noncontiguous_samples_nn_functional_bilinear_xpu_int64", + "test_noncontiguous_samples_nn_functional_linear_xpu_complex64", + "test_noncontiguous_samples_norm_nuc_xpu_complex64", + "test_noncontiguous_samples_ormqr_xpu_complex64", + "test_noncontiguous_samples_pinverse_xpu_complex64", + "test_noncontiguous_samples_qr_xpu_complex64", + "test_noncontiguous_samples_svd_xpu_complex64", + "test_noncontiguous_samples_tensordot_xpu_complex64", + "test_noncontiguous_samples_tensordot_xpu_int64", + "test_noncontiguous_samples_triangular_solve_xpu_complex64", + "test_numpy_ref_addbmm_xpu_complex128", 
+ "test_numpy_ref_addbmm_xpu_float64", + "test_numpy_ref_addbmm_xpu_int64", + "test_numpy_ref_linalg_tensorinv_xpu_complex128", + "test_out_addbmm_xpu_float32", + "test_out_addmm_xpu_float32", + "test_out_addmv_xpu_float32", + "test_out_baddbmm_xpu_float32", + "test_out_mm_xpu_float32", + "test_out_mv_xpu_float32", + "test_out_requires_grad_error_addbmm_xpu_complex64", + "test_out_requires_grad_error_addmm_decomposed_xpu_complex64", + "test_out_requires_grad_error_addmm_xpu_complex64", + "test_out_requires_grad_error_addmv_xpu_complex64", + "test_out_requires_grad_error_baddbmm_xpu_complex64", + "test_out_requires_grad_error_bmm_xpu_complex64", + "test_out_requires_grad_error_cholesky_inverse_xpu_complex64", + "test_out_requires_grad_error_cholesky_solve_xpu_complex64", + "test_out_requires_grad_error_cholesky_xpu_complex64", + "test_out_requires_grad_error_inner_xpu_complex64", + "test_out_requires_grad_error_linalg_cholesky_ex_xpu_complex64", + "test_out_requires_grad_error_linalg_cholesky_xpu_complex64", + "test_out_requires_grad_error_linalg_det_singular_xpu_complex64", + "test_out_requires_grad_error_linalg_eig_xpu_complex64", + "test_out_requires_grad_error_linalg_eigh_xpu_complex64", + "test_out_requires_grad_error_linalg_eigvals_xpu_complex64", + "test_out_requires_grad_error_linalg_eigvalsh_xpu_complex64", + "test_out_requires_grad_error_linalg_inv_ex_xpu_complex64", + "test_out_requires_grad_error_linalg_inv_xpu_complex64", + "test_out_requires_grad_error_linalg_lstsq_xpu_complex64", + "test_out_requires_grad_error_linalg_lu_factor_xpu_complex64", + "test_out_requires_grad_error_linalg_lu_solve_xpu_complex64", + "test_out_requires_grad_error_linalg_multi_dot_xpu_complex64", + "test_out_requires_grad_error_linalg_pinv_hermitian_xpu_complex64", + "test_out_requires_grad_error_linalg_pinv_xpu_complex64", + "test_out_requires_grad_error_linalg_qr_xpu_complex64", + "test_out_requires_grad_error_linalg_solve_ex_xpu_complex64", + 
"test_out_requires_grad_error_linalg_solve_xpu_complex64", + "test_out_requires_grad_error_linalg_tensorinv_xpu_complex64", + "test_out_requires_grad_error_lu_solve_xpu_complex64", + "test_out_requires_grad_error_lu_xpu_complex64", + "test_out_requires_grad_error_mm_xpu_complex64", + "test_out_requires_grad_error_mv_xpu_complex64", + "test_out_requires_grad_error_nn_functional_linear_xpu_complex64", + "test_out_requires_grad_error_qr_xpu_complex64", + "test_out_requires_grad_error_tensordot_xpu_complex64", + "test_out_requires_grad_error_triangular_solve_xpu_complex64", + "test_out_warning_addmm_decomposed_xpu", + "test_out_warning_addmm_xpu", + "test_out_warning_addmv_xpu", + "test_out_warning_baddbmm_xpu", + "test_out_warning_bmm_xpu", + "test_out_warning_matmul_xpu", + "test_out_warning_mm_xpu", + "test_out_warning_mv_xpu", + "test_out_warning_nn_functional_linear_xpu", + "test_python_ref__refs_linalg_svd_xpu_complex128", + "test_python_ref__refs_linalg_svd_xpu_complex64", + "test_python_ref__refs_linalg_svd_xpu_float64", + "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex128", + "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex64", + "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_float64", + "test_python_ref_executor__refs_nn_functional_pdist_executor_aten_xpu_float64", + "test_python_ref_meta__refs_linalg_svd_xpu_complex128", + "test_python_ref_meta__refs_linalg_svd_xpu_complex64", + "test_python_ref_meta__refs_linalg_svd_xpu_float64", + "test_python_ref_meta__refs_nn_functional_pdist_xpu_float64", + "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex128", + "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex64", + "test_python_ref_torch_fallback__refs_linalg_svd_xpu_float64", + "test_python_ref_torch_fallback__refs_nn_functional_pdist_xpu_float64", + "test_variant_consistency_eager___rmatmul___xpu_complex64", + "test_variant_consistency_eager_addmm_decomposed_xpu_complex64", + 
"test_variant_consistency_eager_addmm_xpu_complex64", + "test_variant_consistency_eager_addmm_xpu_float32", + "test_variant_consistency_eager_addmv_xpu_complex64", + "test_variant_consistency_eager_addmv_xpu_float32", + "test_variant_consistency_eager_baddbmm_xpu_complex64", + "test_variant_consistency_eager_baddbmm_xpu_float32", + "test_variant_consistency_eager_bmm_xpu_complex64", + "test_variant_consistency_eager_cholesky_inverse_xpu_complex64", + "test_variant_consistency_eager_cholesky_solve_xpu_complex64", + "test_variant_consistency_eager_cholesky_xpu_complex64", + "test_variant_consistency_eager_corrcoef_xpu_complex64", + "test_variant_consistency_eager_cov_xpu_complex64", + "test_variant_consistency_eager_einsum_xpu_complex64", + "test_variant_consistency_eager_geqrf_xpu_complex64", + "test_variant_consistency_eager_inner_xpu_complex64", + "test_variant_consistency_eager_linalg_cholesky_ex_xpu_complex64", + "test_variant_consistency_eager_linalg_cholesky_xpu_complex64", + "test_variant_consistency_eager_linalg_cond_xpu_complex64", + "test_variant_consistency_eager_linalg_det_singular_xpu_complex64", + "test_variant_consistency_eager_linalg_det_xpu_complex64", + "test_variant_consistency_eager_linalg_eig_xpu_complex64", + "test_variant_consistency_eager_linalg_eigh_xpu_complex64", + "test_variant_consistency_eager_linalg_eigvals_xpu_complex64", + "test_variant_consistency_eager_linalg_eigvalsh_xpu_complex64", + "test_variant_consistency_eager_linalg_householder_product_xpu_complex64", + "test_variant_consistency_eager_linalg_inv_ex_xpu_complex64", + "test_variant_consistency_eager_linalg_inv_xpu_complex64", + "test_variant_consistency_eager_linalg_ldl_factor_ex_xpu_complex64", + "test_variant_consistency_eager_linalg_ldl_factor_xpu_complex64", + "test_variant_consistency_eager_linalg_ldl_solve_xpu_complex64", + "test_variant_consistency_eager_linalg_lstsq_grad_oriented_xpu_complex64", + "test_variant_consistency_eager_linalg_lstsq_xpu_complex64", + 
"test_variant_consistency_eager_linalg_lu_factor_xpu_complex64", + "test_variant_consistency_eager_linalg_lu_solve_xpu_complex64", + "test_variant_consistency_eager_linalg_matrix_norm_xpu_complex64", + "test_variant_consistency_eager_linalg_matrix_power_xpu_complex64", + "test_variant_consistency_eager_linalg_matrix_rank_hermitian_xpu_complex64", + "test_variant_consistency_eager_linalg_matrix_rank_xpu_complex64", + "test_variant_consistency_eager_linalg_multi_dot_xpu_complex64", + "test_variant_consistency_eager_linalg_norm_subgradients_at_zero_xpu_complex64", + "test_variant_consistency_eager_linalg_norm_xpu_complex64", + "test_variant_consistency_eager_linalg_pinv_hermitian_xpu_complex64", + "test_variant_consistency_eager_linalg_pinv_singular_xpu_complex64", + "test_variant_consistency_eager_linalg_pinv_xpu_complex64", + "test_variant_consistency_eager_linalg_qr_xpu_complex64", + "test_variant_consistency_eager_linalg_slogdet_xpu_complex64", + "test_variant_consistency_eager_linalg_solve_ex_xpu_complex64", + "test_variant_consistency_eager_linalg_solve_triangular_xpu_complex64", + "test_variant_consistency_eager_linalg_solve_xpu_complex64", + "test_variant_consistency_eager_linalg_svd_xpu_complex64", + "test_variant_consistency_eager_linalg_svdvals_xpu_complex64", + "test_variant_consistency_eager_linalg_tensorinv_xpu_complex64", + "test_variant_consistency_eager_linalg_tensorsolve_xpu_complex64", + "test_variant_consistency_eager_logdet_xpu_complex64", + "test_variant_consistency_eager_lu_solve_xpu_complex64", + "test_variant_consistency_eager_lu_xpu_complex64", + "test_variant_consistency_eager_matmul_xpu_complex64", + "test_variant_consistency_eager_mm_xpu_complex64", + "test_variant_consistency_eager_mv_xpu_complex64", + "test_variant_consistency_eager_nn_functional_linear_xpu_complex64", + "test_variant_consistency_eager_norm_nuc_xpu_complex64", + "test_variant_consistency_eager_ormqr_xpu_complex64", + 
"test_variant_consistency_eager_pinverse_xpu_complex64", + "test_variant_consistency_eager_qr_xpu_complex64", + "test_variant_consistency_eager_svd_xpu_complex64", + "test_variant_consistency_eager_tensordot_xpu_complex64", + "test_variant_consistency_eager_triangular_solve_xpu_complex64", + # oneDNN issues + # RuntimeError: value cannot be converted to type float without overflow + # https://github.com/intel/torch-xpu-ops/issues/683 + "test_conj_view_addbmm_xpu_complex64", + "test_neg_conj_view_addbmm_xpu_complex128", + ### Error #0 in TestMathBitsXPU , RuntimeError: Double and complex datatype matmul is not supported in oneDNN + # https://github.com/intel/torch-xpu-ops/issues/254 + "test_conj_view___rmatmul___xpu_complex64", + "test_conj_view__refs_linalg_svd_xpu_complex64", + "test_conj_view_addmm_decomposed_xpu_complex64", + "test_conj_view_addmm_xpu_complex64", + "test_conj_view_addmv_xpu_complex64", + "test_conj_view_addr_xpu_complex64", + "test_conj_view_baddbmm_xpu_complex64", + "test_conj_view_bmm_xpu_complex64", + "test_conj_view_cholesky_inverse_xpu_complex64", + "test_conj_view_cholesky_solve_xpu_complex64", + "test_conj_view_cholesky_xpu_complex64", + "test_conj_view_corrcoef_xpu_complex64", + "test_conj_view_cov_xpu_complex64", + "test_conj_view_einsum_xpu_complex64", + "test_conj_view_geqrf_xpu_complex64", + "test_conj_view_inner_xpu_complex64", + "test_conj_view_linalg_cholesky_ex_xpu_complex64", + "test_conj_view_linalg_cholesky_xpu_complex64", + "test_conj_view_linalg_cond_xpu_complex64", + "test_conj_view_linalg_det_singular_xpu_complex64", + "test_conj_view_linalg_det_xpu_complex64", + "test_conj_view_linalg_eig_xpu_complex64", + "test_conj_view_linalg_eigh_xpu_complex64", + "test_conj_view_linalg_eigvals_xpu_complex64", + "test_conj_view_linalg_eigvalsh_xpu_complex64", + "test_conj_view_linalg_householder_product_xpu_complex64", + "test_conj_view_linalg_inv_ex_xpu_complex64", + "test_conj_view_linalg_inv_xpu_complex64", + 
"test_conj_view_linalg_ldl_factor_ex_xpu_complex64", + "test_conj_view_linalg_ldl_factor_xpu_complex64", + "test_conj_view_linalg_ldl_solve_xpu_complex64", + "test_conj_view_linalg_lstsq_grad_oriented_xpu_complex64", + "test_conj_view_linalg_lstsq_xpu_complex64", + "test_conj_view_linalg_lu_factor_xpu_complex64", + "test_conj_view_linalg_lu_solve_xpu_complex64", + "test_conj_view_linalg_matrix_norm_xpu_complex64", + "test_conj_view_linalg_matrix_power_xpu_complex64", + "test_conj_view_linalg_matrix_rank_hermitian_xpu_complex64", + "test_conj_view_linalg_matrix_rank_xpu_complex64", + "test_conj_view_linalg_multi_dot_xpu_complex64", + "test_conj_view_linalg_norm_subgradients_at_zero_xpu_complex64", + "test_conj_view_linalg_norm_xpu_complex64", + "test_conj_view_linalg_pinv_hermitian_xpu_complex64", + "test_conj_view_linalg_pinv_singular_xpu_complex64", + "test_conj_view_linalg_pinv_xpu_complex64", + "test_conj_view_linalg_qr_xpu_complex64", + "test_conj_view_linalg_slogdet_xpu_complex64", + "test_conj_view_linalg_solve_ex_xpu_complex64", + "test_conj_view_linalg_solve_triangular_xpu_complex64", + "test_conj_view_linalg_solve_xpu_complex64", + "test_conj_view_linalg_svd_xpu_complex64", + "test_conj_view_linalg_svdvals_xpu_complex64", + "test_conj_view_linalg_tensorinv_xpu_complex64", + "test_conj_view_linalg_tensorsolve_xpu_complex64", + "test_conj_view_logdet_xpu_complex64", + "test_conj_view_lu_solve_xpu_complex64", + "test_conj_view_lu_xpu_complex64", + "test_conj_view_matmul_xpu_complex64", + "test_conj_view_mm_xpu_complex64", + "test_conj_view_mv_xpu_complex64", + "test_conj_view_nn_functional_linear_xpu_complex64", + "test_conj_view_norm_nuc_xpu_complex64", + "test_conj_view_ormqr_xpu_complex64", + "test_conj_view_pinverse_xpu_complex64", + "test_conj_view_qr_xpu_complex64", + "test_conj_view_svd_xpu_complex64", + "test_conj_view_tensordot_xpu_complex64", + "test_conj_view_triangular_solve_xpu_complex64", + "test_neg_conj_view_addmm_decomposed_xpu_complex128", + 
"test_neg_conj_view_addmm_xpu_complex128", + "test_neg_conj_view_addmv_xpu_complex128", + "test_neg_conj_view_addr_xpu_complex128", + "test_neg_conj_view_baddbmm_xpu_complex128", + "test_neg_conj_view_bmm_xpu_complex128", + "test_neg_conj_view_cholesky_inverse_xpu_complex128", + "test_neg_conj_view_cholesky_solve_xpu_complex128", + "test_neg_conj_view_cholesky_xpu_complex128", + "test_neg_conj_view_corrcoef_xpu_complex128", + "test_neg_conj_view_cov_xpu_complex128", + "test_neg_conj_view_geqrf_xpu_complex128", + "test_neg_conj_view_inner_xpu_complex128", + "test_neg_conj_view_linalg_cholesky_ex_xpu_complex128", + "test_neg_conj_view_linalg_cholesky_xpu_complex128", + "test_neg_conj_view_linalg_cond_xpu_complex128", + "test_neg_conj_view_linalg_det_singular_xpu_complex128", + "test_neg_conj_view_linalg_eig_xpu_complex128", + "test_neg_conj_view_linalg_eigh_xpu_complex128", + "test_neg_conj_view_linalg_eigvals_xpu_complex128", + "test_neg_conj_view_linalg_eigvalsh_xpu_complex128", + "test_neg_conj_view_linalg_householder_product_xpu_complex128", + "test_neg_conj_view_linalg_inv_ex_xpu_complex128", + "test_neg_conj_view_linalg_inv_xpu_complex128", + "test_neg_conj_view_linalg_ldl_factor_ex_xpu_complex128", + "test_neg_conj_view_linalg_ldl_factor_xpu_complex128", + "test_neg_conj_view_linalg_ldl_solve_xpu_complex128", + "test_neg_conj_view_linalg_lstsq_grad_oriented_xpu_complex128", + "test_neg_conj_view_linalg_lstsq_xpu_complex128", + "test_neg_conj_view_linalg_lu_factor_xpu_complex128", + "test_neg_conj_view_linalg_lu_solve_xpu_complex128", + "test_neg_conj_view_linalg_matrix_rank_hermitian_xpu_complex128", + "test_neg_conj_view_linalg_matrix_rank_xpu_complex128", + "test_neg_conj_view_linalg_multi_dot_xpu_complex128", + "test_neg_conj_view_linalg_pinv_hermitian_xpu_complex128", + "test_neg_conj_view_linalg_pinv_singular_xpu_complex128", + "test_neg_conj_view_linalg_pinv_xpu_complex128", + "test_neg_conj_view_linalg_qr_xpu_complex128", + 
"test_neg_conj_view_linalg_solve_ex_xpu_complex128", + "test_neg_conj_view_linalg_solve_triangular_xpu_complex128", + "test_neg_conj_view_linalg_solve_xpu_complex128", + "test_neg_conj_view_linalg_svdvals_xpu_complex128", + "test_neg_conj_view_linalg_tensorinv_xpu_complex128", + "test_neg_conj_view_linalg_tensorsolve_xpu_complex128", + "test_neg_conj_view_lu_solve_xpu_complex128", + "test_neg_conj_view_lu_xpu_complex128", + "test_neg_conj_view_mm_xpu_complex128", + "test_neg_conj_view_mv_xpu_complex128", + "test_neg_conj_view_nn_functional_linear_xpu_complex128", + "test_neg_conj_view_norm_nuc_xpu_complex128", + "test_neg_conj_view_ormqr_xpu_complex128", + "test_neg_conj_view_pinverse_xpu_complex128", + "test_neg_conj_view_qr_xpu_complex128", + "test_neg_conj_view_tensordot_xpu_complex128", + "test_neg_conj_view_triangular_solve_xpu_complex128", + "test_neg_view___rmatmul___xpu_float64", + "test_neg_view__refs_linalg_svd_xpu_float64", + "test_neg_view__refs_nn_functional_pdist_xpu_float64", + "test_neg_view_addbmm_xpu_float64", + "test_neg_view_addmm_decomposed_xpu_float64", + "test_neg_view_addmm_xpu_float64", + "test_neg_view_addmv_xpu_float64", + "test_neg_view_addr_xpu_float64", + "test_neg_view_baddbmm_xpu_float64", + "test_neg_view_bmm_xpu_float64", + "test_neg_view_cdist_xpu_float64", + "test_neg_view_cholesky_inverse_xpu_float64", + "test_neg_view_cholesky_solve_xpu_float64", + "test_neg_view_cholesky_xpu_float64", + "test_neg_view_corrcoef_xpu_float64", + "test_neg_view_cov_xpu_float64", + "test_neg_view_einsum_xpu_float64", + "test_neg_view_geqrf_xpu_float64", + "test_neg_view_inner_xpu_float64", + "test_neg_view_linalg_cholesky_ex_xpu_float64", + "test_neg_view_linalg_cholesky_xpu_float64", + "test_neg_view_linalg_cond_xpu_float64", + "test_neg_view_linalg_det_singular_xpu_float64", + "test_neg_view_linalg_det_xpu_float64", + "test_neg_view_linalg_eig_xpu_float64", + "test_neg_view_linalg_eigh_xpu_float64", + "test_neg_view_linalg_eigvals_xpu_float64", + 
"test_neg_view_linalg_eigvalsh_xpu_float64", + "test_neg_view_linalg_householder_product_xpu_float64", + "test_neg_view_linalg_inv_ex_xpu_float64", + "test_neg_view_linalg_inv_xpu_float64", + "test_neg_view_linalg_ldl_factor_ex_xpu_float64", + "test_neg_view_linalg_ldl_factor_xpu_float64", + "test_neg_view_linalg_ldl_solve_xpu_float64", + "test_neg_view_linalg_lstsq_grad_oriented_xpu_float64", + "test_neg_view_linalg_lstsq_xpu_float64", + "test_neg_view_linalg_lu_factor_xpu_float64", + "test_neg_view_linalg_lu_solve_xpu_float64", + "test_neg_view_linalg_matrix_norm_xpu_float64", + "test_neg_view_linalg_matrix_power_xpu_float64", + "test_neg_view_linalg_matrix_rank_hermitian_xpu_float64", + "test_neg_view_linalg_matrix_rank_xpu_float64", + "test_neg_view_linalg_multi_dot_xpu_float64", + "test_neg_view_linalg_norm_subgradients_at_zero_xpu_float64", + "test_neg_view_linalg_norm_xpu_float64", + "test_neg_view_linalg_pinv_hermitian_xpu_float64", + "test_neg_view_linalg_pinv_singular_xpu_float64", + "test_neg_view_linalg_pinv_xpu_float64", + "test_neg_view_linalg_qr_xpu_float64", + "test_neg_view_linalg_slogdet_xpu_float64", + "test_neg_view_linalg_solve_ex_xpu_float64", + "test_neg_view_linalg_solve_triangular_xpu_float64", + "test_neg_view_linalg_solve_xpu_float64", + "test_neg_view_linalg_svd_xpu_float64", + "test_neg_view_linalg_svdvals_xpu_float64", + "test_neg_view_linalg_tensorinv_xpu_float64", + "test_neg_view_linalg_tensorsolve_xpu_float64", + "test_neg_view_logdet_xpu_float64", + "test_neg_view_lu_solve_xpu_float64", + "test_neg_view_lu_xpu_float64", + "test_neg_view_matmul_xpu_float64", + "test_neg_view_mm_xpu_float64", + "test_neg_view_mv_xpu_float64", + "test_neg_view_nn_functional_bilinear_xpu_float64", + "test_neg_view_nn_functional_linear_xpu_float64", + "test_neg_view_nn_functional_multi_head_attention_forward_xpu_float64", + "test_neg_view_nn_functional_scaled_dot_product_attention_xpu_float64", + "test_neg_view_norm_nuc_xpu_float64", + 
"test_neg_view_ormqr_xpu_float64", + "test_neg_view_pca_lowrank_xpu_float64", + "test_neg_view_pinverse_xpu_float64", + "test_neg_view_qr_xpu_float64", + "test_neg_view_svd_lowrank_xpu_float64", + "test_neg_view_svd_xpu_float64", + "test_neg_view_tensordot_xpu_float64", + "test_neg_view_triangular_solve_xpu_float64", + "test_noncontiguous_samples_pca_lowrank_xpu_complex64", + "test_noncontiguous_samples_svd_lowrank_xpu_complex64", + "test_variant_consistency_eager_pca_lowrank_xpu_complex64", + "test_variant_consistency_eager_svd_lowrank_xpu_complex64", + "test_conj_view_pca_lowrank_xpu_complex64", + "test_conj_view_svd_lowrank_xpu_complex64", + "test_neg_conj_view_pca_lowrank_xpu_complex128", + "test_neg_conj_view_svd_lowrank_xpu_complex128", + # oneDNN issues + ### Error #1 in TestMathBitsXPU , RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive + # https://github.com/intel/torch-xpu-ops/issues/253 + "test_conj_view_nn_functional_conv_transpose2d_xpu_complex64", + "test_conj_view_nn_functional_conv_transpose3d_xpu_complex64", + "test_neg_view_nn_functional_conv_transpose2d_xpu_float64", + "test_neg_view_nn_functional_conv_transpose3d_xpu_float64", + # implemented aten::histogram to align MPS operators coverage, CUDA doesn't support + # but test_dtypes infrastructure leverages CUDA supported datatypes + "test_dtypes_histogram_xpu", + # Unexpected success, CUDA got XFAIL because CUDA does not have histogramdd supported + "test_errors_histogramdd_xpu", + # 2025 bundle std::pow complex result is different on host and device + "test_python_ref__refs_square_xpu_complex64", + "test_python_ref_torch_fallback__refs_square_xpu_complex64", + "test_python_ref_torch_fallback__refs_exp_xpu_complex128", + # Failed on rolling driver, passed on preci + "test_python_ref__refs_div_trunc_rounding_xpu_float64", + "test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64", + 
"test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64", + # TODO: passed from source code building version, investigate + "test_python_ref__refs_log2_xpu_complex128", + # The following dtypes did not work in backward but are listed by the OpInfo: {torch.bfloat16}. + "test_dtypes_fft_fft2_xpu", + "test_dtypes_fft_fft_xpu", + "test_dtypes_fft_fftn_xpu", + "test_dtypes_fft_hfft2_xpu", + "test_dtypes_fft_hfft_xpu", + "test_dtypes_fft_hfftn_xpu", + "test_dtypes_fft_ifft2_xpu", + "test_dtypes_fft_ifft_xpu", + "test_dtypes_fft_ifftn_xpu", + "test_dtypes_fft_ihfft2_xpu", + "test_dtypes_fft_ihfft_xpu", + "test_dtypes_fft_ihfftn_xpu", + "test_dtypes_fft_irfft2_xpu", + "test_dtypes_fft_irfft_xpu", + "test_dtypes_fft_irfftn_xpu", + "test_dtypes_fft_rfft2_xpu", + "test_dtypes_fft_rfft_xpu", + "test_dtypes_fft_rfftn_xpu", + ), }