Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 54 additions & 0 deletions tools/vllm/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# vLLM Buildkite CI Failure Reporter

Fetches the latest Buildkite CI build for a given branch, extracts all failed steps with failure reasons, and provides direct links to the relevant log lines.

## Usage

```bash
python3 fetch_failures.py --branch <BRANCH> --token <BUILDKITE_TOKEN>
```

### Arguments

| Flag | Required | Description |
|------|----------|-------------|
| `--branch` | Yes | Buildkite branch name, e.g. `atalman:release_212_tests` |
| `--token` | Yes | Buildkite API token (create at https://buildkite.com/user/api-access-tokens, scope: `read_builds`) |
| `--save-local-logs` | No | Save raw logs for each failed job to local files |
| `--output-dir` | No | Directory for saved logs (default: current directory) |

### Examples

```bash
# Print failure report
python3 fetch_failures.py --branch "atalman:release_212_tests" --token "bkua_xxx"

# Print report + save raw logs locally
python3 fetch_failures.py --branch "atalman:release_212_tests" --token "bkua_xxx" --save-local-logs

# Save logs to a specific directory
python3 fetch_failures.py --branch "atalman:release_212_tests" --token "bkua_xxx" --save-local-logs --output-dir /tmp/logs
```

### Sample Output

```
======================================================================
Build #63095 | Branch: atalman:release_212_tests | State: failed
Message: [CI] Fix Dockerfile.cpu to resolve torch 2.12.0 from CPU test channel
Created: 2026-04-27T13:15:10.279Z
Failed steps: 13
======================================================================

1. [Fusion E2E TP2 Quick (H100)]
Log: https://buildkite.com/vllm/ci/builds/63095#019dcf15-7f55-...
Local: /tmp/logs/build_63095/Fusion_E2E_TP2_Quick_H100.log
- tests/compile/fusions_e2e/test_tp2_ar_rms.py::test_tp2_ar_rms_fp8_fusions[...] | RuntimeError: ...
https://buildkite.com/vllm/ci/builds/63095#019dcf15-7f55-.../L1144
```

## How It Works

1. **Get latest build** -- queries Buildkite REST API for the most recent build on the given branch
2. **Get failed steps** -- fetches all steps with `hard_failed` outcome and their job IDs
3. **Extract failure reasons** -- fetches each failed job's log, parses `FAILED` lines from pytest output (and pip `ERROR: No matching distribution` lines), and records the line number for deep linking
209 changes: 209 additions & 0 deletions tools/vllm/fetch_failures.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
#!/usr/bin/env python3
"""Fetch vLLM Buildkite CI failure reports for a given branch."""

from __future__ import annotations

import json
import os
import re
import subprocess
import time
import urllib.error
import urllib.parse
import urllib.request
from dataclasses import dataclass, field
from typing import Any, Optional


ORG = "vllm"
PIPELINE = "ci"
API_BASE = f"https://api.buildkite.com/v2/organizations/{ORG}/pipelines/{PIPELINE}"
PUBLIC_BASE = f"https://buildkite.com/{ORG}/{PIPELINE}"

Failure = tuple[str, str]


@dataclass
class FailedStep:
    """One failed Buildkite command step plus its extracted failure details."""

    label: str  # human-readable step label, e.g. "Fusion E2E TP2 Quick (H100)"
    job_id: str  # Buildkite job UUID, used for log fetching and deep links
    log_url: str = ""  # public build-page URL anchored at this job
    raw_log: str = ""  # full log text; populated by get_failure_reasons
    local_log_path: str = ""  # absolute path of the saved log file, if saved
    failures: list[Failure] = field(default_factory=list)  # (description, deep-link) pairs


@dataclass
class BuildInfo:
    """Summary of one Buildkite build and the steps that failed in it."""

    number: int  # build number, e.g. 63095
    state: str  # Buildkite build state, e.g. "failed"
    message: str  # first line of the build's commit/trigger message
    branch: str  # branch the build ran on, e.g. "atalman:release_212_tests"
    created_at: str  # ISO-8601 creation timestamp as returned by the API
    failed_steps: list[FailedStep] = field(default_factory=list)  # populated by fetch_failure_report


def _request(url: str, token: Optional[str] = None, max_retries: int = 3) -> Any:
cmd = ["curl", "-s"]
if token:
cmd += ["-H", f"Authorization: Bearer {token}"]
cmd.append(url)
for _ in range(max_retries):
r = subprocess.run(cmd, capture_output=True, text=True)
try:
data = json.loads(r.stdout)
except json.JSONDecodeError:
return None
if isinstance(data, dict) and "rate limit" in data.get("message", ""):
time.sleep(6)
continue
return data
return None


def get_latest_build(branch: str, token: str) -> Optional[BuildInfo]:
    """Step 1: Get the latest build for a branch.

    Args:
        branch: Buildkite branch name (may contain ``:`` and other reserved
            characters, e.g. ``atalman:release_212_tests``).
        token: Buildkite API token with ``read_builds`` scope.

    Returns:
        A ``BuildInfo`` for the most recent build, or ``None`` when the API
        call fails or the branch has no builds.
    """
    # Percent-encode the whole branch value, not just ":" — branch names may
    # contain other characters that are reserved in query strings.
    branch_enc = urllib.parse.quote(branch, safe="")
    url = f"{API_BASE}/builds?branch={branch_enc}&per_page=1"
    data = _request(url, token=token)
    # `not data` also covers the empty-list case.
    if not data or not isinstance(data, list):
        return None
    b = data[0]
    return BuildInfo(
        number=b["number"],
        state=b["state"],
        # NOTE(review): the build message can presumably be null (e.g.
        # API-triggered builds) — guard before splitting.
        message=(b.get("message") or "").split("\n")[0],
        branch=b["branch"],
        created_at=b["created_at"],
    )


def get_failed_steps(build_number: int) -> list[FailedStep]:
    """Step 2: Get all failed command steps and their job IDs.

    Uses the public (unauthenticated) build-page data endpoint, filtering to
    command steps whose outcome is ``hard_failed``.

    Args:
        build_number: Buildkite build number.

    Returns:
        One ``FailedStep`` per hard-failed command step; empty list when the
        request fails or returns an unexpected shape.
    """
    url = f"{PUBLIC_BASE}/builds/{build_number}/data/steps?state=failed"
    data = _request(url)
    if not data or not isinstance(data, list):
        return []
    steps = []
    for step in data:
        if step.get("type") != "command" or step.get("outcome") != "hard_failed":
            continue
        # Guard against missing/null fields in the public payload rather
        # than raising KeyError on one malformed step.
        job_id = (step.get("statistics") or {}).get("latest_job_id", "")
        log_url = f"{PUBLIC_BASE}/builds/{build_number}#{job_id}"
        steps.append(
            FailedStep(label=step.get("label", ""), job_id=job_id, log_url=log_url)
        )
    return steps


def get_failure_reasons(
    build_number: int, job_id: str, token: str
) -> tuple[list[Failure], str]:
    """Step 3: Fetch a job's log and extract failure reasons.

    Args:
        build_number: Buildkite build number.
        job_id: Job UUID whose log to fetch.
        token: Buildkite API token.

    Returns:
        ``(failures, raw_log)`` where each failure is an
        ``(entry, deep-link)`` pair pointing at the exact log line.
    """
    url = f"{API_BASE}/builds/{build_number}/jobs/{job_id}/log"
    time.sleep(1.5)  # throttle: log fetches are the rate-limit hot spot
    data = _request(url, token=token)
    if not data or not isinstance(data, dict):
        return [("Could not fetch log", "")], ""
    content = data.get("content", "")
    log_url = f"{PUBLIC_BASE}/builds/{build_number}#{job_id}"
    return _parse_failures(content, log_url), content


def _parse_failures(content: str, log_url: str) -> list[Failure]:
    """Pure parser: extract (entry, deep-link) pairs from raw log text."""
    # Compile once, outside the per-line loop — logs run to many thousands
    # of lines.
    ansi_re = re.compile(r"\x1b\[[0-9;]*m")  # ANSI color codes
    bk_ts_re = re.compile(r"_bk;t=\d+\s*")  # Buildkite inline timestamps
    iso_ts_re = re.compile(r"\[\d{4}-\d{2}-\d{2}T[\d:Z]+\]\s*")  # log timestamps
    failures: list[Failure] = []
    for line_num, line in enumerate(content.split("\n"), 1):
        clean = iso_ts_re.sub("", bk_ts_re.sub("", ansi_re.sub("", line)))
        link = f"{log_url}/L{line_num}"
        if "FAILED" in clean and "::" in clean:
            # pytest failure line: "FAILED path::test[case] - ErrorType: msg"
            parts = clean.split(" - ", 1)
            test = parts[0].replace("FAILED ", "").strip()
            err = parts[1].strip() if len(parts) > 1 else ""
            entry = f"{test} | {err}" if err else test
            failures.append((entry, link))
        elif "ERROR: No matching distribution" in clean:
            # pip resolution failure — truncate to keep the report readable.
            failures.append((clean.strip()[:200], link))
    return failures


def fetch_failure_report(branch: str, token: str) -> Optional[BuildInfo]:
    """Full pipeline: resolve the latest build, then annotate each failed
    step with its extracted failure reasons and raw log."""
    build = get_latest_build(branch, token)
    if build is None:
        print(f"No builds found for branch '{branch}'")
        return None

    build.failed_steps = get_failed_steps(build.number)
    for failed_step in build.failed_steps:
        reasons, log_text = get_failure_reasons(build.number, failed_step.job_id, token)
        failed_step.failures = reasons
        failed_step.raw_log = log_text
    return build


def save_logs(build: BuildInfo, output_dir: str) -> None:
    """Save raw logs for each failed step to local files.

    Creates ``<output_dir>/build_<number>/`` and writes one ``<label>.log``
    per failed step that has log content, recording the absolute path on the
    step's ``local_log_path`` so the printed report can reference it.

    Args:
        build: Build whose ``failed_steps`` carry ``raw_log`` content.
        output_dir: Directory under which the per-build folder is created.
    """
    log_dir = os.path.join(output_dir, f"build_{build.number}")
    os.makedirs(log_dir, exist_ok=True)
    for step in build.failed_steps:
        if not step.raw_log:
            continue  # nothing fetched for this step (e.g. log fetch failed)
        # Sanitize the label into a filesystem-safe name: keep word chars,
        # hyphens, and spaces; spaces become underscores.
        safe_label = re.sub(r"[^\w\- ]", "", step.label).strip().replace(" ", "_")
        if not safe_label:
            safe_label = "step"  # label was all punctuation; avoid bare ".log"
        path = os.path.abspath(os.path.join(log_dir, f"{safe_label}.log"))
        # Explicit UTF-8 so CI log content round-trips regardless of the
        # local machine's default encoding.
        with open(path, "w", encoding="utf-8") as f:
            f.write(step.raw_log)
        step.local_log_path = path
    print(f"Logs saved to {log_dir}/")


def print_report(build: BuildInfo) -> None:
    """Render a human-readable failure report for *build* to stdout."""
    banner = "=" * 70
    summary = [
        banner,
        f"Build #{build.number} | Branch: {build.branch} | State: {build.state}",
        f"Message: {build.message}",
        f"Created: {build.created_at}",
        f"Failed steps: {len(build.failed_steps)}",
        banner,
    ]
    print("\n".join(summary))

    for position, step in enumerate(build.failed_steps, start=1):
        print(f"\n {position}. [{step.label}]")
        print(f" Log: {step.log_url}")
        if step.local_log_path:
            print(f" Local: {step.local_log_path}")
        if not step.failures:
            print(" (no specific test failures extracted)")
            continue
        for entry, link in step.failures:
            print(f" - {entry}")
            print(f" {link}")


if __name__ == "__main__":
    import argparse

    # CLI entry point: fetch the latest build for a branch, optionally save
    # raw logs, then print the failure report (after saving, so local log
    # paths appear in the report).
    parser = argparse.ArgumentParser(description="Fetch vLLM Buildkite CI failures")
    parser.add_argument(
        "--branch",
        required=True,
        help="Branch name, e.g. atalman:release_212_tests",
    )
    parser.add_argument("--token", required=True, help="Buildkite API token")
    parser.add_argument(
        "--save-local-logs",
        action="store_true",
        help="Save raw logs to local files",
    )
    parser.add_argument(
        "--output-dir",
        default=".",
        help="Directory for saved logs (default: current dir)",
    )
    args = parser.parse_args()

    build = fetch_failure_report(args.branch, args.token)
    if build:
        if args.save_local_logs:
            save_logs(build, args.output_dir)
        print_report(build)
Loading