From 0cbd6a36579a080bba4e62068821e0469fdefa1f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Oct 2025 06:58:46 +0000
Subject: [PATCH 01/17] build(deps): bump torch from 2.8.0 to 2.9.0 in /requirements

Bumps [torch](https://github.com/pytorch/pytorch) from 2.8.0 to 2.9.0.
- [Release notes](https://github.com/pytorch/pytorch/releases)
- [Changelog](https://github.com/pytorch/pytorch/blob/main/RELEASE.md)
- [Commits](https://github.com/pytorch/pytorch/compare/v2.8.0...v2.9.0)

---
updated-dependencies:
- dependency-name: torch
  dependency-version: 2.9.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 requirements/fabric/base.txt  | 2 +-
 requirements/pytorch/base.txt | 2 +-
 requirements/typing.txt       | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/requirements/fabric/base.txt b/requirements/fabric/base.txt
index 0a7629151f6c1..ad36bc25fcc13 100644
--- a/requirements/fabric/base.txt
+++ b/requirements/fabric/base.txt
@@ -1,7 +1,7 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torch >=2.1.0, <2.9.0
+torch >=2.1.0, <2.10.0
 fsspec[http] >=2022.5.0, <2025.10.0
 packaging >=20.0, <=25.0
 typing-extensions >4.5.0, <4.16.0
diff --git a/requirements/pytorch/base.txt b/requirements/pytorch/base.txt
index 9c7a60bd49f0e..1dbf53134cb31 100644
--- a/requirements/pytorch/base.txt
+++ b/requirements/pytorch/base.txt
@@ -1,7 +1,7 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torch >=2.1.0, <2.9.0
+torch >=2.1.0, <2.10.0
 tqdm >=4.57.0, <4.68.0
 PyYAML >5.4, <6.1.0
 fsspec[http] >=2022.5.0, <2025.10.0
diff --git a/requirements/typing.txt b/requirements/typing.txt
index dc848c55e583d..8c5ad38fb7825 100644
--- a/requirements/typing.txt
+++ b/requirements/typing.txt
@@ -1,5 +1,5 @@
 mypy==1.18.2
-torch==2.8.0
+torch==2.9.0
 
 types-Markdown
 types-PyYAML

From c5bde432ce0f551e11f122fe052d6bb7ce4efc1a Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 20 Oct 2025 13:27:28 +0545
Subject: [PATCH 02/17] build(deps): update torchvision version constraint to <0.25.0 in examples.txt

---
 requirements/fabric/examples.txt  | 2 +-
 requirements/pytorch/examples.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements/fabric/examples.txt b/requirements/fabric/examples.txt
index ab6ffb8b137df..72f13a4128e56 100644
--- a/requirements/fabric/examples.txt
+++ b/requirements/fabric/examples.txt
@@ -1,5 +1,5 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
-torchvision >=0.16.0, <0.24.0
+torchvision >=0.16.0, <0.25.0
 torchmetrics >=0.10.0, <1.9.0
diff --git a/requirements/pytorch/examples.txt b/requirements/pytorch/examples.txt
index b64ed5ee47c67..b17e755cd18e2 100644
--- a/requirements/pytorch/examples.txt
+++ b/requirements/pytorch/examples.txt
@@ -2,6 +2,6 @@ # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
 
 requests <2.33.0
-torchvision >=0.16.0, <0.24.0
+torchvision >=0.16.0, <0.25.0
 ipython[all] >=8.0.0, <10.0.0
 torchmetrics >=0.10.0, <1.9.0

From 3c6c84aa6628e444f178b3fb69c2cec4ba29d240 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 20 Oct 2025 14:39:03 +0545
Subject: [PATCH 03/17] add ignore

---
 src/lightning/fabric/utilities/spike.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/lightning/fabric/utilities/spike.py b/src/lightning/fabric/utilities/spike.py
index 9c1b0a2a00572..13309458ff57e 100644
--- a/src/lightning/fabric/utilities/spike.py
+++ b/src/lightning/fabric/utilities/spike.py
@@ -126,16 +126,16 @@ def _handle_spike(self, fabric: "Fabric", batch_idx: int) -> None:
         raise TrainingSpikeException(batch_idx=batch_idx)
 
     def _check_atol(self, val_a: Union[float, torch.Tensor], val_b: Union[float, torch.Tensor]) -> bool:
-        return (self.atol is None) or bool(abs(val_a - val_b) >= abs(self.atol))
+        return (self.atol is None) or bool(abs(val_a - val_b) >= abs(self.atol))  # type: ignore
 
     def _check_rtol(self, val_a: Union[float, torch.Tensor], val_b: Union[float, torch.Tensor]) -> bool:
-        return (self.rtol is None) or bool(abs(val_a - val_b) >= abs(self.rtol * val_b))
+        return (self.rtol is None) or bool(abs(val_a - val_b) >= abs(self.rtol * val_b))  # type: ignore
 
     def _is_better(self, diff_val: torch.Tensor) -> bool:
         if self.mode == "min":
-            return bool((diff_val <= 0.0).all())
+            return bool((diff_val <= 0.0).all())  # type: ignore[operator]
 
         if self.mode == "max":
-            return bool((diff_val >= 0).all())
+            return bool((diff_val >= 0).all())  # type: ignore[operator]
 
         raise ValueError(f"Invalid mode. Has to be min or max, found {self.mode}")

From e51148358ed476f0c82f668d39997534def1e498 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 20 Oct 2025 14:44:19 +0545
Subject: [PATCH 04/17] remove unused ignore

---
 src/lightning/fabric/utilities/spike.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lightning/fabric/utilities/spike.py b/src/lightning/fabric/utilities/spike.py
index 13309458ff57e..cd2e05309e087 100644
--- a/src/lightning/fabric/utilities/spike.py
+++ b/src/lightning/fabric/utilities/spike.py
@@ -133,9 +133,9 @@ def _check_rtol(self, val_a: Union[float, torch.Tensor], val_b: Union[float, tor
 
     def _is_better(self, diff_val: torch.Tensor) -> bool:
         if self.mode == "min":
-            return bool((diff_val <= 0.0).all())  # type: ignore[operator]
+            return bool((diff_val <= 0.0).all())
 
         if self.mode == "max":
-            return bool((diff_val >= 0).all())  # type: ignore[operator]
+            return bool((diff_val >= 0).all())
 
         raise ValueError(f"Invalid mode. Has to be min or max, found {self.mode}")

From 4e500b1c9346dd1dd97904f199a57174c4411750 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Tue, 21 Oct 2025 13:58:34 +0545
Subject: [PATCH 05/17] Empty Commit

From f26f1d1dd5ff79ec09b36a5073858e52a7346c70 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 10 Nov 2025 10:52:21 +0545
Subject: [PATCH 06/17] fix: add handling for InductorSubproc thread in thread police function

---
 tests/tests_fabric/conftest.py  | 1 +
 tests/tests_pytorch/conftest.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/tests/tests_fabric/conftest.py b/tests/tests_fabric/conftest.py
index 9d4a0b9462f2e..a11a9f93e569d 100644
--- a/tests/tests_fabric/conftest.py
+++ b/tests/tests_fabric/conftest.py
@@ -111,6 +111,7 @@ def thread_police_duuu_daaa_duuu_daaa():
             sys.version_info >= (3, 9)
             and isinstance(thread, _ExecutorManagerThread)
             or "ThreadPoolExecutor-" in thread.name
+            or thread.name == "InductorSubproc"  # torch.compile
         ):
             # probably `torch.compile`, can't narrow it down further
             continue
diff --git a/tests/tests_pytorch/conftest.py b/tests/tests_pytorch/conftest.py
index 878298c6bfd94..da48878c7f670 100644
--- a/tests/tests_pytorch/conftest.py
+++ b/tests/tests_pytorch/conftest.py
@@ -170,6 +170,7 @@ def thread_police_duuu_daaa_duuu_daaa():
             sys.version_info >= (3, 9)
             and isinstance(thread, _ExecutorManagerThread)
             or "ThreadPoolExecutor-" in thread.name
+            or thread.name == "InductorSubproc"  # torch.compile
        ):
             # probably `torch.compile`, can't narrow it down further
             continue

From 16fa9239a893f3abda88eebe397e0e8730b849b3 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 10 Nov 2025 11:36:39 +0545
Subject: [PATCH 07/17] let's try with extra index url

---
 .lightning/workflows/pytorch.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.lightning/workflows/pytorch.yml b/.lightning/workflows/pytorch.yml
index 15dfc4a1f9064..f15c7c9f46511 100644
--- a/.lightning/workflows/pytorch.yml
+++ b/.lightning/workflows/pytorch.yml
@@ -121,7 +121,7 @@ run: |
 
   echo "Install package"
   extra=$(python -c "print({'lightning': 'pytorch-'}.get('${PACKAGE_NAME}', ''))")
-  uv pip install -e ".[${extra}dev]" --upgrade
+  uv pip install -e ".[${extra}dev]" --upgrade --extra-index-url https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}
 
   echo "Ensure only a single package is installed"
   if [ "${PACKAGE_NAME}" == "pytorch" ]; then

From 8748445dc66732325fa400c186b528d3d405ed39 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 10 Nov 2025 12:02:17 +0545
Subject: [PATCH 08/17] fix: prefer CUDA-specific packages from PyTorch index using find-links

---
 .lightning/workflows/pytorch.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.lightning/workflows/pytorch.yml b/.lightning/workflows/pytorch.yml
index f15c7c9f46511..7783c91b596d0 100644
--- a/.lightning/workflows/pytorch.yml
+++ b/.lightning/workflows/pytorch.yml
@@ -121,7 +121,10 @@ run: |
 
   echo "Install package"
   extra=$(python -c "print({'lightning': 'pytorch-'}.get('${PACKAGE_NAME}', ''))")
-  uv pip install -e ".[${extra}dev]" --upgrade --extra-index-url https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}
+
+  # Use find-links to prefer CUDA-specific packages from PyTorch index
+  uv pip install -e ".[${extra}dev]" --upgrade \
+    --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}
 
   echo "Ensure only a single package is installed"
   if [ "${PACKAGE_NAME}" == "pytorch" ]; then

From 98a70c97ce8c8880f1cbdcb3c0f51a2639007fc5 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 10 Nov 2025 12:25:36 +0545
Subject: [PATCH 09/17] update

---
 .lightning/workflows/pytorch.yml | 3 +--
 requirements/pytorch/test.txt    | 3 ++-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.lightning/workflows/pytorch.yml b/.lightning/workflows/pytorch.yml
index 7783c91b596d0..2abbf09c3666a 100644
--- a/.lightning/workflows/pytorch.yml
+++ b/.lightning/workflows/pytorch.yml
@@ -123,8 +123,7 @@ run: |
   extra=$(python -c "print({'lightning': 'pytorch-'}.get('${PACKAGE_NAME}', ''))")
 
   # Use find-links to prefer CUDA-specific packages from PyTorch index
-  uv pip install -e ".[${extra}dev]" --upgrade \
-    --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}
+  uv pip install -e ".[${extra}dev]" --upgrade
 
   echo "Ensure only a single package is installed"
   if [ "${PACKAGE_NAME}" == "pytorch" ]; then
diff --git a/requirements/pytorch/test.txt b/requirements/pytorch/test.txt
index 9a315c25bfa21..b22b2a3679946 100644
--- a/requirements/pytorch/test.txt
+++ b/requirements/pytorch/test.txt
@@ -21,5 +21,6 @@ uvicorn # for `ServableModuleValidator` # not setting version as re-defined in
 
 tensorboard >=2.11, <2.21.0  # for `TensorBoardLogger`
 
-torch-tensorrt; platform_system == "Linux" and python_version >= "3.12"
+# TODO: resolve GPU test failures for TensorRT due to defaulting to cu13 installations
+torch-tensorrt<2.9.0; platform_system == "Linux" and python_version >= "3.12"
 huggingface-hub

From 4674012102b69d83b4c30122fe4bb9bf5d9a4e93 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Mon, 10 Nov 2025 13:09:19 +0545
Subject: [PATCH 10/17] add find link

---
 .lightning/workflows/pytorch.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.lightning/workflows/pytorch.yml b/.lightning/workflows/pytorch.yml
index 2abbf09c3666a..e6db4eeb33cd4 100644
--- a/.lightning/workflows/pytorch.yml
+++ b/.lightning/workflows/pytorch.yml
@@ -123,7 +123,9 @@ run: |
   extra=$(python -c "print({'lightning': 'pytorch-'}.get('${PACKAGE_NAME}', ''))")
 
   # Use find-links to prefer CUDA-specific packages from PyTorch index
-  uv pip install -e ".[${extra}dev]" --upgrade
+  uv pip install -e ".[${extra}dev]" --upgrade \
+    --find-links="https://download.pytorch.org/whl/${UV_TORCH_BACKEND}"
+  uv pip list
 
   echo "Ensure only a single package is installed"
   if [ "${PACKAGE_NAME}" == "pytorch" ]; then

From cd17e08544b571de99140b40d13c22a508691897 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Tue, 18 Nov 2025 11:45:44 +0545
Subject: [PATCH 11/17] update; revert torch tensorrt pin

---
 requirements/pytorch/test.txt | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/requirements/pytorch/test.txt b/requirements/pytorch/test.txt
index b22b2a3679946..9a315c25bfa21 100644
--- a/requirements/pytorch/test.txt
+++ b/requirements/pytorch/test.txt
@@ -21,6 +21,5 @@ uvicorn # for `ServableModuleValidator` # not setting version as re-defined in
 
 tensorboard >=2.11, <2.21.0  # for `TensorBoardLogger`
 
-# TODO: resolve GPU test failures for TensorRT due to defaulting to cu13 installations
-torch-tensorrt<2.9.0; platform_system == "Linux" and python_version >= "3.12"
+torch-tensorrt; platform_system == "Linux" and python_version >= "3.12"
 huggingface-hub

From 53265fb197a025e29fac31660f2f59e0780ff360 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Fri, 21 Nov 2025 22:17:30 +0545
Subject: [PATCH 12/17] add index url for torch tensorrt

---
 .lightning/workflows/pytorch.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.lightning/workflows/pytorch.yml b/.lightning/workflows/pytorch.yml
index e6db4eeb33cd4..846ab4ec43cbf 100644
--- a/.lightning/workflows/pytorch.yml
+++ b/.lightning/workflows/pytorch.yml
@@ -125,6 +125,7 @@ run: |
   # Use find-links to prefer CUDA-specific packages from PyTorch index
   uv pip install -e ".[${extra}dev]" --upgrade \
     --find-links="https://download.pytorch.org/whl/${UV_TORCH_BACKEND}"
+    --find-links="https://download.pytorch.org/whl/${UV_TORCH_BACKEND}/torch-tensorrt"
   uv pip list
 
   echo "Ensure only a single package is installed"

From 188a2976c136c779f71ff49e0ef2aef8fa095171 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Fri, 21 Nov 2025 22:29:29 +0545
Subject: [PATCH 13/17] fix multiline command

---
 .lightning/workflows/pytorch.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.lightning/workflows/pytorch.yml b/.lightning/workflows/pytorch.yml
index 846ab4ec43cbf..59b485946721e 100644
--- a/.lightning/workflows/pytorch.yml
+++ b/.lightning/workflows/pytorch.yml
@@ -124,7 +124,7 @@ run: |
 
   # Use find-links to prefer CUDA-specific packages from PyTorch index
   uv pip install -e ".[${extra}dev]" --upgrade \
-    --find-links="https://download.pytorch.org/whl/${UV_TORCH_BACKEND}"
+    --find-links="https://download.pytorch.org/whl/${UV_TORCH_BACKEND}" \
     --find-links="https://download.pytorch.org/whl/${UV_TORCH_BACKEND}/torch-tensorrt"
   uv pip list
 

From f8efc0ff09172120cff942977669305823189e1d Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Fri, 21 Nov 2025 23:23:13 +0545
Subject: [PATCH 14/17] update

---
 tests/tests_pytorch/trainer/test_trainer.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py
index ea3c31a370fce..6844cf2596e30 100644
--- a/tests/tests_pytorch/trainer/test_trainer.py
+++ b/tests/tests_pytorch/trainer/test_trainer.py
@@ -1750,6 +1750,7 @@ def on_train_epoch_start(self, trainer, *_):
     def current_memory():
         # before measuring the memory force release any leftover allocations, including CUDA tensors
         gc.collect()
+        torch.cuda.empty_cache()
         return torch.cuda.memory_allocated(0)
 
     model = TestModel()

From 0e9d81ba6da6e8ad805b85cfc901442380000354 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Fri, 21 Nov 2025 23:57:08 +0545
Subject: [PATCH 15/17] refactor: update torch version checks to use greater than or equal comparisons

---
 src/lightning/pytorch/utilities/imports.py  |  2 +-
 tests/tests_pytorch/helpers/runif.py        |  4 ++--
 tests/tests_pytorch/trainer/test_trainer.py | 10 +++++++---
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/lightning/pytorch/utilities/imports.py b/src/lightning/pytorch/utilities/imports.py
index ab99457eee2d1..efed7f0f2ce75 100644
--- a/src/lightning/pytorch/utilities/imports.py
+++ b/src/lightning/pytorch/utilities/imports.py
@@ -28,7 +28,7 @@
 _TORCHMETRICS_GREATER_EQUAL_0_11 = RequirementCache("torchmetrics>=0.11.0")  # using new API with task
 _TORCHMETRICS_GREATER_EQUAL_1_0_0 = RequirementCache("torchmetrics>=1.0.0")
 _TORCH_EQUAL_2_8 = RequirementCache("torch>=2.8.0,<2.9.0")
-_TORCH_EQUAL_2_9 = RequirementCache("torch>=2.9.0,<2.10.0")
+_TORCH_GREATER_EQUAL_2_8 = compare_version("torch", operator.ge, "2.8.0")
 
 _OMEGACONF_AVAILABLE = package_available("omegaconf")
 _TORCHVISION_AVAILABLE = RequirementCache("torchvision")
diff --git a/tests/tests_pytorch/helpers/runif.py b/tests/tests_pytorch/helpers/runif.py
index 76204784cce0a..cf2ff39c698c7 100644
--- a/tests/tests_pytorch/helpers/runif.py
+++ b/tests/tests_pytorch/helpers/runif.py
@@ -14,7 +14,7 @@
 import pytest
 
 from lightning.fabric.utilities.imports import _IS_WINDOWS
-from lightning.pytorch.utilities.imports import _TORCH_EQUAL_2_8, _TORCH_EQUAL_2_9
+from lightning.pytorch.utilities.imports import _TORCH_GREATER_EQUAL_2_8
 from lightning.pytorch.utilities.testing import _runif_reasons
 
 
@@ -27,6 +27,6 @@ def RunIf(**kwargs):
 _xfail_gloo_windows = pytest.mark.xfail(
     RuntimeError,
     strict=True,
-    condition=(_IS_WINDOWS and (_TORCH_EQUAL_2_8 or _TORCH_EQUAL_2_9)),
+    condition=(_IS_WINDOWS and _TORCH_GREATER_EQUAL_2_8),
     reason="makeDeviceForHostname(): unsupported gloo device",
 )
diff --git a/tests/tests_pytorch/trainer/test_trainer.py b/tests/tests_pytorch/trainer/test_trainer.py
index 6844cf2596e30..76860fd82733f 100644
--- a/tests/tests_pytorch/trainer/test_trainer.py
+++ b/tests/tests_pytorch/trainer/test_trainer.py
@@ -55,7 +55,7 @@
 from lightning.pytorch.strategies.launchers import _MultiProcessingLauncher, _SubprocessScriptLauncher
 from lightning.pytorch.trainer.states import RunningStage, TrainerFn
 from lightning.pytorch.utilities.exceptions import MisconfigurationException
-from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE, _TORCH_EQUAL_2_8
+from lightning.pytorch.utilities.imports import _OMEGACONF_AVAILABLE, _TORCH_GREATER_EQUAL_2_8
 from tests_pytorch.conftest import mock_cuda_count, mock_mps_count
 from tests_pytorch.helpers.datamodules import ClassifDataModule
 from tests_pytorch.helpers.runif import RunIf
@@ -1730,7 +1730,12 @@ def test_exception_when_lightning_module_is_not_set_on_trainer(fn):
 
 @RunIf(min_cuda_gpus=1)
 # FixMe: the memory raises to 1024 from expected 512
-@pytest.mark.xfail(AssertionError, strict=True, condition=_TORCH_EQUAL_2_8, reason="temporarily disabled for torch 2.8")
+@pytest.mark.xfail(
+    AssertionError,
+    strict=True,
+    condition=_TORCH_GREATER_EQUAL_2_8,
+    reason="temporarily disabled for torch >= 2.8",
+)
 def test_multiple_trainer_constant_memory_allocated(tmp_path):
     """This tests ensures calling the trainer several times reset the memory back to 0."""
 
@@ -1750,7 +1755,6 @@ def on_train_epoch_start(self, trainer, *_):
     def current_memory():
         # before measuring the memory force release any leftover allocations, including CUDA tensors
         gc.collect()
-        torch.cuda.empty_cache()
         return torch.cuda.memory_allocated(0)
 
     model = TestModel()

From a600f416dfb6132a9eef474e07c6be80be0ee855 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Sat, 22 Nov 2025 00:44:51 +0545
Subject: [PATCH 16/17] update

---
 src/lightning/pytorch/utilities/imports.py        |  1 +
 tests/tests_pytorch/models/test_torch_tensorrt.py | 10 +++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/lightning/pytorch/utilities/imports.py b/src/lightning/pytorch/utilities/imports.py
index efed7f0f2ce75..f7305fbda8f90 100644
--- a/src/lightning/pytorch/utilities/imports.py
+++ b/src/lightning/pytorch/utilities/imports.py
@@ -28,6 +28,7 @@
 _TORCHMETRICS_GREATER_EQUAL_0_11 = RequirementCache("torchmetrics>=0.11.0")  # using new API with task
 _TORCHMETRICS_GREATER_EQUAL_1_0_0 = RequirementCache("torchmetrics>=1.0.0")
 _TORCH_EQUAL_2_8 = RequirementCache("torch>=2.8.0,<2.9.0")
+_TORCH_EQUAL_2_9 = RequirementCache("torch>=2.9.0,<2.10.0")
 _TORCH_GREATER_EQUAL_2_8 = compare_version("torch", operator.ge, "2.8.0")
 
 _OMEGACONF_AVAILABLE = package_available("omegaconf")
diff --git a/tests/tests_pytorch/models/test_torch_tensorrt.py b/tests/tests_pytorch/models/test_torch_tensorrt.py
index 630e59f711348..44a756ceb82de 100644
--- a/tests/tests_pytorch/models/test_torch_tensorrt.py
+++ b/tests/tests_pytorch/models/test_torch_tensorrt.py
@@ -10,6 +10,7 @@
 from lightning.pytorch.core.module import _TORCH_TRT_AVAILABLE
 from lightning.pytorch.demos.boring_classes import BoringModel
 from lightning.pytorch.utilities.exceptions import MisconfigurationException
+from lightning.pytorch.utilities.imports import _TORCH_EQUAL_2_9
 from tests_pytorch.helpers.runif import RunIf
 
 
@@ -110,7 +111,14 @@ def test_tensorrt_saves_on_multi_gpu(tmp_path):
     [
         ("default", torch.fx.GraphModule),
         ("dynamo", torch.fx.GraphModule),
-        ("ts", torch.jit.ScriptModule),
+        pytest.param(
+            "ts",
+            torch.jit.ScriptModule,
+            marks=pytest.mark.skipif(
+                _TORCH_EQUAL_2_9,
+                reason="TorchScript IR crashes with torch_tensorrt on PyTorch 2.9",
+            ),
+        ),
     ],
 )
 @RunIf(tensorrt=True, min_cuda_gpus=1, min_torch="2.2.0")

From 3a9617b9434ffbf82927684b5cc2a854bae43b29 Mon Sep 17 00:00:00 2001
From: bhimrazy
Date: Sat, 22 Nov 2025 01:16:57 +0545
Subject: [PATCH 17/17] update

---
 tests/tests_pytorch/models/test_torch_tensorrt.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/tests/tests_pytorch/models/test_torch_tensorrt.py b/tests/tests_pytorch/models/test_torch_tensorrt.py
index 44a756ceb82de..1ab8948d4482e 100644
--- a/tests/tests_pytorch/models/test_torch_tensorrt.py
+++ b/tests/tests_pytorch/models/test_torch_tensorrt.py
@@ -136,7 +136,17 @@ def test_tensorrt_save_ir_type(ir, export_type):
 )
 @pytest.mark.parametrize(
     "ir",
-    ["default", "dynamo", "ts"],
+    [
+        "default",
+        "dynamo",
+        pytest.param(
+            "ts",
+            marks=pytest.mark.skipif(
+                _TORCH_EQUAL_2_9,
+                reason="TorchScript IR crashes with torch_tensorrt on PyTorch 2.9",
+            ),
+        ),
+    ],
 )
 @RunIf(tensorrt=True, min_cuda_gpus=1, min_torch="2.2.0")
 def test_tensorrt_export_reload(output_format, ir, tmp_path):