Skip to content

Commit b60111b

Browse files
upgrade torch from 2.10.dev to 2.11.dev (#3989)
1 parent f974e86 commit b60111b

File tree

12 files changed

+54
-26
lines changed

12 files changed

+54
-26
lines changed

MODULE.bazel

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
module(
22
name = "torch_tensorrt",
3-
version = "2.10.0a0",
3+
version = "2.11.0a0",
44
repo_name = "org_pytorch_tensorrt",
55
)
66

README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,9 @@ Torch-TensorRT
55
<h4> Easily achieve the best inference performance for any PyTorch model on the NVIDIA platform. </h4>
66

77
[![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
8-
[![pytorch](https://img.shields.io/badge/PyTorch-2.10-green)](https://download.pytorch.org/whl/nightly/cu130)
8+
[![pytorch](https://img.shields.io/badge/PyTorch-2.11-green)](https://download.pytorch.org/whl/nightly/cu130)
99
[![cuda](https://img.shields.io/badge/CUDA-13.0-green)](https://developer.nvidia.com/cuda-downloads)
10-
[![trt](https://img.shields.io/badge/TensorRT-10.14.0-green)](https://github.com/nvidia/tensorrt)
10+
[![trt](https://img.shields.io/badge/TensorRT-10.14.1-green)](https://github.com/nvidia/tensorrt)
1111
[![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE)
1212
[![Linux x86-64 Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml)
1313
[![Linux SBSA Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml)
@@ -121,7 +121,7 @@ auto results = trt_mod.forward({input_tensor});
121121
These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.
122122
123123
- Bazel 8.1.1
124-
- Libtorch 2.10.0.dev (latest nightly)
124+
- Libtorch 2.11.0.dev (latest nightly)
125125
- CUDA 13.0 (CUDA 12.6 on Jetson)
126126
- TensorRT 10.14.1.48 (TensorRT 10.3 on Jetson)
127127

cpp/include/torch_tensorrt/macros.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
#define STR(x) XSTR(x)
2525

2626
#define TORCH_TENSORRT_MAJOR_VERSION 2
27-
#define TORCH_TENSORRT_MINOR_VERSION 10
27+
#define TORCH_TENSORRT_MINOR_VERSION 11
2828
#define TORCH_TENSORRT_PATCH_VERSION 0
2929
#define TORCH_TENSORRT_VERSION \
3030
STR(TORCH_TENSORRT_MAJOR_VERSION) \

py/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ numpy
22
packaging
33
pybind11==2.6.2
44
--extra-index-url https://download.pytorch.org/whl/nightly/cu130
5-
torch>=2.10.0.dev,<2.11.0
5+
torch>=2.11.0.dev,<2.12.0
66
--extra-index-url https://pypi.ngc.nvidia.com
77
pyyaml
88
dllist

py/torch_tensorrt/_features.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,9 @@
5252
_WINDOWS_CROSS_COMPILE = check_cross_compile_trt_win_lib()
5353
_TRTLLM_AVAIL = load_tensorrt_llm_for_nccl()
5454

55-
if importlib.util.find_spec("tensorrt.plugin"):
55+
if importlib.util.find_spec("tensorrt.plugin") and importlib.util.find_spec(
56+
"tensorrt.plugin._lib"
57+
):
5658
_QDP_PLUGIN_AVAIL = True
5759
else:
5860
_QDP_PLUGIN_AVAIL = False

py/torch_tensorrt/dynamo/runtime/meta_ops/register_meta_ops.py

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,35 @@
66
from torch_tensorrt.dynamo.utils import input_is_dynamic, unwrap_tensor_shape
77

88

9+
@torch.library.register_fake("aten::cudnn_grid_sampler") # type: ignore
10+
def fake_aten_cudnn_grid_sampler(
11+
input: torch.Tensor,
12+
grid: torch.Tensor,
13+
interpolation_mode: int = 0,
14+
padding_mode: int = 0,
15+
align_corners: bool = True,
16+
) -> torch.Tensor:
17+
"""
18+
Meta kernel for aten::cudnn_grid_sampler to enable FakeTensor/compile flows.
19+
Shapes follow grid_sampler semantics:
20+
- 2D: input [N, C, H_in, W_in], grid [N, H_out, W_out, 2] -> output [N, C, H_out, W_out]
21+
- 3D: input [N, C, D_in, H_in, W_in], grid [N, D_out, H_out, W_out, 3] -> output [N, C, D_out, H_out, W_out]
22+
"""
23+
if grid.dim() == 4:
24+
n, h_out, w_out, _ = grid.shape
25+
c = input.shape[1]
26+
out_shape = [n, c, h_out, w_out]
27+
elif grid.dim() == 5:
28+
n, d_out, h_out, w_out, _ = grid.shape
29+
c = input.shape[1]
30+
out_shape = [n, c, d_out, h_out, w_out]
31+
else:
32+
raise RuntimeError(
33+
f"aten::cudnn_grid_sampler: unexpected grid rank {grid.dim()}"
34+
)
35+
return torch.empty(out_shape, dtype=input.dtype, device=input.device)
36+
37+
938
@torch.library.register_fake("tensorrt::execute_engine") # type: ignore
1039
def fake_tensorrt_execute_engine(
1140
inputs: List[torch.Tensor], fake_trt_engine: Any

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ requires = [
66
"ninja>=1.11.0",
77
"pyyaml>=6.0",
88
"cffi>=1.15.1",
9-
"torch>=2.10.0.dev,<2.11.0",
9+
"torch>=2.11.0.dev,<2.12.0",
1010
"pybind11==2.6.2",
1111
]
1212
build-backend = "setuptools.build_meta"

setup.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -742,7 +742,7 @@ def get_sbsa_requirements(base_requirements):
742742
# TensorRT does not currently build wheels for Tegra, so we need to use the local tensorrt install from the tarball for thor
743743
# also due to we use sbsa torch_tensorrt wheel for thor, so when we build sbsa wheel, we need to only include tensorrt dependency.
744744
return requirements + [
745-
"torch>=2.10.0.dev,<2.11.0",
745+
"torch>=2.11.0.dev,<2.12.0",
746746
"tensorrt>=10.14.1,<10.15.0",
747747
]
748748

@@ -753,7 +753,7 @@ def get_x86_64_requirements(base_requirements):
753753
if IS_DLFW_CI:
754754
return requirements
755755
else:
756-
requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
756+
requirements = requirements + ["torch>=2.11.0.dev,<2.12.0"]
757757
if USE_TRT_RTX:
758758
return requirements + [
759759
"tensorrt_rtx>=1.2.0.54",

tests/py/dynamo/automatic_plugin/test_automatic_plugin.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,12 @@
33

44
import torch
55
import torch.nn as nn
6+
import torch_tensorrt
67
import triton
78
import triton.language as tl
89
from parameterized import parameterized
910
from torch.testing._internal.common_utils import run_tests
1011

11-
import torch_tensorrt
12-
1312
from ..conversion.harness import DispatchTestCase
1413

1514

@@ -56,15 +55,15 @@ def elementwise_mul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
5655
return x
5756

5857

59-
if not torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx:
58+
if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:
6059
torch_tensorrt.dynamo.conversion.plugins.custom_op(
6160
"torchtrt_ex::elementwise_mul", supports_dynamic_shapes=True
6261
)
6362

6463

6564
@unittest.skipIf(
66-
torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
67-
"TensorRT RTX does not support plugins",
65+
not torch_tensorrt.ENABLED_FEATURES.qdp_plugin,
66+
"QDP Plugin is not available",
6867
)
6968
class TestAutomaticPlugin(DispatchTestCase):
7069

tests/py/dynamo/automatic_plugin/test_automatic_plugin_with_attrs.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,12 @@
33

44
import torch
55
import torch.nn as nn
6+
import torch_tensorrt
67
import triton
78
import triton.language as tl
89
from parameterized import parameterized
910
from torch.testing._internal.common_utils import run_tests
1011

11-
import torch_tensorrt
12-
1312
from ..conversion.harness import DispatchTestCase
1413

1514

@@ -57,15 +56,15 @@ def _(x: torch.Tensor, y: torch.Tensor, b: float = 0.2, a: int = 2) -> torch.Ten
5756
return x
5857

5958

60-
if not torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx:
59+
if torch_tensorrt.ENABLED_FEATURES.qdp_plugin:
6160
torch_tensorrt.dynamo.conversion.plugins.custom_op(
6261
"torchtrt_ex::elementwise_scale_mul", supports_dynamic_shapes=True
6362
)
6463

6564

6665
@unittest.skipIf(
67-
torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
68-
"TensorRT RTX does not support plugins",
66+
not torch_tensorrt.ENABLED_FEATURES.qdp_plugin,
67+
"QDP Plugin is not available",
6968
)
7069
class TestAutomaticPlugin(DispatchTestCase):
7170

0 commit comments

Comments
 (0)