@@ -725,71 +725,83 @@ def run(self):
 with open(os.path.join(get_root_dir(), "README.md"), "r", encoding="utf-8") as fh:
     long_description = fh.read()

-base_requirements = [
-    "packaging>=23",
-    "typing-extensions>=4.7.0",
-    "dllist",
-    "psutil",
-    # dummy package as a WAR for the tensorrt dependency on nvidia-cuda-runtime-cu13
-    "nvidia-cuda-runtime-cu13==0.0.0a0",
-]
+def get_jetpack_requirements(base_requirements):
+    requirements = base_requirements + ["numpy<2.0.0"]
+    if IS_DLFW_CI:
+        return requirements
+    else:
+        return requirements + ["torch>=2.8.0,<2.9.0", "tensorrt>=10.3.0,<10.4.0"]


-def get_requirements():
-    if IS_JETPACK:
-        requirements = get_jetpack_requirements()
-    elif IS_SBSA:
-        requirements = get_sbsa_requirements()
+def get_sbsa_requirements(base_requirements):
+    requirements = base_requirements + ["numpy"]
+    if IS_DLFW_CI:
+        return requirements
     else:
-        # standard linux and windows requirements
-        requirements = base_requirements + ["numpy"]
-        if not IS_DLFW_CI:
-            requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
-            if USE_TRT_RTX:
+        # TensorRT does not currently build wheels for Tegra, so Thor uses the local tensorrt install from the tarball.
+        # Because Thor reuses the sbsa torch_tensorrt wheel, the sbsa wheel only needs to declare the tensorrt dependency.
+        return requirements + [
+            "torch>=2.10.0.dev,<2.11.0",
+            "tensorrt>=10.14.1,<10.15.0",
+        ]
+
+def get_x86_64_requirements(base_requirements):
+    requirements = base_requirements + [
+        "numpy"
+    ]
+
+    if IS_DLFW_CI:
+        return requirements
+    else:
+        requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
+        if USE_TRT_RTX:
+            return requirements + [
+                "tensorrt_rtx>=1.2.0.54",
+            ]
+        else:
+            requirements = requirements + [
+                "tensorrt>=10.14.1,<10.15.0",
+            ]
+            cuda_version = torch.version.cuda
+            if cuda_version.startswith("12"):
+                # Using tensorrt>=10.14.1,<10.15.0 directly in a cu12* env pulls in both tensorrt_cu12 and tensorrt_cu13,
+                # which conflicts because cuda-toolkit 13 is also pulled in, so pin tensorrt_cu12 explicitly here.
+                tensorrt_prefix = "tensorrt-cu12"
                 requirements = requirements + [
-                    "tensorrt_rtx>=1.2.0.54",
+                    f"{tensorrt_prefix}>=10.14.1,<10.15.0",
+                    f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0",
+                    f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0",
+                ]
+            elif cuda_version.startswith("13"):
+                tensorrt_prefix = "tensorrt-cu13"
+                requirements = requirements + [
+                    f"{tensorrt_prefix}>=10.14.1,<10.15.0,!=10.14.1.48",
+                    f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0,!=10.14.1.48",
+                    f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0,!=10.14.1.48",
                 ]
             else:
-                cuda_version = torch.version.cuda
-                if cuda_version.startswith("12"):
-                    # directly use tensorrt>=10.14.1,<10.15.0 in cu12* env, it will pull both tensorrt_cu12 and tensorrt_cu13
-                    # which will cause the conflict due to cuda-toolkit 13 is also pulled in, so we need to specify tensorrt_cu12 here
-                    tensorrt_prefix = "tensorrt-cu12"
-                    requirements = requirements + [
-                        f"{tensorrt_prefix}>=10.14.1,<10.15.0",
-                        f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0",
-                        f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0",
-                    ]
-                elif cuda_version.startswith("13"):
-                    tensorrt_prefix = "tensorrt-cu13"
-                    requirements = requirements + [
-                        f"{tensorrt_prefix}>=10.14.1,<10.15.0,!=10.14.1.48",
-                        f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0,!=10.14.1.48",
-                        f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0,!=10.14.1.48",
-                    ]
-                else:
-                    raise ValueError(f"Unsupported CUDA version: {cuda_version}")
-    return requirements
-
-
-def get_jetpack_requirements():
-    jetpack_requirements = base_requirements + ["numpy<2.0.0"]
-    if IS_DLFW_CI:
-        return jetpack_requirements
-    return jetpack_requirements + ["torch>=2.8.0,<2.9.0", "tensorrt>=10.3.0,<10.4.0"]
+                raise ValueError(f"Unsupported CUDA version: {cuda_version}")

+    return requirements

-def get_sbsa_requirements():
-    sbsa_requirements = base_requirements + ["numpy"]
-    if IS_DLFW_CI:
-        return sbsa_requirements
-    # TensorRT does not currently build wheels for Tegra, so we need to use the local tensorrt install from the tarball for thor
-    # also due to we use sbsa torch_tensorrt wheel for thor, so when we build sbsa wheel, we need to only include tensorrt dependency.
-    return sbsa_requirements + [
-        "torch>=2.10.0.dev,<2.11.0",
-        "tensorrt>=10.14.1,<10.15.0",
+def get_requirements():
+    base_requirements = [
+        "packaging>=23",
+        "typing-extensions>=4.7.0",
+        "dllist",
+        "psutil",
+        # dummy package as a WAR for the tensorrt dependency on nvidia-cuda-runtime-cu13
+        "nvidia-cuda-runtime-cu13==0.0.0a0",
     ]

+    if IS_JETPACK:
+        requirements = get_jetpack_requirements(base_requirements)
+    elif IS_SBSA:
+        requirements = get_sbsa_requirements(base_requirements)
+    else:
+        # standard linux and windows requirements
+        requirements = get_x86_64_requirements(base_requirements)
+    return requirements

 setup(
     name="torch_tensorrt",
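For context, the list computed by the refactored `get_requirements()` feeds the `setup()` call that follows the hunk. A minimal sketch of that wiring, assuming the standard setuptools `install_requires` keyword (the actual `setup()` arguments are outside this diff and are an assumption here):

```python
# Hypothetical sketch of how the refactored helper is consumed in setup.py.
# The install_requires wiring and extra keywords are assumptions, not shown in this diff.
from setuptools import setup

# Dispatches on IS_JETPACK / IS_SBSA / x86_64 and returns the pinned dependency list.
requirements = get_requirements()

setup(
    name="torch_tensorrt",
    long_description=long_description,
    install_requires=requirements,
)
```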