{
  lib,
  stdenv,
  python,
  buildPythonPackage,
  pythonOlder,
  pythonAtLeast,
  fetchurl,

  # nativeBuildInputs
  addDriverRunpath,
  autoAddDriverRunpath,
  autoPatchelfHook,

  # buildInputs
  cudaPackages,

  # dependencies
  filelock,
  jinja2,
  networkx,
  numpy,
  pyyaml,
  requests,
  setuptools,
  sympy,
  typing-extensions,
  triton,

  callPackage,
}:

let
  pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
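  # binary-hashes.nix maps "system-pythonVersion" keys (e.g. "x86_64-linux-312") to fetchurl
  # arguments for the prebuilt wheels; pyVerNoDot drops the dot so that key can be built below.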
  srcs = import ./binary-hashes.nix version;
  unsupported = throw "Unsupported system";
  version = "2.8.0";
in
buildPythonPackage {
  inherit version;

  pname = "torch";
  # Don't forget to update torch to the same version.

  format = "wheel";

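  # Upstream appears to publish wheels only for CPython 3.9 through 3.13, hence the bounds below.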
  disabled = (pythonOlder "3.9") || (pythonAtLeast "3.14");

  src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;

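  # autoPatchelfHook rewrites the RUNPATHs of the prebuilt shared objects to point at the
  # buildInputs below; autoAddDriverRunpath additionally adds the CUDA driver directory
  # (/run/opengl-driver/lib) so libcuda.so.1 can be found at runtime.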
  nativeBuildInputs = lib.optionals stdenv.hostPlatform.isLinux [
    addDriverRunpath
    autoAddDriverRunpath
    autoPatchelfHook
  ];

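  # autoPatchelfHook resolves the DT_NEEDED entries of the bundled shared objects against
  # these CUDA libraries.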
  buildInputs = lib.optionals stdenv.hostPlatform.isLinux (
    with cudaPackages;
    [
      # $out/${sitePackages}/nvfuser/_C*.so wants libnvToolsExt.so.1 but torch/lib only ships
      # libnvToolsExt-$hash.so.1
      cuda_nvtx

      cuda_cudart
      cuda_cupti
      cuda_nvrtc
      cudnn
      cusparselt
      libcublas
      libcufft
      libcufile
      libcurand
      libcusolver
      libcusparse
      nccl
    ]
  );

  autoPatchelfIgnoreMissingDeps = lib.optionals stdenv.hostPlatform.isLinux [
    # This is the hardware-dependent userspace driver that comes from the
    # nvidia_x11 package. It must be deployed at runtime in
    # /run/opengl-driver/lib or pointed at by the LD_LIBRARY_PATH variable,
    # rather than pinned in the runpath.
    "libcuda.so.1"
  ];

  dependencies = [
    filelock
    jinja2
    networkx
    numpy
    pyyaml
    requests
    setuptools
    sympy
    typing-extensions
  ]
  ++ lib.optionals (stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isx86_64) [ triton ];

  postInstall = ''
    # Remove the bundled console scripts (ONNX conversion helpers, etc.)
    rm -rf $out/bin
  '';

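  # The wheel bundles copies of its libraries under torch/lib; add that directory to the
  # autoPatchelf search path so dependencies between them resolve to the bundled copies.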
  postFixup = lib.optionalString stdenv.hostPlatform.isLinux ''
    addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
  '';

  # See https://github.com/NixOS/nixpkgs/issues/296179
  #
  # This is a quick hack to add `libnvrtc` to the runpath so that torch can find
  # it when it is needed at runtime.
  extraRunpaths = lib.optionals stdenv.hostPlatform.isLinux [
    "${lib.getLib cudaPackages.cuda_nvrtc}/lib"
  ];
  postPhases = lib.optionals stdenv.hostPlatform.isLinux [ "postPatchelfPhase" ];
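  # Append the extra runpaths declared above to every shared object in the outputs.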
  postPatchelfPhase = ''
    while IFS= read -r -d $'\0' elf ; do
      for extra in $extraRunpaths ; do
        echo patchelf "$elf" --add-rpath "$extra" >&2
        patchelf "$elf" --add-rpath "$extra"
      done
    done < <(
      find "''${!outputLib}" "$out" -type f -iname '*.so' -print0
    )
  '';

  # The wheel is not stripped: doing so leads to
  # `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned`.
  dontStrip = true;

  pythonImportsCheck = [ "torch" ];

  passthru.tests = callPackage ../tests { };

  meta = {
    description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = "https://pytorch.org/";
    changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
    # Includes CUDA and Intel MKL, but redistribution of the binaries is not restricted.
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    # torch's license is BSD3.
    # torch-bin used to vendor CUDA. It still links against CUDA and MKL.
    license = with lib.licenses; [
      bsd3
      issl
      unfreeRedistributable
    ];
    sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
    platforms = [
      "aarch64-darwin"
      "aarch64-linux"
      "x86_64-linux"
    ];
    hydraPlatforms = [ ]; # output size 3.2G on 1.11.0
    maintainers = with lib.maintainers; [
      GaetanLepage
      junjihashimoto
    ];
  };
}