{
  callPackage,
  stdenv,
  fetchzip,
  lib,
  libcxx,
  llvmPackages,
  config,

  addDriverRunpath,
  patchelf,
  fixDarwinDylibNames,

  cudaSupport ? config.cudaSupport,
}:

let
  # The binary libtorch distribution statically links the CUDA
  # toolkit. This means that we do not need to provide CUDA to
  # this derivation. However, we should ensure on version bumps
  # that the CUDA toolkit for `passthru.tests` is still
  # up-to-date.
  version = "2.5.0";
  # Upstream publishes one archive per (platform, device) pair.
  device = if cudaSupport then "cuda" else "cpu";
  srcs = import ./binary-hashes.nix version;
  unavailable = throw "libtorch is not available for this platform";
  # The prebuilt dylibs on Darwin link against libc++; elsewhere use the
  # C++ runtime that ships with the compiler.
  libcxx-for-libtorch = if stdenv.hostPlatform.isDarwin then libcxx else (lib.getLib stdenv.cc.cc);
in
stdenv.mkDerivation {
  inherit version;
  pname = "libtorch";

  # Falls through to `unavailable` when binary-hashes.nix has no entry
  # for this system/device combination.
  src = fetchzip srcs."${stdenv.hostPlatform.system}-${device}" or unavailable;

  nativeBuildInputs =
    if stdenv.hostPlatform.isDarwin then
      [ fixDarwinDylibNames ]
    else
      [ patchelf ] ++ lib.optionals cudaSupport [ addDriverRunpath ];

  # This is a binary redistribution: there is nothing to configure or
  # build, and stripping could corrupt the prebuilt libraries.
  dontBuild = true;
  dontConfigure = true;
  dontStrip = true;

  installPhase = ''
    # Copy headers and CMake files.
    mkdir -p $dev
    cp -r include $dev
    cp -r share $dev

    install -Dm755 -t $out/lib lib/*${stdenv.hostPlatform.extensions.sharedLibrary}*

    # We do not care about Java support...
    # (`rm -f` already succeeds when the glob matches nothing.)
    rm -f $out/lib/lib*jni*

    # Fix up library paths for split outputs. `--replace-fail` makes the
    # build fail loudly if upstream ever renames these placeholders.
    substituteInPlace $dev/share/cmake/Torch/TorchConfig.cmake \
      --replace-fail \''${TORCH_INSTALL_PREFIX}/lib "$out/lib"

    substituteInPlace \
      $dev/share/cmake/Caffe2/Caffe2Targets-release.cmake \
      --replace-fail \''${_IMPORT_PREFIX}/lib "$out/lib"
  '';

  postFixup =
    let
      rpath = lib.makeLibraryPath [ stdenv.cc.cc ];
    in
    # Linux: point the RPATH of every shared object at the C++ runtime
    # and at its sibling libraries; with CUDA, also add the driver runpath.
    lib.optionalString stdenv.hostPlatform.isLinux ''
      find $out/lib -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
        echo "setting rpath for $lib..."
        patchelf --set-rpath "${rpath}:$out/lib" "$lib"
        ${lib.optionalString cudaSupport ''
          addDriverRunpath "$lib"
        ''}
      done
    ''
    # Darwin: rewrite install names so @rpath/ and /usr/lib/libc++
    # references resolve to store paths. The otool -L loops before and
    # after are diagnostics left in the build log.
    + lib.optionalString stdenv.hostPlatform.isDarwin ''
      for f in $out/lib/*.dylib; do
        otool -L $f
      done
      for f in $out/lib/*.dylib; do
        if otool -L $f | grep "@rpath/libomp.dylib" >& /dev/null; then
          install_name_tool -change "@rpath/libomp.dylib" ${llvmPackages.openmp}/lib/libomp.dylib $f
        fi
        install_name_tool -id $out/lib/$(basename $f) $f || true
        for rpath in $(otool -L $f | grep rpath | awk '{print $1}');do
          install_name_tool -change $rpath $out/lib/$(basename $rpath) $f
        done
        if otool -L $f | grep /usr/lib/libc++ >& /dev/null; then
          install_name_tool -change /usr/lib/libc++.1.dylib ${libcxx-for-libtorch.outPath}/lib/libc++.1.0.dylib $f
        fi
      done
      for f in $out/lib/*.dylib; do
        otool -L $f
      done
    '';

  # Split outputs: libraries in `out`, headers + CMake config in `dev`.
  outputs = [
    "out"
    "dev"
  ];

  # Downstream CMake consumption test; keep its CUDA toolkit in sync on
  # version bumps (see comment at the top of the `let` block).
  passthru.tests.cmake = callPackage ./test {
    inherit cudaSupport;
  };

  meta = {
    description = "C++ API of the PyTorch machine learning framework";
    homepage = "https://pytorch.org/";
    sourceProvenance = [ lib.sourceTypes.binaryNativeCode ];
    # Includes CUDA and Intel MKL, but redistributions of the binary are not limited.
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    license = lib.licenses.bsd3;
    maintainers = [ lib.maintainers.junjihashimoto ];
    platforms = [
      "aarch64-darwin"
      "x86_64-linux"
    ];
  };
}