1{
2 lib,
3 stdenv,
4 buildPythonPackage,
5 fetchurl,
6
7 # buildInputs
8 llvmPackages,
9
10 # build-system
11 distutils,
12
13 # dependencies
14 ml-dtypes,
15 absl-py,
16 astunparse,
17 flatbuffers,
18 gast,
19 google-pasta,
20 grpcio,
21 h5py,
22 libclang,
23 numpy,
24 opt-einsum,
25 packaging,
26 protobuf,
27 requests,
28 six,
29 tensorboard,
30 termcolor,
31 typing-extensions,
32 wrapt,
33 isPy3k,
34 mock,
35
36 config,
37 cudaSupport ? config.cudaSupport,
38 cudaPackages,
39 zlib,
40 python,
41 addDriverRunpath,
42}:
43
44# We keep this binary build for three reasons:
45# - the source build doesn't work on Darwin.
46# - the source build is currently brittle and not easy to maintain
47# - the source build doesn't work on NVIDIA Jetson platforms
48
let
  # binary-hashes.nix maps "<system>_<pyver>[_gpu|_jetson]" keys to fetchurl
  # args, plus "version"/"version_jetson" entries for the wheel version.
  packages = import ./binary-hashes.nix;
  inherit (cudaPackages) cudatoolkit cudnn;

  # Jetson needs a different wheel (and the cuda-compat runpath hook below).
  isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
in
buildPythonPackage rec {
  pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";
  version = packages."${"version" + lib.optionalString isCudaJetson "_jetson"}";
  # Prebuilt wheel: see the comment above for why we don't build from source.
  format = "wheel";

  src =
    let
      # e.g. "3.12" -> "312", matching the cpXYZ tag encoded in the hash keys.
      pyVerNoDot = lib.strings.stringAsChars (x: lib.optionalString (x != ".") x) python.pythonVersion;
      platform = stdenv.system;
      cuda = lib.optionalString cudaSupport (if isCudaJetson then "_jetson" else "_gpu");
      key = "${platform}_${pyVerNoDot}${cuda}";
    in
    fetchurl (packages.${key} or (throw "tensorflow-bin: unsupported configuration: ${key}"));

  buildInputs = [ llvmPackages.openmp ];

  # The wheel install machinery still needs distutils on recent Pythons.
  build-system = [
    distutils
  ];

  nativeBuildInputs =
    lib.optionals cudaSupport [ addDriverRunpath ]
    ++ lib.optionals isCudaJetson [ cudaPackages.autoAddCudaCompatRunpath ];

  dependencies = [
    absl-py
    astunparse
    flatbuffers
    gast
    google-pasta
    grpcio
    h5py
    libclang
    ml-dtypes
    numpy
    opt-einsum
    packaging
    protobuf
    requests
    six
    tensorboard
    termcolor
    typing-extensions
    wrapt
  ]
  ++ lib.optional (!isPy3k) mock;

  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    # Normalize NVIDIA's zero-padded local version tags so the wheel filename
    # parses as a valid PEP 440 version.
    for f in tensorflow-*+nv*.whl; do
      # e.g. *nv24.07* -> *nv24.7*
      mv "$f" "$(sed -E 's/(nv[0-9]+)\.0*([0-9]+)/\1.\2/' <<< "$f")"
    done

    popd
  '';

  postFixup =
    # When using the cpu-only wheel, the final package will be named `tensorflow_cpu`.
    # Then, in each package requiring `tensorflow`, our pythonRuntimeDepsCheck will fail with:
    # importlib.metadata.PackageNotFoundError: No package metadata was found for tensorflow
    # Hence, we manually rename the package to `tensorflow`.
    lib.optionalString ((builtins.match ".*tensorflow_cpu.*" src.url) != null) ''
      (
        cd $out/${python.sitePackages}

        dest="tensorflow-${version}.dist-info"

        mv tensorflow_cpu-${version}.dist-info "$dest"

        (
          cd "$dest"

          substituteInPlace METADATA \
            --replace-fail "tensorflow_cpu" "tensorflow"
          substituteInPlace RECORD \
            --replace-fail "tensorflow_cpu" "tensorflow"
        )
      )
    ''
    # Note that we need to run *after* the fixup phase because the
    # libraries are loaded at runtime. If we run in preFixup then
    # patchelf --shrink-rpath will remove the cuda libraries.
    + (
      let
        # rpaths we only need to add if CUDA is enabled.
        cudapaths = lib.optionals cudaSupport [
          cudatoolkit.out
          cudatoolkit.lib
          cudnn
        ];

        libpaths = [
          (lib.getLib stdenv.cc.cc)
          zlib
        ];

        rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
      in
      lib.optionalString stdenv.hostPlatform.isLinux ''
        # This is an array containing all the directories in the tensorflow2
        # package that contain .so files.
        #
        # TODO: Create this list programmatically, and remove paths that aren't
        # actually needed.
        rrPathArr=(
          "$out/${python.sitePackages}/tensorflow/"
          "$out/${python.sitePackages}/tensorflow/core/kernels"
          "$out/${python.sitePackages}/tensorflow/compiler/mlir/stablehlo/"
          "$out/${python.sitePackages}/tensorflow/compiler/tf2tensorrt/"
          "$out/${python.sitePackages}/tensorflow/compiler/tf2xla/ops/"
          "$out/${python.sitePackages}/tensorflow/include/external/ml_dtypes/"
          "$out/${python.sitePackages}/tensorflow/lite/experimental/microfrontend/python/ops/"
          "$out/${python.sitePackages}/tensorflow/lite/python/analyzer_wrapper/"
          "$out/${python.sitePackages}/tensorflow/lite/python/interpreter_wrapper/"
          "$out/${python.sitePackages}/tensorflow/lite/python/metrics/"
          "$out/${python.sitePackages}/tensorflow/lite/python/optimize/"
          "$out/${python.sitePackages}/tensorflow/python/"
          "$out/${python.sitePackages}/tensorflow/python/autograph/impl/testing"
          "$out/${python.sitePackages}/tensorflow/python/client"
          "$out/${python.sitePackages}/tensorflow/python/data/experimental/service"
          "$out/${python.sitePackages}/tensorflow/python/framework"
          "$out/${python.sitePackages}/tensorflow/python/grappler"
          "$out/${python.sitePackages}/tensorflow/python/lib/core"
          "$out/${python.sitePackages}/tensorflow/python/lib/io"
          "$out/${python.sitePackages}/tensorflow/python/platform"
          "$out/${python.sitePackages}/tensorflow/python/profiler/internal"
          "$out/${python.sitePackages}/tensorflow/python/saved_model"
          "$out/${python.sitePackages}/tensorflow/python/util"
          "$out/${python.sitePackages}/tensorflow/tsl/python/lib/core"
          "$out/${python.sitePackages}/tensorflow.libs/"
          "${rpath}"
        )

        # Turn the bash array into a colon-separated list of RPATHs.
        rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
        echo "about to run patchelf with the following rpath: $rrPath"

        find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
          echo "about to patchelf $lib..."
          chmod a+rx "$lib"
          patchelf --set-rpath "$rrPath" "$lib"
          ${lib.optionalString cudaSupport ''
            addDriverRunpath "$lib"
          ''}
        done
      ''
    );

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  meta = {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "https://www.tensorflow.org/";
    sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
    license = lib.licenses.asl20;
    maintainers = [ ];
    badPlatforms = [ "x86_64-darwin" ];
    # unsupported combination
    broken = stdenv.hostPlatform.isDarwin && cudaSupport;
  };
}