{
  config,
  lib,
  pkgs,
  ...
}: let
  nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
  nvidia_x11 =
    if nvidiaEnabled || cfg.datacenter.enable
    then cfg.package
    else null;

  cfg = config.hardware.nvidia;

  pCfg = cfg.prime;
  syncCfg = pCfg.sync;
  offloadCfg = pCfg.offload;
  reverseSyncCfg = pCfg.reverseSync;
  primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
  busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
  ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
  settingsFormat = pkgs.formats.keyValue {};
in {
  options = {
    hardware.nvidia = {
      datacenter.enable = lib.mkEnableOption ''
        Data Center drivers for NVIDIA cards on an NVLink topology
      '';
      datacenter.settings = lib.mkOption {
        type = settingsFormat.type;
        default = {
          LOG_LEVEL=4;
          LOG_FILE_NAME="/var/log/fabricmanager.log";
          LOG_APPEND_TO_LOG=1;
          LOG_FILE_MAX_SIZE=1024;
          LOG_USE_SYSLOG=0;
          DAEMONIZE=1;
          BIND_INTERFACE_IP="127.0.0.1";
          STARTING_TCP_PORT=16000;
          FABRIC_MODE=0;
          FABRIC_MODE_RESTART=0;
          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
          FM_CMD_BIND_INTERFACE="127.0.0.1";
          FM_CMD_PORT_NUMBER=6666;
          FM_STAY_RESIDENT_ON_FAILURES=0;
          ACCESS_LINK_FAILURE_MODE=0;
          TRUNK_LINK_FAILURE_MODE=0;
          NVSWITCH_FAILURE_MODE=0;
          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
          TOPOLOGY_FILE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
          DATABASE_PATH="${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
        };
        defaultText = lib.literalExpression ''
          {
            LOG_LEVEL=4;
            LOG_FILE_NAME="/var/log/fabricmanager.log";
            LOG_APPEND_TO_LOG=1;
            LOG_FILE_MAX_SIZE=1024;
            LOG_USE_SYSLOG=0;
            DAEMONIZE=1;
            BIND_INTERFACE_IP="127.0.0.1";
            STARTING_TCP_PORT=16000;
            FABRIC_MODE=0;
            FABRIC_MODE_RESTART=0;
            STATE_FILE_NAME="/var/tmp/fabricmanager.state";
            FM_CMD_BIND_INTERFACE="127.0.0.1";
            FM_CMD_PORT_NUMBER=6666;
            FM_STAY_RESIDENT_ON_FAILURES=0;
            ACCESS_LINK_FAILURE_MODE=0;
            TRUNK_LINK_FAILURE_MODE=0;
            NVSWITCH_FAILURE_MODE=0;
            ABORT_CUDA_JOBS_ON_FM_EXIT=1;
            TOPOLOGY_FILE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
            DATABASE_PATH="''${nvidia_x11.fabricmanager}/share/nvidia-fabricmanager/nvidia/nvswitch";
          }
        '';
        description = ''
          Additional configuration options for fabricmanager.
        '';
      };
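
      # Note: the settings above are serialized with pkgs.formats.keyValue, i.e.
      # one KEY=value line per attribute. An illustrative excerpt of the generated
      # fabricmanager.conf (values taken from the defaults above):
      #
      #   LOG_LEVEL=4
      #   LOG_FILE_NAME=/var/log/fabricmanager.log
      #   DAEMONIZE=1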

      powerManagement.enable = lib.mkEnableOption ''
        experimental power management through systemd. For more information, see
        the NVIDIA docs, Chapter 21: Configuring Power Management Support
      '';

      powerManagement.finegrained = lib.mkEnableOption ''
        experimental power management of PRIME offload. For more information, see
        the NVIDIA docs, Chapter 22: PCI-Express Runtime D3 (RTD3) Power Management
      '';

      dynamicBoost.enable = lib.mkEnableOption ''
        Dynamic Boost, which balances power between the CPU and the GPU for improved
        performance on supported laptops using the nvidia-powerd daemon. For more
        information, see the NVIDIA docs, Chapter 23: Dynamic Boost on Linux
      '';

      modesetting.enable = lib.mkEnableOption ''
        kernel modesetting when using the NVIDIA proprietary driver.

        Enabling this fixes screen tearing when using Optimus via PRIME (see
        {option}`hardware.nvidia.prime.sync.enable`). This is not enabled
        by default because it is not officially supported by NVIDIA and would not
        work with SLI
      '';

      prime.nvidiaBusId = lib.mkOption {
        type = busIDType;
        default = "";
        example = "PCI:1:0:0";
        description = ''
          Bus ID of the NVIDIA GPU. You can find it using lspci; for example, if lspci
          shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
        '';
      };

      prime.intelBusId = lib.mkOption {
        type = busIDType;
        default = "";
        example = "PCI:0:2:0";
        description = ''
          Bus ID of the Intel GPU. You can find it using lspci; for example, if lspci
          shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
        '';
      };

      prime.amdgpuBusId = lib.mkOption {
        type = busIDType;
        default = "";
        example = "PCI:4:0:0";
        description = ''
          Bus ID of the AMD APU. You can find it using lspci; for example, if lspci
          shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
        '';
      };

      prime.sync.enable = lib.mkEnableOption ''
        NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
        If enabled, the NVIDIA GPU will be always on and used for all rendering,
        while enabling output to displays attached only to the integrated Intel/AMD
        GPU without a multiplexer.

        Note that this option only has an effect if the "nvidia" driver is specified
        in {option}`services.xserver.videoDrivers`, and it should preferably
        be the only driver there.

        If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
        be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
        {option}`hardware.nvidia.prime.intelBusId` or
        {option}`hardware.nvidia.prime.amdgpuBusId`).

        If you enable this, you may want to also enable kernel modesetting for the
        NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order
        to prevent tearing.

        Note that this configuration only works with display managers that support
        the {option}`services.xserver.displayManager.setupCommands` option
      '';
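
      # Illustrative PRIME sync configuration (the bus IDs are machine-specific
      # placeholders; see the bus ID options above for how to find yours):
      #
      #   hardware.nvidia.modesetting.enable = true;
      #   hardware.nvidia.prime = {
      #     sync.enable = true;
      #     nvidiaBusId = "PCI:1:0:0";
      #     intelBusId = "PCI:0:2:0";
      #   };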

      prime.allowExternalGpu = lib.mkEnableOption ''
        configuring X to allow external NVIDIA GPUs when using PRIME [reverse] sync Optimus
      '';

      prime.offload.enable = lib.mkEnableOption ''
        render offload support using the NVIDIA proprietary driver via PRIME.

        If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
        be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
        {option}`hardware.nvidia.prime.intelBusId` or
        {option}`hardware.nvidia.prime.amdgpuBusId`)
      '';

      prime.offload.enableOffloadCmd = lib.mkEnableOption ''
        adding a `nvidia-offload` convenience script to {option}`environment.systemPackages`
        for offloading programs to the NVIDIA GPU. For this to work,
        {option}`hardware.nvidia.prime.offload.enable` or {option}`hardware.nvidia.prime.reverseSync.enable`
        must also be enabled.

        Example usage: `nvidia-offload sauerbraten_client`
      '';
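
      # Illustrative render-offload configuration (the bus IDs are placeholders),
      # followed by an example invocation of the generated wrapper script with an
      # arbitrary OpenGL program:
      #
      #   hardware.nvidia.prime = {
      #     offload.enable = true;
      #     offload.enableOffloadCmd = true;
      #     nvidiaBusId = "PCI:1:0:0";
      #     intelBusId = "PCI:0:2:0";
      #   };
      #
      #   $ nvidia-offload glxgears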

      prime.reverseSync.enable = lib.mkEnableOption ''
        NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
        PRIME. If enabled, the Intel/AMD GPU will be used for all rendering, while
        enabling output to displays attached only to the NVIDIA GPU without a
        multiplexer.

        Warning: This feature is relatively new and may work poorly on some systems,
        especially with AMD GPUs.
        See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828

        Note that this option only has an effect if the "nvidia" driver is specified
        in {option}`services.xserver.videoDrivers`, and it should preferably
        be the only driver there.

        If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
        be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
        {option}`hardware.nvidia.prime.intelBusId` or
        {option}`hardware.nvidia.prime.amdgpuBusId`).

        If you enable this, you may want to also enable kernel modesetting for the
        NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order
        to prevent tearing.

        Note that this configuration only works with display managers that support
        the {option}`services.xserver.displayManager.setupCommands` option
      '';

      nvidiaSettings =
        (lib.mkEnableOption ''
          nvidia-settings, NVIDIA's GUI configuration tool
        '')
        // {default = true;};

      nvidiaPersistenced = lib.mkEnableOption ''
        nvidia-persistenced, a daemon for headless use of NVIDIA GPUs; it keeps
        all GPUs initialized even when no display is attached
      '';

      forceFullCompositionPipeline = lib.mkEnableOption ''
        forcing the full composition pipeline.
        This sometimes fixes screen tearing issues.
        This has been reported to reduce the performance of some OpenGL applications and may produce issues in WebGL.
        It also drastically increases the time the driver needs to clock down after load
      '';

      package = lib.mkOption {
        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
        defaultText = lib.literalExpression ''
          config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
        '';
        example = "config.boot.kernelPackages.nvidiaPackages.legacy_470";
        description = ''
          The NVIDIA driver package to use.
        '';
      };
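
      # A configuration may also pin a different driver branch explicitly, for
      # example (illustrative; `beta` is one of the attributes commonly exposed
      # by nixpkgs' nvidiaPackages set, alongside `stable` and the legacy_* ones):
      #
      #   hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.beta;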

      open = lib.mkEnableOption ''
        the open source NVIDIA kernel module
      '';
    };
  };

  config = let
    igpuDriver =
      if pCfg.intelBusId != ""
      then "modesetting"
      else "amdgpu";
    igpuBusId =
      if pCfg.intelBusId != ""
      then pCfg.intelBusId
      else pCfg.amdgpuBusId;
  in
    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
      # Common
      ({
        assertions = [
          {
            assertion = !(nvidiaEnabled && cfg.datacenter.enable);
            message = "You cannot configure both X11 and Data Center drivers at the same time.";
          }
        ];
        boot = {
          blacklistedKernelModules = ["nouveau" "nvidiafb"];

          # Don't add `nvidia-uvm` to `kernelModules`, because we want
          # `nvidia-uvm` to be loaded only after the `udev` rules for the `nvidia`
          # kernel module are applied.
          #
          # Instead, we use `softdep` to lazily load the `nvidia-uvm` kernel module
          # after the `nvidia` kernel module is loaded and the `udev` rules are applied.
          extraModprobeConfig = ''
            softdep nvidia post: nvidia-uvm
          '';
        };
        systemd.tmpfiles.rules =
          lib.optional config.virtualisation.docker.enableNvidia
          "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
        services.udev.extraRules =
          ''
            # Create the NVIDIA device nodes as the corresponding kernel modules are loaded.
            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
            KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
          '';
        hardware.opengl = {
          extraPackages = [
            nvidia_x11.out
          ];
          extraPackages32 = [
            nvidia_x11.lib32
          ];
        };
        environment.systemPackages = [
          nvidia_x11.bin
        ];
      })
      # X11
      (lib.mkIf nvidiaEnabled {
        assertions = [
          {
            assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
            message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
          }

          {
            assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
            message = "Offload command requires offloading or reverse PRIME sync to be enabled.";
          }

          {
            assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
            message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
          }

          {
            assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
            message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
          }

          {
            assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
            message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
          }

          {
            assertion = !(syncCfg.enable && offloadCfg.enable);
            message = "PRIME Sync and Offload cannot both be enabled.";
          }

          {
            assertion = !(syncCfg.enable && reverseSyncCfg.enable);
            message = "PRIME Sync and PRIME Reverse Sync cannot both be enabled.";
          }

          {
            assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
            message = "Sync precludes powering down the NVIDIA GPU.";
          }

          {
            assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
            message = "Fine-grained power management requires offload to be enabled.";
          }

          {
            assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
            message = "Required files for driver-based power management only exist on versions >= 430.09.";
          }

          {
            assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
            message = "This version of the NVIDIA driver does not provide a corresponding open source kernel module.";
          }

          {
            assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
            message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01.";
          }];

        # If Optimus/PRIME is enabled, we:
        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
        #   "nvidia" driver.
        # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
        #   "nvidia" driver, in order to allow the X server to start without any outputs.
        # - Add a separate Device section for the Intel GPU, using the "modesetting"
        #   driver and with the configured BusID.
        # - OR add a separate Device section for the AMD APU, using the "amdgpu"
        #   driver and with the configured BusID.
        # - Reference that Device section from the ServerLayout section as an inactive
        #   device.
        # - Configure the display manager to run specific `xrandr` commands which will
        #   configure/enable displays connected to the Intel iGPU / AMD APU.
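        #
        # The generated xorg.conf then contains sections roughly of this shape
        # (illustrative, for an Intel iGPU with sync enabled; the bus ID is a placeholder):
        #
        #   Section "Device"
        #     Identifier "Device-modesetting[0]"
        #     Driver     "modesetting"
        #     BusID      "PCI:0:2:0"
        #   EndSection
        #
        #   Section "ServerLayout"
        #     ...
        #     Inactive "Device-modesetting[0]"
        #   EndSection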

        # reverse sync implies offloading
        hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;

        services.xserver.drivers =
          lib.optional primeEnabled {
            name = igpuDriver;
            display = offloadCfg.enable;
            modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
            deviceSection =
              ''
                BusID "${igpuBusId}"
              ''
              + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
                Option "AccelMethod" "none"
              '';
          }
          ++ lib.singleton {
            name = "nvidia";
            modules = [nvidia_x11.bin];
            display = !offloadCfg.enable;
            deviceSection =
              ''
                Option "SidebandSocketPath" "/run/nvidia-xdriver/"
              ''
              + lib.optionalString primeEnabled ''
                BusID "${pCfg.nvidiaBusId}"
              ''
              + lib.optionalString pCfg.allowExternalGpu ''
                Option "AllowExternalGpus"
              '';
            screenSection =
              ''
                Option "RandRRotation" "on"
              ''
              + lib.optionalString syncCfg.enable ''
                Option "AllowEmptyInitialConfiguration"
              ''
              + lib.optionalString cfg.forceFullCompositionPipeline ''
                Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
                Option "AllowIndirectGLXProtocol" "off"
                Option "TripleBuffer" "on"
              '';
          };

        services.xserver.serverLayoutSection =
          lib.optionalString syncCfg.enable ''
            Inactive "Device-${igpuDriver}[0]"
          ''
          + lib.optionalString reverseSyncCfg.enable ''
            Inactive "Device-nvidia[0]"
          ''
          + lib.optionalString offloadCfg.enable ''
            Option "AllowNVIDIAGPUScreens"
          '';

        services.xserver.displayManager.setupCommands = let
          gpuProviderName =
            if igpuDriver == "amdgpu"
            then
              # find the name of the provider if amdgpu
              "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
            else igpuDriver;
          providerCmdParams =
            if syncCfg.enable
            then "\"${gpuProviderName}\" NVIDIA-0"
            else "NVIDIA-G0 \"${gpuProviderName}\"";
        in
          lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
            # Added by nvidia configuration module for Optimus/PRIME.
            ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
            ${lib.getExe pkgs.xorg.xrandr} --auto
          '';

        environment.etc = {
          "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};

          # 'nvidia_x11' installs its files to /run/opengl-driver/...
          "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
        };

        hardware.opengl = {
          extraPackages = [
            pkgs.nvidia-vaapi-driver
          ];
          extraPackages32 = [
            pkgs.pkgsi686Linux.nvidia-vaapi-driver
          ];
        };
        environment.systemPackages =
          lib.optional cfg.nvidiaSettings nvidia_x11.settings
          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
          ++ lib.optional offloadCfg.enableOffloadCmd
          (pkgs.writeShellScriptBin "nvidia-offload" ''
            export __NV_PRIME_RENDER_OFFLOAD=1
            export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
            export __GLX_VENDOR_LIBRARY_NAME=nvidia
            export __VK_LAYER_NV_optimus=NVIDIA_only
            exec "$@"
          '');

        systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;

        systemd.services = let
          nvidiaService = state: {
            description = "NVIDIA system ${state} actions";
            path = [pkgs.kbd];
            serviceConfig = {
              Type = "oneshot";
              ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
            };
            before = ["systemd-${state}.service"];
            requiredBy = ["systemd-${state}.service"];
          };
        in
          lib.mkMerge [
            (lib.mkIf cfg.powerManagement.enable {
              nvidia-suspend = nvidiaService "suspend";
              nvidia-hibernate = nvidiaService "hibernate";
              nvidia-resume =
                (nvidiaService "resume")
                // {
                  before = [];
                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
                };
            })
            (lib.mkIf cfg.nvidiaPersistenced {
              "nvidia-persistenced" = {
                description = "NVIDIA Persistence Daemon";
                wantedBy = ["multi-user.target"];
                serviceConfig = {
                  Type = "forking";
                  Restart = "always";
                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
                };
              };
            })
            (lib.mkIf cfg.dynamicBoost.enable {
              "nvidia-powerd" = {
                description = "nvidia-powerd service";
                path = [
                  pkgs.util-linux # nvidia-powerd wants lscpu
                ];
                wantedBy = ["multi-user.target"];
                serviceConfig = {
                  Type = "dbus";
                  BusName = "nvidia.powerd.server";
                  ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
                };
              };
            })
          ];
        services.acpid.enable = true;

        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;

        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;

        systemd.tmpfiles.rules = [
          # Remove the following log message:
          #    (WW) NVIDIA: Failed to bind sideband socket to
          #    (WW) NVIDIA: '/var/run/nvidia-xdriver-b4f69129' Permission denied
          #
          # https://bbs.archlinux.org/viewtopic.php?pid=1909115#p1909115
          "d /run/nvidia-xdriver 0770 root users"
        ] ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

        boot = {
          extraModulePackages =
            if cfg.open
            then [nvidia_x11.open]
            else [nvidia_x11.bin];
          # nvidia-uvm is required by CUDA applications; it is loaded lazily via the
          # softdep rule defined in the common section above.
          kernelModules =
            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];

          # If requested, enable modesetting via kernel parameter.
          kernelParams =
            lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
            ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
            ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
            ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";

          # Enable fine-grained (RTD3) power management.
          extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
            options nvidia "NVreg_DynamicPowerManagement=0x02"
          '';
        };
        services.udev.extraRules =
          lib.optionalString cfg.powerManagement.finegrained (
            lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
              # Remove NVIDIA USB xHCI Host Controller devices, if present
              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"

              # Remove NVIDIA USB Type-C UCSI devices, if present
              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"

              # Remove NVIDIA Audio devices, if present
              ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
            ''
            + ''
              # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
              ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
              ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"

              # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
              ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
              ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
            ''
          );
      })
      # Data Center
      (lib.mkIf (cfg.datacenter.enable) {
        boot.extraModulePackages = [
          nvidia_x11.bin
        ];

        systemd = {
          tmpfiles.rules =
            lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
            "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

          services = lib.mkMerge [
            ({
              nvidia-fabricmanager = {
                enable = true;
                description = "Start NVIDIA NVLink Management";
                wantedBy = ["multi-user.target"];
                unitConfig.After = ["network-online.target"];
                unitConfig.Requires = ["network-online.target"];
                serviceConfig = {
                  Type = "forking";
                  TimeoutStartSec = 240;
                  ExecStart = let
                    nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
                  in
                    "${lib.getExe nvidia_x11.fabricmanager} -c ${nv-fab-conf}";
                  LimitCORE = "infinity";
                };
              };
            })
            (lib.mkIf cfg.nvidiaPersistenced {
              "nvidia-persistenced" = {
                description = "NVIDIA Persistence Daemon";
                wantedBy = ["multi-user.target"];
                serviceConfig = {
                  Type = "forking";
                  Restart = "always";
                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
                };
              };
            })
          ];
        };

        environment.systemPackages =
          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager
          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced;
      })
    ]);
}