llama-cpp: require newer apple sdk (fix darwin) (#442216)

Changed files: +19 -10
pkgs/by-name/ll/llama-cpp/package.nix
···
  metalSupport ? stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64 && !openclSupport,
  vulkanSupport ? false,
  rpcSupport ? false,
+ apple-sdk_14,
  curl,
+ llama-cpp,
  shaderc,
  vulkan-headers,
  vulkan-loader,
···
    ++ optionals rocmSupport rocmBuildInputs
    ++ optionals blasSupport [ blas ]
    ++ optionals vulkanSupport vulkanBuildInputs
+   ++ optionals metalSupport [ apple-sdk_14 ]
    ++ [ curl ];
  preConfigure = ''
···
  # the tests are failing as of 2025-08
  doCheck = false;
- passthru.updateScript = nix-update-script {
-   attrPath = "llama-cpp";
-   extraArgs = [
-     "--version-regex"
-     "b(.*)"
-   ];
+ passthru = {
+   tests = {
+     metal = llama-cpp.override { metalSupport = true; };
+   };
+   updateScript = nix-update-script {
+     attrPath = "llama-cpp";
+     extraArgs = [
+       "--version-regex"
+       "b(.*)"
+     ];
+   };
  };
- meta = with lib; {
+ meta = {
    description = "Inference of Meta's LLaMA model (and others) in pure C/C++";
    homepage = "https://github.com/ggml-org/llama.cpp";
-   license = licenses.mit;
+   license = lib.licenses.mit;
    mainProgram = "llama";
-   maintainers = with maintainers; [
+   maintainers = with lib.maintainers; [
+     booxter
      dit7ya
      philiptaron
      xddxdd
    ];
-   platforms = platforms.unix;
+   platforms = lib.platforms.unix;
    badPlatforms = optionals (cudaSupport || openclSupport) lib.platforms.darwin;
    broken = metalSupport && !effectiveStdenv.hostPlatform.isDarwin;
  };
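
The new passthru.tests.metal attribute gives a one-line way to exercise the Metal code path this change fixes on Darwin. A minimal sketch of how it could be evaluated, assuming the expression is run from the root of a nixpkgs checkout (the import path is an assumption; the attribute names come from the diff above):

# Sketch only: assumes evaluation from the root of a nixpkgs checkout.
let
  pkgs = import ./. { };
in
{
  # Passthru test added by this PR: rebuilds llama-cpp with metalSupport = true,
  # which now pulls apple-sdk_14 into buildInputs.
  viaPassthru = pkgs.llama-cpp.tests.metal;

  # Equivalent manual override using the existing metalSupport flag.
  viaOverride = pkgs.llama-cpp.override { metalSupport = true; };
}

Guarding apple-sdk_14 behind optionals metalSupport leaves non-Metal builds unchanged, while the passthru test (buildable with something like nix-build -A llama-cpp.tests.metal) lets CI catch Darwin regressions without changing the package's defaults.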