ollama: 0.5.1 -> 0.5.4 (#365718)

Changed files
+31 -35
pkgs/by-name/ol/ollama/package.nix  +31 -22
···
   cudaPackages,
   darwin,
   autoAddDriverRunpath,
+  versionCheckHook,
+
   # passthru
   nixosTests,
   testers,
   ollama,
···
 let
   pname = "ollama";
   # don't forget to invalidate all hashes each update
-  version = "0.5.1";
+  version = "0.5.4";
   src = fetchFromGitHub {
     owner = "ollama";
     repo = "ollama";
-    rev = "v${version}";
-    hash = "sha256-llsK/rMK1jf2uneqgon9gqtZcbC9PuCDxoYfC7Ta6PY=";
+    tag = "v${version}";
+    hash = "sha256-JyP7A1+u9Vs6ynOKDwun1qLBsjN+CVHIv39Hh2TYa2U=";
     fetchSubmodules = true;
   };
···
     ++ lib.optionals enableCuda cudaLibs
     ++ lib.optionals stdenv.hostPlatform.isDarwin metalFrameworks;

-  patches = [
-    # ollama's build script is unable to find hipcc
-    ./rocm.patch
-  ];
-
+  # replace inaccurate version number with actual release version
   postPatch = ''
-    # replace inaccurate version number with actual release version
-    substituteInPlace version/version.go --replace-fail 0.0.0 '${version}'
+    substituteInPlace version/version.go \
+      --replace-fail 0.0.0 '${version}'
   '';

   overrideModAttrs = (
···
     }
   );

-  preBuild = ''
+  preBuild =
+    let
+      dist_cmd =
+        if cudaRequested then
+          "dist_cuda_v${cudaMajorVersion}"
+        else if rocmRequested then
+          "dist_rocm"
+        else
+          "dist";
+    in
     # build llama.cpp libraries for ollama
-    make -j $NIX_BUILD_CORES
-  '';
-
-  postInstall = lib.optionalString stdenv.hostPlatform.isLinux ''
-    # copy libggml_*.so and runners into lib
-    # https://github.com/ollama/ollama/blob/v0.4.4/llama/make/gpu.make#L90
-    mkdir -p $out/lib
-    cp -r dist/*/lib/* $out/lib/
-  '';
+    ''
+      make ${dist_cmd} -j $NIX_BUILD_CORES
+    '';

   postFixup =
+    # the app doesn't appear functional at the moment, so hide it
     ''
-      # the app doesn't appear functional at the moment, so hide it
       mv "$out/bin/app" "$out/bin/.ollama-app"
     ''
+    # expose runtime libraries necessary to use the gpu
     + lib.optionalString (enableRocm || enableCuda) ''
-      # expose runtime libraries necessary to use the gpu
       wrapProgram "$out/bin/ollama" ${wrapperArgs}
     '';
···
"-X=github.com/ollama/ollama/version.Version=${version}"
"-X=github.com/ollama/ollama/server.mode=release"
];
+
+
__darwinAllowLocalNetworking = true;
+
+
nativeInstallCheck = [
+
versionCheckHook
+
];
+
versionCheckProgramArg = [ "--version" ];
+
doInstallCheck = true;
passthru = {
tests =
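
The newly added doInstallCheck/versionCheckHook pair makes the build fail when the installed binary does not report the packaged version: the hook runs the installed program with versionCheckProgramArg during the install check and expects ${version} in its output. A hand-rolled equivalent, shown only as a sketch of what the hook automates (not the hook's actual implementation):

  # sketch: roughly what versionCheckHook automates for this package
  doInstallCheck = true;
  installCheckPhase = ''
    runHook preInstallCheck
    # the output of `ollama --version` must contain the packaged version
    "$out/bin/ollama" --version | grep -F "${version}"
    runHook postInstallCheck
  '';

This is also why the postPatch substitution of 0.0.0 with ${version} matters: without it the installed binary would still report 0.0.0 and the check would fail.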
pkgs/by-name/ol/ollama/rocm.patch  -13
···
-diff --git a/llama/make/Makefile.rocm b/llama/make/Makefile.rocm
-index 4ab176b4..cd8be223 100644
---- a/llama/make/Makefile.rocm
-+++ b/llama/make/Makefile.rocm
-@@ -15,7 +15,7 @@ ifeq ($(OS),windows)
- GPU_COMPILER:=$(GPU_COMPILER_WIN)
- else ifeq ($(OS),linux)
- GPU_LIB_DIR_LINUX := $(HIP_PATH)/lib
-- GPU_COMPILER_LINUX := $(shell X=$$(which hipcc 2>/dev/null) && echo $$X)
-+ GPU_COMPILER_LINUX := $(HIP_PATH)/bin/hipcc
- GPU_COMPILER:=$(GPU_COMPILER_LINUX)
- ROCM_TRANSITIVE_LIBS_INITIAL = $(sort $(shell ldd $(GPU_LIBS) | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -e rocm -e amdgpu -e libtinfo -e libnuma -e libelf))
- GPU_TRANSITIVE_LIBS = $(sort $(shell readlink -f $(ROCM_TRANSITIVE_LIBS_INITIAL)) $(ROCM_TRANSITIVE_LIBS_INITIAL))
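
The dist_cmd selection in preBuild keys off cudaRequested / rocmRequested, which the package derives from its acceleration argument; that plumbing is not visible in this diff. As a sketch of how a consumer ends up on the dist_rocm or dist_cuda_v* make target, assuming the acceleration argument and the services.ollama NixOS module:

  # sketch (NixOS module); `acceleration` and `services.ollama` come from the
  # package/module interface, which this diff does not show
  { pkgs, ... }:
  {
    services.ollama = {
      enable = true;
      # sets rocmRequested, i.e. the dist_rocm make target above, and triggers the
      # wrapProgram/wrapperArgs step in postFixup that exposes the GPU runtime libs
      package = pkgs.ollama.override { acceleration = "rocm"; };
    };
  }

The pkgs.ollama-rocm and pkgs.ollama-cuda attributes are essentially this override pre-applied.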