Personal Nix setup

Add prepackaged ollama 0.11.2 download

Changed files
+29 -2
home
apps
machines
irnbru
+6
home/apps/ollama.nix
···
OLLAMA_FLASH_ATTENTION = if cfg.ollama.flashAttention then "1" else "0";
OLLAMA_SCHED_SPREAD = if cfg.ollama.schedSpread then "1" else "0";
OLLAMA_INTEL_GPU = if cfg.ollama.intelGpu then "1" else "0";
+
OLLAMA_NEW_ENGINE = if cfg.ollama.newEngine then "1" else "0";
OLLAMA_KV_CACHE_TYPE = cfg.ollama.kvCacheType;
OLLAMA_CONTEXT_LENGTH = toString cfg.ollama.defaultContextLength;
OLLAMA_MAX_LOADED_MODELS = toString cfg.ollama.maxLoadedModels;
···
Effect: Enables multi-GPU usage for model inference.
Scenario: Beneficial in high-performance computing environments with multiple GPUs to maximize hardware utilization.
'';
+
type = types.bool;
+
};
+
+
newEngine = mkOption {
  # NOTE(review): mirrors the sibling options above, which all document
  # Effect/Scenario in `description`; this one was added without any.
  description = ''
    Whether to set OLLAMA_NEW_ENGINE=1 in the service environment.
    Effect: Opts ollama into its newer inference engine (presumably the
    Go-based llama runner — confirm against the ollama release notes for
    the pinned version).
  '';
  default = true;
  type = types.bool;
};
+23 -2
machines/irnbru/home.nix
···
-
{ ... }:
+
{ pkgs, ... }:
{
modules = {
···
};
apps = {
enable = true;
-
ollama.enable = true;
ghostty.enable = true;
+
ollama = {
  enable = true;

  # Use the upstream prebuilt 0.11.2 macOS release instead of the
  # ollama packaged in nixpkgs (pins the exact version).
  package = with pkgs; stdenv.mkDerivation rec {
    pname = "ollama";
    version = "0.11.2";

    # Official release tarball for macOS; `hash` is the fixed-output
    # hash of the download.
    src = fetchurl {
      url = "https://github.com/ollama/ollama/releases/download/v${version}/ollama-darwin.tgz";
      hash = "sha256-gUKwOmZ6oViIEzozQx5FB5090Zs0q8nQTdRqjqa0t8I=";
    };

    # The tarball unpacks its files at the top level (no versioned
    # subdirectory), so build from the unpack directory itself.
    sourceRoot = ".";

    # Prebuilt binary: nothing to configure or compile.
    dontBuild = true;
    dontConfigure = true;

    installPhase = ''
      runHook preInstall

      mkdir -p $out/bin $out/lib/ollama
      install -Dm755 ollama $out/bin/ollama

      # Copy bundled runtime libraries. Guard each glob: an unmatched
      # pattern (e.g. no *.so files in a darwin tarball) would otherwise
      # be passed literally to cp and fail the build.
      for lib in *.so *.dylib; do
        if [ -e "$lib" ]; then
          cp -r "$lib" $out/lib/ollama/
        fi
      done

      runHook postInstall
    '';
  };
};
};
};
}