···
···
# internal dependency - for overriding in overlays
···
pkg: if pkg != null && lib.meta.availableOn stdenv.hostPlatform pkg then pkg else null;
-# see CMakeLists.txt, grepping for GIT_TAG near cutlass
+# see CMakeLists.txt, grepping for CUTLASS_REVISION
# https://github.com/vllm-project/vllm/blob/v${version}/CMakeLists.txt
cutlass = fetchFromGitHub {
···
-rev = "575f7724b9762f265bbee5889df9c7d630801845";
-hash = "sha256-8WrKMl0olr0nYV4FRJfwSaJ0F5gWQpssoFMjr9tbHBk=";
+rev = "0e43e774597682284358ff2c54530757b654b8d1";
+hash = "sha256-wxL/jtq/lsLg1o+4392KNgfw5TYlW6lqEVbmR3Jl4/Q=";
···
repo = "flash-attention";
-rev = "1c2624e53c078854e0637ee566c72fe2107e75f4";
-hash = "sha256-WWFhHEUSAlsXr2yR4rGlTQQnSafXKg8gO5PQA8HPYGE=";
+rev = "57b4e68b9f9d94750b46de8f8dbd2bfcc86edd4f";
+hash = "sha256-c7L7WZVVEnXMOTPBoSp7jhkl9d4TA4sj11QvOSWTDIE=";
···
libcusolver # cusolverDn.h
+# cusparselt # cusparseLt.h
···
-version = "0.10.0";
+version = "0.10.1.1";
# https://github.com/vllm-project/vllm/issues/12083
···
-hash = "sha256-R9arpFz+wkDGmB3lW+H8d/37EoAQDyCWjLHJW1VTutk=";
+hash = "sha256-lLNjBv5baER0AArX3IV4HWjDZ2jTGXyGIvnHupR8MGM=";
-# error: ‘BF16Vec16’ in namespace ‘vec_op’ does not name a type; did you mean ‘FP16Vec16’?
-# Reported: https://github.com/vllm-project/vllm/issues/21714
-# Fix from https://github.com/vllm-project/vllm/pull/21848
-name = "build-fix-for-arm-without-bf16";
-url = "https://github.com/vllm-project/vllm/commit/b876860c6214d03279e79e0babb7eb4e3e286cbd.patch";
-hash = "sha256-tdBAObFxliVUNTWeSggaLtS4K9f8zEVu22nSgRmMsDs=";
./0002-setup.py-nix-support-respect-cmakeFlags.patch
./0003-propagate-pythonpath.patch
./0005-drop-intel-reqs.patch
···
opentelemetry-exporter-otlp
# vLLM needs Torch's compiler to be present in order to use torch.compile