{
  lib,
  buildPythonPackage,
  fetchFromGitHub,

  # build-system
  setuptools,

  # dependencies
  jinja2,
  mlx,
  numpy,
  protobuf,
  pyyaml,
  transformers,

  # tests
  lm-eval,
  sentencepiece,
  pytestCheckHook,
  writableTmpDirAsHomeHook,
}:

buildPythonPackage rec {
  pname = "mlx-lm";
  version = "0.26.3";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "ml-explore";
    repo = "mlx-lm";
    tag = "v${version}";
    hash = "sha256-O4wW7wvIqSeBv01LoUCHm0/CgcRc5RfFHjvwyccp6UM=";
  };

  build-system = [
    setuptools
  ];

  dependencies = [
    jinja2
    mlx
    numpy
    protobuf
    pyyaml
    transformers
  ];

  nativeCheckInputs = [
    lm-eval
    pytestCheckHook
    sentencepiece
    writableTmpDirAsHomeHook
  ];

  pythonImportsCheck = [
    "mlx_lm"
  ];

  disabledTestPaths = [
    # Requires network access to huggingface.co
    "tests/test_datasets.py"
    "tests/test_generate.py"
    "tests/test_server.py"
    "tests/test_tokenizers.py"
    "tests/test_utils.py::TestUtils::test_convert"
    "tests/test_utils.py::TestUtils::test_load"
    "tests/test_utils_load_model.py"
    "tests/test_prompt_cache.py::TestPromptCache::test_cache_to_quantized"
    "tests/test_prompt_cache.py::TestPromptCache::test_cache_with_generate"
    "tests/test_prompt_cache.py::TestPromptCache::test_trim_cache_with_generate"
    # RuntimeError: [metal_kernel] No GPU back-end.
    "tests/test_losses.py"
    "tests/test_models.py::TestModels::test_bitnet"
  ];

  meta = {
    description = "Run LLMs with MLX";
    homepage = "https://github.com/ml-explore/mlx-lm";
    changelog = "https://github.com/ml-explore/mlx-lm/releases/tag/v${version}";
    license = lib.licenses.mit;
    platforms = [
      "aarch64-darwin"
    ];
    maintainers = with lib.maintainers; [ ferrine ];
  };
}