{
  lib,
  buildPythonPackage,
  fetchPypi,
  google-generativeai,
  llama-index-core,
  hatchling,
  pythonOlder,
}:

buildPythonPackage rec {
  pname = "llama-index-embeddings-gemini";
  version = "0.4.1";
  pyproject = true;

  disabled = pythonOlder "3.9";

  src = fetchPypi {
    # PyPI normalizes the distribution name with underscores
    pname = "llama_index_embeddings_gemini";
    inherit version;
    hash = "sha256-XkFXYdaRr1i0Ez5GLkxIGIJZcR/hCS2mB2t5jWRUUs0=";
  };

  # Upstream pins google-generativeai too strictly for the version packaged
  # in nixpkgs; relax the constraint so dependency resolution succeeds.
  pythonRelaxDeps = [ "google-generativeai" ];

  build-system = [ hatchling ];

  dependencies = [
    google-generativeai
    llama-index-core
  ];

  # Tests are only available in the mono repo
  doCheck = false;

  pythonImportsCheck = [ "llama_index.embeddings.gemini" ];

  meta = {
    # Fixed: previous description said "Llms Integration", but this package
    # provides the *embeddings* integration (see pname and pythonImportsCheck).
    description = "LlamaIndex Embeddings Integration for Gemini";
    homepage = "https://github.com/run-llama/llama_index/tree/main/llama-index-integrations/embeddings/llama-index-embeddings-gemini";
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [ fab ];
  };
}