nixos/hadoop: replace "enabled" options with "enable" options

The module has been using "enabled" in place of "enable" since init (0c10b2baa6bf61c8ddaed7cdb6c2f2dbaab42662).

Changed files
+33 -33
nixos
modules
services
cluster
tests
+12 -12
nixos/modules/services/cluster/hadoop/hdfs.nix
···
{
options.services.hadoop.hdfs = {
namenode = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
datanode = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
journalnode = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
zkfc = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
inherit restartIfChanged;
};
httpfs = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
};
config = mkMerge [
-
(mkIf cfg.hdfs.namenode.enabled {
systemd.services.hdfs-namenode = {
description = "Hadoop HDFS NameNode";
wantedBy = [ "multi-user.target" ];
···
8022 # namenode. servicerpc-address
]);
})
-
(mkIf cfg.hdfs.datanode.enabled {
systemd.services.hdfs-datanode = {
description = "Hadoop HDFS DataNode";
wantedBy = [ "multi-user.target" ];
···
9867 # datanode.ipc.address
]);
})
-
(mkIf cfg.hdfs.journalnode.enabled {
systemd.services.hdfs-journalnode = {
description = "Hadoop HDFS JournalNode";
wantedBy = [ "multi-user.target" ];
···
8485 # dfs.journalnode.rpc-address
]);
})
-
(mkIf cfg.hdfs.zkfc.enabled {
systemd.services.hdfs-zkfc = {
description = "Hadoop HDFS ZooKeeper failover controller";
wantedBy = [ "multi-user.target" ];
···
};
};
})
-
(mkIf cfg.hdfs.httpfs.enabled {
systemd.services.hdfs-httpfs = {
description = "Hadoop httpfs";
wantedBy = [ "multi-user.target" ];
···
]);
})
(mkIf (
-
cfg.hdfs.namenode.enabled || cfg.hdfs.datanode.enabled || cfg.hdfs.journalnode.enabled || cfg.hdfs.zkfc.enabled
) {
users.users.hdfs = {
description = "Hadoop HDFS user";
···
uid = config.ids.uids.hdfs;
};
})
-
(mkIf cfg.hdfs.httpfs.enabled {
users.users.httpfs = {
description = "Hadoop HTTPFS user";
group = "hadoop";
···
{
options.services.hadoop.hdfs = {
namenode = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
datanode = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
journalnode = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
zkfc = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
inherit restartIfChanged;
};
httpfs = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
};
config = mkMerge [
+
(mkIf cfg.hdfs.namenode.enable {
systemd.services.hdfs-namenode = {
description = "Hadoop HDFS NameNode";
wantedBy = [ "multi-user.target" ];
···
8022 # namenode. servicerpc-address
]);
})
+
(mkIf cfg.hdfs.datanode.enable {
systemd.services.hdfs-datanode = {
description = "Hadoop HDFS DataNode";
wantedBy = [ "multi-user.target" ];
···
9867 # datanode.ipc.address
]);
})
+
(mkIf cfg.hdfs.journalnode.enable {
systemd.services.hdfs-journalnode = {
description = "Hadoop HDFS JournalNode";
wantedBy = [ "multi-user.target" ];
···
8485 # dfs.journalnode.rpc-address
]);
})
+
(mkIf cfg.hdfs.zkfc.enable {
systemd.services.hdfs-zkfc = {
description = "Hadoop HDFS ZooKeeper failover controller";
wantedBy = [ "multi-user.target" ];
···
};
};
})
+
(mkIf cfg.hdfs.httpfs.enable {
systemd.services.hdfs-httpfs = {
description = "Hadoop httpfs";
wantedBy = [ "multi-user.target" ];
···
]);
})
(mkIf (
+
cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
) {
users.users.hdfs = {
description = "Hadoop HDFS user";
···
uid = config.ids.uids.hdfs;
};
})
+
(mkIf cfg.hdfs.httpfs.enable {
users.users.httpfs = {
description = "Hadoop HTTPFS user";
group = "hadoop";
+5 -5
nixos/modules/services/cluster/hadoop/yarn.nix
···
{
options.services.hadoop.yarn = {
resourcemanager = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
nodemanager = {
-
enabled = mkOption {
type = types.bool;
default = false;
description = ''
···
config = mkMerge [
(mkIf (
-
cfg.yarn.resourcemanager.enabled || cfg.yarn.nodemanager.enabled
) {
users.users.yarn = {
···
};
})
-
(mkIf cfg.yarn.resourcemanager.enabled {
systemd.services.yarn-resourcemanager = {
description = "Hadoop YARN ResourceManager";
wantedBy = [ "multi-user.target" ];
···
]);
})
-
(mkIf cfg.yarn.nodemanager.enabled {
# Needed because yarn hardcodes /bin/bash in container start scripts
# These scripts can't be patched, they are generated at runtime
systemd.tmpfiles.rules = [
···
{
options.services.hadoop.yarn = {
resourcemanager = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
};
};
nodemanager = {
+
enable = mkOption {
type = types.bool;
default = false;
description = ''
···
config = mkMerge [
(mkIf (
+
cfg.yarn.resourcemanager.enable || cfg.yarn.nodemanager.enable
) {
users.users.yarn = {
···
};
})
+
(mkIf cfg.yarn.resourcemanager.enable {
systemd.services.yarn-resourcemanager = {
description = "Hadoop YARN ResourceManager";
wantedBy = [ "multi-user.target" ];
···
]);
})
+
(mkIf cfg.yarn.nodemanager.enable {
# Needed because yarn hardcodes /bin/bash in container start scripts
# These scripts can't be patched, they are generated at runtime
systemd.tmpfiles.rules = [
+11 -11
nixos/tests/hadoop/hadoop.nix
···
nn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
-
hdfs.namenode.enabled = true;
-
hdfs.zkfc.enabled = true;
};
};
nn2 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
-
hdfs.namenode.enabled = true;
-
hdfs.zkfc.enabled = true;
};
};
jn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
-
hdfs.journalnode.enabled = true;
};
};
jn2 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
-
hdfs.journalnode.enabled = true;
};
};
jn3 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
-
hdfs.journalnode.enabled = true;
};
};
dn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
-
hdfs.datanode.enabled = true;
};
};
···
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-
yarn.resourcemanager.enabled = true;
};
};
rm2 = {pkgs, options, ...}: {
···
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-
yarn.resourcemanager.enabled = true;
};
};
nm1 = {pkgs, options, ...}: {
···
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
-
yarn.nodemanager.enabled = true;
};
};
};
···
nn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
+
hdfs.namenode.enable = true;
+
hdfs.zkfc.enable = true;
};
};
nn2 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
+
hdfs.namenode.enable = true;
+
hdfs.zkfc.enable = true;
};
};
jn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
+
hdfs.journalnode.enable = true;
};
};
jn2 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
+
hdfs.journalnode.enable = true;
};
};
jn3 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
+
hdfs.journalnode.enable = true;
};
};
dn1 = {pkgs, options, ...}: {
services.hadoop = {
inherit package coreSite hdfsSite;
+
hdfs.datanode.enable = true;
};
};
···
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
+
yarn.resourcemanager.enable = true;
};
};
rm2 = {pkgs, options, ...}: {
···
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
+
yarn.resourcemanager.enable = true;
};
};
nm1 = {pkgs, options, ...}: {
···
services.hadoop = {
inherit package coreSite hdfsSite;
yarnSite = options.services.hadoop.yarnSite.default // yarnSiteHA;
+
yarn.nodemanager.enable = true;
};
};
};
+3 -3
nixos/tests/hadoop/hdfs.nix
···
package = pkgs.hadoop;
hdfs = {
namenode = {
-
enabled = true;
formatOnInit = true;
};
-
httpfs.enabled = true;
};
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
···
datanode = {pkgs, ...}: {
services.hadoop = {
package = pkgs.hadoop;
-
hdfs.datanode.enabled = true;
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
"hadoop.proxyuser.httpfs.groups" = "*";
···
package = pkgs.hadoop;
hdfs = {
namenode = {
+
enable = true;
formatOnInit = true;
};
+
httpfs.enable = true;
};
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
···
datanode = {pkgs, ...}: {
services.hadoop = {
package = pkgs.hadoop;
+
hdfs.datanode.enable = true;
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
"hadoop.proxyuser.httpfs.groups" = "*";
+2 -2
nixos/tests/hadoop/yarn.nix
···
nodes = {
resourcemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
-
services.hadoop.yarn.resourcemanager.enabled = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
};
};
nodemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
-
services.hadoop.yarn.nodemanager.enabled = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.hostname" = "resourcemanager";
"yarn.nodemanager.log-dirs" = "/tmp/userlogs";
···
nodes = {
resourcemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
+
services.hadoop.yarn.resourcemanager.enable = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
};
};
nodemanager = {pkgs, ...}: {
services.hadoop.package = pkgs.hadoop;
+
services.hadoop.yarn.nodemanager.enable = true;
services.hadoop.yarnSite = {
"yarn.resourcemanager.hostname" = "resourcemanager";
"yarn.nodemanager.log-dirs" = "/tmp/userlogs";