+23  nixos/doc/manual/from_md/release-notes/rl-2111.section.xml
···
+5  nixos/doc/manual/release-notes/rl-2111.section.md
···
 - spark now defaults to spark 3, updated from 2. A [migration guide](https://spark.apache.org/docs/latest/core-migration-guide.html#upgrading-from-core-24-to-30) is available.
+- HDFS and YARN now support production-ready highly available deployments with automatic failover.
 - Activation scripts can now opt in to be run when running `nixos-rebuild dry-activate` and detect the dry activation by reading `$NIXOS_ACTION`. This allows activation scripts to output what they would change if the activation was really run.
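The new release-note entry above can be exercised with a configuration roughly like the following; a minimal sketch assuming the options visible elsewhere in this diff (`hdfs.journalnode`, `hdfs.zkfc`, plus the existing `coreSite`/`hdfsSite` free-form options), with an illustrative nameservice, hosts and ZooKeeper quorum:

```nix
# Minimal sketch of an HA HDFS setup, assuming the options shown in this diff
# (hdfs.journalnode, hdfs.zkfc) and the existing coreSite/hdfsSite options;
# the "ns1" nameservice, host names and ZooKeeper quorum are illustrative only.
{
  services.hadoop = {
    coreSite = {
      "fs.defaultFS" = "hdfs://ns1";
      "ha.zookeeper.quorum" = "zk1:2181,zk2:2181,zk3:2181";
    };
    hdfsSite = {
      "dfs.nameservices" = "ns1";
      "dfs.ha.namenodes.ns1" = "nn1,nn2";
      "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
      "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
      "dfs.namenode.shared.edits.dir" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
      "dfs.ha.automatic-failover.enabled.ns1" = "true";
      "dfs.client.failover.proxy.provider.ns1" =
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
    };
    hdfs = {
      namenode.enable = true;    # on both nn1 and nn2
      zkfc.enable = true;        # automatic failover controller, one per namenode
      journalnode.enable = true; # on the journal nodes
    };
  };
}
```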
+1  nixos/modules/services/cluster/hadoop/conf.nix
···
+36 -6  nixos/modules/services/cluster/hadoop/default.nix
···
+<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml"/>
···
+<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml"/>
···
+<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml"/>
···
+<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml"/>
+<link xlink:href="https://hadoop.apache.org/docs/current/hadoop-hdfs-httpfs/httpfs-default.html"/>
···
+<link xlink:href="https://hadoop.apache.org/docs/r2.7.2/hadoop-yarn/hadoop-yarn-site/SecureContainer.html"/>
···
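For orientation, the linked `*-default.xml` pages document the settings that feed the module's free-form site options; a rough sketch with purely illustrative values, assuming the existing `coreSite`/`hdfsSite`/`mapredSite`/`yarnSite` options:

```nix
# Rough mapping sketch; values are illustrative, option names assumed from
# the existing hadoop module.
{
  services.hadoop = {
    coreSite."fs.defaultFS" = "hdfs://ns1";            # settings documented in core-default.xml
    hdfsSite."dfs.replication" = "3";                  # hdfs-default.xml
    mapredSite."mapreduce.framework.name" = "yarn";    # mapred-default.xml
    yarnSite."yarn.resourcemanager.hostname" = "rm1";  # yarn-default.xml
  };
}
```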
+105 -12  nixos/modules/services/cluster/hadoop/hdfs.nix
···
+Format HDFS namenode on first start. This is useful for quickly spinning up ephemeral HDFS clusters with a single namenode.
+For HA clusters, initialization involves multiple steps across multiple nodes. Follow [this guide](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html)
···
+cfg.hdfs.namenode.enable || cfg.hdfs.datanode.enable || cfg.hdfs.journalnode.enable || cfg.hdfs.zkfc.enable
···
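The single-namenode case this description mentions might look roughly like this; a sketch that assumes a `formatOnInit`-style namenode option (the exact attribute name is not visible in this hunk) and an illustrative address:

```nix
# Sketch of an ephemeral single-namenode cluster; "formatOnInit" is an assumed
# name for the option described above, and the defaultFS address is illustrative.
{
  services.hadoop = {
    coreSite."fs.defaultFS" = "hdfs://namenode:8020";
    hdfs = {
      namenode = {
        enable = true;
        formatOnInit = true;  # format the namenode on first start
      };
      datanode.enable = true;
    };
  };
}
```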
+6 -17  nixos/modules/services/cluster/hadoop/yarn.nix
···
+200 -40  nixos/tests/hadoop/hadoop.nix
···
-assert "Estimated value of Pi is" in worker.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
···
+# This test is very comprehensive. It tests whether all hadoop services work well with each other.
+"dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
+# yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
+# hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
···
+assert "Estimated value of Pi is" in nm1.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~lib/hadoop-*/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
···
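The workaround described in the comment above boils down to setting the webapp address explicitly next to the hostname; a sketch using the module's `yarnSite` option with illustrative rm-ids, hosts and the default ResourceManager web port:

```nix
# Sketch of the workaround noted above: the webapp address is set explicitly
# per resourcemanager even though the hostname is already configured.
# rm-ids and host names are illustrative.
{
  services.hadoop.yarnSite = {
    "yarn.resourcemanager.ha.enabled" = "true";
    "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
    "yarn.resourcemanager.hostname.rm1" = "rm1";
    "yarn.resourcemanager.hostname.rm2" = "rm2";
    # Shouldn't be needed, but see the AmFilterInitializer issue referenced above
    "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
    "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
  };
}
```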
+22 -16  nixos/tests/hadoop/hdfs.nix
···
+assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
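The curl check above goes through HttpFS, whose default port is 14000; a stripped-down test fragment along these lines, where the `hdfs.httpfs.enable` option name and the node layout are assumptions:

```nix
# Cut-down sketch of the HttpFS round trip checked above; the hdfs.httpfs.enable
# option name and the single-node layout are assumptions based on this diff.
{
  nodes.namenode = {
    services.hadoop = {
      coreSite."fs.defaultFS" = "hdfs://namenode:8020";
      hdfs.namenode.enable = true;
      hdfs.httpfs.enable = true;  # HttpFS serves WebHDFS on port 14000 by default
    };
  };
  testScript = ''
    namenode.wait_for_open_port(14000)
    # write a test file as the hdfs user, then read it back over WebHDFS
    namenode.succeed("echo testfilecontents | HADOOP_USER_NAME=hdfs hdfs dfs -put - /testfile")
    assert "testfilecontents" in namenode.succeed(
        "curl -f 'http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN'"
    )
  '';
}
```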
+3 -12  nixos/tests/hadoop/yarn.nix
···"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";···
···"yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";···