配置HA spark集群
角色规划:
Master: hadoop01、hadoop02(主备)
Worker: hadoop03、hadoop04
1.安装zookeeper
略
2.上传spark安装包到hadoop01
3.解压安装包
4.重命名解压目录为 /home/hadoop/spark
5.修改配置文件
cp spark-env.sh.template spark-env.sh
vim spark-env.sh
export SCALA_HOME=/home/hadoop/scala
export JAVA_HOME=/home/hadoop/java/jdk1.8.0_73
export SPARK_WORKER_MEMORY=500m
export HADOOP_CONF_DIR=/home/hadoop/hadoop/etc/hadoop
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=hadoop02:2181,hadoop03:2181,hadoop04:2181 -Dspark.deploy.zookeeper.dir=/home/hadoop/spark/meta"
cp slaves.template slaves
vim slaves
hadoop03
hadoop04
6.启动
启动前先将配置好的spark目录分发(scp)到其它所有节点。
在hadoop01上执行: sbin/start-all.sh   (启动主Master以及slaves中配置的所有Worker)
在hadoop02上执行: sbin/start-master.sh   (启动备用Master, 实现主备切换)
单独启动某个Worker: sbin/start-slave.sh spark://hadoop01:7077,hadoop02:7077   (start-slave.sh必须带Master地址参数)