51CTO Zhao Qiang Hadoop Study Notes (7)

Implementing Hadoop HA with ZooKeeper

This walkthrough uses a ZooKeeper ensemble to add high availability to Hadoop: an active/standby NameNode pair with automatic failover, plus an active/standby ResourceManager pair.

[Topology diagram. Roles used throughout: hadoop11-13 form the ZooKeeper ensemble; hadoop21 and hadoop24 host the NameNode and ResourceManager pairs; hadoop22 and hadoop23 are the DataNode/NodeManager workers.]

Configuration files

Link: https://pan.baidu.com/s/1geUjpSn   Password: 7yoc

1. Install the JDK and configure /etc/hosts (all nodes)

#mkdir tools
#mkdir training
#cd tools
#tar -zxvf jdk-8u144-linux-x64.tar.gz -C ~/training/
# cd ~/training/
# vi ~/.bash_profile
JAVA_HOME=/root/training/jdk1.8.0_144
export JAVA_HOME

PATH=$JAVA_HOME/bin:$PATH
export PATH

# source ~/.bash_profile
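A quick check that the shell now resolves the new JDK (the version string below assumes the 8u144 tarball unpacked above):

# java -version
java version "1.8.0_144"
# echo $JAVA_HOME
/root/training/jdk1.8.0_144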
# vi /etc/hosts
192.168.56.11 hadoop11
192.168.56.12 hadoop12
192.168.56.13 hadoop13
192.168.56.21 hadoop21
192.168.56.22 hadoop22
192.168.56.23 hadoop23
192.168.56.24 hadoop24
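A one-line sanity check that the names resolve (hadoop12 here is just one example; repeat for the others if in doubt):

# ping -c 1 hadoop12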

2. Configure passwordless SSH between all nodes

hadoop11:

#cd
# ssh-keygen -t rsa

# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop12:

#cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop13:

#cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop21:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop22:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop23:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop24:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24
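The seven blocks above are identical on every host; as a sketch, the copy step can be collapsed into a loop over the same host list, and an ssh that no longer prompts for a password confirms the keys landed:

# for h in hadoop11 hadoop12 hadoop13 hadoop21 hadoop22 hadoop23 hadoop24; do ssh-copy-id -i ~/.ssh/id_rsa.pub root@$h; done
# ssh hadoop12 hostname
hadoop12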

3. Install and configure ZooKeeper (hadoop11, hadoop12, hadoop13)

hadoop11:

#pwd
/root
#cd tools
#tar -zxvf zookeeper-3.4.6.tar.gz -C ~/training
#cd ~/training
# cd zookeeper-3.4.6
#vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME

PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
#source ~/.bash_profile
#mkdir data
#cd conf
#cp zoo_sample.cfg zoo.cfg
#vi zoo.cfg
dataDir=/root/training/zookeeper-3.4.6/data
server.1=hadoop11:2888:3888
server.2=hadoop12:2888:3888
server.3=hadoop13:2888:3888
#cd ../data
#echo 1 > myid
#cd ../..
#scp -r zookeeper-3.4.6/ root@hadoop12:/root/training/
#scp -r zookeeper-3.4.6/ root@hadoop13:/root/training/

hadoop12:

#cd training/zookeeper-3.4.6/data/
#echo 2 > myid
#cd ..
#vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME

PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
#source ~/.bash_profile

hadoop13:

#cd training/zookeeper-3.4.6/data/
#echo 3 > myid
#cd ..
#vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME

PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
#source ~/.bash_profile
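Each node's myid must match its server.N line in zoo.cfg: 1 on hadoop11, 2 on hadoop12, 3 on hadoop13. A quick cross-check from hadoop11 (expected output shown below the command):

# for h in hadoop11 hadoop12 hadoop13; do ssh $h cat /root/training/zookeeper-3.4.6/data/myid; done
1
2
3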

4. Install and configure Hadoop (hadoop21 through hadoop24)

hadoop21:

#cd tools
# tar -zxvf hadoop-2.4.1.tar.gz -C ~/training/
#cd ~/training
# cd hadoop-2.4.1/
# vi ~/.bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source ~/.bash_profile
#cd etc/hadoop/
#vi hadoop-env.sh
export JAVA_HOME=/root/training/jdk1.8.0_144
#mkdir ~/training/hadoop-2.4.1/tmp
#vi core-site.xml
<configuration>
    <!-- Use the logical HDFS nameservice ns1 as the default filesystem -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://ns1</value>
    </property>
    <!-- Hadoop working directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/root/training/hadoop-2.4.1/tmp</value>
    </property>
    <!-- ZooKeeper ensemble used for failover coordination -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>hadoop11:2181,hadoop12:2181,hadoop13:2181</value>
    </property>
</configuration>
#vi hdfs-site.xml
<configuration>
        <!-- The HDFS nameservice is ns1; must match core-site.xml -->
        <property>
                <name>dfs.nameservices</name>
                <value>ns1</value>
        </property>

        <!-- ns1 has two NameNodes, nn1 and nn2 -->
        <property>
                <name>dfs.ha.namenodes.ns1</name>
                <value>nn1,nn2</value>
        </property>

        <!-- RPC address of nn1 -->
        <property>
                <name>dfs.namenode.rpc-address.ns1.nn1</name>
                <value>hadoop21:9000</value>
        </property>
        <!-- HTTP address of nn1 -->
        <property>
                <name>dfs.namenode.http-address.ns1.nn1</name>
                <value>hadoop21:50070</value>
        </property>

        <!-- RPC address of nn2 -->
        <property>
                <name>dfs.namenode.rpc-address.ns1.nn2</name>
                <value>hadoop24:9000</value>
        </property>
        <!-- HTTP address of nn2 -->
        <property>
                <name>dfs.namenode.http-address.ns1.nn2</name>
                <value>hadoop24:50070</value>
        </property>

        <!-- Where the NameNodes' shared edit log lives on the JournalNodes -->
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://hadoop21:8485;hadoop24:8485/ns1</value>
        </property>
        <!-- Where each JournalNode stores its data on local disk -->
        <property>
                <name>dfs.journalnode.edits.dir</name>
                <value>/root/training/hadoop-2.4.1/journal</value>
        </property>

        <!-- Enable automatic NameNode failover -->
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>
        <!-- Proxy class that clients use to locate the active NameNode -->
        <property>
                <name>dfs.client.failover.proxy.provider.ns1</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <!-- Fencing methods, separated by newlines (one per line) -->
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value>
                        sshfence
                        shell(/bin/true)
                </value>
        </property>
        <!-- sshfence needs passwordless SSH; point it at the private key -->
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/root/.ssh/id_rsa</value>
        </property>
        <!-- Timeout (ms) for the sshfence connection -->
        <property>
                <name>dfs.ha.fencing.ssh.connect-timeout</name>
                <value>30000</value>
        </property>
</configuration>
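With fs.defaultFS pointing at the logical nameservice, clients never name a specific NameNode host; the failover proxy provider above resolves whichever of nn1/nn2 is currently active. Once the cluster is up (see the final section), both forms below reach the active NameNode:

# hdfs dfs -ls /
# hdfs dfs -ls hdfs://ns1/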
#cp mapred-site.xml.template mapred-site.xml
#vi mapred-site.xml
<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
</configuration>
#vi yarn-site.xml
<configuration>
        <!-- Enable ResourceManager HA -->
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>
        <!-- Cluster id for the RM pair -->
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>yrc</value>
        </property>
        <!-- Logical ids of the two ResourceManagers -->
        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>
        <!-- Host for each ResourceManager -->
        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>hadoop21</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>hadoop24</value>
        </property>
        <!-- ZooKeeper ensemble address -->
        <property>
                <name>yarn.resourcemanager.zk-address</name>
                <value>hadoop11:2181,hadoop12:2181,hadoop13:2181</value>
        </property>
        <!-- Shuffle service required by MapReduce -->
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
</configuration>
#vi slaves
hadoop22
hadoop23
#cd ../../..
#scp -r hadoop-2.4.1/ root@hadoop22:/root/training/
#scp -r hadoop-2.4.1/ root@hadoop23:/root/training/
#scp -r hadoop-2.4.1/ root@hadoop24:/root/training/

hadoop22:

#vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source .bash_profile

hadoop23:

#vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source .bash_profile

hadoop24:

#vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source .bash_profile

5. Start and verify the cluster

Start the ZooKeeper ensemble on hadoop11, hadoop12, and hadoop13:

#zkServer.sh start
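Once all three are up, checking the status on each node should report one leader and two followers (the Mode line varies by node):

#zkServer.sh status
JMX enabled by default
Using config: /root/training/zookeeper-3.4.6/bin/../conf/zoo.cfg
Mode: follower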

hadoop21:

Start the JournalNodes (run this on both hadoop21 and hadoop24):

#hadoop-daemon.sh start journalnode
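jps on each of the two hosts should now list a JournalNode process before moving on to the format step. (A quorum journal normally runs an odd number of JournalNodes, typically three; with only the two configured here, losing either one stops the shared edit log, so treat this layout as a training setup. PIDs below are illustrative.)

#jps
2345 JournalNode
2401 Jps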
Format HDFS on hadoop21 only:

#hdfs namenode -format
#pwd
/root/training

Then copy the freshly formatted NameNode metadata to the standby on hadoop24 (note the destination: it must land inside hadoop-2.4.1, matching hadoop.tmp.dir):

#scp -r ~/training/hadoop-2.4.1/tmp root@hadoop24:/root/training/hadoop-2.4.1/
Initialize the HA state znode in ZooKeeper, then start HDFS and YARN:

#hdfs zkfc -formatZK
#start-all.sh
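In Hadoop 2.x, start-yarn.sh (called by start-all.sh) only starts a ResourceManager on the local node, so the standby RM on hadoop24 usually has to be started by hand:

hadoop24:

#yarn-daemon.sh start resourcemanager

With everything running, the HA state can be queried from hadoop21; which side reports active depends on the election, so the output below is just one possibility:

#hdfs haadmin -getServiceState nn1
active
#hdfs haadmin -getServiceState nn2
standby
#yarn rmadmin -getServiceState rm1
active
#yarn rmadmin -getServiceState rm2
standby

The NameNode web UIs at http://hadoop21:50070 and http://hadoop24:50070 show the same active/standby roles. To exercise automatic failover, kill the active NameNode process (kill -9 its jps PID) and confirm the other NameNode switches to active within a few seconds.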