The Most Detailed Hadoop Pseudo-Distributed Deployment Guide (HDFS)
1. Add a hadoop user
[root@hadoop001 ~]# useradd hadoop
[root@hadoop001 ~]# vi /etc/sudoers
# Find the line root ALL=(ALL) ALL and add below it:
hadoop ALL=(ALL) NOPASSWD:ALL
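A quick optional sanity check that the sudoers entry works: switch to the new user and run a harmless privileged command; it should print root without asking for a password.
[root@hadoop001 ~]# su - hadoop -c 'sudo whoami'
root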
2. Upload and extract
[root@hadoop001 software]# rz # upload hadoop-2.8.1.tar.gz
[root@hadoop001 software]# tar -xzvf hadoop-2.8.1.tar.gz
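rz comes from the lrzsz package; if the command is missing, install it first, or copy the tarball over with scp instead (the source path below is just an example):
[root@hadoop001 software]# yum install -y lrzsz
# or, from the machine that holds the tarball:
scp /path/to/hadoop-2.8.1.tar.gz root@192.168.137.130:/opt/software/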
3. Create a symlink
[root@hadoop001 software]# ln -s /opt/software/hadoop-2.8.1 /opt/software/hadoop
4. Set environment variables
[root@hadoop001 software]# vi /etc/profile
export HADOOP_HOME=/opt/software/hadoop
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
[root@hadoop001 software]# source /etc/profile
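To confirm the new PATH is in effect, hadoop should now resolve from any directory:
[root@hadoop001 software]# which hadoop # should print /opt/software/hadoop/bin/hadoop
[root@hadoop001 software]# hadoop version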
5. Set the owner and group
# chown -R on the symlink alone does not recurse into its target, hence the three commands below
[root@hadoop001 software]# chown -R hadoop:hadoop hadoop
[root@hadoop001 software]# chown -R hadoop:hadoop hadoop/*
[root@hadoop001 software]# chown -R hadoop:hadoop hadoop-2.8.1
[root@hadoop001 software]# cd hadoop
[root@hadoop001 hadoop]# rm -f *.txt
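You can verify the ownership actually changed on both the symlink and the real directory:
[root@hadoop001 hadoop]# ls -ld /opt/software/hadoop /opt/software/hadoop-2.8.1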
6. Switch to the hadoop user
[root@hadoop001 software]# su - hadoop
[hadoop@hadoop001 ~]# cd /opt/software/hadoop
[hadoop@hadoop001 hadoop]# ll
total 32
drwxrwxr-x. 2 hadoop hadoop 4096 Jun 2 14:24 bin
drwxrwxr-x. 3 hadoop hadoop 4096 Jun 2 14:24 etc
drwxrwxr-x. 2 hadoop hadoop 4096 Jun 2 14:24 include
drwxrwxr-x. 3 hadoop hadoop 4096 Jun 2 14:24 lib
drwxrwxr-x. 2 hadoop hadoop 4096 Aug 20 13:59 libexec
drwxr-xr-x. 2 hadoop hadoop 4096 Aug 20 13:59 logs
drwxrwxr-x. 2 hadoop hadoop 4096 Jun 2 14:24 sbin
drwxrwxr-x. 4 hadoop hadoop 4096 Jun 2 14:24 share
7. Configuration files
[hadoop@hadoop001 ~]# cd /opt/software/hadoop
[hadoop@hadoop001 hadoop]# vi etc/hadoop/core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.137.130:9000</value> <!-- use your own machine's IP -->
    </property>
</configuration>
[hadoop@hadoop001 hadoop]# vi etc/hadoop/hdfs-site.xml
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
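One caveat worth knowing: with no storage directories configured, HDFS keeps its data under /tmp/hadoop-${user.name}, which may not survive a reboot. Optionally, you can point hadoop.tmp.dir at a persistent location in core-site.xml before formatting (the path below is only an example):
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/software/hadoop/tmp</value>
    </property>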
8. Configure SSH
8.1 Generate a public/private key pair for passwordless login
[hadoop@hadoop001 ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[hadoop@hadoop001 ~]# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@hadoop001 ~]# chmod 0600 ~/.ssh/authorized_keys
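If ssh still prompts for a password after this, the usual culprit is overly open permissions: sshd refuses keys when ~/.ssh or the home directory is writable by others.
[hadoop@hadoop001 ~]# chmod 700 ~/.ssh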
8.2 Run date over SSH to check that the setup worked
[hadoop@hadoop001 ~]# ssh hadoop001 date
The authenticity of host 'hadoop001 (192.168.137.130)' can't be established.
RSA key fingerprint is 09:f6:4a:f1:a0:bd:79:fd:34:e7:75:94:0b:3c:83:5a.
Are you sure you want to continue connecting (yes/no)? yes # type yes on the first connection
Warning: Permanently added 'hadoop001,192.168.137.130' (RSA) to the list of known hosts.
Sun Aug 20 14:22:28 CST 2017
[hadoop@hadoop001 ~]# ssh hadoop001 date # no yes prompt needed this time, so it works
Sun Aug 20 14:22:29 CST 2017
[hadoop@hadoop001 ~]# ssh localhost date
The authenticity of host 'localhost (127.0.0.1)' can't be established.
RSA key fingerprint is 09:f6:4a:f1:a0:bd:79:fd:34:e7:75:94:0b:3c:83:5a.
Are you sure you want to continue connecting (yes/no)? yes # type yes on the first connection
Warning: Permanently added 'localhost' (RSA) to the list of known hosts.
Sun Aug 20 14:22:28 CST 2017
[hadoop@hadoop001 ~]# ssh localhost date # no yes prompt needed this time, so it works
Sun Aug 20 14:22:29 CST 2017
9. Format and start
[hadoop@hadoop001 hadoop]# bin/hdfs namenode -format
[hadoop@hadoop001 hadoop]# sbin/start-dfs.sh
ERROR:
hadoop001: Error: JAVA_HOME is not set and could not be found.
localhost: Error: JAVA_HOME is not set and could not be found.
9.1 Fix: set JAVA_HOME explicitly
[hadoop@hadoop001 hadoop]# vi etc/hadoop/hadoop-env.sh
# change export JAVA_HOME=${JAVA_HOME} to
export JAVA_HOME=/usr/java/jdk1.8.0_45
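The JDK path above is specific to this machine; verify that yours actually exists and substitute the directory containing bin/java, e.g.:
[hadoop@hadoop001 hadoop]# /usr/java/jdk1.8.0_45/bin/java -version # adjust to your JDK location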
[hadoop@hadoop001 hadoop]# sbin/start-dfs.sh
ERROR:
mkdir: cannot create directory `/opt/software/hadoop-2.8.1/logs': Permission denied
9.2 Fix: set the correct ownership
[hadoop@hadoop001 hadoop]# exit
[root@hadoop001 hadoop]# cd ../
[root@hadoop001 software]# chown -R hadoop:hadoop hadoop-2.8.1
[root@hadoop001 software]# su - hadoop
[hadoop@hadoop001 ~]# cd /opt/software/hadoop
9.3 Start again
[hadoop@hadoop001 hadoop]# sbin/start-dfs.sh
9.4 Check whether it succeeded
[hadoop@hadoop001 hadoop]# jps
19536 DataNode
19440 NameNode
19876 Jps
19740 SecondaryNameNode
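Beyond jps, a simple end-to-end smoke test is to write a file into HDFS and list it back:
[hadoop@hadoop001 hadoop]# bin/hdfs dfs -mkdir -p /user/hadoop
[hadoop@hadoop001 hadoop]# bin/hdfs dfs -put etc/hadoop/core-site.xml /user/hadoop/
[hadoop@hadoop001 hadoop]# bin/hdfs dfs -ls /user/hadoop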
9.5 Visit the web UI: http://192.168.137.130:50070
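If the page does not open in your browser, check from the server itself first; if curl responds but the browser does not, a firewall blocking port 50070 is the likely cause.
[hadoop@hadoop001 hadoop]# curl -s http://192.168.137.130:50070 | head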
9.6 Change the hosts the DFS daemons start on, so everything uses hadoop001
By default the three daemons resolve their hosts differently:
namenode: hadoop001 (from bin/hdfs getconf -namenodes)
datanode: localhost (using the default slaves file, etc/hadoop/slaves)
secondarynamenode: 0.0.0.0
[hadoop@hadoop001 ~]# cd /opt/software/hadoop
[hadoop@hadoop001 hadoop]# echo "hadoop001" > ./etc/hadoop/slaves
[hadoop@hadoop001 hadoop]# cat ./etc/hadoop/slaves
hadoop001
[hadoop@hadoop001 hadoop]# vi ./etc/hadoop/hdfs-site.xml
# add inside <configuration>:
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop001:50090</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.https-address</name>
        <value>hadoop001:50091</value>
    </property>
9.7 Restart
[hadoop@hadoop001 hadoop]# sbin/stop-dfs.sh
[hadoop@hadoop001 hadoop]# sbin/start-dfs.sh
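After the restart, the start-up output should name hadoop001 (not localhost or 0.0.0.0) for the DataNode and SecondaryNameNode, and the SecondaryNameNode should be listening on hadoop001:50090, which you can confirm if netstat is installed:
[hadoop@hadoop001 hadoop]# netstat -tlnp 2>/dev/null | grep 50090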