Hadoop Download
Hadoop Installation and Configuration
-
Prerequisites
1. Three virtual machines (CentOS 6.9)
2. All on the same network segment
-
Server Configuration
1. Configure hosts (identical on all three machines)
192.168.0.101 node1
192.168.0.102 node2
192.168.0.103 node3
2. Configure JDK 1.8 (/etc/profile)
JAVA_HOME=/usr/java/jdk1.8.0_171/
PATH=$JAVA_HOME/bin:$PATH
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME
export PATH
export CLASSPATH
source /etc/profile
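A quick sanity check that the JDK settings are active in the current shell:
java -version          # should report version 1.8.0_171
echo $JAVA_HOME        # should print /usr/java/jdk1.8.0_171/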
3. Create the hadoop user
useradd hadoop && echo hadoop | passwd --stdin hadoop
echo "hadoop ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
su - hadoop
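A minimal check that the sudoers rule took effect; as the hadoop user this should print root without asking for a password:
sudo whoami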
4. Install Hadoop 2.7
Installation
Unpack the distribution into /home/hadoop (a sketch of this step follows).
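A minimal sketch of the unpack step, assuming the Hadoop 2.7 tarball has already been downloaded into the hadoop user's home directory (the file name hadoop-2.7.7.tar.gz is illustrative; use the version you downloaded):
cd /home/hadoop
tar -zxf hadoop-2.7.7.tar.gz            # illustrative file name
mv hadoop-2.7.7 /home/hadoop/hadoop     # so HADOOP_HOME below resolves to /home/hadoop/hadoop/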
Configure environment variables
export HADOOP_HOME=/home/hadoop/hadoop/
export PATH=$HADOOP_HOME/bin:$PATH
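Assuming the two exports above were appended to /etc/profile (or the hadoop user's ~/.bashrc) and re-sourced, the hadoop command should now resolve:
source /etc/profile
hadoop version         # should report Hadoop 2.7.x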
Create directories
mkdir -p /home/hadoop/dfs/{name,data}
mkdir -p /home/hadoop/tmp
Create backup directories
mkdir -p /data/hdfs/{name,data}
chown -R hadoop:hadoop /data/
Set up SSH
-
Set up the master node and the other nodes (${username} is the default login username)
ssh-keygen -t rsa
ssh-copy-id ${username}@192.168.0.101
ssh-copy-id ${username}@192.168.0.102
ssh-copy-id ${username}@192.168.0.103
-
Test SSH login
ssh ${username}@192.168.0.101
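Each node should now accept the login without a password prompt; a quick loop to confirm, assuming the hosts entries from step 1:
for host in node1 node2 node3; do
    ssh ${username}@$host hostname
done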
Edit the Hadoop configuration files (/home/hadoop/hadoop/etc/hadoop)
-
hadoop-env.sh (set JAVA_HOME)
# The java implementation to use.
#export JAVA_HOME=${JAVA_HOME}
export JAVA_HOME=/usr/java/jdk1.8.0_171/
-
yarn-env.sh (set JAVA_HOME)
# some Java parameters
# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
export JAVA_HOME=/usr/java/jdk1.8.0_171/
-
slaves (list the worker hostnames)
node1
node2
node3
-
core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://node1:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/home/hadoop/tmp</value>
<description>Abase for other temporary directories.</description>
</property>
</configuration>
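To confirm Hadoop actually picks up this file, hdfs getconf reads a key back from the client configuration (runnable once HADOOP_HOME is set; no cluster needed):
hdfs getconf -confKey fs.defaultFS     # should print hdfs://node1:9000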
-
hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>node1:9001</value>
<description>View HDFS status through the web UI</description>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
<description>Each block is kept as 2 replicas</description>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
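Since dfs.webhdfs.enabled is true, the WebHDFS REST API will be served from the NameNode HTTP port once HDFS is running (started later in this guide); a minimal check:
curl "http://node1:50070/webhdfs/v1/?op=LISTSTATUS"    # lists the HDFS root directory as JSON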
-
mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>node1:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>node1:19888</value>
</property>
</configuration>
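The two jobhistory addresses above are only served once the JobHistory server is running; in Hadoop 2.7 it is a separate daemon, started on node1 after the cluster is up:
/home/hadoop/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver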
-
yarn-site.xml
<?xml version="1.0"?>
<!-- yarn-site.xml -->
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>node1:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>node1:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>node1:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>node1:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>node1:8088</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>8192</value>
</property>
</configuration>
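Note: yarn.nodemanager.resource.memory-mb caps the total memory (in MB) that YARN may hand out to containers on each NodeManager, so the value 8192 assumes each VM has at least 8 GB of RAM to spare for containers.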
-
Copy Hadoop to the other nodes
scp -r /home/hadoop/hadoop/ 192.168.0.102:/home/hadoop/
scp -r /home/hadoop/hadoop/ 192.168.0.103:/home/hadoop/
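The JDK and the /etc/profile exports from the earlier steps must also be present on node2 and node3; a quick remote check that the copy landed and Java is available (run from node1):
ssh 192.168.0.102 'ls /home/hadoop/hadoop/bin/hadoop && java -version'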
Initialization and Running
-
Initialize (run only on the node1 master node)
/home/hadoop/hadoop/bin/hdfs namenode -format
yum install tree
tree /home/hadoop/dfs
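If the format succeeded, the name directory is populated while data stays empty until the DataNodes start; a typical layout (file names illustrative for a fresh 2.7 format):
/home/hadoop/dfs
├── data
└── name
    └── current
        ├── VERSION
        ├── fsimage_0000000000000000000
        ├── fsimage_0000000000000000000.md5
        └── seen_txid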
-
Start Hadoop (as the hadoop user)
/home/hadoop/hadoop/sbin/start-dfs.sh
Check the processes
ps aux | grep --color namenode
ps aux | grep --color datanode
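jps from the JDK is a quicker way to list the Hadoop JVMs; after start-dfs.sh, node1 should show NameNode, SecondaryNameNode, and DataNode (node1 is also listed in slaves):
jps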
-
Stop Hadoop (as the hadoop user)
/home/hadoop/hadoop/sbin/stop-dfs.sh
-
Start the YARN distributed computing framework
/home/hadoop/hadoop/sbin/start-yarn.sh
ps aux | grep --color resourcemanager
ps aux | grep --color nodemanager
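Once YARN is up, the ResourceManager should report all three NodeManagers; a quick check:
yarn node -list        # should list node1, node2 and node3 in RUNNING state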
-
Quick start/stop (in Hadoop 2.x these scripts are deprecated in favor of start-dfs.sh plus start-yarn.sh, but they still work)
/home/hadoop/hadoop/sbin/start-all.sh
/home/hadoop/hadoop/sbin/stop-all.sh
-
Check the status of the HDFS distributed filesystem
/home/hadoop/hadoop/bin/hdfs dfsadmin -report
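A minimal end-to-end smoke test: write a file into HDFS and list it back (the /test path is illustrative):
/home/hadoop/hadoop/bin/hdfs dfs -mkdir -p /test
/home/hadoop/hadoop/bin/hdfs dfs -put /etc/hosts /test/
/home/hadoop/hadoop/bin/hdfs dfs -ls /test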
-
View status in a web browser (NameNode web UI)
192.168.0.101:50070
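With the yarn-site.xml settings above, the YARN ResourceManager web UI is also available:
192.168.0.101:8088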