Disable the firewall
1. Check whether the SuSE firewall is set to start at boot
chkconfig --list | grep firewall
2. Turn off the firewall services
chkconfig SuSEfirewall2_init off
chkconfig SuSEfirewall2_setup off
Configure passwordless SSH login
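The original notes list no commands for this step; a minimal sketch, assuming a single node named CDH1 (the hostname used in the Hadoop configuration below) and a root login:
ssh-keygen -t rsa            # generate a key pair; accept the default path and an empty passphrase
ssh-copy-id root@CDH1        # append the public key to CDH1's authorized_keys
ssh root@CDH1 hostname       # should print the hostname without prompting for a password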
Configure a local SLES repository
mount -o loop SLES-11-SP3-DVD-x86_64-GM-DVD1.iso /media
zypper ar file:///media local-sles
zypper lr
zypper clean
zypper ref
zypper install gcc
Install the apache2 service
zypper install apache2
Start the apache2 service
service apache2 start
Install MySQL
1. zypper install mysql
2. Start MySQL
service mysql start
3. Enter MySQL and change the root password
Type mysql to open the MySQL shell, then run:
mysql>update mysql.user set password=password('123') where user="root";
mysql>flush privileges;
Grant remote access privileges:
mysql>GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123' WITH GRANT OPTION;
mysql>FLUSH PRIVILEGES;
4. Exit MySQL
mysql>quit;
5. Log in again to verify
mysql -uroot -p
6. Make table names case-insensitive
vim /etc/my.cnf
Add the following under the [mysqld] section:
lower_case_table_names=1
7. Restart the MySQL service
service mysql restart
Install the JDK
rpm -ivh jdk
vim /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
source /etc/profile
java -version
Set up the CDH repository
1. Copy the CDH files to /srv/www/htdocs/
cp -r CDH /srv/www/htdocs
The CDH directory contains:
1. 5.13.0
2. cloudera-cdh5.repo
3. RPM-GPG-KEY-cloudera
2. Edit cloudera-cdh5.repo as follows:
[cloudera-cdh5]
# Packages for Cloudera's Distribution for Hadoop, Version 5, on SLES 11 x86_64
name=Cloudera's Distribution for Hadoop, Version 5
baseurl=http://httpserverip/CDH/5.13.0
gpgkey=http://httpserverip/CDH/RPM-GPG-KEY-cloudera
gpgcheck = 1
Contents of RPM-GPG-KEY-cloudera:
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.5 (GNU/Linux)

mQGiBEpBgEURBAC+CL1a6BfVEoKAX1KcOHqq9Z10WdPGOgTM+AtnOVPJdJvIZcDk
YGUmycpaGxY3+xX1x8ZvxNb7WXiei8FMPm4sR/xQC/CF2iS5399tjLJqcDEjdqTV
/whQ4Rrg1JLGaHUjR0YmrOteT71xikEwlCalToxQuhBz7Nz4aBeDDPf9lwCgvG+x
CaOxict+He03g4HNSTZ0T0UEAIxKITpCA6ZvUPoEGhpn+Gt+wJK/ScB0FKCfW8Au
QQZP6tgxDEg0baasT8MxuXXE2+opaaWPTVa64ws7OvbyH5z1xhBOx4qRVBx8bZsF
YQUk/1PBvg6yA4Rmaqi7nTToHatP69/JMLfTyH8sXETMQ8z5T0LAD6a5ELAYBqql
bJWRA/4lkbaGIwkyLcOAop/g0SCERHt66ML1pwdjxvzE2rRKFUbjUbRZsHTqVq5E
BgpcTIeTuRy02yQ+Bh+JaBtYhn0AY5+t7jcCdJeTahS/7RKJPYPiSfbgI6zwpHM9
kX4FT+0yDgnVF1H/h9p19Uv/3ahIgt7op/M1eAdH0/eP6Dv04rQnWXVtIE1haW50
YWluZXIgPHdlYm1hc3RlckBjbG91ZGVyYS5jb20+iGAEExECACAFAkpBgEUCGwMG
CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRD5DA2P6PhqzRo1AKCIHNWJSd7OipbZ
qp58f/BWaIBlDACggNRH4Hvg92t3xtwYFdohRWF2Xbi5Ag0ESkGARxAIAMaPPGfQ
vsLkyLyM3ePtkkHi0bew0XGW1CYxWOZLMu8wnJgMHpfPD2dLgp6PEh+zpi2SM1ie
QGAW6K040TSuC9P+LcZB7SxanIE7lONHjz7spGQift30WFZcaIgF+MuyZIihNh7v
tZ9ip8JZYPA88XRNU1CKuXx4r8iCDJ4ICksFKeOwQUuzf/IRJapzEZ0ixfVTwx91
yG10TvHK63BRLXYHBML4Og9FaPZgFq2N9Yz4Wpu/Pn6tjZAMeSJXm2qNO2PSoTC/
kapubpMwSmOBlZqrHi9lcIWricXE9dcyaGVRAf3CJRlX4ZNuwcQjyks5BFibU3/z
qlzP6KgwTgDmaaMAAwUH/04KRM3k6Ow2KkDt2BKWveOI24mkIQahUJ7/iZlKsL27
3VcGQZ7jU28GT0FH9iYeAgbpLrrEuDAFZpGm9RoOVJGnxWX3DVL1+qkiS56pXfU+
8atZlkCGx09IilJgf0ATlmYxbTtYliTRPK4lQYOfNB1v23bdlBwISjcDRkWu22ao
atSBzr/FARL6fdZZqp2qfWOmcteiLagioo6s0ogxKNQH5PldUQy9n2W/oOXss5sC
lnUNvzKlzzx/pFkT8ZUAvuLY0v8gykk586vbjiuPkg8uAOBhtnsSWwJ6nEPaRCnu
iwlqGxgXmnJ7UMzOimkuf0XvqavhkMEEAqRJkNLyWVuISQQYEQIACQUCSkGARwIb
DAAKCRD5DA2P6PhqzUV2AJ0eV3C407Y3Xi4d27clLsz/wW0HMgCghcxCmiOT2kWH
6Ya7d9nkKz2UM+Y=
=+VR8
-----END PGP PUBLIC KEY BLOCK-----
3. Download cloudera-cdh5.repo from the HTTP server
wget http://httpserverip/CDH/cloudera-cdh5.repo
4. Copy cloudera-cdh5.repo to /etc/zypp/repos.d/
cp cloudera-cdh5.repo /etc/zypp/repos.d
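After copying the repo file, the repository presumably needs to be refreshed (and its GPG key accepted) before CDH packages can be installed; this command is not in the original notes:
zypper ref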
Install CDH
Install HDFS and YARN
- Install the NameNode, ResourceManager, JobHistory Server, and WebAppProxy:
zypper install hadoop-hdfs-namenode hadoop-yarn-resourcemanager hadoop-mapreduce-historyserver hadoop-yarn-proxyserver
- Install the DataNode and NodeManager:
zypper install hadoop-hdfs-datanode hadoop-yarn-nodemanager
3. Edit the four configuration files under /etc/hadoop/conf
3.1 core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://CDH1:8020/</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/hadoop/log</value>
</property>
</configuration>
3.2 hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/mnt/drbd/hdfs/dfs/name</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>hadoop</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/mnt/diskb/dfs</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
3.3 mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>CDH1:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>CDH1:19888</value>
</property>
<property>
<name>mapreduce.jobhistory.intermediate-done-dir</name>
<value>/user/history/done_intermediate</value>
</property>
<property>
<name>mapreduce.jobhistory.done-dir</name>
<value>/user/history/done</value>
</property>
</configuration>
3.4 yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<description>List of directories to store localized files in.</description>
<name>yarn.nodemanager.local-dirs</name>
<value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
</property>
<property>
<description>Where to store container logs.</description>
<name>yarn.nodemanager.log-dirs</name>
<value>/var/log/hadoop-yarn/containers</value>
</property>
<property>
<description>Where to aggregate logs to.</description>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>hdfs://CDH1/var/log/hadoop-yarn/apps</value>
</property>
<property>
<description>Classpath for typical applications.</description>
<name>yarn.application.classpath</name>
<value>
$HADOOP_CONF_DIR,
$HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,
$HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,
$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,
$HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*
</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>CDH1:8050</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>CDH1:8030</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>CDH1:8141</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>CDH1:8088</value>
</property>
<property>
<name>yarn.web-proxy.address</name>
<value>CDH1:19999</value>
</property>
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/user</value>
</property>
<property>
<name>mapred.job.tracker</name>
<value>CDH1</value>
</property>
</configuration>
4. Create the local HDFS directories and set ownership and permissions
mkdir -p /mnt/drbd/hdfs/dfs/name
chown -R hdfs:hdfs /mnt/drbd/hdfs/dfs/name
chmod 700 /mnt/drbd/hdfs/dfs/name
mkdir -p /var/hadoop/log/dfs/namesecondary
chown -R hdfs:hdfs /var/hadoop/log/dfs/namesecondary
mkdir -p /mnt/diskb/dfs
chown -R hdfs:hdfs /mnt/diskb/dfs
5. Format the NameNode
su - hdfs
hdfs namenode -format
6. Start the NameNode (as root)
Note: from CDH 5.7 on, the Java environment variable has to be configured in /etc/default/hadoop.
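A minimal sketch of that setting, assuming the JDK path exported in /etc/profile above:
# /etc/default/hadoop
export JAVA_HOME=/usr/java/jdk1.8.0_111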
service hadoop-hdfs-namenode start
7. Create the required directories on HDFS
su - hdfs
hadoop fs -mkdir -p /tmp
hadoop fs -chmod -R 1777 /tmp
hadoop fs -mkdir -p /var/log/hadoop-yarn
hadoop fs -chown yarn:mapred /var/log/hadoop-yarn
hadoop fs -mkdir -p /user/history
hadoop fs -chmod -R 1777 /user/history
hadoop fs -mkdir -p /user/history/done_intermediate
hadoop fs -chown mapred:hadoop /user/history/done_intermediate
hadoop fs -chmod -R 1777 /user/history/done_intermediate
hadoop fs -mkdir -p /user/history/done
hadoop fs -chown mapred:hadoop /user/history/done
hadoop fs -chmod -R 750 /user/history/done
hadoop fs -mkdir -p /user/mapreduce
hadoop fs -chown mapreduce /user/mapreduce
8. Start the other services
service hadoop-yarn-resourcemanager start
service hadoop-mapreduce-historyserver start
service hadoop-yarn-proxyserver start
service hadoop-hdfs-journalnode start
service hadoop-hdfs-secondarynamenode start
service hadoop-hdfs-datanode start
9. Check that the services are running
service --status-all | grep -i hadoop
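As an additional check (an assumption, not in the original notes), the NameNode web UI should answer on its default HTTP port:
curl http://CDH1:50070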
Install Hive
1. Install Hive
zypper install hive
2. Configure Hive
Copy the MySQL JDBC driver jar into /usr/lib/hive/lib.
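For example (the jar name is illustrative; use whichever MySQL Connector/J jar is actually available):
cp mysql-connector-java-*.jar /usr/lib/hive/lib/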
3. Create the metastore database in MySQL
mysql -uroot -p123
CREATE DATABASE metastore;
USE metastore;
SOURCE /usr/lib/hive/scripts/metastore/upgrade/mysql/hive-txn-schema-0.13.0.mysql.sql;
SOURCE /usr/lib/hive/scripts/metastore/upgrade/mysql/hive-schema-1.1.0.mysql.sql;
4. Edit the Hive configuration file (hive-site.xml)
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://CDH1/metastore</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>true</value>
</property>
<property>
<name>datanucleus.fixedDatastore</name>
<value>true</value>
</property>
</configuration>
5. Create the Hive user directory on HDFS
su - hdfs
hdfs dfs -mkdir /user/hive
hdfs dfs -chmod a+w /user/hive
6. Install hive-metastore
zypper install hive-metastore
7. Install hive-server2
zypper install hive-server2
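The original notes do not show starting these services; presumably they are started the same way as the other CDH services:
service hive-metastore start
service hive-server2 start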
Install ZooKeeper
1. Install ZooKeeper
zypper install zookeeper-server
2. Configure ZooKeeper
Edit the zoo.cfg file and append the following line at the end:
server.1=CDH1:2888:3888
3. Initialize ZooKeeper
/etc/init.d/zookeeper-server init --myid=1
4. Add the Java environment variable to /usr/lib/zookeeper/bin/zkServer.sh.
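A minimal sketch of that change, assuming the same JDK path as in /etc/profile:
# near the top of /usr/lib/zookeeper/bin/zkServer.sh
export JAVA_HOME=/usr/java/jdk1.8.0_111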
5. Start ZooKeeper
service zookeeper-server start
6. Check that ZooKeeper is running
service --status-all | grep zookeeper
Install HBase
1. Install the HBase Master and RegionServer
zypper install hbase-master hbase-regionserver
2. Edit hbase-site.xml
<configuration>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.rootdir</name>
<value>hdfs://CDH1:8020/hbase</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>CDH1</value>
</property>
<property>
<name>hbase.hregion.majorcompaction.cron</name>
<value>0 0 1 * * ?</value>
</property>
</configuration>
3. Create the HBase directory on HDFS
su - hdfs
hdfs dfs -mkdir /hbase
hdfs dfs -chown hbase /hbase
4. Start the services:
service hbase-master start
service hbase-regionserver start
5. Check that the services are running
service --status-all | grep -i hbase
Configure Spark
1. Install Spark
zypper install spark-master spark-worker spark-history-server
2. Edit the configuration files (a spark-defaults.conf sketch follows this list):
cd /etc/spark/conf
slaves
spark-defaults.conf
spark-env.sh
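A minimal sketch of spark-defaults.conf entries for event logging and the history server, assuming the CDH1:8020 NameNode address from core-site.xml and the applicationHistory directory created in the next step:
spark.eventLog.enabled          true
spark.eventLog.dir              hdfs://CDH1:8020/user/spark/applicationHistory
spark.history.fs.logDirectory   hdfs://CDH1:8020/user/spark/applicationHistory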
3. Create the Spark directories on HDFS
su - hdfs
hdfs dfs -mkdir -p /user/spark/log
hdfs dfs -mkdir -p /user/spark/applicationHistory
4. Start the services:
service spark-master start
service spark-worker start
service spark-history-server start
5. Check that Spark is running
service --status-all | grep spark
Configure Kafka
1. Install Kafka
The RPMs are under /opt/kafka/3.0.0/RPMS/noarch:
rpm -ivh kafka-0.11.0+kafka3.0.0-1.3.0.0.p0.50.sles11.noarch.rpm kafka-mirror-maker-0.11.0+kafka3.0.0-1.3.0.0.p0.50.sles11.noarch.rpm kafka-server-0.11.0+kafka3.0.0-1.3.0.0.p0.50.sles11.noarch.rpm
2. Edit the configuration file (a server.properties sketch follows):
cd /etc/kafka/conf
server.properties
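A minimal sketch of the server.properties values that matter for this single-node setup (broker.id and log.dirs are assumptions; zookeeper.connect points at the ZooKeeper instance configured above):
broker.id=0
log.dirs=/var/local/kafka/data
zookeeper.connect=CDH1:2181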
3. Start the service:
service kafka-server start
4. Check that Kafka is running
service --status-all | grep kafka