Hadoop Pseudo-Distributed Installation over SSH

I did the whole install remotely over SSH, so the first step is to create a user named hadoop on the Linux host (192.168.0.104 in the commands below). The software versions are visible in the commands themselves: JDK 8u221, Hadoop 3.1.2, Hive 3.1.1, and MySQL 8.0.17. I have run through these steps myself and they worked end to end; ask me if anything is unclear.

scp /mnt/d/安装/linux/jdk-8u221-linux-x64.tar.gz hadoop@192.168.0.104:/home/hadoop/Desktop

ssh hadoop@192.168.0.104

cd /usr/lib/

sudo mkdir jvm

cd ~

cd Desktop

sudo tar -zxvf ./jdk-8u221-linux-x64.tar.gz -C /usr/lib/jvm

vim ~/.bashrc
'''
export JAVA_HOME=/usr/lib/jvm/jdk1.8.0_221
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
'''

source ~/.bashrc

java -version
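
If JAVA_HOME and PATH are set correctly, the first line of the output should name the version from the tarball:
'''
java version "1.8.0_221"
'''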

exit

scp /mnt/d/安装/linux/hadoop-3.1.2.tar.gz hadoop@192.168.0.104:/home/hadoop/Desktop

ssh hadoop@192.168.0.104

sudo tar -zxf ~/Desktop/hadoop-3.1.2.tar.gz -C /usr/local

cd /usr/local/

sudo mv ./hadoop-3.1.2/ ./hadoop

sudo chown -R hadoop ./hadoop

cd /usr/local/hadoop

./bin/hadoop version

cd /usr/local/hadoop/etc/hadoop/

sudo vim core-site.xml
'''
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/tmp</value>
        <description>Abase for other temporary directories.</description>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:9000</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hadoop.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hadoop.groups</name>
        <value>*</value>
    </property>
</configuration>
'''
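
The two hadoop.proxyuser.hadoop.* properties allow the hadoop user to impersonate other users; hiveserver2 relies on this when remote clients connect later, so it is worth setting them now.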

sudo vim hdfs-site.xml
'''
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/data</value>
    </property>
</configuration>
'''

/usr/local/hadoop/bin/hdfs namenode -format

cd ~/.ssh/

ssh-keygen -t rsa -P ""#press Enter to accept the default key file location

cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
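
start-dfs.sh logs in to localhost over SSH, so it is worth confirming that passwordless login works before continuing:

chmod 600 $HOME/.ssh/authorized_keys#ssh may ignore the key if this file is group/world writable
ssh localhost#should log in without asking for a password; type exit to return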

/usr/local/hadoop/sbin/start-dfs.sh

jps
'''
SecondaryNameNode
DataNode
Jps
NameNode
'''
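
As an optional sanity check, create the HDFS home directory for the hadoop user and list it; Hadoop 3 also serves the NameNode web UI on port 9870:

/usr/local/hadoop/bin/hdfs dfs -mkdir -p /user/hadoop
/usr/local/hadoop/bin/hdfs dfs -ls /user
#NameNode web UI: http://192.168.0.104:9870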

exit

scp /mnt/d/安装/linux/apache-hive-3.1.1-bin.tar.gz hadoop@192.168.0.104:/home/hadoop/Desktop

ssh hadoop@192.168.0.104

cd ~

cd Desktop

sudo tar -zxvf ./apache-hive-3.1.1-bin.tar.gz -C /usr/local

cd /usr/local/

sudo mv apache-hive-3.1.1-bin hive

vim ~/.bashrc
'''
export HIVE_HOME=/usr/local/hive
export PATH=$PATH:$HIVE_HOME/bin
'''

source ~/.bashrc

cd /usr/local/hive/conf

sudo mv hive-default.xml.template hive-default.xml

sudo touch hive-site.xml

sudo vim hive-site.xml
'''
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.cj.jdbc.Driver</value>
        <description>Driver class name for a JDBC metastore</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>hive</value>
        <description>username to use against metastore database</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>hive</value>
        <description>password to use against metastore database</description>
    </property>
</configuration>
'''

exit

scp /mnt/d/安装/linux/mysql-server_8.0.17-1ubuntu18.04_amd64.deb-bundle.tar hadoop@192.168.0.104:/home/hadoop/Desktop

ssh hadoop@192.168.0.104

cd ~/Desktop

mkdir mysql

tar -xvf ~/Desktop/mysql-server_8.0.17-1ubuntu18.04_amd64.deb-bundle.tar -C ~/Desktop/mysql

cd mysql

sudo apt-get update
sudo apt-get install libaio1 libmecab2

sudo dpkg -i mysql-common_8.0.17-1ubuntu18.04_amd64.deb
sudo dpkg -i libmysqlclient21_8.0.17-1ubuntu18.04_amd64.deb
sudo dpkg -i libmysqlclient-dev_8.0.17-1ubuntu18.04_amd64.deb
sudo dpkg -i mysql-community-client-core_8.0.17-1ubuntu18.04_amd64.deb

sudo dpkg -i mysql-community-client_8.0.17-1ubuntu18.04_amd64.deb
sudo dpkg -i mysql-client_8.0.17-1ubuntu18.04_amd64.deb
sudo dpkg -i mysql-community-server-core_8.0.17-1ubuntu18.04_amd64.deb
sudo dpkg -i mysql-community-server_8.0.17-1ubuntu18.04_amd64.deb
#During the mysql-community-server install you are prompted for a root password (I used 123)
#When asked about the default authentication plugin, choose the legacy option (MySQL 5.x compatible)
#Not sure whether this one is needed; I have not installed it: sudo dpkg -i mysql-server_8.0.17-1ubuntu18.04_amd64.deb
#To restart the server later: sudo service mysql restart
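
To confirm that the server came up after the install:

sudo service mysql status#should report that MySQL is active/running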

exit

scp /mnt/d/安装/linux/mysql-connector-java_8.0.17-1ubuntu18.04_all.deb hadoop@192.168.0.104:/home/hadoop/Desktop

ssh hadoop@192.168.0.104

cd ~/Desktop

sudo dpkg -i mysql-connector-java_8.0.17-1ubuntu18.04_all.deb

sudo mv /usr/share/java/mysql-connector-java-8.0.17.jar /usr/local/hive/lib
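
A quick check that Hive can now see the driver:

ls /usr/local/hive/lib/mysql-connector-java-8.0.17.jar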

mysql -u root -p#enter the root password set during installation (123)

#The user name and password here must match ConnectionUserName and ConnectionPassword in hive-site.xml
create user 'hive' identified by 'hive';

grant all privileges on *.* to 'hive' with grant option;

flush privileges;

create database hive;

quit;
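
Optionally, verify that the new account works before touching Hive (the password is the hive one created above):

mysql -u hive -p -e "show databases;"#the hive database should appear in the list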

cd /usr/local/hive/conf/

sudo cp hive-env.sh.template hive-env.sh

sudo vim hive-env.sh
'''
export HADOOP_HOME=/usr/local/hadoop/
'''

source hive-env.sh#optional in the current shell; Hive's launch scripts source this file automatically

#Check with jps; if the Hadoop pseudo-distributed cluster is not running, start it: /usr/local/hadoop/sbin/start-dfs.sh
#Hive 3.x does not create the metastore schema on its own, so initialize it before launching Hive for the first time
schematool -dbType mysql -initSchema

hive

quit;
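
A non-interactive smoke test of the metastore connection, if you want one:

hive -e "show databases;"#should print at least the default database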


How to use it from here on:
/usr/local/hadoop/sbin/start-dfs.sh#start HDFS
/usr/local/hadoop/sbin/start-yarn.sh#start YARN
/usr/local/hive/bin/hive --service hiveserver2#needed only if you want to connect to Hive remotely
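
hiveserver2 listens on port 10000 by default, so a remote client can connect with beeline (the -n user here is just the Linux user these commands run as):

/usr/local/hive/bin/beeline -u jdbc:hive2://localhost:10000 -n hadoop

To shut everything down:
/usr/local/hadoop/sbin/stop-yarn.sh
/usr/local/hadoop/sbin/stop-dfs.sh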
