Source: Keduo Big Data (科多大数据)
Many students who already know a bit about big data are still unfamiliar with its everyday commands. Below is a summary of the commands most frequently used when learning Hadoop; follow along with the Keduo Big Data instructors.
Disable the firewall on every server:
systemctl daemon-reload    (run on the master node)
systemctl stop firewalld
Delete and recreate the data directories:
mkdir /opt/tmp
rm -fr /usr/hadoop/name
rm -fr /usr/hadoop/data
mkdir /usr/hadoop/name
mkdir /usr/hadoop/data
Format the NameNode:
hdfs namenode -format
Start HDFS and YARN:
/usr/hadoop/sbin/start-dfs.sh
/usr/hadoop/sbin/start-yarn.sh
Stop YARN and HDFS:
/usr/hadoop/sbin/stop-yarn.sh
/usr/hadoop/sbin/stop-dfs.sh
(or cd /usr/hadoop/sbin and run the scripts from there)
Leave safe mode:
hdfs dfsadmin -safemode leave    (run on the master node, only after Hadoop has started)
sbin/hadoop-daemon.sh start secondarynamenode    (start the SecondaryNameNode)
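Before forcing safe mode off it can be worth checking whether the NameNode is still in it; the get subcommand of the same dfsadmin tool (not in the original notes) reports the current state:
hdfs dfsadmin -safemode get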
Start ZooKeeper
4.1 Start ZooKeeper on every server:
cd /usr/zookeeper
/usr/zookeeper/bin/zkServer.sh start
4.2 After every machine has been started, check the status:
cd /usr/zookeeper
/usr/zookeeper/bin/zkServer.sh status    (or bin/zkServer.sh status from that directory)
This verifies that startup succeeded: the three machines elect one leader and the other two become followers.
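On a healthy ensemble the status output ends with a Mode line; it looks roughly like the following (the config path depends on the install):
ZooKeeper JMX enabled by default
Using config: /usr/zookeeper/bin/../conf/zoo.cfg
Mode: leader    (or Mode: follower on the other two machines)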
Start HBase
start-hbase.sh    (no full path is needed because the environment variables are set; run on the master node only)
Start Spark
cd /usr/spark/sbin
./start-all.sh
Start Hive
Start the metastore first:
hive --service metastore    (this must be running before you launch hive; then open a new session window)
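If you would rather not keep a terminal tied up by the metastore, a common pattern (an assumption about your setup, not part of the original notes) is to push it into the background and log to a file:
nohup hive --service metastore > /tmp/hive-metastore.log 2>&1 &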
Log in to MySQL:
mysql -u root -p'Mysql5718%'
(note: no space after -p, otherwise MySQL prompts for the password and treats the next argument as a database name)
Force-delete a folder:
rm -fr /opt/spark
Change the hostname (run on the corresponding machine):
[root@bogon ~]# hostname slave1
[root@bogon ~]# hostname slave2
[root@bogon ~]# hostname slave3
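The hostname command only renames the machine until the next reboot. On CentOS 7 (which the systemctl commands above imply) a persistent change would typically be made with hostnamectl, for example:
hostnamectl set-hostname slave1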
hive> set -v;    (list all Hive and Hadoop configuration variables)
Change the system time:
date -s 14:24:00
6. View logs:
cat /usr/hadoop/logs/hadoop-root-datanode-slave1.log
7. MySQL password: Mysql5718%
Connect: mysql -u root -p
Grant remote access:
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'Mysql5718%' WITH GRANT OPTION;
FLUSH PRIVILEGES;
GRANT ALL PRIVILEGES ON *.* TO 'root'@'master' IDENTIFIED BY '12345678' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '12345678' WITH GRANT OPTION;
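To confirm which hosts root can now connect from, the user table can be inspected from the mysql prompt (a quick sanity check, not in the original notes):
SELECT user, host FROM mysql.user;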
Initialize the Hive metastore schema in MySQL:
schematool -dbType mysql -initSchema
# reboot             # reboot the host
# shutdown -h now    # shut down immediately
# poweroff           # power off
*********************************
Important files
vi /etc/hosts
cd /etc/sysconfig
vi network    (i.e. vi /etc/sysconfig/network)
cd /usr/hadoop/etc/hadoop/
vi yarn-site.xml
vi hdfs-site.xml
vi core-site.xml
vi mapred-site.xml
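Of these, core-site.xml is where the HDFS entry point is declared; a minimal sketch (the property name is standard Hadoop 2.x, the host/port value here is only illustrative) looks like:
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000</value>
  </property>
</configuration>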
******************************
1. Check the MySQL version
Method 1: status;
Method 2: select version();
2. Common commands to start, stop, and restart MySQL
service mysqld start
service mysqld stop
service mysqld restart
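On a systemd-based system (which the systemctl commands earlier in these notes suggest), the equivalents are:
systemctl start mysqld
systemctl stop mysqld
systemctl restart mysqld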
*********************************
Remote copy
scp -r /usr/hadoop root@192.168.50.131:/usr/
*********************************
Upload local files to HDFS
[root@master bin]# hadoop fs -put /usr/hadoop/file/file1.txt /usr/hadoop/input
hadoop fs -put /usr/spark/lib/spark-* /spark-jars
*********************************
Run a Java jar (jar path, program name, input path, output path):
[root@master sbin]# hadoop jar /usr/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar wordcount /input /output
*********************************
View the output
[root@master sbin]# hadoop fs -cat /usr/hadoop/output1/*
hadoop fs -ls /spark-jars
hdfs dfs -ls /spark-jars
*********************************
Build Spark
./dev/make-distribution.sh --name "hadoop2-without-hive" --tgz "-Pyarn,hadoop-provided,hadoop-2.7,parquet-provided,-Dscala-2.11" -rf :spark-mllib-local_2.11
./dev/make-distribution.sh --name "hadoop2-without-hive" --tgz "-Pyarn,hadoop-provided,hadoop-2.7,parquet-provided,-Dscala-2.11" -rf :spark-hive_2.11
./dev/make-distribution.sh --name "hadoop2-without-hive" --tgz "-Pyarn,hadoop-provided,hadoop-2.7,parquet-provided,-Dscala-2.11" -rf :spark-repl_2.11
*********************************
Build Hive
mvn clean install -Phadoop-2,dist -DskipTests -Dhadoop-23.version=2.7.1 -Dspark.version=2.0.3
mvn clean install -Phadoop-2,dist -DskipTests
The following command produces hive_code_source/packaging/target/apache-hive-2.1.1-bin.tar.gz:
mvn clean package -Pdist -Dmaven.test.skip=true
*********************************
Edit the settings.xml file in Maven's conf folder to set the Maven local repository:
<localRepository>/home/devil/maven_repos</localRepository>
mvn clean install -DskipTests -X
*********************************
Check the Hive and HBase versions:
hive --version
hbase shell
*****************************
Copy files without prompting:
yes | cp -fr /opt/hive211/conf/* /opt/hive2.1.1/conf
cp /usr/spark/jars/scala-* /opt/hive2.1.1/lib
***************
spark-shell
cd /usr/spark/bin
**************
netstat -tunlp | grep 4040
netstat -tunlp | grep java
*****************
./bin/spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode client lib/spark-examples-1.6.3-hadoop2.4.0.jar 10
*****************************
Getting the PID of a named process with ps
2013/04/12, by 虚伪的灵魂
The steps to obtain the PID of a given process name with ps are as follows:
1. Print every process with its name and PID:
ps -ef
This produces output along these lines:
UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 09:01 ?        00:00:00 /sbin/init
root         2     0  0 09:01 ?        00:00:00 [kthreadd]
root         3     2  0 09:01 ?        00:00:00 [ksoftirqd/0]
root         5     2  0 09:01 ?        00:00:00 [kworker/u:0]
root         6     2  0 09:01 ?        00:00:00 [migration/0]
root         7     2  0 09:01 ?        00:00:00 [watchdog/0]
root         8     2  0 09:01 ?        00:00:00 [migration/1]
root        10     2  0 09:01 ?        00:00:00 [ksoftirqd/1]
root        12     2  0 09:01 ?        00:00:00 [watchdog/1]
2. Filter for the target process name:
ps -ef | grep mysqld
which gives something like:
mysql      841     1  0 09:01 ?        00:00:02 /usr/sbin/mysqld
xwsoul    4532  4205  0 11:16 pts/0    00:00:00 grep --color=auto mysqld
3. This adds an extra line for our own grep mysqld command, so filter that out as well:
ps -ef | grep mysqld | grep -v 'grep '
which leaves roughly:
mysql      841     1  0 09:01 ?        00:00:02 /usr/sbin/mysqld
4. Use awk to print the PID:
ps -ef | grep mysqld | grep -v 'grep ' | awk '{print $2}'
which outputs something like:
841
Similarly, to get the parent process ID (PPID):
ps -ef | grep mysqld | grep -v 'grep ' | awk '{print $3}'
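As a shorter alternative, pgrep (part of procps, installed on virtually every Linux distribution) collapses the whole pipeline into one command:
pgrep mysqld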
****************************
Building hive 2.1.1 and spark 2.0.2
1) spark
# Prerequisites: mvn-3.3.9, jdk-1.8
# wget http://mirror.bit.edu.cn/apache/spark/spark-2.0.2/spark-2.0.2.tgz    --- download the source (for Hive on Spark, hive 2.1.1 pairs with spark 1.6.0)
# tar zxvf spark-2.0.2.tgz    --- extract
# cd spark-2.0.2/dev
## Change the MVN path in make-distribution.sh to $M2_HOME/bin/mvn    --- check and install the mvn version required by pom.xml
## cd ..    --- switch back to spark-2.0.2
# ./dev/change-scala-version.sh 2.11    --- change the Scala version (not needed for versions below 2.11)
# ./dev/make-distribution.sh --name "hadoop2-without-hive" --tgz "-Pyarn,hadoop-provided,hadoop-2.7,parquet-provided"    --- the distribution is generated in the root directory
2) hive
wget http://mirror.bit.edu.cn/apache/hive/hive-2.3.2/apache-hive-2.3.2-src.tar.gz
tar -zxf apache-hive-2.1.1-src.tar.gz
mv apache-hive-2.1.1-src hive2_1_1
cd /opt/hive2_1_1
Build hive:
mvn clean package -Pdist -Dmaven.test.skip=true
**************************
Common Hadoop commands
1. List the contents of a directory: hadoop fs -ls [directory]
[root@cdh01 tmp]# hadoop fs -ls -h /tmp
Found 2 items
drwxrwxrwx   - hdfs supergroup          0 2016-01-21 10:24 /tmp/.cloudera_health_monitoring_canary_files
drwx-wx-wx   - hive supergroup          0 2016-01-21 10:02 /tmp/hive
[root@cdh01 tmp]# hadoop fs -ls -h /
Found 2 items
drwxrwxrwx   - hdfs supergroup          0 2016-01-21 10:02 /tmp
drwxrwxr-x   - hdfs supergroup          0 2016-01-21 10:01 /user
2. Store a local folder on Hadoop: hadoop fs -put [local directory] [hadoop directory]
[root@cdh01 /]# mkdir test_put_dir               # create the directory
[root@cdh01 /]# chown hdfs:hadoop test_put_dir   # give the directory to the hadoop user
[root@cdh01 /]# su hdfs                          # switch to the hadoop user
[hdfs@cdh01 /]$ ls
bin boot dev dfs dfs_bak etc home lib lib64 lost+found media misc mnt net opt proc root sbin selinux srv sys test_put_dir tmp usr var wawa.txt wbwb.txt wyp.txt
[hdfs@cdh01 /]$ hadoop fs -put test_put_dir /
[hdfs@cdh01 /]$ hadoop fs -ls /
Found 4 items
drwxr-xr-x   - hdfs supergroup          0 2016-01-21 11:07 /hff
drwxr-xr-x   - hdfs supergroup          0 2016-01-21 15:25 /test_put_dir
drwxrwxrwt   - hdfs supergroup          0 2016-01-21 10:39 /tmp
drwxr-xr-x   - hdfs supergroup          0 2016-01-21 10:39 /user
3. Create a new directory under a given Hadoop path: hadoop fs -mkdir [directory]
[root@cdh01 /]# su hdfs
[hdfs@cdh01 /]$ hadoop fs -mkdir /hff
4. Create an empty file under a given Hadoop path with the touchz command:
[hdfs@cdh01 /]$ hadoop fs -touchz /test_put_dir/test_new_file.txt
[hdfs@cdh01 /]$ hadoop fs -ls /test_put_dir
Found 1 items
-rw-r--r--   3 hdfs supergroup          0 2016-01-21 15:29 /test_put_dir/test_new_file.txt
5. Store a local file on Hadoop: hadoop fs -put [local path] [hadoop directory]
[hdfs@cdh01 /]$ hadoop fs -put wyp.txt /hff                            # plain HDFS path
[hdfs@cdh01 /]$ hadoop fs -put wyp.txt hdfs://cdh01.cap.com:8020/hff   # full server URI
Note: the file wyp.txt sits in the local / root directory, which looks like:
bin   dfs_bak  lib64       mnt   root     sys           var
boot  etc      lost+found  net   sbin     test_put_dir  wawa2.txt
dev   home     media       opt   selinux  tmp           wbwb.txt
dfs   lib      misc        proc  srv      usr           wyp.txt
6. Display an existing file: hadoop fs -cat [file_path]
[hdfs@cdh01 /]$ hadoop fs -cat /hff/wawa.txt
1张三 男 135
2刘丽 女 235
3王五 男 335
7. Rename a file on Hadoop: hadoop fs -mv [old name] [new name]
[hdfs@cdh01 /]$ hadoop fs -mv /tmp /tmp_bak   # rename the folder
8. Download a file from Hadoop into an existing local directory: hadoop fs -get [hadoop path] [local directory]
[hdfs@cdh01 /]$ hadoop fs -get /hff/wawa.txt /test_put_dir
[hdfs@cdh01 /]$ ls -l /test_put_dir/
total 4
-rw-r--r-- 1 hdfs hdfs 42 Jan 21 15:39 wawa.txt
9. Delete a file on Hadoop: hadoop fs -rm [file path]
[hdfs@cdh01 /]$ hadoop fs -ls /test_put_dir/
Found 2 items
-rw-r--r--   3 hdfs supergroup          0 2016-01-21 15:41 /test_put_dir/new2.txt
-rw-r--r--   3 hdfs supergroup          0 2016-01-21 15:29 /test_put_dir/test_new_file.txt
[hdfs@cdh01 /]$ hadoop fs -rm /test_put_dir/new2.txt
16/01/21 15:42:24 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 1440 minutes, Emptier interval = 0 minutes.
Moved: 'hdfs://cdh01.cap.com:8020/test_put_dir/new2.txt' to trash at: hdfs://cdh01.cap.com:8020/user/hdfs/.Trash/Current
[hdfs@cdh01 /]$ hadoop fs -ls /test_put_dir/
Found 1 items
-rw-r--r--   3 hdfs supergroup          0 2016-01-21 15:29 /test_put_dir/test_new_file.txt
10. Delete a directory on Hadoop (including its subdirectories): hadoop fs -rm -r [directory]
[hdfs@cdh01 /]$ hadoop fs -rmr /test_put_dir
16/01/21 15:50:59 INFO fs.TrashPolicyDefault: Namenode trash configuration: Deletion interval = 1440 minutes, Emptier interval = 0 minutes.
Moved: 'hdfs://cdh01.cap.com:8020/test_put_dir' to trash at: hdfs://cdh01.cap.com:8020/user/hdfs/.Trash/Current
[hdfs@cdh01 /]$ hadoop fs -ls /
Found 3 items
drwxr-xr-x   - hdfs supergroup          0 2016-01-21 11:07 /hff
drwxrwxrwt   - hdfs supergroup          0 2016-01-21 10:39 /tmp
drwxr-xr-x   - hdfs supergroup          0 2016-01-21 15:42 /user
11. Merge everything under a Hadoop directory into a single file and download it locally:
hadoop fs -getmerge /user /home/t
12. Kill a running Hadoop job:
hadoop job -kill [job-id]
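To look up the job id first, the running jobs can be listed; on a YARN cluster the yarn application commands are the more current interface (both exist in Hadoop 2.x):
hadoop job -list
yarn application -list
yarn application -kill [application-id]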
13. Sqoop: migrating data from Oracle to HDFS
sqoop list-tables --connect jdbc:oracle:thin:@192.168.78.221:1521:orcl --username scott --password=123456
sqoop list-tables --connect jdbc:oracle:thin:@192.168.90.122:1521:xdc --username pdca --password=XXXXXX
sqoop import --connect jdbc:oracle:thin:@192.168.78.221:1521:orcl --username scott --password=123456 --table EMP -m 1 --target-dir /sqoop --direct-split-size 67108864
sqoop import -m 1 --connect jdbc:mysql://master:3306/mysql --username root --password Mysql5718% --table user --target-dir /user/hdfs/testdata/
./sqoop import --connect jdbc:mysql://master:3306/hive --table TBLS --username root --password Mysql5718% -m 1
sqoop import --append --connect jdbc:oracle:thin:@192.168.78.221:1521:orcl --username scott --password=123456 --table EMP --columns ename --hbase-table hive_hbase_test9 --hbase-row-key id --column-family empinfo
sqoop import --connect jdbc:oracle:thin:@192.168.78.221:1521:orcl --username scott --password=123456 --table EMP --warehouse-dir /user/nanyue/oracletest -m 1
sqoop import --hive-import --connect jdbc:oracle:thin:@192.168.78.221:1521:orcl --username scott --password 123456 --table EMP --hive-database default --hive-table poke1 -m 1
sqoop import --hive-import --connect jdbc:oracle:thin:@192.168.90.122:1521:xdc --username pdca --password XXXXXX --table PDCA_PROJECT_T --hive-database default --hive-table poke1 -m 1
sqoop import --connect jdbc:oracle:thin:@192.168.90.122:1521:xdc --username pdca --password XXXXX --table PDCA_AAB_LOOKUP_T --fields-terminated-by '\t' --hive-drop-import-delims --map-column-java CONTENT=String --hive-import --hive-overwrite --create-hive-table --hive-table poke1 --delete-target-dir;
sqoop import --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --username pdca --password xxxxxx --hive-import --table poke1;
sqoop import --hive-import --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --username pdca --password xxxxxx --table PDCA_MES_LINE_T --hive-database default --hive-table poke1 -m 1
-- import all tables
sqoop import-all-tables --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --username PDCA --password xxxxxx --hive-database DEFAULT -m 1 --create-hive-table --hive-import --hive-overwrite
-- import a single table
sqoop import --hive-import --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --username pdca --password xxxxxx --table PDCA_MES_LINE_T --hive-database default -m 1 --create-hive-table --hive-import --hive-overwrite
-- select specific columns, prompt for the password interactively
sqoop import --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --username pdca -P --table PDCA_MES_LINE_T --columns 'MES_LINE_CODE,MES_LINE_NAME' --create-hive-table --target-dir /opt/hive2.1.1//tmp -m 1 --hive-table PDCA_MES_LINE_T_Test --hive-import -- --default-character-set=utf-8
-- select specific columns, password given on the command line
sqoop import --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --username pdca --password xxxxxx --table PDCA_MES_LINE_T --columns 'MES_LINE_CODE,MES_LINE_NAME' --create-hive-table --target-dir /opt/hive2.1.1//tmp -m 1 --hive-table PDCA_MES_LINE_T_Test1 --hive-import -- --default-character-set=utf-8
***************************************
Import into HBase
sqoop import --connect jdbc:oracle:thin:@10.3.60.123:1521:xdc --table PDCA_MES_LINE_T --hbase-table A --column-family mesline --hbase-row-key MES_LINE_CODE --hbase-create-table --username 'pdca' -P
****************************
Find the HDFS storage path of a Hive table
1. Run hive to open the Hive prompt.
2. Run show databases to list all databases.
3. Run use origin_ennenergy_onecard; to switch to the origin_ennenergy_onecard database.
4. Run show create table M_BD_T_GAS_ORDER_INFO_H; to see the table's storage path on HDFS (the full session is shown below).
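Put together, the session at the hive prompt looks like this (using the database and table named above; the LOCATION line in the final output is the HDFS path):
hive> show databases;
hive> use origin_ennenergy_onecard;
hive> show create table M_BD_T_GAS_ORDER_INFO_H;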
*****************************
Check whether a port is bound
To check the binding status of port 10000 (the default HiveServer2 port):
sudo netstat -nplt | grep 10000
Those are the commands most commonly used when learning Hadoop. Have you mastered them? For more big-data material, visit Keduo Big Data at www.keduox.com.