Using Spark to sync HDFS data across clusters

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
object TestFileCopy {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val conf: SparkConf = new SparkConf()
      .setIfMissing("spark.master", "local[4]")
      .setAppName("Test File Copy App")
    val spark: SparkSession = SparkSession.builder.config(conf).getOrCreate()
    // Get the SparkContext from the SparkSession
    val sc: SparkContext = spark.sparkContext
    // hdfsFileCopy1(sc)
    hdfsFileCopy2(sc)
    sc.stop()
  }
  def hdfsFileCopy1(sc: SparkContext): Unit = {
    // Point the Hadoop configuration at cluster1 before reading the input data
    sc.hadoopConfiguration.addResource("cluster1/core-site.xml")
    sc.hadoopConfiguration.addResource("cluster1/hdfs-site.xml")
    val sourceDataPath = "hdfs://cluster1/tmp/"
    val source: RDD[String] = sc.textFile(sourceDataPath + "aaa.txt")
    source.foreach(println)
    // Then switch the Hadoop configuration to cluster2
    sc.hadoopConfiguration.addResource("cluster2/core-site.xml")
    sc.hadoopConfiguration.addResource("cluster2/hdfs-site.xml")
    val targetDataPath = "hdfs://cluster2/tmp/hdb/"
    source.saveAsTextFile(targetDataPath)
  }
  def hdfsFileCopy2(sc: SparkContext): Unit = {
    // cluster1: register the source HA nameservice and its NameNodes
    sc.hadoopConfiguration.set("fs.defaultFS", "hdfs://cluster1")
    sc.hadoopConfiguration.set("dfs.nameservices", "cluster1")
    sc.hadoopConfiguration.set("dfs.ha.namenodes.cluster1", "namenode98,namenode143")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster1.namenode98", "cdh-nn-01:8020")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster1.namenode143", "cdh-nn-02:8020")
    sc.hadoopConfiguration.set("dfs.client.failover.proxy.provider.cluster1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
    val sourceDataPath = "hdfs://cluster1/tmp/"
    val source: RDD[String] = sc.textFile(sourceDataPath + "aaa.txt")
    source.foreach(println)
    // cluster2: register the target HA nameservice the same way
    sc.hadoopConfiguration.set("fs.defaultFS", "hdfs://cluster2")
    sc.hadoopConfiguration.set("dfs.nameservices", "cluster2")
    sc.hadoopConfiguration.set("dfs.ha.namenodes.cluster2", "namenode424,namenode417")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster2.namenode424", "node-nn-01:8020")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster2.namenode417", "node-nn-02:8020")
    sc.hadoopConfiguration.set("dfs.client.failover.proxy.provider.cluster2", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
    val targetDataPath = "hdfs://cluster2/tmp/hdb/"
    source.saveAsTextFile(targetDataPath)
  }
}
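
Because RDDs are evaluated lazily, saveAsTextFile recomputes the source RDD at write time, after the Hadoop configuration has already been switched to the target cluster. A variant that avoids that ordering question is to register both HA nameservices in the same configuration up front and let the fully qualified hdfs:// URIs decide which cluster each path refers to. The sketch below is my own addition, not part of the original article: the method names (configureBothClusters, hdfsFileCopy3) are hypothetical, it reuses the same example NameNode hostnames as above, and it is written as two extra methods that could sit inside the TestFileCopy object with the same imports.

  // Sketch only: register both logical nameservices in one client configuration.
  def configureBothClusters(sc: SparkContext): Unit = {
    val conf = sc.hadoopConfiguration
    // Listing both nameservices lets hdfs://cluster1 and hdfs://cluster2 resolve at any time.
    conf.set("dfs.nameservices", "cluster1,cluster2")
    // cluster1 (source) NameNodes
    conf.set("dfs.ha.namenodes.cluster1", "namenode98,namenode143")
    conf.set("dfs.namenode.rpc-address.cluster1.namenode98", "cdh-nn-01:8020")
    conf.set("dfs.namenode.rpc-address.cluster1.namenode143", "cdh-nn-02:8020")
    conf.set("dfs.client.failover.proxy.provider.cluster1",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
    // cluster2 (target) NameNodes
    conf.set("dfs.ha.namenodes.cluster2", "namenode424,namenode417")
    conf.set("dfs.namenode.rpc-address.cluster2.namenode424", "node-nn-01:8020")
    conf.set("dfs.namenode.rpc-address.cluster2.namenode417", "node-nn-02:8020")
    conf.set("dfs.client.failover.proxy.provider.cluster2",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
  }

  // Hypothetical hdfsFileCopy3: both clusters are configured before any I/O,
  // so no configuration needs to change between the read and the write.
  def hdfsFileCopy3(sc: SparkContext): Unit = {
    configureBothClusters(sc)
    val source: RDD[String] = sc.textFile("hdfs://cluster1/tmp/aaa.txt")
    source.saveAsTextFile("hdfs://cluster2/tmp/hdb/")
  }

Because the paths are fully qualified, fs.defaultFS does not need to be toggled between the read and the write in this variant.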