Read data from HBase and write it into Elasticsearch (ES). The first batch was roughly 4 million records and took about 4 minutes.
import org.elasticsearch.spark._
import org.elasticsearch.hadoop.cfg.ConfigurationOptions._
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.JavaConversions._
// elasticsearch-hadoop connection and bulk settings passed to saveToEs.
// The declaration of `source` is not shown in the original; a mutable Map[String, String] is assumed.
val source = scala.collection.mutable.Map[String, String]()
source += (ES_NODES -> "127.0.0.1")
source += (ES_PORT -> "9200")
source += (ES_BATCH_SIZE_BYTES -> "5mb")
mediaRDD.map { case (w, r) =>
  var row: String = ""
  var sex: String = ""
  var age: String = ""
  val esMap = scala.collection.mutable.Map[String, String]()
  // First pass over the row's KeyValues: pick up the row key plus the sex_* and age_* qualifiers
  r.list().foreach { keyValue =>
    val q: String = Bytes.toString(keyValue.getQualifier)
    row = Bytes.toString(keyValue.getRow)
    val mArr = q.split("_")
    if (q.startsWith("sex")) sex = mArr(1)
    if (q.startsWith("age")) age = mArr(1)
  }
  // Second pass: for province_* qualifiers, assemble the document to index into ES
  r.list().foreach { keyValue =>
    val q: String = Bytes.toString(keyValue.getQualifier)
    if (q.startsWith("province")) {
      val mArr = q.split("_")
      esMap += ("province" -> mArr(3), "media" -> mArr(4), "sex" -> sex, "age" -> age, "deviceId" -> row)
    }
  }
  esMap
}.saveToEs(s"$index/$t", source)
sc.stop
}
}
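For context, mediaRDD and sc are not defined in the snippet above. The sketch below shows one common way such an RDD of (rowkey, Result) pairs can be read from HBase with Spark's newAPIHadoopRDD and TableInputFormat; it is only an assumption about how mediaRDD was built, and the ZooKeeper quorum address and the table name "media" are placeholders, not values from the original.

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.{SparkConf, SparkContext}

val sc = new SparkContext(new SparkConf().setAppName("hbase-to-es"))

// Scan configuration for the source table; quorum and table name are placeholders
val hbaseConf = HBaseConfiguration.create()
hbaseConf.set("hbase.zookeeper.quorum", "127.0.0.1")
hbaseConf.set(TableInputFormat.INPUT_TABLE, "media")

// Each element is an (ImmutableBytesWritable, Result) pair, matching the (w, r) pattern above
val mediaRDD = sc.newAPIHadoopRDD(
  hbaseConf,
  classOf[TableInputFormat],
  classOf[ImmutableBytesWritable],
  classOf[Result])

TableInputFormat yields one Result per HBase row, so the map over mediaRDD sees all column qualifiers of a device in a single record, which is what the two passes over r.list() rely on.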