Get the quantiles of a column
#Python:
df.approxQuantile("x", [0.5], 0.25)
#Scala:
df.stat.approxQuantile("x", Array(0.5), 0.25)
#The first argument is the column name, the second is the list of quantiles to compute, and the third is the relative error; 0 means no error at all (exact, but more expensive)
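A self-contained sketch (the sample data and column name below are made up for illustration), computing the quartiles of a small DataFrame:
// Sketch: exact quartiles of a toy numeric column (relativeError = 0 means exact)
import spark.implicits._
val sampleDF = Seq(1.0, 2.0, 3.0, 4.0, 5.0).toDF("x")
val quartiles = sampleDF.stat.approxQuantile("x", Array(0.25, 0.5, 0.75), 0.0)
// quartiles is an Array[Double] with one value per requested probability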
Get the rows that are in a_df but not in b_df
a_df.except(b_df)
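A small sketch with toy data showing which rows survive (the column name is made up):
// Sketch: rows present in a_df but not in b_df; note that except also deduplicates
import spark.implicits._
val a_df = Seq(1, 2, 3, 4).toDF("id")
val b_df = Seq(3, 4, 5).toDF("id")
a_df.except(b_df).show()  // keeps id = 1 and id = 2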
Parse a JSON array string column in Spark
//Suppose the string column looks like the examples below
import spark.implicits._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
val egDF = Seq("""[{"text": "哈哈哈哈哈", "no": "hehe"},{"text": "嘻嘻嘻嘻", "no": "2"}]""",
"""[{"text": "噢噢噢噢", "no": "1"},{"text": "嘎嘎嘎嘎", "no": "2"}]""").toDF("value")
val schema = ArrayType(StructType(
Seq(StructField("text", StringType),StructField("no", StringType))
))
val jsonDF = egDF.withColumn("text_no_arr", from_json(col("value"), schema)).
withColumn("text_no", explode(col("text_no_arr"))).select(col("text_no")("text"))
// Example 2: a single JSON object string (not an array)
val egDF = Seq("""{"text": "哈哈哈哈哈", "no": "hehe"}""",
"""{"text": "噢噢噢噢", "no": "1"}""").toDF("value")
val schema = StructType(
Seq(StructField("text", StringType),StructField("no", StringType))
)
val jsonDF = egDF.withColumn("text_no_arr", from_json(col("value"), schema)).
select(col("text_no_arr")("text"))
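If every field of the parsed struct is needed rather than a single one, a star expansion on the struct column should also work; this is a sketch built on the schema above, not part of the original note:
// Sketch: expand all fields of the parsed struct into top-level columns ("text", "no")
val expandedDF = egDF.withColumn("parsed", from_json(col("value"), schema)).
  select("parsed.*")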
When Spark SQL reads Hive tables it often encounters struct-typed columns, which are inconvenient for downstream processing. Below is an example of converting a struct column into a user-defined case class.
//Convert a struct-typed field into a case class
import spark.implicits._
import org.apache.spark.sql.Row
case class Location(lat: Double, lon: Double)
val eg_df = Seq((10, Location(35, 25)), (20, Location(45, 35))).toDF
// Approach 1: pattern match on Row
val eg_ds = eg_df.map { row =>
(row: @unchecked) match {
case Row(a: Int, Row(b: Double, c: Double)) => (a, Location(b, c))
}
}
// Approach 2: positional getters on the struct
val eg_ds2 = eg_df.map(row => {
(row.getInt(0), Location(row.getStruct(1).getDouble(0), row.getStruct(1).getDouble(1)))
})
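When the struct's field names already match the case class fields (as they do here with lat and lon), converting straight to a typed Dataset may be simpler; a sketch, not part of the original note:
// Approach 3 (sketch): let the encoder map the struct column directly onto Location
val eg_ds3 = eg_df.as[(Int, Location)]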
Add an auto-incrementing id column in Spark using the RDD zipWithIndex method
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{LongType, StructField, StructType}
// Add an "id" field on top of the original schema
val schema: StructType = dataframe.schema.add(StructField("id", LongType))
// Convert the DataFrame to an RDD and call zipWithIndex
val dfRDD: RDD[(Row, Long)] = dataframe.rdd.zipWithIndex()
val rowRDD: RDD[Row] = dfRDD.map(tp => Row.merge(tp._1, Row(tp._2)))
// Convert the index-augmented RDD back to a DataFrame
val df2 = spark.createDataFrame(rowRDD, schema)
df2.show()
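A compact end-to-end usage sketch with a toy DataFrame (the data is illustrative):
import spark.implicits._
val toyDF = Seq("a", "b", "c").toDF("name")
val toySchema = toyDF.schema.add(StructField("id", LongType))
val toyRows = toyDF.rdd.zipWithIndex().map { case (row, idx) => Row.merge(row, Row(idx)) }
spark.createDataFrame(toyRows, toySchema).show()
// ids are assigned 0, 1, 2, ... in partition order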
Convert between a Scala case class and a JSON string (Gson)
import com.google.gson.Gson
case class DeviceSeqObj(device_id: String,
text_id_click_seq: String,
text_tag_click_seq: String
)
val pushClickSeqFeatureDF = pushClickSeqFeature(jobConfig, spark).as[DeviceSeqObj].
map(row => {
val device_id = row.device_id
val gson = new Gson
val device_push_seq_feature = gson.toJson(row, classOf[DeviceSeqObj])
(device_id, device_push_seq_feature)
}).toDF("device_id", "feature")
val df = pushClickSeqFeatureDF.map(row => {
val feature = row.getString(row.fieldIndex("feature"))
val gson = new Gson
val device_push_seq_feature = gson.fromJson(feature, classOf[DeviceSeqObj])
device_push_seq_feature
})
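For reference, the same Gson round trip outside Spark (a minimal sketch; the field values are made up):
// Plain Gson round trip, no Spark involved
val gson = new Gson
val obj = DeviceSeqObj("d-001", "t1,t2,t3", "tagA,tagB")
val json = gson.toJson(obj)                           // {"device_id":"d-001",...}
val back = gson.fromJson(json, classOf[DeviceSeqObj]) // back to a DeviceSeqObj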
Spark DAG optimization may reorder execution
val opPushDF = deviceInfo.join(broadcast(opTextDS), "explore").
withColumn("isSelected", isSelectedUDF(col("flow_ratio"))).
where(col("isSelected") === 1)
Because the column the UDF depends on (flow_ratio) exists only in opTextDS, the optimizer pushes the UDF filter below the join: a subset of rows is first selected within opTextDS and only then joined with deviceInfo.
What we actually want is a random selection over each device + text pair.
In this case, pass a redundant column from deviceInfo into the UDF so that it can only be evaluated after the join:
val opPushDF = deviceInfo.join(broadcast(opTextDS), "explore").
withColumn("isSelected", isSelectedUDF(col("device_id"),col("flow_ratio"))).
where(col("isSelected") === 1)
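The original note does not show isSelectedUDF; a plausible definition matching the corrected call could look like the sketch below (purely hypothetical: the selection is a Bernoulli draw on flow_ratio, and the extra device_id argument is ignored by the logic and only forces evaluation after the join):
import org.apache.spark.sql.functions.udf
import scala.util.Random

// Hypothetical UDF: keep a row with probability flow_ratio
val isSelectedUDF = udf((device_id: String, flow_ratio: Double) =>
  if (Random.nextDouble() < flow_ratio) 1 else 0
)
For a random UDF like this, marking it with asNondeterministic() may also be worth considering so the optimizer does not move or re-evaluate it, though the note above relies on the redundant column instead.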