I am trying to implement a typed UDAF that returns a complex type. Somehow Spark cannot infer the type of the result column, falls back to binary, and puts the serialized data there instead. Here is a minimal example reproducing the problem:
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{SparkSession, Encoder, Encoders}

case class Data(key: Int)

// A no-op aggregator: both the buffer and the result are Map[String, Int].
class NoopAgg[I] extends Aggregator[I, Map[String, Int], Map[String, Int]] {
  override def zero: Map[String, Int] = Map.empty[String, Int]
  override def reduce(b: Map[String, Int], a: I): Map[String, Int] = b
  override def merge(b1: Map[String, Int], b2: Map[String, Int]): Map[String, Int] = b1
  override def finish(reduction: Map[String, Int]): Map[String, Int] = reduction
  // Kryo-based encoders serialize the value to a byte array.
  override def bufferEncoder: Encoder[Map[String, Int]] = Encoders.kryo[Map[String, Int]]
  override def outputEncoder: Encoder[Map[String, Int]] = Encoders.kryo[Map[String, Int]]
}

object Question {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local").getOrCreate()
    val sc = spark.sparkContext
    import spark.implicits._

    val ds = sc.parallelize((1 to 10).map(i => Data(i))).toDS()
    val noop = new NoopAgg[Data]().toColumn
    val result = ds.groupByKey(_.key).agg(noop.as("my_sum").as[Map[String, Int]])
    result.printSchema()
  }
}
It prints:
root
 |-- value: integer (nullable = false)
 |-- my_sum: binary (nullable = true)
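I would have expected the schema to describe my_sum as a map column instead. For what it's worth, here is a variant I would expect to work, swapping the Kryo output encoder for Catalyst's reflection-based ExpressionEncoder (this is just a sketch on my part; ExpressionEncoder lives in a catalyst package, so I am not sure it is intended to be used directly):

import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder

// Hypothetical variant of the NoopAgg above, only the output encoder changes.
class MapAgg[I] extends NoopAgg[I] {
  // ExpressionEncoder derives a Catalyst schema via reflection, so the
  // result column should come out as map<string,int> rather than binary.
  override def outputEncoder: Encoder[Map[String, Int]] = ExpressionEncoder[Map[String, Int]]()
}

Is the binary column expected behavior with Encoders.kryo, and if so, what is the intended way to get a properly typed complex result out of an Aggregator?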