package com.scala
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
/**
 * Scala version of groupTopN: for each key in score.txt, find the top scores
 * within that key's group.
 */
object GroupTopN {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("groupByTopN").setMaster("local[1]")
    // create the SparkContext
    val sc = new SparkContext(conf)
    // load the input file into an RDD (a single partition)
    val scores = sc.textFile("score.txt", 1)
    // convert to key-value pairs so the records can be grouped;
    // each line is assumed to look like "<key> <score>", e.g. "class1 90"
    val scoresMap = scores.map(x => {
      val al = x.split(" ")
      (al(0), al(1).toInt)
    })
    // group the scores by key
    val pairs = scoresMap.groupByKey()
    // sort each group's scores in descending order
    val result = pairs.map(x => (x._1, x._2.toList.sorted(Ordering.Int.reverse)))
    // print each key and its top 4 scores; since each list is sorted in
    // descending order, the top 4 are simply the first 4 elements
    // (foreach runs on the executors, so with a local master the output
    // appears in this console)
    result.foreach(x => {
      println("01 " + x._1)
      println("02 " + x._2.take(4))
    })
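
    // Optional sketch (not part of the original flow): groupByKey materializes
    // every value for a key at once, which can be memory-heavy for skewed keys.
    // An aggregateByKey-based variant keeps only a running top N per key;
    // `topN` and `topPerKey` are hypothetical names introduced here.
    // val topN = 4
    // val topPerKey = scoresMap.aggregateByKey(List.empty[Int])(
    //   (acc, v) => (v :: acc).sorted(Ordering.Int.reverse).take(topN),
    //   (a, b) => (a ++ b).sorted(Ordering.Int.reverse).take(topN)
    // )
    // topPerKey.foreach(x => println(x._1 + " top scores: " + x._2))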
    // Second approach (not recommended): build each group's list manually
    // with a mutable variable, then sort it.
    // result.foreach(x => {
    //   var lists2 = List[Int]() // must be a var: :+ returns a new list
    //   for (score <- x._2) {
    //     lists2 = lists2 :+ score
    //   }
    //   println("the collection is: " + lists2.sorted(Ordering.Int.reverse))
    // })
    sc.stop()
  }
}
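
// Usage sketch (the jar name is an assumption): package this class and run it
// locally with spark-submit, with score.txt in the working directory:
//   spark-submit --class com.scala.GroupTopN --master "local[1]" group-top-n.jar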