
UV and PV Computation in Spark

UV (unique visitors, counted here as distinct IP addresses):

Test data:

192.168.33.16,hunter,2017-09-16 10:30:20,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.16,jack,2017-09-16 10:30:40,/a
192.168.33.18,polo,2017-09-16 10:30:50,/b
192.168.33.39,nissan,2017-09-16 10:30:53,/b
192.168.33.39,nissan,2017-09-16 10:30:55,/b
192.168.33.39,nissan,2017-09-16 10:30:58,/c
192.168.33.20,ford,2017-09-16 10:30:54,/c
192.168.33.20,ford,2017-09-16 10:30:54,/c

Test result (the 11 sample records contain 4 distinct IP addresses):

(UV,4)

package com.spark.core

import org.apache.spark.{SparkConf, SparkContext}

object UV {
  System.setProperty("hadoop.home.dir", "D:\\soft\\hadoop\\hadoop-2.9.2")

  def main(args: Array[String]): Unit = {
    // build the SparkConf and SparkContext
    val sparkConf = new SparkConf().setAppName("UV").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    // read the data
    val file = sc.textFile("f:/out/access.txt")
    // split each line on "," and take the first field, the IP address
    val ips = file.map(_.split(",")).map(x => x(0))
    // deduplicate the IPs, then emit ("UV", 1) for each distinct address
    val uvAndOne = ips.distinct().map(x => ("UV", 1))
    // aggregate to produce the final count
    val totalUV = uvAndOne.reduceByKey(_ + _)
    totalUV.foreach(println)
    // optionally save the result
    //    totalUV.saveAsTextFile("d:\\data\\out")
    sc.stop()
  }
}
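On a large log, the distinct() above forces a full shuffle of every IP. If an approximate count is acceptable, the RDD method countApproxDistinct (a HyperLogLog-based estimator built into Spark) avoids materialising the deduplicated set. A minimal sketch, assuming the same access.txt layout (UVApprox is a hypothetical object name):

package com.spark.core

import org.apache.spark.{SparkConf, SparkContext}

object UVApprox {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("UVApprox").setMaster("local[2]"))
    // the first comma-separated field is the IP address
    val ips = sc.textFile("f:/out/access.txt").map(_.split(",")(0))
    // HyperLogLog-based estimate; 0.05 is the target relative standard deviation
    val uv = ips.countApproxDistinct(relativeSD = 0.05)
    println(s"(UV,$uv)")
    sc.stop()
  }
}

On a tiny sample like this the estimate will almost certainly equal the exact count; the accuracy/memory trade-off only matters at scale.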

PV (page views, one per access-log record):

Test result (each of the 11 records counts as one page view):

(pv,11)

package com.spark.core

import org.apache.spark.{SparkConf, SparkContext}

import scala.util.Random

object PV {
  System.setProperty("hadoop.home.dir", "D:\\soft\\hadoop\\hadoop-2.9.2")

  def main(args: Array[String]): Unit = {
    // create the SparkConf and set the appName
    // setMaster("local[2]") runs Spark locally; the 2 means two worker threads
    val sparkConf = new SparkConf().setAppName("PV").setMaster("local[2]")
    // create the SparkContext
    val sc = new SparkContext(sparkConf)
    // read the data
    val file = sc.textFile("f:/out/access.txt")

    // Method 1
    //    // map every input line to ("pv", 1)
    //    val pvAndOne = file.map(x => ("pv", 1))
    //    // aggregate to produce the total
    //    val totalPV = pvAndOne.reduceByKey(_ + _)

    // Method 2 (tuned): salt the key with a random partition-sized suffix,
    // pre-aggregate, then strip the salt and aggregate the partial sums
    val totalPV1 = file.map(x => (s"pv${Random.nextInt(file.getNumPartitions)}", 1)).reduceByKey(_ + _)
    val totalPV = totalPV1.map(tuple => ("pv", tuple._2)).reduceByKey(_ + _)

    totalPV.foreach(println)
    sc.stop()
  }
}
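Method 2 is a two-stage ("salted") aggregation: method 1 funnels every ("pv", 1) pair onto a single key, and therefore a single reducer, while the random suffix spreads the first reduceByKey across up to getNumPartitions keys, leaving the second reduceByKey only a handful of partial sums to merge. When all you need is the grand total, RDD.count() gives the same number without building key/value pairs or shuffling at all. A minimal sketch (PVCount is a hypothetical object name):

package com.spark.core

import org.apache.spark.{SparkConf, SparkContext}

object PVCount {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setAppName("PVCount").setMaster("local[2]"))
    // count() sums the per-partition record counts on the driver, so no shuffle occurs
    val pv = sc.textFile("f:/out/access.txt").count()
    println(s"(pv,$pv)")
    sc.stop()
  }
}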
