
Spark Streaming + Kafka integration: parameter setup and writing message offsets to Redis

2018-02-05 16:13
Kafka is consumed as a Spark Streaming advanced source; the application maintains the offsets itself and writes them to Redis, using a Redis connection pool.

Required dependencies:

<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
<version>2.2.1</version>
</dependency>
<!-- The Jedis client library provides the Java connection to Redis. -->
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.9.0</version>
</dependency>
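The two artifacts above assume spark-core and spark-streaming are already on the project's classpath; if they are not, the matching Spark Streaming artifact would be (same Scala and Spark versions as above):

<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
<version>2.2.1</version>
</dependency>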
Example:

import java.{lang, util}

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

object WCKafkaRedisApp {

// Logger.getLogger("org").setLevel(Level.WARN)

def main(args: Array[String]): Unit = {
  val conf = new SparkConf().setMaster("local[*]").setAppName("xx")
    // Maximum number of messages pulled per second from each Kafka partition
    .set("spark.streaming.kafka.maxRatePerPartition", "100")
    // Use Kryo serialization
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    // Compressing RDDs is recommended
    .set("spark.rdd.compress", "true")
  val ssc = new StreamingContext(conf, Seconds(2))

  // Step 1: Kafka consumer parameters
  val groupId = "test002"
  val kafkaParams = Map[String, Object](
    "bootstrap.servers" -> "hdp01:9092,hdp02:9092,hdp03:9092",
    "key.deserializer" -> classOf[StringDeserializer],
    "value.deserializer" -> classOf[StringDeserializer],
    "group.id" -> groupId,
    "auto.offset.reset" -> "earliest",
    // Disable Kafka's automatic offset commits; offsets live in Redis instead
    "enable.auto.commit" -> (false: lang.Boolean)
  )
  val topics = Array("test")

  // Step 2: restore any offsets previously saved in Redis
  val formdbOffset: Map[TopicPartition, Long] = JedisOffset(groupId)

  // Pull data from Kafka: subscribe from the beginning if no offsets are
  // stored yet, otherwise resume from the saved offsets
  val stream: InputDStream[ConsumerRecord[String, String]] = if (formdbOffset.isEmpty) {
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )
  } else {
    KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Assign[String, String](formdbOffset.keys, kafkaParams, formdbOffset)
    )
  }

  // Process each batch, then persist its offsets
  stream.foreachRDD({ rdd =>
    // Offset ranges of this batch, carried by the Kafka RDD itself
    val offsetRange: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

    // Business logic: word count, accumulated in the Redis hash "wordcount"
    rdd.flatMap(_.value().split(" ")).map((_, 1)).reduceByKey(_ + _).foreachPartition({ it =>
      val jedis = RedisUtils.getJedis
      it.foreach({ v =>
        jedis.hincrBy("wordcount", v._1, v._2.toLong)
      })
      jedis.close()
    })

    // Save the offsets to Redis only after the counts have been written,
    // so a failure in between replays the batch (at-least-once semantics)
    val jedis: Jedis = RedisUtils.getJedis
    for (or <- offsetRange) {
      jedis.hset(groupId, or.topic + "-" + or.partition, or.untilOffset.toString)
    }
    jedis.close()
  })

  ssc.start()
  ssc.awaitTermination()
}
}

......

import java.util

import org.apache.kafka.common.TopicPartition

object JedisOffset {

  // Read the offsets saved in Redis for this consumer group.
  // Returns an empty map when nothing has been stored yet.
  def apply(groupId: String): Map[TopicPartition, Long] = {
    var formdbOffset = Map[TopicPartition, Long]()
    val jedis = RedisUtils.getJedis
    val topicPartitionOffset: util.Map[String, String] = jedis.hgetAll(groupId)
    jedis.close()
    import scala.collection.JavaConversions._
    // Hash fields are "topic-partition" strings; this parsing assumes
    // topic names do not themselves contain a hyphen
    for ((topicPartition, offset) <- topicPartitionOffset.toList) {
      val split: Array[String] = topicPartition.split("[-]")
      formdbOffset += (new TopicPartition(split(0), split(1).toInt) -> offset.toLong)
    }
    formdbOffset
  }
}
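To see the key format in action, here is a quick sketch (a throwaway group id and made-up offset values; RedisUtils is shown below):

object JedisOffsetDemo {
  def main(args: Array[String]): Unit = {
    // Seed the group's hash with hypothetical offsets for topic "test"
    val jedis = RedisUtils.getJedis
    jedis.hset("demo", "test-0", "1024")
    jedis.hset("demo", "test-1", "987")
    jedis.close()

    // Prints Map(test-0 -> 1024, test-1 -> 987), keyed by TopicPartition
    println(JedisOffset("demo"))
  }
}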

....and the creation of the Redis connection pool:
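A minimal sketch of RedisUtils: everything above only needs a getJedis method backed by a shared pool (the host, port, and pool sizes are assumptions to adjust for your environment):

import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}

object RedisUtils {

  // Basic pool settings; the sizes here are illustrative
  private val config = new JedisPoolConfig()
  config.setMaxTotal(20)
  config.setMaxIdle(10)
  config.setTestOnBorrow(true)

  // Host and port are assumptions; point this at your Redis instance
  private val pool = new JedisPool(config, "hdp01", 6379)

  // Borrow a connection from the pool; with Jedis 2.9, close() on the
  // returned Jedis hands it back to the pool instead of closing the socket
  def getJedis: Jedis = pool.getResource
}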