sparksql优化1(小表大表关联优化 & union替换or)
2017-11-07 18:03
579 查看
----原语句(运行18min)
-- Original statement (ran ~18 min). Two defects fixed in review:
--  1. AND binds tighter than OR, so the original predicate parsed as
--     (cc.ip IS NULL AND rate-filter) OR (click_num > 1000), letting ips
--     already present in dstable be re-inserted through the click_num
--     branch. Parentheses now apply the cc.ip IS NULL anti-join to BOTH
--     branches, matching the semantics of the UNION rewrite below.
--  2. The join to aa had no ON clause; aa is a single-row aggregate, so
--     the intent is a cartesian product — spelled as an explicit CROSS JOIN.
INSERT INTO TABLE schema.dstable
SELECT bb.ip
FROM (
    -- per-ip click totals and click-through rate for the day
    SELECT ip,
           sum(click) AS click_num,
           round(sum(click) / sum(imp), 4) AS user_click_rate
    FROM schema.srctable1
    WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
    GROUP BY ip
) bb
CROSS JOIN (
    -- single-row day-wide average click-through rate
    SELECT round(sum(click) / sum(imp), 4) AS avg_click_rate
    FROM schema.srctable1
    WHERE date = '20171020'
) aa
LEFT OUTER JOIN schema.dstable cc ON cc.ip = bb.ip
WHERE cc.ip IS NULL  -- anti-join: skip ips already in dstable
  AND (
        (bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 500)
     OR (click_num > 1000)
  )
分析:
1、aa表存放的就是一个指标数据,1条记录,列为小表
2、bb表存放的是按ip聚合的明细数据,记录很多,列为大表
3、cc表用来过滤ip,数量也很小,列为过滤表,作用很小。
查看执行计划,发现bb与aa进行left outer join时,引发了shuffle过程,造成大量的磁盘及网络IO,影响性能。
优化方案1:调整大小表位置,将小表放在左边后,提升至29s (该方案一直不太明白为啥会提升,执行计划里显示的也就是大小表位置调换下而已,跟之前的没其他区别)
优化方案2: 将 or 改成 union,提升至35s(各种调整,一直怀疑跟or有关系,后面调整成union其他不变,果真效率不一样;但方案1只是调整了下大小表顺序,并未调整其他,其效率同样提升很大;不太明白sparksql内部到底走了什么优化机制,后面继续研究);
优化方案3: 采用cache+broadcast方式,提升至20s(该方案将小表缓存至内存,进行map侧关联)
----方案2:or 改成 union(运行35s)
-- Variant 2: OR replaced with UNION (ran ~35s).
-- Each UNION branch evaluates one arm of the original OR predicate;
-- UNION (not UNION ALL) dedupes ips that satisfy both arms. The final
-- anti-join against dstable keeps only ips not already inserted.
INSERT INTO TABLE schema.dstable
SELECT aa.ip
FROM (
    -- branch 1: ips whose click rate exceeds 3x the day's average
    SELECT bb.ip AS ip
    FROM (
        SELECT ip,
               sum(click) AS click_num,
               round(sum(click) / sum(imp), 4) AS user_click_rate
        FROM schema.srctable1
        WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
        GROUP BY ip
    ) bb
    LEFT OUTER JOIN (
        SELECT round(sum(click) / sum(imp), 4) AS avg_click_rate
        FROM schema.srctable1
        WHERE date = '20171020'
    ) aa
    WHERE bb.user_click_rate > aa.avg_click_rate * 3
      AND click_num > 20
    UNION
    -- branch 2: ips with a high absolute click count
    SELECT bb.ip AS ip
    FROM (
        SELECT ip,
               sum(click) AS click_num,
               round(sum(click) / sum(imp), 4) AS user_click_rate
        FROM schema.srctable1
        WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
        GROUP BY ip
    ) bb
    LEFT OUTER JOIN (
        SELECT round(sum(click) / sum(imp), 4) AS avg_click_rate
        FROM schema.srctable1
        WHERE date = '20171020'
    ) aa
    WHERE click_num > 40
) aa
LEFT OUTER JOIN schema.dstable cc ON aa.ip = cc.ip
WHERE cc.ip IS NULL
-----cache+broadcast方式(20s)
原理:使用broadcast将会把小表分发到每台执行节点上,因此,关联操作都在本地完成,基本就取消了shuffle的过程,运行效率大幅度提高。
-- Variant 3: cache + broadcast (ran ~20s).
-- Cache the single-row average so Spark can broadcast it (subject to
-- spark.sql.autoBroadcastJoinThreshold, default 10MB) and perform a
-- map-side join, avoiding the shuffle seen with the inline subquery.
CACHE TABLE cta AS SELECT round(sum(click)/sum(imp),4) avg_click_rate FROM schema.srctable1 WHERE date = '20171020';
-- Fixed in review: AND binds tighter than OR, so the original predicate
-- parsed as (cc.ip IS NULL AND rate-filter) OR (click_num > 1000), which
-- let ips already present in dstable be re-inserted. Parentheses now apply
-- the cc.ip IS NULL anti-join to both branches, matching the UNION variant.
INSERT INTO TABLE schema.dstable
SELECT bb.ip
FROM (
    -- per-ip click totals and click-through rate for the day
    SELECT ip,
           sum(click) AS click_num,
           round(sum(click) / sum(imp), 4) AS user_click_rate
    FROM schema.srctable1
    WHERE date = '20171020' AND ip IS NOT NULL AND imp > 0
    GROUP BY ip
) bb
CROSS JOIN cta aa  -- cta is one row; the cartesian join is intentional
LEFT OUTER JOIN schema.dstable cc ON cc.ip = bb.ip
WHERE cc.ip IS NULL  -- anti-join: skip ips already in dstable
  AND (
        (bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 500)
     OR (click_num > 1000)
  )
注意:
cache 表不一定会被广播到Executor,执行map side join,还受另外一个参数:spark.sql.autoBroadcastJoinThreshold影响,该参数判断是否将该表广播;
spark.sql.autoBroadcastJoinThreshold参数默认值是10M,所以只有cache的表小于10M的才被广播到Executor上去执行map side join。
-- Baseline statement (repeated for reference).
-- NOTE(review): the join to aa has no ON clause; aa is a single-row
-- aggregate, so this is effectively a cartesian join — confirm the target
-- engine accepts LEFT OUTER JOIN without ON.
INSERT into TABLE schema.dstable
SELECT bb.ip FROM
(SELECT ip, sum(click) click_num, round(sum(click)/sum(imp),4) user_click_rate FROM schema.srctable1
WHERE date = '20171020' AND ip IS NOT NULL AND imp>0 GROUP BY ip) bb
LEFT OUTER JOIN (SELECT round(sum(click)/sum(imp),4) avg_click_rate FROM schema.srctable1 WHERE date = '20171020') aa
LEFT OUTER JOIN schema.dstable cc on cc.ip = bb.ip
-- NOTE(review): AND binds tighter than OR, so this parses as
-- (cc.ip is null AND rate-filter) OR (click_num > 1000); the cc.ip null
-- anti-join does NOT apply to the click_num > 1000 branch — likely
-- unintended, since the UNION rewrite applies it to both branches.
WHERE cc.ip is null AND
(bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 500) OR (click_num > 1000)
分析:
1、aa表存放的就是一个指标数据,1条记录,列为小表
2、bb表存放的是按ip聚合的明细数据,记录很多,列为大表
3、cc表用来过滤ip,数量也很小,列为过滤表,作用很小。
查看执行计划,发现bb与aa进行left outer join时,引发了shuffle过程,造成大量的磁盘及网络IO,影响性能。
优化方案1:调整大小表位置,将小表放在左边后,提升至29s (该方案一直不太明白为啥会提升,执行计划里显示的也就是大小表位置调换下而已,跟之前的没其他区别)
优化方案2: 将 or 改成 union,提升至35s(各种调整,一直怀疑跟or有关系,后面调整成union其他不变,果真效率不一样;但方案1只是调整了下大小表顺序,并未调整其他,其效率同样提升很大;不太明白sparksql内部到底走了什么优化机制,后面继续研究);
优化方案3: 采用cache+broadcast方式,提升至20s(该方案将小表缓存至内存,进行map侧关联)
----方案2:or 改成 union(运行35s)
-- Variant 2 (repeated): OR replaced with UNION; each branch evaluates one
-- arm of the original OR predicate, and UNION dedupes ips matched by both.
-- NOTE(review): thresholds here (20/40) differ from the baseline's 500/1000
-- — confirm which values are intended.
INSERT into TABLE schema.dstable
select aa.ip
from
(
-- branch 1: ips whose click rate exceeds 3x the day's average
SELECT bb.ip ip FROM
(SELECT ip, sum(click) click_num, round(sum(click)/sum(imp),4) user_click_rate FROM schema.srctable1 WHERE date = '20171020' AND ip IS NOT NULL AND imp>0 GROUP BY ip) bb
-- NOTE(review): single-row aa joined without an ON clause (cartesian)
LEFT OUTER JOIN(SELECT round(sum(click)/sum(imp),4) avg_click_rate FROM schema.srctable1 WHERE date = '20171020') aa
WHERE
(bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 20)
union
-- branch 2: ips with a high absolute click count
SELECT bb.ip ip FROM
(SELECT ip, sum(click) click_num, round(sum(click)/sum(imp),4) user_click_rate FROM schema.srctable1 WHERE date = '20171020' AND ip IS NOT NULL AND imp>0 GROUP BY ip) bb
LEFT OUTER JOIN (SELECT round(sum(click)/sum(imp),4) avg_click_rate FROM schema.srctable1 WHERE date = '20171020') aa
WHERE click_num > 40
) aa
-- anti-join: keep only ips not already present in dstable
LEFT OUTER JOIN schema.dstable cc on aa.ip=cc.ip
where cc.ip is null
-----cache+broadcast方式(20s)
原理:使用broadcast将会把小表分发到每台执行节点上,因此,关联操作都在本地完成,基本就取消了shuffle的过程,运行效率大幅度提高。
-- Variant 3 (repeated): cache the single-row average so it can be
-- broadcast for a map-side join (subject to
-- spark.sql.autoBroadcastJoinThreshold, per the note below).
cache table cta as SELECT round(sum(click)/sum(imp),4) avg_click_rate FROM schema.srctable1 WHERE date = '20171020';
INSERT into TABLE schema.dstable
SELECT bb.ip FROM
(SELECT ip, sum(click) click_num, round(sum(click)/sum(imp),4) user_click_rate FROM schema.srctable1
WHERE date = '20171020' AND ip IS NOT NULL AND imp>0 GROUP BY ip) bb
-- cta is one row; joined without an ON clause (effectively cartesian)
LEFT OUTER JOIN cta aa
LEFT OUTER JOIN schema.dstable cc on cc.ip = bb.ip
-- NOTE(review): AND binds tighter than OR — the cc.ip is null anti-join
-- does not apply to the click_num > 1000 branch; likely unintended, since
-- the UNION variant applies it to both branches.
WHERE cc.ip is null AND
(bb.user_click_rate > aa.avg_click_rate * 3 AND click_num > 500) OR (click_num > 1000)
注意:
cache 表不一定会被广播到Executor,执行map side join,还受另外一个参数:spark.sql.autoBroadcastJoinThreshold影响,该参数判断是否将该表广播;
spark.sql.autoBroadcastJoinThreshold参数默认值是10M,所以只有cache的表小于10M的才被广播到Executor上去执行map side join。
相关文章推荐
- spark sql优化:小表大表关联优化 & union替换or & broadcast join
- mysql通过将or改成union来优化sql性能问题一例
- sql 数据优化 union 代替or
- SQL索引优化2(MySQL的or/in/union与索引优化)
- 【SQL优化】B树索引位图转换及OR到UNION(ALL)的改写
- 使用or展开进行sql优化(即sql语法union all代替or可以提高效率)
- SQL优化实例:OR 改写为 UNION
- sql优化之:数据库索引创建原则,or/in/union与索引优化,聚集索引/非聚集索引/联合索引/索引覆盖,MySQL冗余数据的三种方案,MySQL双主一致性架构优化(来源:架构师之路)
- sql中的or 用Union All 替换 效率高
- spark sql逻辑计划(优化完)转物理计划
- 一次涉及两个大表关联的优化
- Spark性能优化之通过YDB实现比原生Spark性能高100倍的多表关联
- oracle 优化or 替换为in、exists、union all的几种写法,测试没有问题!
- SparkSQL性能分析与优化及相关工具小结
- sql优化实战:把full join改为left join +union all(从5分钟降为10秒)
- spark sql 优化
- Spark-SQL介绍及优化策略
- SQL优化:化解表关联的多对多join
- SQL性能优化中的底层概念,时间复杂度,算法和数据结构,数据库组成,查询优化和表关联原理.
- Spark map-side-join 关联优化