package mobvista.dmp.datasource.adn_sdk

import java.net.URI

import com.alibaba.fastjson.{JSON, JSONArray}
import mobvista.dmp.common.CommonSparkJob
import org.apache.commons.cli.{BasicParser, Options}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{SaveMode, SparkSession}

import scala.collection.mutable

/**
 * Daily merge job for adn_sdk install data: groups records by device and
 * merges their install_list JSON arrays into one array per device.
 *
 * @package: mobvista.dmp.datasource.adn_sdk
 * @author: wangjf
 * @date: 2020/4/2
 * @time: 8:34 PM
 * @email: jinfeng.wang@mobvista.com
 * @phone: 152-1062-7698
 */
class AdnSdkMergeDaily extends CommonSparkJob with java.io.Serializable {

  def commandOptions(): Options = {
    val options = new Options()
    options.addOption("date", true, "date")
    options.addOption("input", true, "input")
    options.addOption("output", true, "output")
    options
  }

  override protected def run(args: Array[String]): Int = {
    val parser = new BasicParser()
    val options = commandOptions()
    val commandLine = parser.parse(options, args)

    val date = commandLine.getOptionValue("date")
    val input = commandLine.getOptionValue("input")
    val output = commandLine.getOptionValue("output")

    val spark = SparkSession
      .builder()
      .appName(s"AdnSdkMergeDaily.$date")
      .config("spark.rdd.compress", "true")
      .config("spark.shuffle.compress", "true")
      .config("spark.sql.orc.filterPushdown", "true")
      .config("spark.io.compression.codec", "lz4")
      .config("spark.io.compression.lz4.blockSize", "64k")
      .config("spark.sql.warehouse.dir", "s3://mob-emr-test/spark-warehouse")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .enableHiveSupport()
      .getOrCreate()
    val sc = spark.sparkContext
    try {
      // Delete any stale output before writing.
      FileSystem.get(new URI("s3://mob-emr-test"), sc.hadoopConfiguration).delete(new Path(output), true)

      // Expose merge as a Spark SQL UDF and register the input ORC data as a temp view.
      spark.udf.register("merge", merge _)
      spark.read.schema(install_schema).orc(input).createOrReplaceTempView("adn_sdk_tab")

      // One row per (device_id, device_type, platform): keep the max model/brand
      // and merge all install_list JSON arrays collected for the device.
      val sql: String =
        """
          |SELECT device_id,device_type,platform,MAX(model) model,MAX(brand) brand,merge(COLLECT_SET(install_list)) install_list
          | FROM adn_sdk_tab
          | GROUP BY device_id,device_type,platform
          |""".stripMargin
      spark.sql(sql)
        .coalesce(40)
        .write
        .mode(SaveMode.Overwrite)
        .option("orc.compress", "snappy")
        .orc(output)
    } finally {
      if (spark != null) {
        spark.stop()
      }
    }
    0
  }
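
  /**
   * UDF body: flattens a collected set of install_list JSON-array strings
   * into a single JSON array string; malformed entries contribute nothing.
   */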
  def merge(installList: mutable.WrappedArray[String]): String = {
    val installJSONArray = new JSONArray()
    installList.foreach { install =>
      installJSONArray.fluentAddAll(String2JSONArray(install))
    }
    installJSONArray.toJSONString
  }
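
  /**
   * Parses a string into a JSONArray, returning an empty array for null,
   * empty, or unparseable input instead of throwing.
   */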
  def String2JSONArray(str: String): JSONArray = {
    if (null != str && str.nonEmpty) {
      try {
        val element = JSON.parseArray(str)
        if (!element.isEmpty) {
          element
        } else {
          new JSONArray()
        }
      } catch {
        case _: Exception =>
          new JSONArray()
      }
    } else {
      new JSONArray()
    }
  }
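
  /** Schema of the daily adn_sdk ORC input; every column is read as a string. */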
  val install_schema: StructType = StructType(Array(
    StructField("device_id", StringType),
    StructField("device_type", StringType),
    StructField("platform", StringType),
    StructField("model", StringType),
    StructField("brand", StringType),
    StructField("install_list", StringType)))
}

object AdnSdkMergeDaily {
  def main(args: Array[String]): Unit = {
    new AdnSdkMergeDaily().run(args)
  }
}
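
// Example submission (a sketch; the jar name and S3 paths are illustrative):
//   spark-submit --class mobvista.dmp.datasource.adn_sdk.AdnSdkMergeDaily \
//     dmp.jar \
//     -date 2020-04-02 \
//     -input s3://mob-emr-test/adn_sdk/2020-04-02 \
//     -output s3://mob-emr-test/adn_sdk_merge/2020-04-02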