package mobvista.dmp.datasource.taobao

import mobvista.dmp.common.CommonSparkJob
import org.apache.commons.cli.Options
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

import java.net.URI

/**
  * Daily ETL that pulls active iOS IDFA device ids from dwh.ods_dmp_user_info and
  * writes one plain-text device-id list per Lazada market (VN/ID/TH/PH/MY/SG).
  *
  * @author jiangfan
  * @date 2021/8/5 14:38
  */
class EtlLazadaIosActivitionDaily extends CommonSparkJob {

  override protected def buildOptions(): Options = {
    val options = new Options
    options.addOption("today", true, "[must] today")
    options.addOption("last_req_day", true, "[must] last_req_day")
    options.addOption("vn_idfaoutput", true, "[must] vn_idfaoutput")
    options.addOption("id_idfaoutput", true, "[must] id_idfaoutput")
    options.addOption("th_idfaoutput", true, "[must] th_idfaoutput")
    options.addOption("ph_idfaoutput", true, "[must] ph_idfaoutput")
    options.addOption("my_idfaoutput", true, "[must] my_idfaoutput")
    options.addOption("sg_idfaoutput", true, "[must] sg_idfaoutput")
    options
  }
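
  // Illustrative invocation (a sketch, not taken from this repo; the jar name and
  // output paths below are assumptions):
  //   spark-submit --class mobvista.dmp.datasource.taobao.EtlLazadaIosActivitionDaily \
  //     dmp.jar \
  //     -today 2021-08-05 -last_req_day 2021-07-06 \
  //     -vn_idfaoutput s3://mob-emr-test/.../vn -id_idfaoutput s3://mob-emr-test/.../id \
  //     -th_idfaoutput s3://mob-emr-test/.../th -ph_idfaoutput s3://mob-emr-test/.../ph \
  //     -my_idfaoutput s3://mob-emr-test/.../my -sg_idfaoutput s3://mob-emr-test/.../sg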

  override protected def run(args: Array[String]): Int = {
    val commandLine = commParser.parse(options, args)
    if (!checkMustOption(commandLine)) {
      printUsage(options)
      return -1
    } else printOptions(commandLine)

    val today = commandLine.getOptionValue("today")
    val last_req_day = commandLine.getOptionValue("last_req_day")
    val vn_idfaoutput = commandLine.getOptionValue("vn_idfaoutput")
    val id_idfaoutput = commandLine.getOptionValue("id_idfaoutput")
    val th_idfaoutput = commandLine.getOptionValue("th_idfaoutput")
    val ph_idfaoutput = commandLine.getOptionValue("ph_idfaoutput")
    val my_idfaoutput = commandLine.getOptionValue("my_idfaoutput")
    val sg_idfaoutput = commandLine.getOptionValue("sg_idfaoutput")

    val spark = SparkSession.builder()
      .appName("EtlLazadaIosActivitionDaily")
      .config("spark.rdd.compress", "true")
      .config("spark.io.compression.codec", "snappy")
      .config("spark.sql.orc.filterPushdown", "true")
      .config("spark.sql.warehouse.dir", "s3://mob-emr-test/spark-warehouse")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .enableHiveSupport()
      .getOrCreate()

    // Clear any existing output locations so saveAsTextFile does not fail on re-runs.
    val fs = FileSystem.get(new URI("s3://mob-emr-test"), spark.sparkContext.hadoopConfiguration)
    Seq(vn_idfaoutput, id_idfaoutput, th_idfaoutput, ph_idfaoutput, my_idfaoutput, sg_idfaoutput)
      .foreach(output => fs.delete(new Path(output), true))

    try {
      // One row per (device_id, country) for iOS IDFA devices still active in the
      // lookback window, excluding the listed business lines.
      val sql =
        s"""
           |select lower(device_id) device_id, lower(country) country
           |  from dwh.ods_dmp_user_info
           | where dt = '$today'
           |   and last_req_day >= '$last_req_day'
           |   and business not in ('other', 'ali_acquisition', 'ali_activation', 'adn_install')
           |   and device_type = 'idfa'
           |   and platform = 'ios'
           | group by lower(device_id), lower(country)
        """.stripMargin

      // Cache once, then write one device-id text file per target country.
      val dfCache: DataFrame = spark.sql(sql).persist(StorageLevel.MEMORY_AND_DISK_SER)
      val outputByCountry = Seq(
        "vn" -> vn_idfaoutput,
        "id" -> id_idfaoutput,
        "th" -> th_idfaoutput,
        "ph" -> ph_idfaoutput,
        "my" -> my_idfaoutput,
        "sg" -> sg_idfaoutput
      )
      outputByCountry.foreach { case (countryCode, output) =>
        dfCache.rdd
          // equalsIgnoreCase is null-safe here: getAs may return null for country
          .filter(row => countryCode.equalsIgnoreCase(row.getAs[String]("country")))
          .map(_.getAs[String]("device_id"))
          .coalesce(60)
          .saveAsTextFile(output)
      }
      dfCache.unpersist()
    } finally {
      spark.stop()
    }
    0
  }
}

object EtlLazadaIosActivitionDaily {
  def main(args: Array[String]): Unit = {
    new EtlLazadaIosActivitionDaily().run(args)
  }
}
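
// Reading one per-country output back (a sketch; the concrete path is an
// assumption, not taken from this job's configuration):
//   val vnIdfas = spark.read.textFile("s3://mob-emr-test/.../vn_idfaoutput")
//   vnIdfas.show(5, truncate = false) // one lowercase IDFA per line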