package mobvista.dmp.datasource.age_gender

import java.net.URI

import mobvista.dmp.common.CommonSparkJob
import org.apache.commons.cli.Options
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{Row, SaveMode, SparkSession}
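
/**
  * Merges the daily third-party gender feed (dwh.etl_gender_thirdparty_data_daily)
  * into the accumulated total table (dwh.etl_gender_thirdparty_data_total) via a
  * full outer join, normalizing gender labels to 'm'/'f', and writes the merged
  * snapshot as zlib-compressed ORC to the given S3 path.
  */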
class ThirdPartySourceDaily extends CommonSparkJob {

  override protected def buildOptions(): Options = {
    val options = new Options
    options.addOption("outputtotal", true, "[must] output path for the merged total snapshot")
    options.addOption("coalesce", true, "[must] number of output partitions")
    options.addOption("today", true, "[must] dt partition of the daily table")
    options.addOption("yesbef3", true, "[must] dt partition of the previous total snapshot")
    options
  }

  override protected def run(args: Array[String]): Int = {
    val commandLine = commParser.parse(options, args)
    if (!checkMustOption(commandLine)) {
      printUsage(options)
      return -1
    } else printOptions(commandLine)

    val outputtotal = commandLine.getOptionValue("outputtotal")
    val coalesce = commandLine.getOptionValue("coalesce")
    val today = commandLine.getOptionValue("today")
    val yesbef3 = commandLine.getOptionValue("yesbef3")

    val spark = SparkSession.builder()
      .appName("GenderThirdPartySourceDaily")
      .config("spark.rdd.compress", "true")
      .config("spark.io.compression.codec", "snappy")
      .config("spark.sql.orc.filterPushdown", "true")
      .config("spark.sql.warehouse.dir", "s3://mob-emr-test/spark-warehouse")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .enableHiveSupport()
      .getOrCreate()

    // Clear any previous output so the Overwrite below starts from a clean path.
    FileSystem.get(new URI("s3://mob-emr-test"), spark.sparkContext.hadoopConfiguration)
      .delete(new Path(outputtotal), true)

    try {
      // Full outer join of today's increment against the last total snapshot:
      // devices seen only today come from `daily`, devices not updated today are
      // carried over from `total`, and for devices present in both the daily
      // values win via COALESCE. Gender labels are normalized to 'm'/'f' on both
      // sides before joining.
      val sql1 =
        s"""
           |select COALESCE(daily.device_id, total.device_id) device_id,
           |       COALESCE(daily.device_type, total.device_type) device_type,
           |       COALESCE(daily.platform, total.platform) platform,
           |       COALESCE(daily.gender, total.gender) gender
           |  from (select device_id, device_type, platform,
           |               case when gender = 'male' then 'm' when gender = 'female' then 'f' else gender end as gender
           |          from dwh.etl_gender_thirdparty_data_daily
           |         where dt = '$today'
           |         group by device_id, device_type, platform, gender) daily
           |  full join
           |       (select device_id, device_type, platform,
           |               case when gender = 'male' then 'm' when gender = 'female' then 'f' else gender end as gender
           |          from dwh.etl_gender_thirdparty_data_total
           |         where dt = '$yesbef3'
           |         group by device_id, device_type, platform, gender) total
           |    on (daily.device_id = total.device_id
           |        and daily.device_type = total.device_type
           |        and daily.platform = total.platform)
        """.stripMargin

      spark.sql(sql1)
        // Disabled device_id sanity check; enabling it requires importing
        // java.util.regex.Pattern and org.apache.commons.lang3.StringUtils.
        /*
        .filter(row => {
          val device_id = row.getAs[String]("device_id")
          StringUtils.isNotBlank(device_id) &&
            Pattern.compile("-").split(device_id, -1).length == 5 &&
            !Pattern.compile("^0*-0*-0*-0*-0*$").matcher(device_id).matches()
        })
        */
        .coalesce(coalesce.toInt)
        .write
        .mode(SaveMode.Overwrite)
        .option("orc.compress", "zlib")
        .orc(outputtotal)
    } finally {
      spark.stop()
    }
    0
  }
}
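
// For reference, a minimal sketch of the base-class contract this job relies on,
// inferred purely from the calls made above. The real
// mobvista.dmp.common.CommonSparkJob may differ; every member below is an assumption.
//
//   abstract class CommonSparkJob {
//     protected val commParser: org.apache.commons.cli.CommandLineParser // assumed parser field
//     protected lazy val options: Options = buildOptions()               // assumed options field
//     protected def buildOptions(): Options                              // subclass declares its CLI options
//     protected def run(args: Array[String]): Int                        // job body, returns an exit code
//     protected def checkMustOption(cl: org.apache.commons.cli.CommandLine): Boolean
//     protected def printUsage(options: Options): Unit
//     protected def printOptions(cl: org.apache.commons.cli.CommandLine): Unit
//   }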

object ThirdPartySourceDaily {
  def main(args: Array[String]): Unit = {
    new ThirdPartySourceDaily().run(args)
  }
}
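
// Example submission (jar name, dates, and output path below are illustrative
// only; `yesbef3` is the dt of the total snapshot being merged, a few days
// behind `today`):
//
//   spark-submit \
//     --class mobvista.dmp.datasource.age_gender.ThirdPartySourceDaily \
//     dmp-jobs.jar \
//     -outputtotal s3://mob-emr-test/etl_gender_thirdparty_data_total/2024-01-05 \
//     -coalesce 200 \
//     -today 2024-01-05 \
//     -yesbef3 2024-01-02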