#!/bin/bash
# Load the shared DMP environment; expected to provide ScheduleTime, JAR,
# and the check_await / mount_partition helper functions.
source ../../dmp_env.sh

# Table type to process, passed as the first argument.
tb_type=$1

# Derive the target day (one day before the scheduled run time).
today=${ScheduleTime}
date=$(date +"%Y%m%d" -d "-1 day $today")
date_path=$(date +"%Y/%m/%d/" -d "-1 day $today")
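# Example (assuming GNU date and ScheduleTime=2024-01-02):
#   date      -> 20240101
#   date_path -> 2024/01/01/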
INPUT="s3://mob-ad/adn/tracking-v3"
hours="23"
res="beijing virginia"
for hh in ${hours}; do
    for re in ${res}; do
        # Block until the upstream _SUCCESS flag exists for each region.
        # date_path already ends with "/", so no extra slash is inserted here.
        check_await "${INPUT}/${tb_type}/${date_path}${re}/${hh}/_SUCCESS"
    done
done
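# check_await is provided by dmp_env.sh; presumably it polls S3 until the
# given _SUCCESS marker exists. A minimal sketch of such a helper, assuming
# the AWS CLI is available (illustrative only, not the real implementation):
#   check_await() {
#       until aws s3 ls "$1" > /dev/null 2>&1; do sleep 60; done
#   }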
OUTPUT="s3://mob-emr-test/dataplatform/DataWareHouse/data/dwh/etl_adn_tracking_${tb_type}/${date_path}"
spark-submit --class mobvista.dmp.datasource.rtdmp.lazada.ETLJob \
    --name "ETLJob.${tb_type}.${date}" \
    --conf spark.sql.shuffle.partitions=100 \
    --conf spark.default.parallelism=100 \
    --conf spark.kryoserializer.buffer.max=256m \
    --conf spark.executor.extraJavaOptions="-XX:+UseG1GC" \
    --master yarn --deploy-mode cluster \
    --executor-memory 6g --driver-memory 4g --executor-cores 4 --num-executors 5 \
    ../../${JAR} -dt ${date} -output ${OUTPUT} -tb_type ${tb_type}

# Fail the whole step if the Spark job did not succeed.
if [[ $? -ne 0 ]]; then
    exit 255
fi

# Register the new day's partition over the freshly written S3 data.
mount_partition "etl_adn_tracking_${tb_type}" "dt='${date}'" "$OUTPUT"
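# mount_partition comes from dmp_env.sh; presumably it adds the partition to
# the Hive metastore. A minimal sketch of such a helper (illustrative only;
# names and quoting are assumptions, not the real implementation):
#   mount_partition() {
#       hive -e "ALTER TABLE $1 ADD IF NOT EXISTS PARTITION ($2) LOCATION '$3'"
#   }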