#!/usr/bin/env bash
# Daily job: convert iQiyi "lahuo" activation tmp data into DMP format and
# mount the resulting Hive partitions.
# Required environment: ScheduleTime (scheduler-supplied date anchor), JAR,
# plus the variables provided by the sourced env files
# (IQiYi_LAHUO_TMP_DAILY_TO_S3, DM_INSTALL_LIST, ...).
source ../dmp_env.sh
# tools.sh provides check_await / mount_partition / hive_func used below.
source ../ga_rawdata_analysis/common/tools.sh
echo "job begin!!!"

# Date variants derived from ScheduleTime: compact (partition value),
# slash-separated (S3 path layout), dashed (the -update job argument).
dt_today=$(date -d "$ScheduleTime" +"%Y%m%d")
dt_slash_today=$(date -d "$ScheduleTime" +"%Y/%m/%d")
update=$(date -d "$ScheduleTime" +"%Y-%m-%d")
dt_three_days_ago=$(date -d "$ScheduleTime 3 days ago" +"%Y%m%d")

INPUT="s3://mob-emr-test/dataplatform/DataWareHouse/data/dev/iqiyi_lahuo_tmp_daily/${dt_slash_today}"
#OUTPUT="s3://mob-emr-test/dataplatform/DataWareHouse/data/dwh/iqiyi_lahuo_tmp_daily_to_s3/${dt_today}"
OUTPUT="${IQiYi_LAHUO_TMP_DAILY_TO_S3}/${dt_slash_today}/iqiyi_activation"

# Block until the upstream job marks its output complete.
check_await "${INPUT}/_SUCCESS"

# -f: succeed even when the target does not exist yet (first run / rerun),
# so a missing path doesn't report a spurious error.
hadoop fs -rm -r -f "$OUTPUT"
# Step 1: convert the raw tmp daily data into DMP format.
# Quoted expansions so paths with unexpected characters can't word-split.
spark-submit --class mobvista.dmp.datasource.iqiyi.IQiYiTmpDataToDMP \
--conf spark.network.timeout=720s \
--conf spark.default.parallelism=2000 \
--conf spark.sql.shuffle.partitions=2000 \
--conf spark.sql.broadcastTimeout=1200 \
--conf spark.yarn.executor.memoryOverhead=4096 \
--conf spark.sql.autoBroadcastJoinThreshold=31457280 \
--master yarn --deploy-mode cluster --executor-memory 8g --driver-memory 4g --executor-cores 4 --num-executors 30 \
"../${JAR}" -input "${INPUT}" \
-output "${OUTPUT}" \
-update "${update}"
if [ $? -ne 0 ]; then
  exit 255
fi
# Mount today's partitions onto the Hive tables.
# NOTE(review): DMP_INSTALL_LIST_OUTPUT_PATH is mounted but nothing in this
# script writes to it — presumably the Spark job above produces it; confirm.
DMP_INSTALL_LIST_OUTPUT_PATH="${DM_INSTALL_LIST}_v2/${dt_slash_today}/iqiyi_activation"
# Fail fast on mount errors, consistent with the checked mount later in this script.
mount_partition "dm_install_list_v2" "dt='${dt_today}', business='iqiyi_activation'" "${DMP_INSTALL_LIST_OUTPUT_PATH}" || exit 255
mount_partition "iqiyi_lahuo_tmp_daily_to_s3" "dt='${dt_today}', business='iqiyi_activation'" "${OUTPUT}" || exit 255
FOUR_DAYS_OUTPUT="${IQiYi_LAHUO_TMP_DAILY_TO_S3}/${dt_slash_today}/iqiyi_activation_four_days"
# Step 2: deduplicate the last four days of lahuo data
# (window: dt_three_days_ago .. dt_today).
spark-submit --class mobvista.dmp.datasource.iqiyi.IQiYiLaHuoFourDaysDataDeduplication \
--conf spark.network.timeout=720s \
--conf spark.default.parallelism=2000 \
--conf spark.sql.shuffle.partitions=2000 \
--conf spark.sql.broadcastTimeout=1200 \
--conf spark.yarn.executor.memoryOverhead=4096 \
--conf spark.sql.autoBroadcastJoinThreshold=31457280 \
--master yarn --deploy-mode cluster --executor-memory 8g --driver-memory 4g --executor-cores 4 --num-executors 40 \
"../${JAR}" -dt_today "${dt_today}" -dt_three_days_ago "${dt_three_days_ago}" \
-output "${FOUR_DAYS_OUTPUT}"
if [ $? -ne 0 ]; then
  exit 255
fi
# Mount the deduplicated four-day output as its own business partition.
mount_partition "iqiyi_lahuo_tmp_daily_to_s3" "dt='${dt_today}', business='iqiyi_activation_four_days'" "${FOUR_DAYS_OUTPUT}"
if [ $? -ne 0 ]; then
  exit 255
fi
# Step 3: run the post-processing Hive script with today's partition value.
HIVE_CMD=$(hive_func)
# $HIVE_CMD is intentionally unquoted: it may expand to a command plus options.
# NOTE(review): "-hivevar name value" (space-separated) is non-standard hive CLI
# syntax (standard is "--hivevar name=value"); presumably the hive_func wrapper
# accepts it — confirm before changing.
$HIVE_CMD -v -hivevar dt_today "${dt_today}" -f iqiyi_tmp_daily_data_to_dmp.sql
if [ $? -ne 0 ]; then
  exit 255
fi
#hadoop fs -touchz ${DMP_INSTALL_LIST_OUTPUT_PATH}/_SUCCESS
echo "good job"