#!/bin/bash
# # # # # # # # # # # # # # # # # # # # # #
# @author : 刘凯
# @date : 2018-01-16
# @desc : merge ADN_SDK data into the install list
# # # # # # # # # # # # # # # # # # # # # #
# @update : wangjf
source ../dmp_env.sh
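# Derive the run dates from $ScheduleTime (expected to be provided by the scheduling
# environment): LOG_TIME/dt/date_path point at the day before ScheduleTime,
# old_date_path at two days before.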
LOG_TIME=$(date -d "$ScheduleTime 1 days ago" +"%Y%m%d")
dt=$(date -d "$ScheduleTime 1 days ago" +"%Y-%m-%d")
date_path=$(date -d "$ScheduleTime 1 days ago" +"%Y/%m/%d")
old_date_path=$(date -d "$ScheduleTime 2 days ago" +"%Y/%m/%d")
echo "$date_path"
echo "$old_date_path"
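# INPUT_PATH      : the target day's raw ADN SDK daily data
# OLD_INPUT_PATH  : the previous day's merged adn_sdk_v2 install list
# OUTPUT_PATH     : the target day's adn_sdk_v2 install-list partition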
INPUT_PATH="${ADN_SDK_DAILY_PATH}/$date_path"
OLD_INPUT_PATH="${DM_INSTALL_LIST}/$old_date_path/adn_sdk_v2"
OUTPUT_PATH="${DM_INSTALL_LIST}/$date_path/adn_sdk_v2"
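# Block until both upstream datasets have written their _SUCCESS flags
# (check_await is a helper presumably defined in dmp_env.sh).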
check_await "$INPUT_PATH/_SUCCESS"
check_await "$OLD_INPUT_PATH/_SUCCESS"
hadoop fs -rm -r -f "$OUTPUT_PATH/"
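# Merge the daily ADN SDK data with the previous day's install list and write
# the result as the new adn_sdk_v2 partition.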
spark-submit --class mobvista.dmp.datasource.adn_sdk.AdnSdkInstallList \
--conf spark.yarn.executor.memoryOverhead=4096 \
--conf spark.default.parallelism=500 \
--conf spark.kryoserializer.buffer.max=512m \
--conf spark.kryoserializer.buffer=64m \
--conf spark.storage.memoryFraction=0.4 \
--conf spark.shuffle.memoryFraction=0.4 \
--conf spark.sql.adaptive.enabled=true \
--conf spark.sql.adaptive.advisoryPartitionSizeInBytes=268435456 \
--conf spark.app.version=1 \
--master yarn --deploy-mode cluster \
--name "adn_sdk_v2_install.${LOG_TIME}" \
--executor-memory 8g --driver-memory 4g --executor-cores 3 --num-executors 50 \
../${JAR} -input ${INPUT_PATH} -oldInput ${OLD_INPUT_PATH} -output ${OUTPUT_PATH} -date ${dt} -parallelism 200 -coalesce 200
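# Abort with a non-zero exit code if the Spark job failed, so the scheduler marks the task as failed.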
if [[ $? -ne 0 ]]; then
  exit 255
fi
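# Attach the new output directory as the year/month/day/business='adn_sdk_v2' partition
# of the dm_install_list Hive table (mount_partition is a helper presumably defined in dmp_env.sh).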
mount_partition "dm_install_list" "year='${LOG_TIME:0:4}', month='${LOG_TIME:4:2}', day='${LOG_TIME:6:2}', business='adn_sdk_v2'" "$OUTPUT_PATH"