#!/bin/bash

# # # # # # # # # # # # # # # # # # # # # #
# @author : 王金锋
# @date : 2020-04-03 14:50:17
# @desc : Parse adn_sdk hourly data
# # # # # # # # # # # # # # # # # # # # # #

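# dmp_env.sh is assumed to export the variables used below
# (ScheduleTime, ADN_SDK_LOG, ADN_SDK_HOUR_PATH, JAR) and the
# check_await helper; this is inferred from how they are referenced here.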
source ../dmp_env.sh

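# Target the hour before ScheduleTime, e.g. ScheduleTime=2020-04-03 15:xx
# gives date=20200403 and hour=14 (hour is taken from offset 9 of "YYYYMMDD HH").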
datetime=$(date +"%Y%m%d %H" -d "1 hour ago $ScheduleTime")
date=${datetime:0:8}
hour=${datetime:9:2}

date_path=$(date -d "$ScheduleTime 1 hour ago" +"%Y/%m/%d")

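# Wait until each regional cluster (virginia, frankfurt, singapore, seoul)
# has published its _SUCCESS flag for this hour; check_await presumably
# blocks/polls until the path exists.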
check_await "${ADN_SDK_LOG}/$date_path/virginia/${hour}/_SUCCESS"
check_await "${ADN_SDK_LOG}/$date_path/frankfurt/${hour}/_SUCCESS"
check_await "${ADN_SDK_LOG}/$date_path/singapore/${hour}/_SUCCESS"
check_await "${ADN_SDK_LOG}/$date_path/seoul/${hour}/_SUCCESS"

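# Read the hour's logs from all regions at once via the wildcard, and write
# the parsed output under the matching date/hour partition.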
INPUT_PATH="${ADN_SDK_LOG}/$date_path/*/${hour}"

OUTPUT_PATH="${ADN_SDK_HOUR_PATH}/$date_path/${hour}"

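# Submit the ETL job to YARN in cluster mode: 20 executors x 4 cores x 4g,
# Kryo serialization, G1GC, and 2000 shuffle partitions. The memoryOverhead
# and *.memoryFraction settings appear to target older Spark releases and
# are kept as-is.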
spark-submit --class mobvista.dmp.datasource.adn_sdk.AdnSdkEtlHour \
 --conf spark.network.timeout=720s \
 --conf spark.yarn.executor.memoryOverhead=2048 \
 --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
 --conf spark.sql.shuffle.partitions=2000 \
 --conf spark.default.parallelism=2000 \
 --conf spark.shuffle.memoryFraction=0.4 \
 --conf spark.storage.memoryFraction=0.4 \
 --conf spark.driver.maxResultSize=8g \
 --conf spark.executor.extraJavaOptions="-XX:+UseG1GC" \
 --master yarn --deploy-mode cluster --name adn_sdk_etl_hour.${date}.${hour} --executor-memory 4g --driver-memory 4g --executor-cores 4 --num-executors 20 \
 ../${JAR} -coalesce 4000 -date ${date} -hour ${hour} -input ${INPUT_PATH} -output ${OUTPUT_PATH}

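# Fail the whole script (and thus the scheduler task) if the Spark job
# did not exit cleanly.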
if [[ $? -ne 0 ]]; then
  exit 255
fi