#!/bin/bash

# # # # # # # # # # # # # # # # # # # # # # 
# @file  : adn_install_daily.sh
# @author: houying
# @date  : 16-11-1
# # # # # # # # # # # # # # # # # # # # # #

source ../../dmp_env.sh

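# Derive dates from ScheduleTime (expected to be exported by dmp_env.sh):
# LOG_TIME / date_path refer to the previous day; old_path is the day before,
# which is passed to the matchBundlePackage step below.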
LOG_TIME=$(date -d "$ScheduleTime 1 days ago" "+%Y%m%d")
date_path=$(date -d "$ScheduleTime 1 days ago" "+%Y/%m/%d")
old_path=$(date -d "$ScheduleTime 2 days ago" "+%Y/%m/%d")

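# HDFS input and output locations (base paths come from dmp_env.sh).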
INPUT_CAMPAIGN_LIST_PATH="$DIM_ADN_CAMPAIGN/$date_path"
INPUT_ADN_INSTALL_PATH="${ADN_INSTALL_PATH}/$date_path"
TMP_OUTPUT_ADN_INSTALL_DAILY="${TMP_INSTALL_DAILY_ADN}/$date_path"
OUTPUT_ADN_INSTALL_DAILY="$ETL_ADN_INSTALL_TMP_DAILY/$date_path"

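# Wait for the upstream jobs to publish their _SUCCESS markers
# (check_await is assumed to be a polling helper sourced from dmp_env.sh).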
check_await "$INPUT_ADN_INSTALL_PATH/virginia/23/_SUCCESS"
check_await "$INPUT_CAMPAIGN_LIST_PATH/_SUCCESS"

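# Remove any output left by a previous run so the job can be re-run safely.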
hadoop fs -rm -r "$TMP_OUTPUT_ADN_INSTALL_DAILY/*"

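# Build the daily ADN install data on Spark and write it to the temporary
# output path, coalescing to 20 files.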
spark-submit --class mobvista.dmp.datasource.adn.AdnInstallDaily \
  --name "AdnInstallDaily.${LOG_TIME}" \
  --conf spark.sql.shuffle.partitions=100 \
  --conf spark.default.parallelism=100 \
  --conf spark.kryoserializer.buffer.max=512m \
  --conf spark.kryoserializer.buffer=64m \
  --master yarn --deploy-mode cluster \
  --executor-memory 4g --driver-memory 4g --executor-cores 2 --num-executors 5 \
  ../../DMP.jar \
  -datetime "${LOG_TIME}" -output "${TMP_OUTPUT_ADN_INSTALL_DAILY}" -coalesce 20

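# Abort if the Spark job failed.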
if [[ $? -ne 0 ]]; then
    exit 255
fi

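# Legacy MapReduce implementation, kept commented out for reference: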
: '
hadoop jar ../../${JAR} mobvista.dmp.datasource.adn.mapreduce.AdnInstallDailyMR \
    -Dmapreduce.fileoutputcommitter.algorithm.version=2 \
    "$INPUT_CAMPAIGN_LIST_PATH" "$INPUT_ADN_INSTALL_PATH" "$TMP_OUTPUT_ADN_INSTALL_DAILY" || exit 1
'

# Match a packageName for each bundleId
matchBundlePackage "$date_path" "$old_path" "2" "5" "$TMP_OUTPUT_ADN_INSTALL_DAILY" "$OUTPUT_ADN_INSTALL_DAILY" "adn_install" "../../${JAR}"

if [[ $? -ne 0 ]]; then
    exit 255
fi

echo "[Adn Install Daily End!]"