#!/bin/bash
# adn_request_daily_v2.sh (1.71 KB) — NOTE(review): a stray directory-listing line
# previously sat above the shebang; the shebang must be the first line of the file
# for the kernel to honor it, so the stray text is kept here as a comment instead.

# # # # # # # # # # # # # # # # # # # # # # 
# @file  : adn_request_daily_v2.sh
# @author: wangjf
# @date  : 2020-04-24 19:58:11
# @desc  : Extract appid from ods_adn_trackingnew_request, match it to
#          package_name, and save the result to etl_adn_sdk_request_daily
# # # # # # # # # # # # # # # # # # # # # #


# Load shared DMP environment (defines APP_ID_MAPPING, DIM_MANUAL_MAPPING,
# TMP_AND_REQUEST_SDK_DAILY_PATH, JAR, check_await, and ScheduleTime handling).
# Abort immediately if it cannot be sourced — continuing with unset variables
# would produce malformed S3 paths below.
source ../../dmp_env.sh || exit 1

# Process the day BEFORE the scheduler-supplied time (GNU date relative syntax).
LOG_TIME=$(date -d "$ScheduleTime 1 days ago" "+%Y%m%d")
date_path=$(date -d "$ScheduleTime 1 days ago" "+%Y/%m/%d")

INPUT_ADN_REQUEST_SDK_HOUR="s3://mob-emr-test/dataplatform/DataWareHouse/data/dwh/tmp/etl_adn_org_request_daily_hours/${date_path}"
INPUT_MAPPING_PATH="$APP_ID_MAPPING/$date_path"

# Block until the upstream jobs have published their _SUCCESS markers.
# NOTE(review): the hour-23 marker presumably signals the full day is ready — confirm.
check_await "$INPUT_MAPPING_PATH/_SUCCESS"
check_await "$INPUT_ADN_REQUEST_SDK_HOUR/virginia/23/_SUCCESS"

TMP_OUTPUT_PATH="${TMP_AND_REQUEST_SDK_DAILY_PATH}/$date_path"

# Run the Spark ETL that joins request appids against the package-name
# mappings and writes the daily result to TMP_OUTPUT_PATH.
# All variable expansions in the argument list are quoted (ShellCheck SC2086):
# an unquoted path would be word-split or glob-expanded if it ever contained
# spaces or wildcard characters.
spark-submit --class mobvista.dmp.datasource.adn_request_sdk.AdnRequestSdkEtlDaily \
    --conf spark.network.timeout=720s \
    --conf spark.yarn.executor.memoryOverhead=2048 \
    --conf spark.sql.shuffle.partitions=2000 \
    --conf spark.default.parallelism=2000 \
    --conf spark.driver.maxResultSize=8g \
    --conf spark.executor.extraJavaOptions="-XX:+UseG1GC" \
    --conf spark.shuffle.memoryFraction=0.4 \
    --conf spark.storage.memoryFraction=0.4 \
    --conf spark.sql.files.maxPartitionBytes=268435456 \
    --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
    --deploy-mode cluster --name "AdnRequestSdkEtlDaily.${LOG_TIME}" --executor-memory 10g --driver-memory 6g  --executor-cores 5 --num-executors 100 \
    "../../${JAR}" -appIdMapping "${INPUT_MAPPING_PATH}" -manualAppIdMapping "${DIM_MANUAL_MAPPING}" -output "${TMP_OUTPUT_PATH}" -date "${LOG_TIME}" -coalesce 2000

# Propagate a Spark failure to the scheduler with a distinctive exit code.
if [[ $? -ne 0 ]]; then
    exit 255
fi