#!/bin/bash
# # # # # # # # # # # # # # # # # # # # # #
# @author : Liu Kai (刘凯)
# @date   : 2018-01-16
# @desc   : parse adn_sdk daily log data
# # # # # # # # # # # # # # # # # # # # # #

# Provides ScheduleTime, ADN_SDK_LOG, ADN_SDK_DAILY_PATH, JAR, and the
# helpers check_await / mount_partition. Must run before anything else.
source ../dmp_env.sh

# Work on the day before the scheduler-supplied date.
LOG_TIME=$(date -d "$ScheduleTime 1 days ago" +"%Y%m%d")
date_path=$(date -d "$ScheduleTime 1 days ago" +"%Y/%m/%d")

# NOTE: INPUT_PATH deliberately contains a glob (*/*) that must reach
# Spark verbatim — keep it quoted everywhere below so the local shell
# never expands it.
INPUT_PATH="${ADN_SDK_LOG}/${date_path}/*/*"
OUTPUT_PATH="${ADN_SDK_DAILY_PATH}/${date_path}"

# Block until the upstream job has written its final hourly partition
# (hour 23 of the virginia region) for this day.
check_await "${ADN_SDK_LOG}/${date_path}/virginia/23/_SUCCESS"

# Clear any stale output from a previous run; -f keeps this step
# silent and successful when the path does not exist yet (first run).
hadoop fs -rm -r -f "${OUTPUT_PATH}/"

spark-submit --class mobvista.dmp.datasource.adn_sdk.AdnSdkDaily \
  --conf spark.network.timeout=720s \
  --conf spark.yarn.executor.memoryOverhead=2048 \
  --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
  --conf spark.app.loadTime="${LOG_TIME}" \
  --conf spark.app.input_path="${INPUT_PATH}" \
  --conf spark.app.output_path="${OUTPUT_PATH}" \
  --conf spark.sql.shuffle.partitions=2000 \
  --conf spark.default.parallelism=2000 \
  --conf spark.shuffle.memoryFraction=0.4 \
  --conf spark.storage.memoryFraction=0.4 \
  --conf spark.driver.maxResultSize=8g \
  --conf spark.executor.extraJavaOptions="-XX:+UseG1GC" \
  --conf spark.app.coalesce=60000 \
  --master yarn --deploy-mode cluster --name adn_sdk_daily \
  --executor-memory 8g --driver-memory 6g \
  --executor-cores 4 --num-executors 200 \
  ../"${JAR}" || exit 255

# Register the freshly written HDFS path as a Hive partition.
mount_partition "etl_adn_sdk_daily" "day='${LOG_TIME}'" "$OUTPUT_PATH"