#!/bin/bash
# # # # # # # # # # # # # # # # # # # # # #
# @file  : merge_package_name.sh
# @author: rongpei
# @date  : 17-04-27
# @desc  : Ops exports the active data from the relational database; this job
#          merges the daily data into the cumulative dataset.
#          Data contact: Ops - 胡劲斌
# # # # # # # # # # # # # # # # # # # # # #

source ../dmp_env.sh

# Partition dates derived from the scheduler time: today (daily export),
# two days ago (previous cumulative snapshot) and yesterday (output).
LOG_TIME=$(date -d "$ScheduleTime" "+%Y%m%d")
today_year=${LOG_TIME:0:4}
today_month=${LOG_TIME:4:2}
today_day=${LOG_TIME:6:2}

LOG_TIME=$(date -d "$ScheduleTime 2 days ago" "+%Y%m%d")
year=${LOG_TIME:0:4}
month=${LOG_TIME:4:2}
day=${LOG_TIME:6:2}

LOG_TIME=$(date -d "$ScheduleTime 1 day ago" "+%Y%m%d")
yester_year=${LOG_TIME:0:4}
yester_month=${LOG_TIME:4:2}
yester_day=${LOG_TIME:6:2}

INPUT_DMP_DATA_ADN="$CAMPAIGN_LIST_SRC/$today_year/$today_month/$today_day"
INPUT_CAMPAIGN_ADN="$DIM_ADN_CAMPAIGN/$year/$month/$day"
CAMPAIGN_TAG_OUTPUT="$DIM_ADN_CAMPAIGN/$yester_year/$yester_month/$yester_day"

# Wait until the daily export has landed before merging.
check_await "$INPUT_DMP_DATA_ADN/_SUCCESS"

# Clear any stale output from a previous run of this partition.
hadoop fs -rm "$CAMPAIGN_TAG_OUTPUT/*"

# Legacy MapReduce invocation, kept for reference (disabled):
: '
hadoop jar ../${JAR} mobvista.dmp.datasource.packagelist.mapreduce.MergeCampaignList \
    -Dtask.date=${yester_year}-${yester_month}-${yester_day} \
    "$INPUT_DMP_DATA_ADN" "$INPUT_CAMPAIGN_ADN" "$CAMPAIGN_TAG_OUTPUT" || exit 1
'

# The MapReduce job above has been rewritten in Spark.
spark-submit --class mobvista.dmp.datasource.packagelist.MergeCampaignList \
    --conf spark.network.timeout=720s \
    --conf spark.default.parallelism=100 \
    --conf spark.sql.shuffle.partitions=100 \
    --conf spark.sql.broadcastTimeout=1200 \
    --conf spark.yarn.executor.memoryOverhead=2048 \
    --conf spark.sql.autoBroadcastJoinThreshold=31457280 \
    --master yarn --deploy-mode cluster --executor-memory 8g --driver-memory 4g --executor-cores 3 --num-executors 4 \
    ../${JAR} -input_dmp_data_adn ${INPUT_DMP_DATA_ADN} -input_campaign_adn ${INPUT_CAMPAIGN_ADN} -coalesce 20 \
    -output ${CAMPAIGN_TAG_OUTPUT} -today ${yester_year}-${yester_month}-${yester_day} || exit 1

# Register yesterday's output directory as a Hive partition of dim_adn_campaign.
mount_partition "dim_adn_campaign" "year='$yester_year', month='$yester_month', day='$yester_day'" "$CAMPAIGN_TAG_OUTPUT" || exit 1

echo "[# merge_package_name END!]"
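
# ---------------------------------------------------------------------------
# Optional sanity-check sketch (not wired into the job). It assumes the Hive
# CLI is available on this gateway and that mount_partition (defined in
# dmp_env.sh) registers CAMPAIGN_TAG_OUTPUT as a partition of dim_adn_campaign.
# The helper below only lists the expected partition so an operator can confirm
# the mount succeeded; it is defined here for reference and never called.
# ---------------------------------------------------------------------------
verify_partition() {
    hive -e "SHOW PARTITIONS dim_adn_campaign PARTITION (year='$yester_year', month='$yester_month', day='$yester_day');"
}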