#!/usr/bin/env bash

source ../../dmp_env.sh
source ../../ga_rawdata_analysis/common/tools.sh

echo "job  begin!!!"

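# Date variables derived from the scheduler-provided $ScheduleTime
# (dt_begin_days mirrors dt_today here; the polling job below takes both flags).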
dt_today=$(date -d "$ScheduleTime" +"%Y%m%d")
dt_begin_days=$(date -d "$ScheduleTime" +"%Y%m%d")
dt_slash_today=$(date -d "$ScheduleTime" +"%Y/%m/%d")
update=$(date -d "$ScheduleTime" +"%Y-%m-%d")

INPUT_PATH="${YOUKU_LAXIN_DAILY_TMP_PATH}/${dt_slash_today}"
OUTPUT_PATH="${YOUKU_LAXIN_TMP_DAILY_TO_S3}/${dt_slash_today}/youku_acquisition"

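# Block until the upstream imeimd5/oaidmd5 outputs are complete; check_await
# (presumably defined in the sourced env/tools scripts) waits on the _SUCCESS flags.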
check_await "${INPUT_PATH}/imeimd5/_SUCCESS"
check_await "${INPUT_PATH}/oaidmd5/_SUCCESS"

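# Remove any previous output for today so a rerun starts clean.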
hadoop fs -rm -r "${OUTPUT_PATH}"

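# Spark job 1: per the class name, convert today's Youku acquisition device data
# (imeimd5/oaidmd5) into the DMP tmp-to-S3 layout.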
spark-submit --class mobvista.dmp.datasource.taobao.YoukuTmpDataToDmp \
 --conf spark.network.timeout=720s \
 --conf spark.default.parallelism=2000 \
 --conf spark.sql.shuffle.partitions=2000 \
 --conf spark.sql.broadcastTimeout=1200 \
 --conf spark.yarn.executor.memoryOverhead=4096 \
 --conf spark.sql.autoBroadcastJoinThreshold=31457280 \
 --files ${HIVE_SITE_PATH} \
 --master yarn --deploy-mode cluster --executor-memory 8g --driver-memory 4g --executor-cores 4 --num-executors 40 \
 ../../${JAR} -Input "${INPUT_PATH}/*/*" -Output "${OUTPUT_PATH}" \
 -update ${update}


if [ $? -ne 0 ]; then
  exit 255
fi

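# Register today's outputs as Hive partitions (mount_partition is provided by the
# sourced tools; note the status check below only covers the last mount call).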
DMP_INSTALL_LIST_ACQUISITION_PATH="${DM_INSTALL_LIST}_v2/${dt_slash_today}/youku_acquisition"
mount_partition "dm_install_list_v2" "dt='${dt_today}', business='youku_acquisition'" "${DMP_INSTALL_LIST_ACQUISITION_PATH}"
mount_partition "youku_laxin_tmp_daily_to_s3" "dt='${dt_today}', business='youku_acquisition'" "${OUTPUT_PATH}"


if [ $? -ne 0 ]; then
  exit 255
fi

ACQUISITIONOUTPUT="${YOUKU_LAXIN_TMP_DAILY_TO_S3}/${dt_slash_today}/youku_acquisition_polling"

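# Spark job 2: per the class name, deduplicate the polling acquisition data
# over the given date range and write it to the polling output path.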
spark-submit --class mobvista.dmp.datasource.taobao.YoukuLaXinPollingDataDeduplication \
 --conf spark.network.timeout=720s \
 --conf spark.default.parallelism=2000 \
 --conf spark.sql.shuffle.partitions=2000 \
 --conf spark.sql.broadcastTimeout=1200 \
 --conf spark.yarn.executor.memoryOverhead=4096 \
 --conf spark.sql.autoBroadcastJoinThreshold=31457280 \
 --files ${HIVE_SITE_PATH} \
 --master yarn --deploy-mode cluster --executor-memory 8g --driver-memory 4g --executor-cores 4 --num-executors 40 \
 ../../${JAR} -dt_today ${dt_today} -dt_begin_days ${dt_begin_days} \
 -AcquisitionOutput "${ACQUISITIONOUTPUT}"

if [ $? -ne 0 ]; then
  exit 255
fi

mount_partition "youku_laxin_tmp_daily_to_s3" "dt='${dt_today}', business='youku_acquisition_polling'" "${ACQUISITIONOUTPUT}"

if [ $? -ne 0 ]; then
  exit 255
fi

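# Run the follow-up Hive SQL, passing today's partition date as a hivevar.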
HIVE_CMD=$(hive_func)

$HIVE_CMD -v -hivevar dt_today=${dt_today} -f youku_laxin_data_to_dmp.sql


if [ $? -ne 0 ]; then
  exit 255
fi

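# Publish a _SUCCESS flag so downstream consumers can see the partition is ready.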
hadoop fs -touchz ${DMP_INSTALL_LIST_ACQUISITION_PATH}/_SUCCESS


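# Housekeeping: drop tmp output older than 61 days.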
expire_date_path=$(date -d "$ScheduleTime 61 days ago" +"%Y/%m/%d")
EXPIRE_PATH="${YOUKU_LAXIN_TMP_DAILY_TO_S3}/${expire_date_path}"
hadoop fs -rm -r "${EXPIRE_PATH}"

echo "good job"