package mobvista.prd.datasource.source.mapreduce;

import com.google.common.collect.Sets;
import mobvista.dmp.util.MRUtils;
import mobvista.prd.datasource.table.MergeAppIDMR;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.Set;

/**
 * Created by Administrator on 2017/5/18 0018.
 * desc : 计算dsp与M系统的日志量
 */
public class CountDspAndMMR {
    public static void main(String[] args) throws InterruptedException, IOException, ClassNotFoundException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        Job job = Job.getInstance(conf, "count dsp and m");

        job.setJarByClass(MergeAppIDMR.class);
        FileOutputFormat.setCompressOutput(job, true);
        FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);

        job.setMapperClass(CountDspAndMMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(CountDspAndMReducer.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);

    }

    public static class CountDspAndMMapper extends Mapper <LongWritable, Text, Text, Text> {
        Text outKey = new Text();
        Text outValue = new Text();
        public void map (LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] fields = value.toString().split("\t",-1);

            String inputFile = context.getConfiguration().get("map.input.file");
            if (inputFile.contains("etl_dsp_request_daily")) {
                outKey.set(MRUtils.JOINER.join(fields[0], fields[1]));//device_id, device_type
                outValue.set("dsp系统");
                context.write(outKey, outValue);
            }
            if (inputFile.contains("etl_adn_sdk_req_3_day") || inputFile.contains("etl_adn_sdk_request_daily")) {
                outKey.set(MRUtils.JOINER.join(fields[0], fields[1]));//device_id, device_type
                outValue.set("m系统");
                context.write(outKey, outValue);
            }
        }
    }
    public static class CountDspAndMReducer extends Reducer <Text, Text, Text, NullWritable> {
        Text outKey = new Text();
        Text outValue = new Text();
        long dsp = 0L;
        long m = 0L;
        long dspAndM = 0;
        public void reduce (Text key, Iterable<Text> values, Context context) {
            Set<String> set = Sets.newHashSet();
            for (Text val : values) {
                set.add(val.toString());
            }
            if (set.contains("m系统")) {
                m++;
            }
            if (set.contains("dsp系统")) {
                dsp++;
            }
            if (set.contains("dsp系统") && set.contains("m系统")) {
                dspAndM++;
            }
        }
        public void cleanup (Context context) throws IOException, InterruptedException {
            outKey.set(MRUtils.JOINER.join(m, dsp, dspAndM));
            context.write(outKey, NullWritable.get());
        }
    }
}