package mobvista.prd.datasource.source.mapreduce;

import com.google.common.collect.Sets;
import mobvista.prd.datasource.util.MRUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.Set;

/**
 * Created by Administrator on 2017/5/16.
 * desc: Joins DSP-system and M-system records and outputs the intersection
 * (devices that appear in both sources).
 */
public class DspJoinMMR {

    public static void main(String[] args) throws InterruptedException, IOException, ClassNotFoundException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        Job job = Job.getInstance(conf, "dsp join M");
        // Was MergeAppIDMR.class, which would ship the wrong class with the job jar.
        job.setJarByClass(DspJoinMMR.class);

        FileOutputFormat.setCompressOutput(job, true);
        FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);

        job.setMapperClass(DspJoinMMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(DspJoinMReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileInputFormat.addInputPath(job, new Path(otherArgs[1]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    public static class DspJoinMMapper extends Mapper<LongWritable, Text, Text, Text> {
        Text outKey = new Text();
        Text outValue = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            String[] fields = line.split("\t", -1);
            // "map.input.file" is not reliably set under the new (mapreduce) API and can
            // come back null; read the path from the input split instead.
            String inputFile = ((FileSplit) context.getInputSplit()).getPath().toString();
            if (inputFile.contains("etl_dsp_request_daily")) {
                outValue.set(MRUtils.JOINER.join(fields[10], "dsp系统", fields[3])); // pkg_name, source, country
                outKey.set(MRUtils.JOINER.join(fields[0], fields[1]));               // device_id, device_type
                context.write(outKey, outValue);
            } else if (inputFile.contains("etl_adn_sdk_request_daily")
                    || inputFile.contains("etl_adn_sdk_req_3_day")) {
                outValue.set(MRUtils.JOINER.join(fields[3], "m系统", fields[2]));     // pkg_name, source, country
                outKey.set(MRUtils.JOINER.join(fields[0], fields[1]));               // device_id, device_type
                context.write(outKey, outValue);
            }
        }
    }

    public static class DspJoinMReducer extends Reducer<Text, Text, Text, Text> {
        Text outKey = new Text();

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            Set<String> set = Sets.newHashSet();
            boolean dsp = false;
            boolean m = false;
            for (Text val : values) {
                String pkg = val.toString();
                if (pkg.contains("m系统")) {
                    m = true;
                } else if (pkg.contains("dsp系统")) {
                    dsp = true;
                }
                set.add(pkg);
            }
            // Emit only devices that were seen in both the M system and the DSP system.
            if (m && dsp) {
                for (String packageNames : set) {
                    String[] array = packageNames.split("\t", -1);
                    // The pkg_name field may arrive as a JSON-style list, e.g. ["a.b","c.d"];
                    // strip the brackets and quotes, then emit one row per package name.
                    String[] arr = array[0].replace("\"", "").replace("[", "").replace("]", "").split(",", -1);
                    for (String pkgName : arr) {
                        outKey.set(pkgName + "\t" + array[1] + "\t" + array[2]); // pkg_name, source, country
                        context.write(key, outKey);
                    }
                }
            }
        }
    }
}
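
/*
 * Usage sketch. The jar name and HDFS paths below are illustrative assumptions,
 * not taken from this repository; only the argument order (DSP input, M-system
 * input, output) comes from main() above.
 *
 *   hadoop jar prd-datasource.jar \
 *       mobvista.prd.datasource.source.mapreduce.DspJoinMMR \
 *       /path/to/etl_dsp_request_daily \
 *       /path/to/etl_adn_sdk_request_daily \
 *       /path/to/output
 *
 * Note that the mapper tags each record by matching the input file path against
 * "etl_dsp_request_daily" / "etl_adn_sdk_request_daily" / "etl_adn_sdk_req_3_day",
 * so the input directories must keep those names for source routing to work.
 */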