Hadoop Offline Stage (Part 9): Operating MapReduce through the Java API

This section continues "Hadoop Offline Stage (Part 8-2): MapReduce fundamentals and operating MapReduce through the Java API". Every demo below lives in its own package inside the same project, so the pom.xml is identical to the one in Part 8-2.

Splitting data with a custom Partitioner

The original data is shown in the screenshot below. Rows whose 6th column is greater than 15 must be sent to partition 0, and rows whose 6th column is less than or equal to 15 must be sent to partition 1.
(screenshot of the original input data)

  1. Write a class that extends org.apache.hadoop.mapreduce.Mapper to handle the Map phase
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class PartitionMapper extends Mapper<LongWritable, Text,Text, NullWritable>
{
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
    {
        context.write(value,NullWritable.get());
    }
}
  2. Write a class that extends org.apache.hadoop.mapreduce.Partitioner to decide which partition each record goes to
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
//The Partitioner receives k2 and v2 from the Mapper, inspects them and returns a partition number
public class MyPartitioner extends Partitioner<Text, NullWritable>
{

    @Override
    public int getPartition(Text text, NullWritable nullWritable, int numReduceTasks)
    {
        //the 6th tab-separated field decides the partition
        String num=text.toString().split("\t")[5];
        if(Integer.parseInt(num) > 15)
            return 0;
        else
            return 1;
    }
}
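As a quick sanity check of this rule, the following standalone snippet (not part of the original project; the class name and the sample line are made up for illustration) feeds a hypothetical tab-separated line whose 6th field is 20 into MyPartitioner:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

//hypothetical local check: the 6th tab-separated field is "20" (> 15), so partition 0 is expected
public class MyPartitionerCheck
{
    public static void main(String[] args)
    {
        Text line = new Text("f1\tf2\tf3\tf4\tf5\t20\tf7");
        int partition = new MyPartitioner().getPartition(line, NullWritable.get(), 2);
        System.out.println(partition); //prints 0
    }
}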
  3. Write a class that extends org.apache.hadoop.mapreduce.Reducer to handle the Reduce phase
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class PartitionReduce extends Reducer<Text, NullWritable,Text,NullWritable>
{
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException
    {
        context.write(key, NullWritable.get());
    }
}
  4. Write the driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class PartitionRun extends Configured implements Tool
{
    @Override
    public int run(String[] strings) throws Exception
    {
        Job job= Job.getInstance(super.getConf(),"partition-test");
        job.setJarByClass(PartitionRun.class);

        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job,new Path(strings[0]));

        job.setMapperClass(PartitionMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);

        job.setPartitionerClass(MyPartitioner.class);

        job.setReducerClass(PartitionReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        //set the number of reduce tasks
        job.setNumReduceTasks(2);
        //if the number of reduce tasks is larger than the number of partitions returned by the Partitioner, the extra reducers produce empty output files
        //job.setNumReduceTasks(3);
        //if it is smaller, some reducers have to process the data of more than one partition
        //job.setNumReduceTasks(1);

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job,new Path(strings[1]));

        return job.waitForCompletion(true)?0:1;
    }

    public static void main(String[] args) throws Exception
    {
        System.exit(ToolRunner.run(new Configuration(),new PartitionRun(),args));
    }
}

Note: any MapReduce program that involves a custom partition must be packaged and submitted to the Linux cluster for execution, and its input and output paths must be on HDFS. A sample submission command is shown below.
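The exact command depends on how the project is packaged; the jar name, the package prefix and the HDFS addresses below are placeholders, not values from the original article:

hadoop jar mapreduce-demo.jar cn.demo.partition.PartitionRun \
    hdfs://node01:8020/partition/input \
    hdfs://node01:8020/partition/output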

Custom sorting, custom counters and a custom combiner

Original data:
(screenshot of the original input data)
Result after sorting:
(screenshot of the sorted output)

  1. Write a custom class that wraps the letter and the number of each line and implements the WritableComparable interface, so that records can be compared with each other
import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/*
This class splits each original line into its letter and its number,
and compares records first by letter, then by number:
letters in ascending order,
numbers in descending order.
 */
public class Key2Bean implements WritableComparable<Key2Bean>
{
    private String first;
    private Integer second;

    public Key2Bean()
    {

    }

    public Key2Bean(String first,Integer second)
    {
        this.first=first;
        this.second=second;
    }

    @Override
    public int compareTo(Key2Bean o)
    {
        int f=this.first.compareTo(o.first);
        if(f == 0)
        {
            int s=0-this.second.compareTo(o.second); //negate to sort the number in descending order
            return s;
        }
        else
            return f;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException
    {
        dataOutput.writeUTF(first);

        dataOutput.writeInt(second);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException
    {
        this.first=dataInput.readUTF();

        this.second=dataInput.readInt();
    }

    public String getFirst()
    {
        return first;
    }

    public void setFirst(String first)
    {
        this.first = first;
    }

    public Integer getSecond()
    {
        return second;
    }

    public void setSecond(Integer second)
    {
        this.second = second;
    }


    /*
    Key2Bean objects are eventually written out through TextOutputFormat,
    so toString() is overridden to produce the desired text layout.
     */
    @Override
    public String toString()
    {
        return first+"\t"+second;
    }
}
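To see the intended ordering without running a job, a small standalone check (the class name and sample values are made up for illustration) can sort a few Key2Bean objects with Collections.sort, which relies on compareTo:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

//hypothetical local check of the ordering: letters ascending, numbers descending
public class Key2BeanOrderCheck
{
    public static void main(String[] args)
    {
        List<Key2Bean> list = new ArrayList<>();
        list.add(new Key2Bean("b", 3));
        list.add(new Key2Bean("a", 5));
        list.add(new Key2Bean("a", 9));

        Collections.sort(list);
        for (Key2Bean kb : list)
        {
            System.out.println(kb); //prints "a 9", "a 5", "b 3" (tab-separated)
        }
    }
}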
  2. Write a Mapper subclass
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class SortMapper extends Mapper<LongWritable, Text,Key2Bean, NullWritable>
{
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
    {
        //a counter that counts how many records the Mapper receives; the two arguments are arbitrary group and counter names
        Counter counter=context.getCounter("amount_counter","amount");
        //increment the counter (a long value) by 1 for every incoming record
        counter.increment(1L);

        String first=value.toString().split("\t")[0];
        Integer second=Integer.valueOf(value.toString().split("\t")[1]);
        Key2Bean kb=new Key2Bean(first,second);

        context.write(kb,NullWritable.get());
    }
}
  3. Write a Reducer subclass
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class SortReducer extends Reducer<Key2Bean, NullWritable,Key2Bean,NullWritable>
{
    private static enum CounterType
    {
        Reduce_Input_Record,
        Reduce_Output_Record
    }

    @Override
    protected void reduce(Key2Bean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException
    {
        //create a Counter from an enum constant to count the reducer's input keys
        context.getCounter(CounterType.Reduce_Input_Record).increment(1L);

        /*
        The original data contains two identical "a 9" lines (tab-separated),
        which get merged into the same key in the reduce phase.
        To avoid losing one of them, iterate over the values iterator
        and write the key to the context once per value,
        instead of writing it only once per key.
         */
        for(NullWritable v:values)
        {
            //create a Counter from an enum constant to count the reducer's output records
            context.getCounter(CounterType.Reduce_Output_Record).increment(1L);
            context.write(key,NullWritable.get());
        }
    }
}
  4. Write the driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class SortRun extends Configured implements Tool
{
    @Override
    public int run(String[] strings) throws Exception
    {
        Job job= Job.getInstance(super.getConf(),"sort-test");
        job.setInputFormatClass(TextInputFormat.class);
        //input path on the local filesystem (this job runs in local mode)
        TextInputFormat.addInputPath(job,
                new Path("E://排序//input"));

        job.setMapperClass(SortMapper.class);
        job.setMapOutputKeyClass(Key2Bean.class);
        job.setMapOutputValueClass(NullWritable.class);

        /*
        setCombinerClass registers the combiner.
        The combiner merges records with the same k2 already on the map side
        and passes the merged k2,v2 on to the reducer,
        which moves part of the reducer's key-merging work forward,
        reduces the number of records the reducer has to receive,
        and therefore improves efficiency.
        Note: a reducer can only be reused as the combiner when its k2 and k3
        types are identical and its v2 and v3 types are identical.
         */
        job.setCombinerClass(SortReducer.class);

        job.setReducerClass(SortReducer.class);
        job.setOutputKeyClass(Key2Bean.class);
        job.setOutputValueClass(NullWritable.class);

        job.setOutputFormatClass(TextOutputFormat.class);
        //output path on the local filesystem (this job runs in local mode)
        TextOutputFormat.setOutputPath(job,
                new Path("E://排序//output"));


        return job.waitForCompletion(true)?0:1;
    }

    public static void main(String[] args) throws Exception
    {
        System.exit(ToolRunner.run(new Configuration(),new SortRun(),args));
    }
}

The input and output directories of this job are both on the local machine, so it can simply be run locally.
Summary:
1. To implement custom sorting, write a data class that implements the org.apache.hadoop.io.WritableComparable interface.
2. To implement a custom counter, create a Counter in the mapper or reducer via context.getCounter(...) (the argument can be a pair of Strings or an enum constant) and call increment(1L) on it.
3. To implement a combiner (merging records with the same key before the reduce phase), call job.setCombinerClass in the driver. A reducer can be reused as the combiner only when its k2 and k3 types are identical and its v2 and v3 types are identical; otherwise, write a separate class that extends Reducer<k2,v2,k2,v2> and register that class as the combiner (see the sketch below).
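For the case where the reducer cannot be reused, here is a minimal sketch of a separate combiner, assuming a hypothetical word-count style job whose map output is <Text, LongWritable> but whose reducer output types differ; the class is illustrative only and not part of the original article:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

//hypothetical standalone combiner: both input and output types are the map output types <Text, LongWritable>,
//so it only pre-sums the counts of each key on the map side
public class WordCountCombiner extends Reducer<Text, LongWritable, Text, LongWritable>
{
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException
    {
        long sum = 0L;
        for (LongWritable v : values)
        {
            sum += v.get();
        }
        context.write(key, new LongWritable(sum));
    }
}

Such a class would be registered with job.setCombinerClass(WordCountCombiner.class), while the real reducer stays on job.setReducerClass.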

Mobile phone traffic statistics

Original data:
(screenshot of the original input data)
The goal: group by mobile phone number (column 2) and sum the upstream traffic (column 7), the downstream traffic (column 8), the daily upstream traffic (column 9) and the daily downstream traffic (column 10).

  1. Write a class that wraps the upstream, downstream, daily upstream and daily downstream traffic; it must implement the org.apache.hadoop.io.Writable interface
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable
{
    private Integer upFlow;
    private Integer downFlow;
    private Integer totalUpFlow;
    private Integer totalDownFlow;

    public FlowBean()
    {

    }

    public FlowBean(Integer upFlow,Integer downFlow,Integer totalUpFlow,Integer totalDownFlow)
    {
        this.upFlow=upFlow;this.downFlow=downFlow;
        this.totalUpFlow=totalUpFlow;this.totalDownFlow=totalDownFlow;
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException
    {
        dataOutput.writeInt(upFlow);
        dataOutput.writeInt(downFlow);
        dataOutput.writeInt(totalUpFlow);
        dataOutput.writeInt(totalDownFlow);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException
    {
        this.upFlow=dataInput.readInt();
        this.downFlow=dataInput.readInt();
        this.totalUpFlow=dataInput.readInt();
        this.totalDownFlow=dataInput.readInt();
    }

    public Integer getUpFlow()
    {
        return upFlow;
    }

    public void setUpFlow(Integer upFlow)
    {
        this.upFlow = upFlow;
    }

    public Integer getDownFlow()
    {
        return downFlow;
    }

    public void setDownFlow(Integer downFlow)
    {
        this.downFlow = downFlow;
    }

    public Integer getTotalUpFlow()
    {
        return totalUpFlow;
    }

    public void setTotalUpFlow(Integer totalUpFlow)
    {
        this.totalUpFlow = totalUpFlow;
    }

    public Integer getTotalDownFlow()
    {
        return totalDownFlow;
    }

    public void setTotalDownFlow(Integer totalDownFlow)
    {
        this.totalDownFlow = totalDownFlow;
    }

    @Override
    public String toString()
    {
        return upFlow + "\t" + downFlow + "\t" + totalUpFlow + "\t" + totalDownFlow;
    }
}
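A minimal round-trip check, outside any Hadoop job (the class name and the sample values are assumptions for illustration), makes the Writable contract visible: readFields must read the fields in exactly the order write wrote them:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

//hypothetical serialization round trip for FlowBean
public class FlowBeanRoundTrip
{
    public static void main(String[] args) throws IOException
    {
        FlowBean original = new FlowBean(100, 200, 300, 400);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy); //prints the four values separated by tabs
    }
}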
  2. Write a Mapper subclass
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowStatisticsMapper extends Mapper<LongWritable, Text,Text,FlowBean>
{
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
    {
        String phoneNum=value.toString().split("\t")[1];
        Integer upFlow=Integer.valueOf(value.toString().split("\t")[6]);
        Integer downFlow=Integer.valueOf(value.toString().split("\t")[7]);
        Integer totalUpFlow=Integer.valueOf(value.toString().split("\t")[8]);
        Integer totalDownFlow=Integer.valueOf(value.toString().split("\t")[9]);
        FlowBean fb=new FlowBean(upFlow,downFlow,totalUpFlow,totalDownFlow);

        context.write(new Text(phoneNum),fb);
    }
}
  3. Write a Reducer subclass
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowStatisticsReducer extends Reducer<Text,FlowBean,Text,FlowBean>
{
    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException
    {
        int upFlowSum=0;
        int downFlowSum=0;
        int totalUpFlowSum=0;
        int totalDownFlowSum=0;
        for (FlowBean v:values)
        {
            upFlowSum+=v.getUpFlow();
            downFlowSum+=v.getDownFlow();
            totalUpFlowSum+=v.getTotalUpFlow();
            totalDownFlowSum+=v.getTotalDownFlow();
        }

        FlowBean sum=new FlowBean(upFlowSum,downFlowSum,totalUpFlowSum,totalDownFlowSum);

        context.write(key,sum);
    }
}
  4. Write the driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FlowStatisticsRun extends Configured implements Tool
{

    @Override
    public int run(String[] strings) throws Exception
    {
        Job job= Job.getInstance(super.getConf(),"Mobile Phone Flow Statistics");
        job.setInputFormatClass(TextInputFormat.class);

        TextInputFormat.addInputPath(job,
                new Path("E://流量统计//input"));

        job.setMapperClass(FlowStatisticsMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        job.setReducerClass(FlowStatisticsReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job,
                new Path("E://流量统计//output"));


        return job.waitForCompletion(true)?0:1;
    }

    public static void main(String[] args) throws Exception
    {
        System.exit(ToolRunner.run(new Configuration(),new FlowStatisticsRun(),args));
    }
}

Final output:
(screenshot of the final output)
