MapReduce Examples: Counting Common Friends, Multi-Table Join of Order Data, and Finding the Most Expensive Product in Each Order



Case 3: Counting Common Friends

Task requirements:

Given the following text (each line is a user, a colon, and that user's friend list):

A:B,C,D,F,E,O
B:A,C,E,K
C:F,A,D,I
D:A,E,F,L
E:B,C,D,M,L
F:A,B,C,D,E,O,M
G:A,C,D,E,F
H:A,C,D,E,O
I:A,O
J:B,O
K:A,C,D
L:D,E,F
M:E,F,G
O:A,H,I,J


Find every pair of people who have common friends, and list who their common friends are.

Intermediate key-value pairs (friend, user) emitted for the data above look like:

b -a
c -a
d -a
a -b
c -b
b -e
b -j

Solution approach:

Write two MapReduce jobs.

The first MR job outputs, for each person, the list of users who count that person as a friend, e.g.:
b -> a e j
c -> a b e f h

 

The second MR job turns each such list into every pair of users, keyed by the pair, with the shared friend as the value, e.g.:
a-e b
a-j b
e-j b
a-b c
a-e c

Aggregating by pair key then yields results like (a plain-Java sanity check of this two-pass idea follows below):

a-e b c d
a-m e f
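
Before writing the two jobs, the whole pipeline can be sanity-checked in plain Java. Here is a minimal sketch (the class name CommonFriendsSketch is hypothetical, and only two users from the sample data are used):

import java.util.*;

public class CommonFriendsSketch {
    public static void main(String[] args) {
        // Two users from the sample data
        Map<String, List<String>> input = new TreeMap<>();
        input.put("A", Arrays.asList("B", "C", "D", "F", "E", "O"));
        input.put("B", Arrays.asList("A", "C", "E", "K"));

        // Pass 1 (first MR job): invert to friend -> users who have that friend
        Map<String, TreeSet<String>> inverted = new TreeMap<>();
        input.forEach((user, friends) -> friends.forEach(
                f -> inverted.computeIfAbsent(f, k -> new TreeSet<>()).add(user)));

        // Pass 2 (second MR job): for each friend, emit every pair of users,
        // then collect the shared friends per pair
        Map<String, TreeSet<String>> pairs = new TreeMap<>();
        inverted.forEach((friend, users) -> {
            List<String> u = new ArrayList<>(users); // TreeSet keeps them sorted
            for (int i = 0; i < u.size() - 1; i++)
                for (int j = i + 1; j < u.size(); j++)
                    pairs.computeIfAbsent(u.get(i) + "-" + u.get(j),
                            k -> new TreeSet<>()).add(friend);
        });

        pairs.forEach((pair, fs) -> System.out.println(pair + "\t" + fs));
        // Prints: A-B	[C, E]
    }
}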

The code is as follows.

First mapper: FindFriendMapTaskByOne

 

package com.gec.demo;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FindFriendMapTaskByOne extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line looks like "A:B,C,D,F,E,O"
        String line = value.toString();
        String[] datas = line.split(":");
        String user = datas[0];

        // Invert the relation: emit (friend, user) so the reducer collects,
        // per friend, everyone who has that friend
        String[] friends = datas[1].split(",");
        for (String friend : friends) {
            context.write(new Text(friend), new Text(user));
        }
    }
}

First reducer: FindFriendReducerTaskByOne

package com.gec.demo;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FindFriendReducerTaskByOne extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Join all users who have this friend with "-", e.g. "b	a-e-j-"
        // (StringBuilder is enough here; no synchronization is needed)
        StringBuilder strBuf = new StringBuilder();

        for (Text value : values) {
            strBuf.append(value).append("-");
        }

        // The trailing "-" is harmless: String.split("-") in the second
        // mapper discards trailing empty strings
        context.write(key, new Text(strBuf.toString()));
    }
}

First job driver: FindFriendJobByOne

package com.gec.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FindFriendJobByOne {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration configuration = new Configuration();

        Job job = Job.getInstance(configuration);

        // Set the driver class
        job.setJarByClass(FindFriendJobByOne.class);

        // Set which map task to run
        job.setMapperClass(FindFriendMapTaskByOne.class);
        // Set which reducer task to run
        job.setReducerClass(FindFriendReducerTaskByOne.class);

        // Set the key type of the map task output
        job.setMapOutputKeyClass(Text.class);
        // Set the value type of the map task output
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Specify where the input data lives
        FileInputFormat.setInputPaths(job, "D://Bigdata//4、mapreduce//day05//homework//friendhomework.txt");
        // Specify where the results are saved (the directory must not
        // already exist, or FileOutputFormat will fail the job)
        FileOutputFormat.setOutputPath(job, new Path("D://Bigdata//4、mapreduce//day05//homework//output"));
        // Submit the job and wait for completion
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}

Running the first job produces intermediate lines such as (value order may vary between runs):

A	B-C-D-F-G-H-I-K-O-
B	A-E-F-J-

Second mapper: FindFriendMapTaskByTwo

package com.gec.demo;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Arrays;

public class FindFriendMapTaskByTwo extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line looks like "b	a-e-j-" (friend, then the users who have it)
        String line = value.toString();
        String[] datas = line.split("\t");
        String[] userlist = datas[1].split("-");
        // Sort so every pair is emitted in a canonical order; otherwise the
        // same pair could show up as both "a-e" and "e-a" and never aggregate
        Arrays.sort(userlist);
        for (int i = 0; i < userlist.length - 1; i++) {
            for (int j = i + 1; j < userlist.length; j++) {
                String user1 = userlist[i];
                String user2 = userlist[j];
                // Key: the user pair; value: the friend they share
                String friendkey = user1 + "-" + user2;
                context.write(new Text(friendkey), new Text(datas[0]));
            }
        }
    }
}
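
To see why emitting pairs in a canonical order matters (the Arrays.sort above), here is a tiny standalone check (PairKeyDemo is a hypothetical class name, not from the original post):

import java.util.Arrays;

public class PairKeyDemo {
    public static void main(String[] args) {
        // Different first-job lines can list the same users in different orders
        String[] run1 = {"e", "a", "j"};
        String[] run2 = {"j", "e", "a"};
        System.out.println(firstPair(run1) + " vs " + firstPair(run2)); // e-a vs j-e: keys never match
        Arrays.sort(run1);
        Arrays.sort(run2);
        System.out.println(firstPair(run1) + " vs " + firstPair(run2)); // a-e vs a-e: keys aggregate
    }

    static String firstPair(String[] users) {
        return users[0] + "-" + users[1];
    }
}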

Second reducer: FindFriendReducerTaskByTwo

package com.gec.demo;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FindFriendReducerTaskByTwo extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Collect every friend shared by this user pair, e.g. "A-B	C,E,"
        StringBuilder stringBuilder = new StringBuilder();
        for (Text value : values) {
            stringBuilder.append(value).append(",");
        }

        context.write(key, new Text(stringBuilder.toString()));
    }
}

Second job driver: FindFriendJobByTwo

package com.gec.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FindFriendJobByTwo {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration configuration = new Configuration();

        Job job = Job.getInstance(configuration);

        // Set the driver class
        job.setJarByClass(FindFriendJobByTwo.class);

        // Set which map task to run
        job.setMapperClass(FindFriendMapTaskByTwo.class);
        // Set which reducer task to run
        job.setReducerClass(FindFriendReducerTaskByTwo.class);

        // Set the key type of the map task output
        job.setMapOutputKeyClass(Text.class);
        // Set the value type of the map task output
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Specify where the input data lives (the first job's output,
        // saved here as friendhomework3.txt)
        FileInputFormat.setInputPaths(job, "D://Bigdata//4、mapreduce//day05//homework//friendhomework3.txt");
        // Specify where the results are saved (same directory as job one:
        // delete the old output directory first, or the job will fail)
        FileOutputFormat.setOutputPath(job, new Path("D://Bigdata//4、mapreduce//day05//homework//output"));
        // Submit the job and wait for completion
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}

Running the second job produces the final output, e.g.:

A-B	C,E,
A-C	D,F,

Case 4

A multi-table join in MapReduce

1) Requirements:

Order table t_order:

id     pid   amount
1001   01    1
1002   02    2
1003   03    3

Product table t_product:

id   pname
01   小米
02   华为
03   格力

Join the rows of the product table into the order table on the product id.

The final data should look like (each order row appears twice, presumably because the sample order file lists every order twice):

id     pname   amount
1001   小米    1
1001   小米    1
1002   华为    2
1002   华为    2
1003   格力    3
1003   格力    3

3.4.1 Approach 1: reduce-side join (prone to data skew)

Use the join key as the map output key, and tag each record with the file it came from; records from both tables that satisfy the join condition then reach the same reduce task, where they are stitched together.

1) Create a bean class for the joined order and product records

package com.gec.mapreduce.table;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

public class TableBean implements Writable {

    private String order_id; // order id
    private String p_id;     // product id
    private int amount;      // product quantity
    private String pname;    // product name
    private String flag;     // tag marking which table the record came from

    public TableBean() {
        super();
    }

    public TableBean(String order_id, String p_id, int amount, String pname, String flag) {
        super();
        this.order_id = order_id;
        this.p_id = p_id;
        this.amount = amount;
        this.pname = pname;
        this.flag = flag;
    }

    public String getFlag() {
        return flag;
    }

    public void setFlag(String flag) {
        this.flag = flag;
    }

    public String getOrder_id() {
        return order_id;
    }

    public void setOrder_id(String order_id) {
        this.order_id = order_id;
    }

    public String getP_id() {
        return p_id;
    }

    public void setP_id(String p_id) {
        this.p_id = p_id;
    }

    public int getAmount() {
        return amount;
    }

    public void setAmount(int amount) {
        this.amount = amount;
    }

    public String getPname() {
        return pname;
    }

    public void setPname(String pname) {
        this.pname = pname;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(order_id);
        out.writeUTF(p_id);
        out.writeInt(amount);
        out.writeUTF(pname);
        out.writeUTF(flag);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        // Fields must be read in exactly the order they were written
        this.order_id = in.readUTF();
        this.p_id = in.readUTF();
        this.amount = in.readInt();
        this.pname = in.readUTF();
        this.flag = in.readUTF();
    }

    @Override
    public String toString() {
        // Matches the target output format: id, pname, amount
        return order_id + "\t" + pname + "\t" + amount;
    }
}

2) Write the TableMapper program

package com.gec.mapreduce.table;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class TableMapper extends Mapper<LongWritable, Text, Text, TableBean> {

    TableBean bean = new TableBean();
    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

        // 1 Find out which input file this record came from
        FileSplit split = (FileSplit) context.getInputSplit();
        String name = split.getPath().getName();

        // 2 Get the input line
        String line = value.toString();

        // 3 Handle the two files differently
        if (name.startsWith("order")) { // order table
            // 3.1 Split
            String[] fields = line.split(",");

            // 3.2 Populate the bean; flag "0" marks an order record
            bean.setOrder_id(fields[0]);
            bean.setP_id(fields[1]);
            bean.setAmount(Integer.parseInt(fields[2]));
            bean.setPname("");
            bean.setFlag("0");

            k.set(fields[1]);
        } else { // product table
            // 3.3 Split
            String[] fields = line.split(",");

            // 3.4 Populate the bean; flag "1" marks a product record
            bean.setP_id(fields[0]);
            bean.setPname(fields[1]);
            bean.setFlag("1");
            bean.setAmount(0);
            bean.setOrder_id("");

            k.set(fields[0]);
        }
        // 4 Emit, keyed by product id so matching rows meet in one reducer
        context.write(k, bean);
    }
}

3) Write the TableReducer program

package com.gec.mapreduce.table;

import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TableReducer extends Reducer<Text, TableBean, TableBean, NullWritable> {

    @Override
    protected void reduce(Text key, Iterable<TableBean> values, Context context)
            throws IOException, InterruptedException {

        // 1 A collection for the order records of this product id
        ArrayList<TableBean> orderBeans = new ArrayList<>();
        // 2 A bean for the (single) matching product record
        TableBean pdBean = new TableBean();

        for (TableBean bean : values) {

            if ("0".equals(bean.getFlag())) { // order record
                // Hadoop reuses the value object, so copy each order
                // record into a fresh bean before storing it
                TableBean orderBean = new TableBean();
                try {
                    BeanUtils.copyProperties(orderBean, bean);
                } catch (Exception e) {
                    e.printStackTrace();
                }

                orderBeans.add(orderBean);
            } else { // product record
                try {
                    // Keep a copy of the product record
                    BeanUtils.copyProperties(pdBean, bean);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }

        // 3 Stitch the tables together: fill in the product name
        for (TableBean bean : orderBeans) {
            bean.setPname(pdBean.getPname());

            // 4 Write out the joined record
            context.write(bean, NullWritable.get());
        }
    }
}
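
BeanUtils.copyProperties works through reflection, which runs once per record here; if that ever becomes a bottleneck, a plain hand-written copy is a possible alternative. A sketch (the copyOf helper is not in the original post; drop it into TableReducer and call copyOf(bean) instead of BeanUtils):

// A hand-rolled copy that avoids reflection inside the reduce loop
private static TableBean copyOf(TableBean src) {
    TableBean dst = new TableBean();
    dst.setOrder_id(src.getOrder_id());
    dst.setP_id(src.getP_id());
    dst.setAmount(src.getAmount());
    dst.setPname(src.getPname());
    dst.setFlag(src.getFlag());
    return dst;
}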

4) Write the TableDriver program

package com.gec.mapreduce.table;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TableDriver {

    public static void main(String[] args) throws Exception {
        // 1 Get the configuration and a job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2 Specify the local path of this program's jar
        job.setJarByClass(TableDriver.class);

        // 3 Specify the mapper/reducer classes for this job
        job.setMapperClass(TableMapper.class);
        job.setReducerClass(TableReducer.class);

        // 4 Specify the mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(TableBean.class);

        // 5 Specify the final output key/value types
        job.setOutputKeyClass(TableBean.class);
        job.setOutputValueClass(NullWritable.class);

        // 6 Specify the input directory and the output directory
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Submit the job (and its jar) to YARN and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}

5) Run the program and check the results:

1001	小米	1
1001	小米	1
1002	华为	2
1002	华为	2
1003	格力	3
1003	格力	3

Drawback: with this approach all the join work happens in the reduce phase, so the reducers carry a heavy load while the map nodes do very little. Resource utilization is poor, and the reduce phase is very prone to data skew.

Solution: perform the join on the map side.

3.4.2 Approach 2: map-side join (DistributedCache)

1) Analysis

This works when one of the tables being joined is small.

Distribute the small table to every map node; each mapper can then join the big-table records it reads against the small table locally and emit the final result. This greatly raises the parallelism of the join and speeds up processing.

2) Hands-on example

(1) First, add the cache file in the driver:

package test;

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class DistributedCacheDriver {

    public static void main(String[] args) throws Exception {
        // 1 Get the job instance
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // 2 Set the jar path
        job.setJarByClass(DistributedCacheDriver.class);

        // 3 Attach the mapper
        job.setMapperClass(DistributedCacheMapper.class);

        // 4 Set the final output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // 5 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 6 Load the small table into the distributed cache
        job.addCacheFile(new URI("file:/e:/cache/pd.txt"));

        // 7 A map-side join needs no reduce phase, so set the
        //   number of reduce tasks to 0
        job.setNumReduceTasks(0);

        // 8 Submit
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
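
On a cluster the small table normally sits on HDFS rather than a local e: drive, and a URI fragment can name the symlink under which the cached file appears in each task's working directory. A hedged variant of step 6 (the HDFS address here is made up):

// Cache a small table from HDFS; the "#pd" fragment asks Hadoop to expose it
// in each task's working directory under the symlink name "pd".
job.addCacheFile(new URI("hdfs://namenode:9000/cache/pd.txt#pd"));
// The mapper's setup() can then open it simply as new FileInputStream("pd")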

(2) Read the cached file data in the mapper:

package test;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class DistributedCacheMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    Map<String, String> pdMap = new HashMap<>();

    @Override
    protected void setup(Mapper<LongWritable, Text, Text, NullWritable>.Context context)
            throws IOException, InterruptedException {
        // 1 Open the cached file ("pd.txt" stands for the full path
        //   of the localized cache file)
        BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream("pd.txt")));

        String line;
        while (StringUtils.isNotEmpty(line = reader.readLine())) {
            // 2 Split: pid \t pname
            String[] fields = line.split("\t");

            // 3 Cache the product table in memory: pid -> pname
            pdMap.put(fields[0], fields[1]);
        }

        // 4 Close the stream
        reader.close();
    }

    Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1 Get one order line: id \t pid \t amount
        String line = value.toString();

        // 2 Split
        String[] fields = line.split("\t");

        // 3 Get the product id (field 1 of the order record)
        String pId = fields[1];

        // 4 Look up the product name in the cached map
        String pdName = pdMap.get(pId);

        // 5 Append the name to the original line
        k.set(line + "\t" + pdName);

        // 6 Write out
        context.write(k, NullWritable.get());
    }
}
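
Instead of hard-coding the path, setup() can also locate the cached file through the job context. A sketch of that variant (assuming the file was registered with job.addCacheFile, as in the driver above):

// Alternative setup(): resolve the cache file via the context instead of a
// hard-coded path, and read it through Hadoop's FileSystem API.
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    java.net.URI[] cacheFiles = context.getCacheFiles();
    org.apache.hadoop.fs.Path pdPath = new org.apache.hadoop.fs.Path(cacheFiles[0]);
    org.apache.hadoop.fs.FileSystem fs =
            org.apache.hadoop.fs.FileSystem.get(cacheFiles[0], context.getConfiguration());
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(pdPath)))) {
        String line;
        while ((line = reader.readLine()) != null && !line.isEmpty()) {
            String[] fields = line.split("\t");
            pdMap.put(fields[0], fields[1]);
        }
    }
}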

Case 5

Finding the most expensive product in each order (GroupingComparator)

1) Requirements

Given the following order data:

Order id        Product id   Price
Order_0000001   Pdt_01       222.8
Order_0000001   Pdt_05       25.8
Order_0000002   Pdt_03       522.8
Order_0000002   Pdt_04       122.4
Order_0000002   Pdt_05       722.4
Order_0000003   Pdt_01       222.8
Order_0000003   Pdt_02       33.8

We need to find the most expensive product in each order.

2) Input data: the table above, as tab-separated text (orderId \t productId \t price).

Expected output (one line per order, carrying that order's highest price), e.g.:

Order_0000001	222.8
Order_0000002	722.4
Order_0000003	222.8

3) Analysis

(1) Use "order id + price" as the key (a composite OrderBean): the map output can then be partitioned by order id and sorted by price in descending order on its way to the reducers.

(2) On the reduce side, use a GroupingComparator to gather all k-v pairs with the same order id into one group; the first record of each group is then the maximum. For example, the records for Order_0000002 arrive sorted as 722.4, 522.8, 122.4, so writing just the first one yields that order's maximum.

4) Implementation

Define the order bean, OrderBean:

package com.gec.mapreduce.order;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

public class OrderBean implements WritableComparable<OrderBean> {

    private String orderId;
    private double price;

    public OrderBean() {
        super();
    }

    public OrderBean(String orderId, double price) {
        super();
        this.orderId = orderId;
        this.price = price;
    }

    public String getOrderId() {
        return orderId;
    }

    public void setOrderId(String orderId) {
        this.orderId = orderId;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(double price) {
        this.price = price;
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.orderId = in.readUTF();
        this.price = in.readDouble();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(orderId);
        out.writeDouble(price);
    }

    @Override
    public int compareTo(OrderBean o) {
        // 1 First sort by order id (ascending)
        int result = this.orderId.compareTo(o.getOrderId());

        if (result == 0) {
            // 2 Then by price (descending); Double.compare also handles ties
            result = Double.compare(o.getPrice(), this.price);
        }

        return result;
    }

    @Override
    public String toString() {
        return orderId + "\t" + price;
    }
}
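
A quick standalone check of this ordering (OrderBeanDemo is a hypothetical test class, assumed to live in the same package as OrderBean):

package com.gec.mapreduce.order;

public class OrderBeanDemo {
    public static void main(String[] args) {
        OrderBean cheap = new OrderBean("Order_0000002", 122.4);
        OrderBean dear = new OrderBean("Order_0000002", 722.4);
        // Within one order, the higher price sorts first (compareTo < 0)
        System.out.println(dear.compareTo(cheap) < 0);   // true
        // Across orders, the order id decides
        OrderBean other = new OrderBean("Order_0000001", 9999.0);
        System.out.println(other.compareTo(dear) < 0);   // true: Order_0000001 < Order_0000002
    }
}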

Write the OrderSortMapper:

package com.gec.mapreduce.order;

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class OrderSortMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable> {

    OrderBean bean = new OrderBean();

    @Override
    protected void map(LongWritable key, Text value,
            Context context) throws IOException, InterruptedException {
        // 1 Get one line: orderId \t productId \t price
        String line = value.toString();

        // 2 Split into fields
        String[] fields = line.split("\t");

        // 3 Populate the composite key (order id + price)
        bean.setOrderId(fields[0]);
        bean.setPrice(Double.parseDouble(fields[2]));

        // 4 Write out; the bean itself is the key, so the framework sorts it
        context.write(bean, NullWritable.get());
    }
}

Write the OrderSortReducer:

package com.gec.mapreduce.order;

import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

public class OrderSortReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable> {
    @Override
    protected void reduce(OrderBean bean, Iterable<NullWritable> values,
            Context context) throws IOException, InterruptedException {
        // Thanks to the grouping comparator, each reduce call sees one whole
        // order, sorted by price descending; the incoming key is therefore
        // already the most expensive record, so just write it out
        context.write(bean, NullWritable.get());
    }
}

Write the OrderSortDriver:

package com.gec.mapreduce.order;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OrderSortDriver {

    public static void main(String[] args) throws Exception {
        // 1 Get the configuration
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2 Set the jar path
        job.setJarByClass(OrderSortDriver.class);

        // 3 Attach the map/reduce classes
        job.setMapperClass(OrderSortMapper.class);
        job.setReducerClass(OrderSortReducer.class);

        // 4 Set the map output key/value types
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        // 5 Set the final output key/value types
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Set the reduce-side grouping comparator
        job.setGroupingComparatorClass(OrderSortGroupingComparator.class);

        // 8 Set the partitioner (all records of an order go to one reducer)
        job.setPartitionerClass(OrderSortPartitioner.class);

        // 9 Set the number of reduce tasks
        job.setNumReduceTasks(3);

        // 10 Submit
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}

Write the OrderSortPartitioner:

package com.gec.mapreduce.order;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;

public class OrderSortPartitioner extends Partitioner<OrderBean, NullWritable> {

    @Override
    public int getPartition(OrderBean key, NullWritable value, int numReduceTasks) {
        // Partition by order id only, so every record of an order lands on
        // the same reducer; the & Integer.MAX_VALUE mask clears the sign bit
        // so a negative hashCode cannot produce a negative partition number
        return (key.getOrderId().hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}
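
A tiny standalone check of the sign-bit masking (PartitionMaskDemo is a hypothetical class; the magic string is a well-known example whose hashCode is Integer.MIN_VALUE):

public class PartitionMaskDemo {
    public static void main(String[] args) {
        int h = "polygenelubricants".hashCode();         // Integer.MIN_VALUE
        System.out.println(h);                            // negative
        System.out.println((h & Integer.MAX_VALUE) % 3);  // 0: always in [0, 3)
        System.out.println(Math.abs(h) % 3);              // -2: Math.abs fails for Integer.MIN_VALUE
    }
}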

Write the OrderSortGroupingComparator:

package com.gec.mapreduce.order;

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class OrderSortGroupingComparator extends WritableComparator {

    protected OrderSortGroupingComparator() {
        // true: create OrderBean instances for the comparator to deserialize into
        super(OrderBean.class, true);
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {

        OrderBean abean = (OrderBean) a;
        OrderBean bbean = (OrderBean) b;

        // Treat all beans with the same orderId as one group,
        // ignoring the price part of the key
        return abean.getOrderId().compareTo(bbean.getOrderId());
    }
}

 

Source: https://blog.51cto.com/u_15241496/2862986