倒排索引(Inverted Index)就是建立单词到文件的索引。本节就来使用MapReduce程序来实现一个倒排索引的案例。
案例:输出每个单词在每个文件中出现的次数。
1.程序源码
//InvertedIndexMapper.java
package demo.invertedIndex;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
public class InvertedIndexMapper extends Mapper<LongWritable, Text, Text, Text> {
	/**
	 * Emits one ("word:fileName", "1") pair per word occurrence so that the
	 * downstream combiner/reducer can count occurrences per source file.
	 *
	 * @param key1    byte offset of the line within the split (unused)
	 * @param value1  one line of input text, e.g. "I love Beijing and love Shanghai"
	 * @param context Hadoop context used to emit the composite key/value
	 */
	@Override
	protected void map(LongWritable key1, Text value1, Context context)
			throws IOException, InterruptedException {
		// Name of the file this split came from, e.g. data01.txt.
		// Path.getName() returns the final path component directly, replacing
		// the original manual lastIndexOf("/") + substring parsing.
		String fileName = ((FileSplit) context.getInputSplit()).getPath().getName();
		// Split on runs of whitespace: the original split(" ") produced empty
		// tokens for consecutive spaces, creating bogus ":fileName" keys.
		String[] words = value1.toString().trim().split("\\s+");
		for (String w : words) {
			if (w.isEmpty()) {
				continue; // guards against a completely blank input line
			}
			// Composite key "word:fileName"; value "1" = one occurrence.
			context.write(new Text(w + ":" + fileName), new Text("1"));
		}
	}
}
//InvertedIndexCombiner.java
package demo.invertedIndex;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class InvertedIndexCombiner extends Reducer<Text, Text, Text, Text> {
	/**
	 * Map-side local aggregation: sums the "1" counts for a single
	 * "word:fileName" key, then re-keys the output by the word alone with the
	 * value carrying "fileName:count".
	 *
	 * @param k21     composite key, e.g. "love:data01.txt"
	 * @param v21     the per-occurrence counts emitted by the mapper
	 * @param context Hadoop context used to emit the re-keyed pair
	 */
	@Override
	protected void reduce(Text k21, Iterable<Text> v21, Context context)
			throws IOException, InterruptedException {
		// Accumulate how many times this word appeared in this one file.
		long count = 0;
		for (Text occurrence : v21) {
			count += Long.parseLong(occurrence.toString());
		}
		// Split the composite key at its first colon into word and file name.
		String composite = k21.toString();
		int colon = composite.indexOf(":");
		String word = composite.substring(0, colon);
		String file = composite.substring(colon + 1);
		// Re-emit as: word -> "fileName:count".
		context.write(new Text(word), new Text(file + ":" + count));
	}
}
//InvertedIndexReducer.java
package demo.invertedIndex;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class InvertedIndexReducer extends Reducer<Text, Text, Text, Text> {
	/**
	 * Final aggregation: concatenates every "fileName:count" value for one
	 * word into a single posting list, producing lines of the form
	 * {@code word (file1:count1)(file2:count2)...}.
	 *
	 * @param k3      the word
	 * @param v3      "fileName:count" strings produced by the combiner
	 * @param context Hadoop context used to emit the final record
	 */
	@Override
	protected void reduce(Text k3, Iterable<Text> v3, Context context)
			throws IOException, InterruptedException {
		// StringBuilder instead of repeated String += , which copies the
		// whole accumulated string on every iteration.
		StringBuilder postings = new StringBuilder();
		for (Text v : v3) {
			postings.append("(").append(v.toString()).append(")");
		}
		context.write(k3, new Text(postings.toString()));
	}
}
//InvertedIndexMain.java
package demo.invertedIndex;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class InvertedIndexMain {
	/**
	 * Job driver: wires the inverted-index mapper, combiner and reducer
	 * together and submits the job.
	 *
	 * @param args args[0] = HDFS input directory, args[1] = HDFS output directory
	 */
	public static void main(String[] args) throws Exception {
		// Fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 2) {
			System.err.println("Usage: InvertedIndexMain <input path> <output path>");
			System.exit(2);
		}
		// Create the job.
		Job job = Job.getInstance(new Configuration());
		// Entry-point class for the job jar.
		job.setJarByClass(InvertedIndexMain.class);
		// Mapper and its output key/value types.
		job.setMapperClass(InvertedIndexMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(Text.class);
		// Combiner pre-aggregates per-file counts on the map side.
		job.setCombinerClass(InvertedIndexCombiner.class);
		// Reducer and the final output key/value types.
		job.setReducerClass(InvertedIndexReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);
		// HDFS input and output paths.
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		// Propagate the job result as the process exit code; the original
		// discarded the boolean and always exited 0 even when the job failed.
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
2.打包执行
测试数据:
# hdfs dfs -ls /input/invertedindex
Found 3 items
-rw-r--r-- 1 root supergroup 33 2018-11-18 16:16 /input/invertedindex/data01.txt
-rw-r--r-- 1 root supergroup 32 2018-11-18 16:16 /input/invertedindex/data02.txt
-rw-r--r-- 1 root supergroup 16 2018-11-18 16:16 /input/invertedindex/data03.txt
# hdfs dfs -cat /input/invertedindex/data01.txt
I love Beijing and love Shanghai
# hdfs dfs -cat /input/invertedindex/data02.txt
Beijing is the capital of China
# hdfs dfs -cat /input/invertedindex/data03.txt
I love Shanghai
将程序打包成InvertedIndex.jar,并上传到服务器执行:
# hadoop jar InvertedIndex.jar /input/invertedindex /output/invertedindex
……
18/11/18 16:18:51 INFO mapreduce.Job: map 0% reduce 0%
18/11/18 16:19:03 INFO mapreduce.Job: map 100% reduce 0%
18/11/18 16:19:08 INFO mapreduce.Job: map 100% reduce 100%
18/11/18 16:19:08 INFO mapreduce.Job: Job job_1542506318955_0009 completed successfully
……
查看结果:
# hdfs dfs -ls /output/invertedindex
Found 2 items
-rw-r--r-- 1 root supergroup 0 2018-11-18 16:19 /output/invertedindex/_SUCCESS
-rw-r--r-- 1 root supergroup 258 2018-11-18 16:19 /output/invertedindex/part-r-00000
# hdfs dfs -cat /output/invertedindex/part-r-00000
Beijing (data01.txt:1)(data02.txt:1)
China (data02.txt:1)
I (data03.txt:1)(data01.txt:1)
Shanghai (data01.txt:1)(data03.txt:1)
and (data01.txt:1)
capital (data02.txt:1)
is (data02.txt:1)
love (data01.txt:2)(data03.txt:1)
of (data02.txt:1)
the (data02.txt:1)