【Background】
Hadoop Streaming processes data as follows: an InputFormat first reads the input files and hands each record to the mapper; the key/value pairs emitted by the mapper are then passed to the reducer; finally, the reducer's output is written to the output files through an OutputFormat.
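On the command line these stages map directly onto Streaming's options. A minimal sketch of a job (the jar path, input/output paths, and mapper/reducer commands are placeholders; TextInputFormat and TextOutputFormat are Streaming's defaults and are spelled out here only for illustration):

hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
    -input /path/to/input \
    -output /path/to/output \
    -inputformat org.apache.hadoop.mapred.TextInputFormat \
    -mapper /bin/cat \
    -reducer /usr/bin/wc \
    -outputformat org.apache.hadoop.mapred.TextOutputFormat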
The task at hand is to read ORC files through Hadoop Streaming. Reading an ORC file with the stock org.apache.orc.mapred.OrcInputFormat returns lines like:
null {"name":"123","age":"456"}
null {"name":"456","age":"789"}
The data comes back this way because OrcInputFormat yields <NullWritable, OrcStruct> records: NullWritable's toString() returns null, while OrcStruct's toString() returns a JSON string.
What we need is a converter that keeps only the values from the JSON string that OrcInputFormat returns, i.e. it should emit:
123 456
456 789
【Rewriting the InputFormat: reading single files】
package is.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.*;
import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcInputFormat;
import org.apache.orc.mapred.OrcMapredRecordReader;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.Reader;
import org.apache.orc.Reader.Options;
import java.io.IOException;
public class OrcInputAsTextInputFormat extends org.apache.hadoop.mapred.FileInputFormat<Text, Text> {

  // The actual file reading is still done by OrcInputFormat
  protected OrcInputFormat<OrcStruct> orcInputFormat = new OrcInputFormat<>();

  public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
      throws IOException {
    OrcMapredRecordReader<OrcStruct> realReader =
        (OrcMapredRecordReader<OrcStruct>) orcInputFormat.getRecordReader(split, job, reporter);
    return new TextRecordReaderWrapper(realReader);
  }
  // Static helpers delegate to OrcInputFormat so that callers can keep using
  // column pruning and predicate pushdown as before
  public static boolean[] parseInclude(TypeDescription schema, String columnsStr) {
    return OrcInputFormat.parseInclude(schema, columnsStr);
  }

  public static void setSearchArgument(Configuration conf, SearchArgument sarg, String[] columnNames) {
    OrcInputFormat.setSearchArgument(conf, sarg, columnNames);
  }

  public static Options buildOptions(Configuration conf, Reader reader, long start, long length) {
    return OrcInputFormat.buildOptions(conf, reader, start, length);
  }
  protected static class TextRecordReaderWrapper implements RecordReader<Text, Text> {

    private final OrcMapredRecordReader<OrcStruct> realReader;
    private final OrcStruct orcVal;
    private final StringBuilder buffer;
    private final int numOfFields;

    public TextRecordReaderWrapper(OrcMapredRecordReader<OrcStruct> realReader) throws IOException {
      this.realReader = realReader;
      this.orcVal = realReader.createValue();
      this.buffer = new StringBuilder();
      this.numOfFields = this.orcVal.getNumFields();
    }
    public boolean next(Text key, Text value) throws IOException {
      // Use the first field as the key; join the remaining fields with '\t'
      // to form the value
      if (realReader.next(NullWritable.get(), orcVal)) {
        buffer.setLength(0); // reset the buffer
        key.set(orcVal.getFieldValue(0).toString());
        // assemble the value, '\t'-separated
        for (int i = 1; i < numOfFields; ++i) {
          buffer.append('\t');
          WritableComparable curField = orcVal.getFieldValue(i);
          if (curField != null && !curField.equals(NullWritable.get())) {
            buffer.append(curField.toString());
          }
        }
        // strip the leading '\t'; guard against single-column rows
        value.set(buffer.length() > 0 ? buffer.substring(1) : "");
        return true;
      }
      return false;
    }
    public Text createKey() {
      return new Text();
    }

    public Text createValue() {
      return new Text();
    }

    public long getPos() throws IOException {
      return realReader.getPos();
    }

    public void close() throws IOException {
      realReader.close();
    }

    public float getProgress() throws IOException {
      return realReader.getProgress();
    }
  }
}
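With the class above compiled and packaged, the Streaming job from the background section can switch to it via -inputformat. A minimal sketch of the invocation, assuming the class is packed into a placeholder jar orc-text-inputformat.jar and that the ORC runtime jars (orc-mapreduce and its dependencies) are shipped with the job the same way:

hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
    -libjars orc-text-inputformat.jar \
    -input /path/to/orc_table \
    -output /path/to/text_output \
    -inputformat is.orc.OrcInputAsTextInputFormat \
    -mapper /bin/cat \
    -numReduceTasks 0

With an identity mapper and no reducers, the output should now contain the tab-separated field values ("123 456") instead of the null-plus-JSON lines.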
【Reading multiple files】
When reading input, MapReduce can cut down the number of map tasks by combining small files, as CombineSequenceFileInputFormat does for SequenceFiles. Without such combining, a job over many small files may launch far too many maps, consuming excessive resources and running slowly. I could not find an official package that does this for the ORC format, so I wrote one myself. The code is as follows:
package is.orc;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
import org.apache.hadoop.mapred.lib.CombineFileRecordReader;
import org.apache.hadoop.mapred.lib.CombineFileRecordReaderWrapper;
import org.apache.hadoop.mapred.lib.CombineFileSplit;
import java.io.IOException;
public class CombineOrcInputAsTextInputFormat extends CombineFileInputFormat<Text, Text> {

  @SuppressWarnings({ "rawtypes", "unchecked" })
  public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf conf, Reporter reporter)
      throws IOException {
    return new CombineFileRecordReader(conf, (CombineFileSplit) split, reporter,
        ORCFileRecordReaderWrapper.class);
  }
  /**
   * A record reader that may be passed to <code>CombineFileRecordReader</code>
   * so that it can be used in a <code>CombineFileInputFormat</code>-equivalent
   * for <code>OrcInputAsTextInputFormat</code>.
   *
   * @see CombineFileRecordReader
   * @see CombineFileInputFormat
   * @see OrcInputAsTextInputFormat
   */
  private static class ORCFileRecordReaderWrapper
      extends CombineFileRecordReaderWrapper<Text, Text> {
    // this constructor signature is required by CombineFileRecordReader
    public ORCFileRecordReaderWrapper(CombineFileSplit split,
        Configuration conf, Reporter reporter, Integer idx) throws IOException {
      // All we have to do is plug in the OrcInputAsTextInputFormat written above;
      // CombineFileInputFormat already implements the actual combining for us
      super(new OrcInputAsTextInputFormat(), split, conf, reporter, idx);
    }
  }
}
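As with other CombineFileInputFormat subclasses, the maximum combined split size is what actually determines the map count; if it is left unset, the format may merge all files on a node into a single split. A sketch of the invocation, assuming a Hadoop version where the relevant key is mapreduce.input.fileinputformat.split.maxsize (older releases use mapred.max.split.size):

hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
    -libjars orc-text-inputformat.jar \
    -D mapreduce.input.fileinputformat.split.maxsize=268435456 \
    -input /path/to/orc_table \
    -output /path/to/text_output \
    -inputformat is.orc.CombineOrcInputAsTextInputFormat \
    -mapper /bin/cat \
    -numReduceTasks 0

Here 268435456 bytes (256 MB) caps each combined split at roughly two default-sized HDFS blocks; raise or lower it to trade map parallelism against per-map overhead.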