Apache Beam can be a great help for machine learning work: the model is simple and easy to use.
Two key features of Apache Beam:
1. It unifies the programming paradigms for batch and stream processing;
2. It can run on any supported execution engine (see the runner-selection sketch below).
Beam provides a unified model not only for designing, but also for executing, a whole series of data-oriented workflows, including data processing, ingestion, and integration.
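In practice, "running on any execution engine" means the runner is chosen through PipelineOptions rather than hard-coded into the transforms. A minimal sketch (RunnerChoice is just an illustrative class name; the commented-out runners are assumptions that each require their own runner artifact on the classpath):
<code>
import org.apache.beam.runners.direct.DirectRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

public class RunnerChoice {
    public static void main(String[] args) {
        // The same pipeline definition can target different engines;
        // only the options change, never the transforms themselves.
        PipelineOptions options = PipelineOptionsFactory.create();
        options.setRunner(DirectRunner.class); // local, in-process execution
        // options.setRunner(FlinkRunner.class); // assumption: needs the Flink runner dependency
        // options.setRunner(SparkRunner.class); // assumption: needs the Spark runner dependency
        Pipeline pipeline = Pipeline.create(options);
        pipeline.run();
    }
}
</code>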
Create a new Maven project
Add the following dependencies to pom.xml:
<code>
<dependencies>
    <dependency>
        <groupId>org.apache.beam</groupId>
        <artifactId>beam-sdks-java-core</artifactId>
        <version>0.4.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.beam</groupId>
        <artifactId>beam-runners-direct-java</artifactId>
        <version>0.4.0</version>
    </dependency>
</dependencies>
</code>
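Here beam-sdks-java-core provides the Beam model and the Java SDK, while beam-runners-direct-java supplies the Direct Runner, which executes pipelines in-process on the local machine and is the usual choice for development and testing.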
The test class WordCount.java:
<code>
package org.tom;

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.*;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;

import java.io.Serializable;

public class WordCount implements Serializable {

    private transient Pipeline pipeline = null;

    public WordCount() {
        // Default options execute the pipeline on the Direct Runner.
        PipelineOptions options = PipelineOptionsFactory.create();
        options.setJobName("wordcount");
        pipeline = Pipeline.create(options);
    }

    public void transform() {
        // Read the input file line by line.
        PCollection<String> collection = pipeline.apply(
                TextIO.Read.from("file:///d:/tom/beam-test/src/main/resources/word.txt"));

        // Split each line on spaces and emit every non-empty word.
        PCollection<String> extractWords = collection.apply("ExtractWords",
                ParDo.of(new DoFn<String, String>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                        String[] split = c.element().split(" ");
                        for (String word : split) {
                            if (!word.isEmpty()) {
                                c.output(word);
                            }
                        }
                    }
                }));

        // Count how many times each word occurs.
        PCollection<KV<String, Long>> counts = extractWords.apply(Count.<String>perElement());

        // Render each (word, count) pair as "word: count".
        PCollection<String> formatResults = counts.apply("FormatResults",
                MapElements.via(new SimpleFunction<KV<String, Long>, String>() {
                    @Override
                    public String apply(KV<String, Long> input) {
                        return input.getKey() + ": " + input.getValue();
                    }
                }));

        // Write the results; the output may be split across several shard files.
        formatResults.apply(TextIO.Write.to("file:///d:/tom/beam-test/src/main/resources/wordcounts"));
    }

    public void run() {
        pipeline.run().waitUntilFinish();
    }

    public static void main(String[] args) {
        WordCount wordCount = new WordCount();
        wordCount.transform();
        wordCount.run();
    }
}
</code>
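One way to run the example locally is through the exec-maven-plugin (assuming it is available; Maven resolves the exec: prefix from its default plugin groups):
<code>
mvn compile exec:java -Dexec.mainClass=org.tom.WordCount
</code>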
The text to be counted, \resources\word.txt:
<code>
tom
hello
tom
luo
hello
tom
tom
word
word
word
tom
</code>
Execution result:
<code>
word: 3
luo: 1
tom: 5
hello: 2
</code>
Two output files are generated, because the results are hash-partitioned into shards.
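If a single output file is preferred, the shard count can be pinned. A minimal sketch, assuming the 0.4.0-era TextIO.Write.withNumShards method (which caps the number of output files at the cost of write parallelism):
<code>
// Force all results into one shard instead of letting the runner
// decide how many output files to create.
formatResults.apply(TextIO.Write
        .to("file:///d:/tom/beam-test/src/main/resources/wordcounts")
        .withNumShards(1));
</code>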