WordCount 实例
http://www.iteye.com/topic/606962
http://www.iteye.com/topic/1117343
看了Hadoop的代码,还是不知道他的执行流程,怎么办呢。我想到了日志,在hadoop的目录下,有log4j,那就用Log4j来记录Hadoop的执行过程吧.
- import java.io.IOException;
- import java.util.StringTokenizer;
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.io.IntWritable;
- import org.apache.hadoop.io.Text;
- import org.apache.hadoop.mapreduce.Job;
- import org.apache.hadoop.mapreduce.Mapper;
- import org.apache.hadoop.mapreduce.Reducer;
- import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
- import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
- import org.apache.hadoop.util.GenericOptionsParser;
- import org.apache.log4j.Logger;
- public class WordCount {
- public static Logger loger = Wloger.loger;
- /**
- * TokenizerMapper 继承自 Mapper&lt;Object, Text, Text, IntWritable&gt;
- *
- * [一个文件就一个map,两个文件就会有两个map]
- * map[这里读入输入文件内容 以" \t\n\r\f" 进行分割,然后设置 word ==> one 的key/value对]
- *
- * @param Object Input key Type:
- * @param Text Input value Type:
- * @param Text Output key Type:
- * @param IntWritable Output value Type:
- *
- * Writable的主要特点是它使得Hadoop框架知道对一个Writable类型的对象怎样进行serialize以及deserialize.
- * WritableComparable在Writable的基础上增加了compareTo接口,使得Hadoop框架知道怎样对WritableComparable类型的对象进行排序。
- *
- * @author yangchunlong.tw
- *
- */
- public static class TokenizerMapper
- extends Mapper<Object, Text, Text, IntWritable>{
- private final static IntWritable one = new IntWritable(1);
- private Text word = new Text();
- public void map(Object key, Text value, Context context
- ) throws IOException, InterruptedException {
- loger.info("Map <key>"+key+"</key>");
- loger.info("Map <value>"+value+"</key>");
- StringTokenizer itr = new StringTokenizer(value.toString());
- while (itr.hasMoreTokens()) {
- String wordstr = itr.nextToken();
- word.set(wordstr);
- loger.info("Map <word>"+wordstr+"</word>");
- context.write(word, one);
- }
- }
- }
- /**
- * IntSumReducer 继承自 Reducer<Text,IntWritable,Text,IntWritable>
- *
- * [不管几个Map,都只有一个Reduce,这是一个汇总]
- * reduce[循环所有的map值,把word ==> one 的key/value对进行汇总]
- *
- * 这里的key为Mapper设置的word[每一个key/value都会有一次reduce]
- *
- * 当循环结束后,最后的确context就是最后的结果.
- *
- * @author yangchunlong.tw
- *
- */
- public static class IntSumReducer
- extends Reducer<Text,IntWritable,Text,IntWritable> {
- private IntWritable result = new IntWritable();
- public void reduce(Text key, Iterable<IntWritable> values,
- Context context
- ) throws IOException, InterruptedException {
- loger.info("Reduce <key>"+key+"</key>");
- loger.info("Reduce <value>"+values+"</key>");
- int sum = 0;
- for (IntWritable val : values) {
- sum += val.get();
- }
- result.set(sum);
- loger.info("Reduce <sum>"+sum+"</sum>");
- context.write(key, result);
- }
- }
- public static void main(String[] args) throws Exception {
- Configuration conf = new Configuration();
- String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
- /**
- * 这里必须有输入/输出
- */
- if (otherArgs.length != 2) {
- System.err.println("Usage: wordcount <in> <out>");
- System.exit(2);
- }
- Job job = new Job(conf, "word count");
- job.setJarByClass(WordCount.class);//主类
- job.setMapperClass(TokenizerMapper.class);//mapper
- job.setCombinerClass(IntSumReducer.class);//作业合成类
- job.setReducerClass(IntSumReducer.class);//reducer
- job.setOutputKeyClass(Text.class);//设置作业输出数据的关键类
- job.setOutputValueClass(IntWritable.class);//设置作业输出值类
- FileInputFormat.addInputPath(job, new Path(otherArgs[0]));//文件输入
- FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));//文件输出
- System.exit(job.waitForCompletion(true) ? 0 : 1);//等待完成退出.
- }
- }
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.log4j.Logger;
public class WordCount {
public static Logger loger = Wloger.loger;
/**
* TokenizerMapper 继续自 Mapper<Object, Text, Text, IntWritable>
*
* [一个文件就一个map,两个文件就会有两个map]
* map[这里读入输入文件内容 以" \t\n\r\f" 进行分割,然后设置 word ==> one 的key/value对]
*
* @param Object Input key Type:
* @param Text Input value Type:
* @param Text Output key Type:
* @param IntWritable Output value Type:
*
* Writable的主要特点是它使得Hadoop框架知道对一个Writable类型的对象怎样进行serialize以及deserialize.
* WritableComparable在Writable的基础上增加了compareT接口,使得Hadoop框架知道怎样对WritableComparable类型的对象进行排序。
*
* @author yangchunlong.tw
*
*/
public static class TokenizerMapper
extends Mapper<Object, Text, Text, IntWritable>{
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException {
loger.info("Map <key>"+key+"</key>");
loger.info("Map <value>"+value+"</key>");
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
String wordstr = itr.nextToken();
word.set(wordstr);
loger.info("Map <word>"+wordstr+"</word>");
context.write(word, one);
}
}
}
/**
* IntSumReducer 继承自 Reducer<Text,IntWritable,Text,IntWritable>
*
* [不管几个Map,都只有一个Reduce,这是一个汇总]
* reduce[循环所有的map值,把word ==> one 的key/value对进行汇总]
*
* 这里的key为Mapper设置的word[每一个key/value都会有一次reduce]
*
* 当循环结束后,最后的确context就是最后的结果.
*
* @author yangchunlong.tw
*
*/
public static class IntSumReducer
extends Reducer<Text,IntWritable,Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values,
Context context
) throws IOException, InterruptedException {
loger.info("Reduce <key>"+key+"</key>");
loger.info("Reduce <value>"+values+"</key>");
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
loger.info("Reduce <sum>"+sum+"</sum>");
context.write(key, result);
}
}
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
/**
* 这里必须有输入/输出
*/
if (otherArgs.length != 2) {
System.err.println("Usage: wordcount <in> <out>");
System.exit(2);
}
Job job = new Job(conf, "word count");
job.setJarByClass(WordCount.class);//主类
job.setMapperClass(TokenizerMapper.class);//mapper
job.setCombinerClass(IntSumReducer.class);//作业合成类
job.setReducerClass(IntSumReducer.class);//reducer
job.setOutputKeyClass(Text.class);//设置作业输出数据的关键类
job.setOutputValueClass(IntWritable.class);//设置作业输出值类
FileInputFormat.addInputPath(job, new Path(otherArgs[0]));//文件输入
FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));//文件输出
System.exit(job.waitForCompletion(true) ? 0 : 1);//等待完成退出.
}
}
这里输出了每一次Map,每一次Reduce.结果如下:
- f1 ==>Map Result
- Map <key>0</key>
- Map <value>ycl ycl is ycl good</key>
- Map <word>ycl</word>
- Map <word>ycl</word>
- Map <word>is</word>
- Map <word>ycl</word>
- Map <word>good</word>
- f1 ==>Reduce Result
- Reduce <key>good</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
- Reduce <sum>1</sum>
- Reduce <key>is</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
- Reduce <sum>1</sum>
- Reduce <key>ycl</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
- Reduce <sum>3</sum>
- f2 ==>Map Result
- Map <key>0</key>
- Map <value>hello ycl hello lg</key>
- Map <word>hello</word>
- Map <word>ycl</word>
- Map <word>hello</word>
- Map <word>lg</word>
- f2 ==>Reduce Result
- Reduce <key>hello</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
- Reduce <sum>2</sum>
- Reduce <key>lg</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
- Reduce <sum>1</sum>
- Reduce <key>ycl</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
- Reduce <sum>1</sum>
- f1,f2 ==> Reduce Result
- Reduce <key>good</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
- Reduce <sum>1</sum>
- Reduce <key>hello</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
- Reduce <sum>2</sum>
- Reduce <key>is</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
- Reduce <sum>1</sum>
- Reduce <key>lg</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
- Reduce <sum>1</sum>
- Reduce <key>ycl</key>
- Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
- Reduce <sum>4</sum>
f1 ==>Map Result
Map <key>0</key>
Map <value>ycl ycl is ycl good</key>
Map <word>ycl</word>
Map <word>ycl</word>
Map <word>is</word>
Map <word>ycl</word>
Map <word>good</word>
f1 ==>Reduce Result
Reduce <key>good</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
Reduce <sum>1</sum>
Reduce <key>is</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
Reduce <sum>1</sum>
Reduce <key>ycl</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1dfc547</key>
Reduce <sum>3</sum>
f2 ==>Map Result
Map <key>0</key>
Map <value>hello ycl hello lg</key>
Map <word>hello</word>
Map <word>ycl</word>
Map <word>hello</word>
Map <word>lg</word>
f2 ==>Reduce Result
Reduce <key>hello</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
Reduce <sum>2</sum>
Reduce <key>lg</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
Reduce <sum>1</sum>
Reduce <key>ycl</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@10f6d3</key>
Reduce <sum>1</sum>
f1,f2 ==> Reduce Result
Reduce <key>good</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>1</sum>
Reduce <key>hello</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>2</sum>
Reduce <key>is</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>1</sum>
Reduce <key>lg</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>1</sum>
Reduce <key>ycl</key>
Reduce <value>org.apache.hadoop.mapreduce.ReduceContext$ValueIterable@1989f84</key>
Reduce <sum>4</sum>
正常人应该能分析出map/reduce的执行机制,比如有两个输入文件,map/reduce是一个文件一个文件进行处理的,每map一个输入文件就会reduce一次,最后再进行总的reduce.
WordCount 实例相关推荐
- 大数据之MapReduce详解(MR的运行机制及配合WordCount实例来说明运行机制)
目录 前言: 1.MapReduce原理 2.mapreduce实践(WordCount实例) 目录 今天先总体说下MapReduce的相关知识,后续将会详细说明对应的shuffle.mr与yarn的 ...
- Hadoop伪分布式配置和搭建,hadoop单机安装,wordcount实例测试,hadoop安装java目录怎么找,问题及问题解决方法
Hadoop伪分布式配置和搭建,hadoop单机安装,wordcount实例测试,hadoop安装java目录怎么找,问题及问题解决方法 环境说明 系统:ubuntu18.04 主机名:test1 用 ...
- hadoop运行wordcount实例,hdfs简单操作
1.查看hadoop版本 [hadoop@ltt1 sbin]$ hadoop version Hadoop 2.6.0-cdh5.12.0 Subversion http://github.com/ ...
- docker运行storm及wordcount实例
序 本文简单介绍下怎么使用docker运行storm以及在springboot中使用storm. docker-compose version: '2' services:zookeeper:imag ...
- WordCount实例分析(一)
由上篇blog可知,Mapreduce架构处理问题过程中,需要map()函数和reduce()函数即可同时再添加驱动程序进行实现,本文根据老师上课所讲,对WordCount实例进行分析整理,为学习笔记 ...
- wordcount代码_通过腾讯云 Serverless Regsitry 快速开发与部署一个 WordCount 实例
在学习 MapReduce 的过程中,不少人接触的第一个项目就是单词计数.单词计数通过两个函数 Map 和 Reduce,可以快速地统计出文本文件中每个单词出现的个数,它虽然简单,但也是最能体现 Ma ...
- python hadoop wordcount_Hadoop之wordcount实例-MapReduce程序
实验目的 利用搭建好的大数据平台 Hadoop,对 HDFS 中的文本文件进行处理,采用 Hadoop Steaming 方式,使用 Python 语言实现英文单词的统计功能,并输出单词统计结果. 实 ...
- 这可能是最全的在Ubunto安装idea,maven以及进行Wordcount实例的一个博客了
文章目录 idea安装: 1.下载安装包并移动 2.解压 3.赋权限 4.启动idea 5.安装 安装maven 1.解压maven包 2.设置权限: 3.配置变量 4.验证是否存在 5.修改sett ...
- Hadoop运行wordcount实例任务卡在job running的多种情况及解决方法
第一种:配置问题 这是别人的图片,据楼主排查解决是因为hosts配置问题- 现象:各种无法运行.启动 解决办法: 1.修改日志级别 export HADOOP_ROOT_LOGGER=DEBUG,co ...
最新文章
- [LeetCode 111] - 二叉树的最小深度 (Minimum Depth of Binary Tree)
- 【算法与数据结构】关于代码运行时间复杂度的计算方法
- 20211130 正定矩阵的几个不等式
- Centos 搭建activemq
- 北京集训②DAY1 Morning
- 基本操作2-常用命令
- JUC与JVM并发编程学习笔记04
- Visual Studio 2012下载安装方法
- linux日志分析步骤,Linux系统日志分析的基本教程
- 苹果ios8_手机资讯:你必须要知道的iOS8实用小技巧汇总
- 基于腾讯云函数SCFddddocr免费验证码识别API响应系统
- mysql建表auto_increment_mysql create table auto_increment
- 简单实现账号密码登录(写死了)
- uniapp-Speech语音识别(百度)
- yy账号找回方法详解
- 全网最硬核 JVM TLAB 分析(单篇版不包含额外加菜)
- MATLAB 的函数
- 各种求圆周率π的算法(蒙特卡洛法的Java实现)
- linux DSA 开发(一)
- 无lnternet_无internet访问,详细教您怎么解决无internet访问