1. How to use the RDD DAG to write word count
2. How to run the built-in wordcount
How to use the RDD DAG to write word count
To write Scala in IDEA, I installed and configured the IntelliJ IDEA integrated development environment today. IDEA really is excellent; once you learn it, it is very comfortable to work with. For how to set up a Scala and IDEA development environment, see the reference material at the end of this article.
Below, WordCount is implemented in both Scala and Java. The Java implementation, JavaWordCount, is an example bundled with Spark ($SPARK_HOME/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java).
1. Environment
OS: Red Hat Enterprise Linux Server release 6.4 (Santiago)
Hadoop: Hadoop 2.4.1
JDK: 1.7.0_
Spark: 1.1.0
Scala: 2..2
IDE: IntelliJ IDEA .1.3
Note: IDEA, Scala, and the JDK need to be installed on the client Windows machine, and the Scala plugin needs to be downloaded for IDEA.
2. Word count in Scala
package com.hq

/**
 * User: hadoop
 * Date: //
 * Time: :
 */
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._

/**
 * Count how many times each word occurs.
 */
object WordCount {
  def main(args: Array[String]) {
    if (args.length < 1) {
      System.err.println("Usage: <file>")
      System.exit(1)
    }
    val conf = new SparkConf()
    val sc = new SparkContext(conf)
    val line = sc.textFile(args(0))
    // Split each line on spaces, pair each word with 1, sum per word, then print the counts.
    line.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).collect().foreach(println)
    sc.stop()
  }
}
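The single chained call above already describes a small RDD DAG. Here is a sketch of the same logic with the intermediate RDDs named so the DAG structure is easier to see; the comments describe Spark's general staging behavior rather than anything specific to this article:

// Equivalent to the chained call above, with each intermediate RDD named.
val words  = line.flatMap(_.split(" "))  // narrow dependency: runs in the same stage as textFile
val pairs  = words.map((_, 1))           // narrow dependency: still the same stage
val counts = pairs.reduceByKey(_ + _)    // wide dependency: the shuffle starts a new stage
counts.collect().foreach(println)        // action: triggers execution of the whole DAG

To run the program, package it into a jar and submit it with spark-submit. The jar name, master URL, and input path below are placeholders, not values from this article:

spark-submit --class com.hq.WordCount --master spark://master:7077 wordcount.jar hdfs:///data/wordcount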
3. Word count in Java
package com.hq;

/**
 * User: hadoop
 * Date: //
 * Time: :
 */

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public final class JavaWordCount {
  private static final Pattern SPACE = Pattern.compile(" ");

  public static void main(String[] args) throws Exception {
    if (args.length < 1) {
      System.err.println("Usage: JavaWordCount <file>");
      System.exit(1);
    }

    SparkConf sparkConf = new SparkConf().setAppName("JavaWordCount");
    JavaSparkContext ctx = new JavaSparkContext(sparkConf);
    JavaRDD<String> lines = ctx.textFile(args[0], 1);

    // Split each line on spaces.
    JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
      @Override
      public Iterable<String> call(String s) {
        return Arrays.asList(SPACE.split(s));
      }
    });

    // Map every word to a (word, 1) pair.
    JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
      @Override
      public Tuple2<String, Integer> call(String s) {
        return new Tuple2<String, Integer>(s, 1);
      }
    });

    // Sum the counts for each word.
    JavaPairRDD<String, Integer> counts = ones.reduceByKey(new Function2<Integer, Integer, Integer>() {
      @Override
      public Integer call(Integer i1, Integer i2) {
        return i1 + i2;
      }
    });

    List<Tuple2<String, Integer>> output = counts.collect();
    for (Tuple2<?, ?> tuple : output) {
      System.out.println(tuple._1() + ": " + tuple._2());
    }
    ctx.stop();
  }
}
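Because JavaWordCount ships with Spark, it can also be launched through the bundled run-example script instead of packaging it yourself; the script resolves the org.apache.spark.examples package prefix automatically. The input path here is a placeholder:

./bin/run-example JavaWordCount hdfs:///data/wordcount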
How to run the built-in wordcount
Hello,
1. Find the examples jar
We need to locate this example. First find your hadoop folder, then follow the path below:
/hadoop/share/hadoop/mapreduce
There you will see the jar:
hadoop-mapreduce-examples-2.2.0.jar
Step 2:
We need to do some preparation for the run, such as deciding the input and output paths and which file to upload.
1. First create a few data directories in HDFS:
hadoop fs -mkdir -p /data/wordcount
hadoop fs -mkdir -p /output/
2. The directory /data/wordcount holds the data file for Hadoop's bundled WordCount example; the result of the MapReduce job is written to the /output/wordcount directory.
First create the file inputWord:
vi /usr/inputWord
Once it is created, check its contents:
cat /usr/inputWord
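For illustration, suppose inputWord contains the following lines (a hypothetical sample; use whatever text you actually typed):

hello hadoop
hello world
hello mapreduce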
Upload the local file to HDFS:
hadoop fs -put /usr/inputWord /data/wordcount/
To check the uploaded file, execute the following command:
hadoop fs -ls /data/wordcount
You can see the file that was uploaded to HDFS.
Using the command
hadoop fs -text /data/wordcount/inputWord
you can view its contents (for the hypothetical sample, the three lines shown above).
Next, run the WordCount example with the following command (the arguments after the jar name are the example program name, the HDFS input directory, and the HDFS output directory):
hadoop jar /usr/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.2.0.jar wordcount /data/wordcount /output/wordcount
You will see the console print progress information as the job runs.
To view the result, execute the following command:
hadoop fs -text /output/wordcount/part-r-
Example result data:
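With the hypothetical sample input above, the output would look like this (hadoop fs -text prints one tab-separated word/count pair per line; actual results depend on your input file):

hadoop	1
hello	3
mapreduce	1
world	1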
Log in to the web console and visit http://master:/ to see the job history.