I am trying to run the Hadoop word count in Eclipse. All I did was add every jar file from the hadoop directory and the hadoop/lib directory as libraries of this project, but I get the error below:
java.lang.Exception: java.lang.ArrayIndexOutOfBoundsException: 1
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:400)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 1
at org.orzota.bookx.mappers.MyHadoopMapper.map(MyHadoopMapper.java:23)
at org.orzota.bookx.mappers.MyHadoopMapper.map(MyHadoopMapper.java:1)
at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:54)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:400)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:335)
at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:232)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
at java.util.concurrent.FutureTask.run(FutureTask.java:166)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1146)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:679)
2013-10-23 18:59:20,841 INFO [main] mapreduce.Job (Job.java:monitorAndPrintJob(1288)) Job job_local_0001 running in uber mode : false
2013-10-23 18:59:20,843 INFO [main] mapreduce.Job (Job.java:monitorAndPrintJob(1295)) map 0% reduce 0%
2013-10-23 18:59:20,847 INFO [main] mapreduce.Job (Job.java:monitorAndPrintJob(1308)) Job job_local_0001 failed with state FAILED due to: NA
2013-10-23 18:59:20,866 INFO [main] mapreduce.Job (Job.java:monitorAndPrintJob(1313)) Counters: 0
java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:891)
at org.orzota.bookx.mappers.MyHadoopDriver.main(MyHadoopDriver.java:46)
Can anyone help me figure out what is going wrong?
Here is MyHadoopMapper:
package org.orzota.bookx.mappers;

import java.io.IOException;

import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class MyHadoopMapper extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1);

    public void map(LongWritable _key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
        String st = value.toString();
        String[] bookdata = st.split(",");
        //for (int i = 0; i < bookdata.length; i++) {
        //    System.out.println(bookdata[i]);
        //}
        //if (bookdata.length != 8) {
        //    System.out.println("Warning, bad Entry.." + bookdata.length);
        //    return;
        //}
        output.collect(new Text(bookdata[1]), one);
    }
}
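For reference, the commented-out block in the mapper above was my attempt at guarding against short lines. Re-enabled, the body of map() would look like the sketch below (it assumes every valid record in my input file has exactly 8 comma-separated fields, which I have not verified for the whole file). I left it commented out because I was not sure whether silently skipping malformed lines is the right fix or whether it just hides the real problem:

    // Sketch of map() with the length check re-enabled (assumption: valid records have 8 fields).
    public void map(LongWritable _key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
        String st = value.toString();
        String[] bookdata = st.split(",");
        if (bookdata.length != 8) {
            // A line with fewer than 2 fields would make bookdata[1] throw ArrayIndexOutOfBoundsException;
            // this stricter check also rejects any record that does not have all 8 expected fields.
            System.out.println("Warning, bad Entry.." + bookdata.length);
            return;
        }
        output.collect(new Text(bookdata[1]), one);
    }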