
1> This is my main method

package dataAnalysis;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class Weather {

public static void main(String[] args) {
    JobConf conf=new JobConf();
    Job job;
    try {
        job = new Job(conf,"WeatherDataExtraction");
        job.setJobName("WeatherDataExtraction");
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job,new Path("E:\\Nitin\\Programming\\DATA\\01001.dat\\01001.dat"));
        FileOutputFormat.setOutputPath(conf,new Path("E:\\Nitin\\output20.txt"));
        try {
            job.waitForCompletion(true);
        } catch (ClassNotFoundException | IOException | InterruptedException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
}
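
For reference, and not part of my original code: the driver above mixes the old org.apache.hadoop.mapred API (JobConf, mapred.FileOutputFormat) with the new org.apache.hadoop.mapreduce API (Job, mapreduce.lib.input.FileInputFormat). A minimal sketch of the same job written only against the new API, with both paths set on the Job object, might look roughly like this; the class name WeatherNewApi is made up, and the paths simply repeat the placeholders above.

package dataAnalysis;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WeatherNewApi {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "WeatherDataExtraction");
        job.setJarByClass(WeatherNewApi.class);   // also avoids the "No job jar file set" warning
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Input and output paths are both registered on the Job via the mapreduce.lib.* classes.
        FileInputFormat.addInputPath(job, new Path("E:\\Nitin\\Programming\\DATA\\01001.dat\\01001.dat"));
        FileOutputFormat.setOutputPath(job, new Path("E:\\Nitin\\output20.txt"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}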

2> This is my mapper class. The data file is formatted as follows: if the first character of a line is '#', that line contains the year in which the data was recorded; the lines up to the next '#' line contain temperature readings, which I extract with substring.

package dataAnalysis;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Map extends Mapper<LongWritable,Text,Text,IntWritable> {
    private final int ERROR = 9999;   // value used in the data to flag a missing reading
    static String year;               // year taken from the most recent '#' header line
    private float airtemp;

    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        if (line.charAt(0) == '#') {
            // Header line: remember the year it announces.
            year = line.substring(6, 9);
        } else {
            // Data line: extract the temperature field unless it is the missing-value flag.
            if (!line.substring(15, 20).equals("9999")) {
                airtemp = Float.parseFloat(line.substring(15, 20));
                context.write(new Text(year), new IntWritable((int) airtemp));
            }
        }
    }
}
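
As a side note, here is a tiny standalone sketch, separate from the mapper above, that just makes the substring offsets concrete. The two sample strings and the class name SubstringSketch are made up purely for illustration; the real record layout is described at the readme link in point 5. (String.substring uses an exclusive end index, so substring(6,9) returns three characters and substring(15,20) returns five.)

public class SubstringSketch {

    public static void main(String[] args) {
        // Hypothetical lines, padded so that the offsets used in the mapper exist.
        String headerLine = "#ABCDE1957XXXXXXXXXXX";       // '#' marks a header line
        String dataLine   = "YYYYYYYYYYYYYYY00150ZZZZ";    // a non-header data line

        if (headerLine.charAt(0) == '#') {
            System.out.println("year = " + headerLine.substring(6, 9));   // prints "195" (three characters)
        }

        String temp = dataLine.substring(15, 20);                         // "00150"
        if (!temp.equals("9999")) {                                       // 9999 flags a missing reading
            System.out.println("temp = " + Float.parseFloat(temp));       // prints 150.0
        }
    }
}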

3> This is my reducer class

package dataAnalysis;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class Reduce extends Reducer<Text,IntWritable,Text,IntWritable> {

    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Emit the maximum temperature observed for this year.
        Integer max = new Integer(0);
        for (IntWritable val : values) {
            if (val.get() > max.intValue()) {
                max = val.get();
            }
        }
        context.write(key, new IntWritable(max.intValue()));
    }
}

4> These are the errors I received:

Sep 29, 2013 1:24:51 AM org.apache.hadoop.util.NativeCodeLoader
WARNING: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable

Sep 29, 2013 1:24:51 AM org.apache.hadoop.mapred.JobClient copyAndConfigureFiles
WARNING: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
Sep 29, 2013 1:24:51 AM org.apache.hadoop.mapred.JobClient copyAndConfigureFiles
WARNING: No job jar file set.  User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
Sep 29, 2013 1:24:51 AM org.apache.hadoop.mapred.JobClient$2 run
INFO: Cleaning up the staging area file:/tmp/hadoop-Nitin/mapred/staging/Nitin-2062417840/.staging/job_local_0001
org.apache.hadoop.mapred.InvalidJobConfException: Output directory not set.
at org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.checkOutputSpecs(FileOutputFormat.java:125)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:881)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:842)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Unknown Source)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1059)
at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:842)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:465)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:495)
at dataAnalysis.Weather.main(Weather.java:30)

5> Link to the format of the .dat file being analyzed: http://www1.ncdc.noaa.gov/pub/data/igra/readme.txt

