
I would like to retrieve all the key/value pairs from a sequence file through a Hadoop MapReduce application. To read the sequence file in my main class I followed this post: http://lintool.github.com/Cloud9/docs/content/staging-records.html, and that works. Now I want to write all the key/value pairs to an ordinary text file on HDFS. How can I achieve that? I wrote my code as follows.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount
{
    // Identity mapper: SequenceFileAsBinaryInputFormat delivers the raw key and
    // value bytes of each record as BytesWritable, which are passed straight through.
    public static class Map extends Mapper<BytesWritable, BytesWritable, BytesWritable, BytesWritable>
    {
        @Override
        public void map(BytesWritable key, BytesWritable value, Context context)
                throws IOException, InterruptedException
        {
            // These go to the task logs, not to the job output.
            System.out.println(key.toString());
            System.out.println(value.toString());
            context.write(key, value);
        }
    }

    // Identity reducer: emits every value unchanged, so TextOutputFormat
    // writes each pair as one line of text.
    public static class Reduce extends Reducer<BytesWritable, BytesWritable, BytesWritable, BytesWritable>
    {
        @Override
        public void reduce(BytesWritable key, Iterable<BytesWritable> values, Context context)
                throws IOException, InterruptedException
        {
            for (BytesWritable val : values)
            {
                context.write(key, val);
            }
        }
    }

    public static void main(String[] args) throws Exception
    {
        // Note: this deletes a directory on the local file system, not on HDFS.
        FileUtil.fullyDelete(new File(args[1]));

        Configuration conf = new Configuration();
        Job job = new Job(conf, "wordcount");

        job.setOutputKeyClass(BytesWritable.class);
        job.setOutputValueClass(BytesWritable.class);

        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);

        job.setInputFormatClass(SequenceFileAsBinaryInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setJarByClass(WordCount.class);
        job.waitForCompletion(true);
    }
}

2 Answers


Use the following code to read all the key/value pairs; modify it to suit your needs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileReader {
    public static void main(String[] args) throws Exception {
        System.out.println("Reading Sequence File");
        Configuration conf = new Configuration();
        conf.addResource(new Path("/home/mohammad/hadoop-0.20.203.0/conf/core-site.xml"));
        conf.addResource(new Path("/home/mohammad/hadoop-0.20.203.0/conf/hdfs-site.xml"));
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/seq/file");
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(fs, path, conf);
            // The file header records the key/value classes, so instances of the
            // right types can be created reflectively.
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                System.out.println(key + "  <===>  " + value.toString());
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}
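
The loop above only prints to the console. Since the original goal was a plain text file on HDFS, here is a minimal sketch of the same read loop that writes each pair through an FSDataOutputStream instead; the input and output paths and the class name SequenceFileToText are placeholders, not part of the original answer.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileToText {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path inPath = new Path("/seq/file");      // placeholder: input sequence file
        Path outPath = new Path("/seq/file.txt"); // placeholder: output text file
        SequenceFile.Reader reader = null;
        FSDataOutputStream out = null;
        try {
            reader = new SequenceFile.Reader(fs, inPath, conf);
            out = fs.create(outPath, true); // overwrite any existing output
            Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            // One tab-separated line per record, mirroring what TextOutputFormat produces.
            while (reader.next(key, value)) {
                out.writeBytes(key + "\t" + value + "\n");
            }
        } finally {
            IOUtils.closeStream(out);
            IOUtils.closeStream(reader);
        }
    }
}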

answered 2012-05-29T12:24:27.337
The program below may be useful for getting an idea of how to convert a BytesWritable value to Text.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.Text;

public class SequenceFileRead {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(conf, Reader.file(path));
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                System.out.println(key);
                // getBytes() returns the backing array, which may be longer than
                // the record, so only the first getLength() bytes are valid.
                System.out.println(new String(value.getBytes(), 0, value.getLength()));
            }
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}
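
As an aside, if the value bytes are expected to be UTF-8, Hadoop's Text.decode performs the same conversion but validates the encoding, throwing a CharacterCodingException on malformed input. Inside the while loop above, the conversion could instead read:

// Alternative conversion (assumes the bytes are UTF-8): Text.decode
// validates the encoding instead of silently replacing bad sequences.
String text = Text.decode(value.getBytes(), 0, value.getLength());
System.out.println(text);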
answered 2014-11-26T09:04:06.087