
I wrote a small Hadoop map program to parse (with regular expressions) information out of log files generated by another application. I found this article http://www.nearinfinity.com//blogs/stephen_mouring_jr/2013/01/04/writing-hive-tables-from-mapreduce.html which explains how to parse the data and write it into a Hive table.

Here is my code:

import java.io.IOException;
import java.util.ArrayList;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class ParseDataToDB {
    public static final String SEPARATOR_FIELD = new String(new char[] {1});
    public static final String SEPARATOR_ARRAY_VALUE = new String(new char[] {2});
    public static final BytesWritable NULL_KEY = new BytesWritable();

    public static class MyMapper extends Mapper<LongWritable, Text, BytesWritable, Text>  {
        //private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();
        private ArrayList<String> bazValues = new ArrayList<String>();

        public void map(LongWritable key, Text value,
                OutputCollector<BytesWritable, Text> context)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while(tokenizer.hasMoreTokens()){
                word.set(tokenizer.nextToken());
                if(word.find("extract") > -1) {
                    System.out.println("in herer");
                    bazValues.add(line);
                }
            }
            // Build up the array values as a delimited string.
            StringBuilder bazValueBuilder = new StringBuilder();
            int i = 0;
            for (String bazValue : bazValues) {
                bazValueBuilder.append(bazValue);
                ++i;
                if (i < bazValues.size()) {
                    bazValueBuilder.append(SEPARATOR_ARRAY_VALUE);
                }
            }

            // Build up the column values / fields as a delimited string.
            String hiveRow = new String();
            hiveRow += "fooValue";
            hiveRow += SEPARATOR_FIELD;
            hiveRow += "barValue";
            hiveRow += SEPARATOR_FIELD;
            hiveRow += bazValueBuilder.toString();
            System.out.println("in herer hiveRow" + hiveRow);

//          StringBuilder hiveRow = new StringBuilder();
//          hiveRow.append("fooValue");
//          hiveRow.append(SEPARATOR_FIELD);
//          hiveRow.append("barValue");
//          hiveRow.append(SEPARATOR_FIELD);
//          hiveRow.append(bazValueBuilder.toString());

            // Emit a null key and a Text object containing the delimited fields
            context.collect(NULL_KEY, new Text(hiveRow));           
        }
    } 


    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

        Configuration conf = new Configuration();       
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        Job job = new Job(conf, "MyTest");
        job.setJarByClass(ParseDataToDB.class);
        job.setMapperClass(MyMapper.class);

        job.setMapOutputKeyClass(BytesWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(BytesWritable.class);
        job.setOutputValueClass(Text.class);


        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

But when I run this app I get the error "expected BytesWritable, received LongWritable". Can someone tell me what I am doing wrong? I am new to Hadoop programming. I am also open to creating an external table and pointing it at HDFS, but I am struggling with that implementation as well. Thanks.


2 Answers


I think you are trying to emit a NULL key from the map, so you can use NullWritable. Note also that your map() takes an OutputCollector, so it never overrides Mapper.map(key, value, Context) from the new mapreduce API; the framework falls back to the default identity mapper, which emits the LongWritable byte offsets as keys, and that is exactly the type-mismatch error you are seeing. With the signature fixed and NullWritable as the key type, your code would look like this:

import java.io.IOException;
import java.util.ArrayList;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class ParseDataToDB {
    // Hive's default text-table delimiters: ^A between fields, ^B between array elements.
    public static final String SEPARATOR_FIELD = new String(new char[] {1});
    public static final String SEPARATOR_ARRAY_VALUE = new String(new char[] {2});


    public static class MyMapper extends Mapper<LongWritable, Text, NullWritable, Text> {
        private Text word = new Text();

        // The new (org.apache.hadoop.mapreduce) API passes a Context, not an OutputCollector.
        // @Override makes the compiler reject a signature that the framework would never call.
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            // Collect matching lines locally so values do not leak across input records.
            ArrayList<String> bazValues = new ArrayList<String>();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                if (word.find("extract") > -1) {
                    bazValues.add(line);
                }
            }

            // Build up the array values as a delimited string.
            StringBuilder bazValueBuilder = new StringBuilder();
            int i = 0;
            for (String bazValue : bazValues) {
                bazValueBuilder.append(bazValue);
                ++i;
                if (i < bazValues.size()) {
                    bazValueBuilder.append(SEPARATOR_ARRAY_VALUE);
                }
            }

            // Build up the column values / fields as a delimited string.
            StringBuilder hiveRow = new StringBuilder();
            hiveRow.append("fooValue");
            hiveRow.append(SEPARATOR_FIELD);
            hiveRow.append("barValue");
            hiveRow.append(SEPARATOR_FIELD);
            hiveRow.append(bazValueBuilder.toString());

            // Emit a null key and a Text object containing the delimited fields.
            context.write(NullWritable.get(), new Text(hiveRow.toString()));
        }
    }


    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        Job job = new Job(conf, "MyTest");
        job.setJarByClass(ParseDataToDB.class);
        job.setMapperClass(MyMapper.class);

        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
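
As for the second part of the question (an external Hive table pointing at the job's output in HDFS): the mapper separates fields with \001 and array elements with \002, which are also Hive's default delimiters for text tables, so an external table can read the output directory directly. The following is only a minimal sketch; the table name, column names, and the LOCATION path are placeholders, not values taken from your job:

CREATE EXTERNAL TABLE parsed_logs (
  foo STRING,
  bar STRING,
  baz ARRAY<STRING>
)
ROW FORMAT DELIMITED
  FIELDS TERMINATED BY '\001'
  COLLECTION ITEMS TERMINATED BY '\002'
STORED AS TEXTFILE
LOCATION '/user/hadoop/parse-output';

Once the table exists, a plain SELECT reads whatever files the job wrote; no LOAD DATA step is needed because the external table points directly at that HDFS location.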
