WordCount - HBase 0.20.x

This post presents a simple WordCount program that writes its results directly into HBase after the MapReduce pass. The example implementation follows:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class WordCountHBase
{
    // Mapper: splits each input line into words and emits (word, 1) pairs.
    public static class Map extends Mapper<LongWritable,Text,Text, IntWritable>
    {
        private IntWritable i = new IntWritable(1);
        @Override
        public void map(LongWritable key,Text value,Context context) throws IOException, InterruptedException
        {
            String s[] = value.toString().trim().split(" ");
            for( String m : s)
            {
                // Skip empty tokens from consecutive spaces; an empty word
                // would later become an empty (illegal) HBase row key.
                if(m.length() > 0)
                    context.write(new Text(m), i);
            }
        }
    }
    // Reducer: sums the counts for each word and writes one HBase row per
    // word, storing the total in the "content:count" column.
    public static class Reduce extends TableReducer<Text, IntWritable, NullWritable>
    {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            for(IntWritable i : values)
            {
                sum += i.get();
            }

            // Row key = the word itself; the count is stored as string bytes.
            Put put = new Put(Bytes.toBytes(key.toString()));
            put.add(Bytes.toBytes("content"), Bytes.toBytes("count"), Bytes.toBytes(String.valueOf(sum)));
            context.write(NullWritable.get(), put);
        }
    }
    // Creates (or recreates) the target table with a single "content" family.
    public static void createHBaseTable(String tablename)throws IOException
    {
        HTableDescriptor htd = new HTableDescriptor(tablename);
        // The trailing colon is legacy 0.19-style naming; the family is "content".
        HColumnDescriptor col = new HColumnDescriptor("content:");
        htd.addFamily(col);

        HBaseConfiguration config = new HBaseConfiguration();
        HBaseAdmin admin = new HBaseAdmin(config);
        if(admin.tableExists(tablename))
        {
            // Drop any table left over from a previous run to start clean.
            admin.disableTable(tablename);
            admin.deleteTable(tablename);
        }

        System.out.println("create new table: " + tablename);
        admin.createTable(htd);
    }
   
    public static void main(String args[]) throws Exception
    {
        String tablename = "wordcount";
       
        Configuration conf = new Configuration();
        // Tell TableOutputFormat which HBase table to write into.
        conf.set(TableOutputFormat.OUTPUT_TABLE, tablename);
        createHBaseTable(tablename);

        String input = args[0];
        Job job = new Job(conf, "WordCount table with " + input);
       
        job.setJarByClass(WordCountHBase.class);
        job.setNumReduceTasks(3);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
       
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
       
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TableOutputFormat.class);

        // Input comes from HDFS; output goes to the HBase table, so no
        // FileOutputFormat output path is set.
        FileInputFormat.addInputPath(job, new Path(input));
       
        System.exit(job.waitForCompletion(true)?0:1);
    }
}
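
Once the job completes, the result can be spot-checked with the HBase 0.20 client API. A minimal sketch, not part of the original program (the ReadCount class and its argument handling are illustrative assumptions):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadCount
{
    public static void main(String args[]) throws Exception
    {
        // Assumes the WordCount job above has populated the "wordcount" table.
        HTable table = new HTable(new HBaseConfiguration(), "wordcount");
        Get get = new Get(Bytes.toBytes(args[0])); // row key = the word
        Result result = table.get(get);
        byte[] count = result.getValue(Bytes.toBytes("content"), Bytes.toBytes("count"));
        System.out.println(args[0] + " = " + (count == null ? "not found" : Bytes.toString(count)));
    }
}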

As the program above shows, Reduce extends TableReducer&lt;KEYIN,VALUEIN,KEYOUT&gt; directly. However, as the API documentation notes, KEYOUT is ignored by TableOutputFormat, and the output value can only be a Put or a Delete, as the following source shows:

TableOutputFormat.java

public void write(KEY key, Writable value) throws IOException
{
	if (value instanceof Put)
		this.table.put(new Put((Put) value));
	else if (value instanceof Delete)
		this.table.delete(new Delete((Delete) value));
	else
		throw new IOException("Pass a Delete or a Put");
}
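
To illustrate the Delete branch, a TableReducer may emit Delete mutations as well as Puts. A hypothetical variant of the Reduce above (not from the original post) that removes rows for words seen fewer than twice:

// Additionally requires: import org.apache.hadoop.hbase.client.Delete;
public static class PruneReduce extends TableReducer<Text, IntWritable, NullWritable>
{
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
    {
        int sum = 0;
        for(IntWritable i : values)
            sum += i.get();

        if(sum < 2)
        {
            // TableOutputFormat routes this to HTable.delete()
            context.write(NullWritable.get(), new Delete(Bytes.toBytes(key.toString())));
        }
        else
        {
            Put put = new Put(Bytes.toBytes(key.toString()));
            put.add(Bytes.toBytes("content"), Bytes.toBytes("count"), Bytes.toBytes(String.valueOf(sum)));
            context.write(NullWritable.get(), put);
        }
    }
}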

As for which table the output goes to, the "TableOutputFormat.OUTPUT_TABLE" configuration property must be set; equivalently, you can set "hbase.mapred.outputtable" yourself, since the constant is just that string:

TableOutputFormat.java

public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
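
For instance, the two settings below are interchangeable (a minimal sketch; the table name is the one used in this post):

conf.set(TableOutputFormat.OUTPUT_TABLE, "wordcount"); // via the constant
conf.set("hbase.mapred.outputtable", "wordcount");     // via the raw key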

2009-12-08 21:55:33

2 comments on "WordCount - HBase 0.20.x"

  1. carlos says:

    Hello,

    A question about Hadoop:

    Configuration conf = new Configuration();

    Job job = new Job(conf, "WordCount table with " + input);

    Hadoop connects through a Configuration, but every sample I find online just news up the object and hands it to Job. Does that only run in a pseudo-distributed environment? What about a real cluster?
    I tried using addResource to load the old hadoop-site.xml, but it has since been split into three files: core, hdfs, and mapred???
    Also, that approach forces the program to sit on the namenode itself just to reach the xml files.
    If machine1 is the cluster namenode and machine2 is the development machine, how should the Configuration in the program be set up to reach Hadoop?

    Thanks!!!

    2009-12-16 13:59:17

  2. Shen says:

    >> If machine1 is the cluster namenode and machine2 is the development machine, how should the Configuration in the program be set up to reach Hadoop?

    I haven't tried that kind of setup; I usually develop against a pseudo-distributed environment on a single machine.

    2009-12-16 20:17:52
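
For a setup like the one carlos describes, one common approach from the Hadoop/HBase 0.20 era is to set the cluster addresses on the client-side Configuration directly. An untested, minimal sketch (host names and ports here are illustrative assumptions, not from the post):

Configuration conf = new Configuration();
conf.set("fs.default.name", "hdfs://machine1:9000"); // NameNode on machine1 (port is an assumption)
conf.set("mapred.job.tracker", "machine1:9001");     // JobTracker (port is an assumption)
conf.set("hbase.zookeeper.quorum", "machine1");      // where HBase's ZooKeeper quorum runs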


Copyright (C) Ching-Shen Chen. All rights reserved.
