Importing xls data into HBase

 

This is based on one of HBase's bundled examples, but that example has a few problems and needs some changes.
HBase is essentially a BigTable, which really does feel a lot like an xls spreadsheet. Enough small talk; let's get to the code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.log4j.Logger;

/**
 * Sample Uploader MapReduce
 * <p>
 * This is EXAMPLE code.  You will need to change it to work for your context.
 * <p>
 * Uses {@link TableReducer} to put the data into HBase. Change the InputFormat
 * to suit your data.  In this example, we are importing a CSV file.
 * <p>
 * <pre>row,family,qualifier,value</pre>
 * <p>
 * The table and columnfamily we're to insert into must preexist.
 * <p>
 * There is no reducer in this example as it is not necessary and adds
 * significant overhead.  If you need to do any massaging of data before
 * inserting into HBase, you can do this in the map as well.
 * <p>Do the following to start the MR job:
 * <pre>
 * ./bin/hadoop org.apache.hadoop.hbase.mapreduce.SampleUploader /tmp/input.csv TABLE_NAME
 * </pre>
 * <p>
 * This code was written against HBase 0.21 trunk.
 */
public class SampleUploader {

  public static Logger loger = Logger.getLogger(SampleUploader.class);

  private static final String NAME = "SampleUploader";

  static class Uploader
  extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {

    private long checkpoint = 100;
    private long count = 0;

    @Override
    public void map(LongWritable key, Text line, Context context)
    throws IOException {

      // Input is a CSV file
      // Each map() is a single line, where the key is the line number
      // Each line is comma-delimited; row,family,qualifier,value

      // Split CSV line
      String [] values = line.toString().split(",");
      if(values.length != 4) {
        return;
      }

      // Extract each value
      byte [] row = Bytes.toBytes(values[0]);
      byte [] family = Bytes.toBytes(values[1]);
      byte [] qualifier = Bytes.toBytes(values[2]);
      byte [] value = Bytes.toBytes(values[3]);
      loger.info(values[0]+":"+values[1]+":"+values[2]+":"+values[3]);

      // Create Put
      Put put = new Put(row);
      put.add(family, qualifier, value);

      // Uncomment below to disable WAL. This will improve performance but means
      // you will experience data loss in the case of a RegionServer crash.
      // put.setWriteToWAL(false);

      try {
        context.write(new ImmutableBytesWritable(row), put);
      } catch (InterruptedException e) {
        e.printStackTrace();
        loger.error("write到hbase 异常:",e);
      }

      // Set status every checkpoint lines
      if(++count % checkpoint == 0) {
        context.setStatus("Emitting Put " + count);
      }
    }
  }

  /**
   * Job configuration.
   */
  public static Job configureJob(Configuration conf, String [] args)
  throws IOException {
    Path inputPath = new Path(args[0]);
    String tableName = args[1];
    Job job = new Job(conf, NAME + "_" + tableName);
    job.setJarByClass(Uploader.class);
    FileInputFormat.setInputPaths(job, inputPath);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(Uploader.class);
    // No reducers.  Just write straight to table.  Call initTableReducerJob
    // because it sets up the TableOutputFormat.
    loger.error("TableName:"+tableName);
    TableMapReduceUtil.initTableReducerJob(tableName, null, job);
    job.setNumReduceTasks(0);
    return job;
  }

  /**
   * Main entry point.
   *
   * @param args  The command line parameters.
   * @throws Exception When running the job fails.
   */
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if(otherArgs.length != 2) {
      System.err.println("Wrong number of arguments: " + otherArgs.length);
      System.err.println("Usage: " + NAME + " <input> <tablename>");
      System.exit(-1);
    }
    Job job = configureJob(conf, otherArgs);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}


I won't go into MapReduce input/output here; if you're not familiar with it, see the Hadoop column.
[Launching this job is a little different from the previous IndexBuilder example; see that post for the details. What they have in common: both are map-only jobs.]
The xls content looks like this:

key3,family1,column1,xls1
key3,family1,column2,xls11
key4,family1,column1,xls2
key4,family1,column2,xls12


This is in CSV format; an xls file can be exported to CSV, either in code (one way is sketched below) or with any converter you can find by searching.
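
If you prefer to do the conversion in code, here is a minimal sketch using Apache POI (an assumption on my part: POI is not used anywhere in this post, and XlsToCsv is just an illustrative name). It reads the first sheet and writes one comma-separated line per row:

import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;

import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.DataFormatter;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.apache.poi.ss.usermodel.WorkbookFactory;

public class XlsToCsv {

  public static void main(String[] args) throws Exception {
    // args[0] = input .xls/.xlsx, args[1] = output .csv
    Workbook workbook = WorkbookFactory.create(new File(args[0]));
    Sheet sheet = workbook.getSheetAt(0);          // first sheet only
    DataFormatter formatter = new DataFormatter(); // renders each cell as its displayed text
    PrintWriter out = new PrintWriter(new FileWriter(args[1]));
    for (Row row : sheet) {
      StringBuilder line = new StringBuilder();
      for (Cell cell : row) {                      // note: blank cells are skipped by this iterator
        if (line.length() > 0) {
          line.append(',');
        }
        line.append(formatter.formatCellValue(cell));
      }
      out.println(line);
    }
    out.close();
    workbook.close();
  }
}

This assumes a reasonably recent Apache POI on the classpath and that no cell values contain commas, which holds for the row/family/qualifier/value data used here.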
Run the job with the following command:

bin/hadoop jar SampleUploader.jar SampleUploader /tmp/input.csv 'table1'


Here 'table1' is the table created in the previous IndexBuilder post, so I'm just reusing that table [lazy].
Note that the input file must be uploaded to HDFS first, otherwise the job will complain that it cannot find it, because MapReduce reads from the HDFS file system. If you want to create the table and upload the file from Java, see the sketch below.
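
A minimal sketch of that preparation step, written against the same HBase 0.90-era client API as the example above (the local path is a placeholder and PrepareUpload is just an illustrative name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class PrepareUpload {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Create 'table1' with column family 'family1' if it does not exist yet
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.tableExists("table1")) {
      HTableDescriptor desc = new HTableDescriptor("table1");
      desc.addFamily(new HColumnDescriptor("family1"));
      admin.createTable(desc);
    }

    // Copy the local CSV into HDFS so the MapReduce job can find it
    FileSystem fs = FileSystem.get(conf);
    fs.copyFromLocalFile(new Path("/local/path/input.csv"), // placeholder local path
                         new Path("/tmp/input.csv"));
  }
}

Equivalently, you can do the same from the command line with create 'table1', 'family1' in the HBase shell and bin/hadoop fs -put input.csv /tmp/input.csv.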

 

http://www.iteye.com/topic/1117572
