Reading HBase data with Spark for distributed computation


Spark only ships its hbaseTest example in Scala; there is no Java version. I ported the Scala version to Java and added some computation on top of the data.

Goal of the program: query the users in HBase that match a given condition and count how many users there are at each level.

The code is below; the comments explain the details:

package com.sdyc.ndspark.sys;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;

/**
 * <pre>
 * Spark + HBase test.
 *
 * Created with IntelliJ IDEA.
 * User: zhangdonghao
 * Date: 14-1-26
 * Time: 9:24 AM
 * </pre>
 *
 * @author zhangdonghao
 */
public class HbaseTest implements Serializable {

    private static final Log log = LogFactory.getLog(HbaseTest.class);

    /**
     * Serializes a Scan into a Base64-encoded string. Copied from
     * org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.
     *
     * @param scan the scan to encode
     * @return the Base64-encoded scan
     * @throws IOException if the scan cannot be serialized
     */
    static String convertScanToString(Scan scan) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(out);
        scan.write(dos);
        return Base64.encodeBytes(out.toByteArray());
    }

    public void start() {
        // Initialize the SparkContext. The HBase jar must be listed in the
        // jars parameter here, otherwise the job fails with an
        // "unread block data" exception.
        JavaSparkContext sc = new JavaSparkContext("spark://nowledgedata-n3:7077", "hbaseTest",
                "/home/hadoop/software/spark-0.8.1",
                new String[]{"target/ndspark.jar", "target/dependency/hbase-0.94.6.jar"});

        // HBaseConfiguration.create() reads the Hadoop and HBase configuration
        // files, which must be on the project classpath.
        Configuration conf = HBaseConfiguration.create();
        // Set the query conditions; only the user's level (levelCode) is returned.
        Scan scan = new Scan();
        scan.setStartRow(Bytes.toBytes("195861-1035177490"));
        scan.setStopRow(Bytes.toBytes("195861-1072173147"));
        scan.addFamily(Bytes.toBytes("info"));
        scan.addColumn(Bytes.toBytes("info"), Bytes.toBytes("levelCode"));

        try {
            // name of the HBase table to read
            String tableName = "usertable";
            conf.set(TableInputFormat.INPUT_TABLE, tableName);
            conf.set(TableInputFormat.SCAN, convertScanToString(scan));

            // read the scan results as an RDD of (row key, Result) pairs
            JavaPairRDD<ImmutableBytesWritable, Result> hBaseRDD = sc.newAPIHadoopRDD(conf,
                    TableInputFormat.class, ImmutableBytesWritable.class,
                    Result.class);

            // pull the user's level out of each Result and emit (level, 1);
            // rows without a levelCode are mapped to null
            JavaPairRDD<Integer, Integer> levels = hBaseRDD.map(
                    new PairFunction<Tuple2<ImmutableBytesWritable, Result>, Integer, Integer>() {
                        @Override
                        public Tuple2<Integer, Integer> call(
                                Tuple2<ImmutableBytesWritable, Result> row)
                                throws Exception {
                            byte[] levelBytes = row._2().getValue(
                                    Bytes.toBytes("info"), Bytes.toBytes("levelCode"));
                            if (levelBytes != null) {
                                return new Tuple2<Integer, Integer>(Bytes.toInt(levelBytes), 1);
                            }
                            return null;
                        }
                    });

            // rows that lacked a levelCode became null tuples above; drop them
            // before the shuffle, otherwise reduceByKey throws a
            // NullPointerException on the null elements
            JavaPairRDD<Integer, Integer> validLevels = levels.filter(
                    new Function<Tuple2<Integer, Integer>, Boolean>() {
                        @Override
                        public Boolean call(Tuple2<Integer, Integer> tuple) {
                            return tuple != null;
                        }
                    });

            // sum up the occurrences of each level
            JavaPairRDD<Integer, Integer> counts = validLevels.reduceByKey(
                    new Function2<Integer, Integer, Integer>() {
                        @Override
                        public Integer call(Integer i1, Integer i2) {
                            return i1 + i2;
                        }
                    });

            // print the final result
            List<Tuple2<Integer, Integer>> output = counts.collect();
            for (Tuple2<Integer, Integer> tuple : output) {
                System.out.println(tuple._1 + ": " + tuple._2);
            }

        } catch (Exception e) {
            log.warn(e);
        }

    }

    /**
     * If the Spark computation is not written inside main, the class that
     * implements it must implement the Serializable interface, otherwise the
     * job fails with "Task not serializable: java.io.NotSerializableException".
     */
    public static void main(String[] args) throws InterruptedException {

        new HbaseTest().start();

        System.exit(0);
    }
}
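
One caveat on convertScanToString: the scan.write call relies on Scan implementing Writable, which holds for the HBase 0.94 used here. HBase 0.96 and later dropped Writable support in favor of protobuf, so the copied helper no longer compiles there. Under those versions the equivalent serialization (roughly what TableMapReduceUtil.convertScanToString does in 0.96+; an untested sketch, not verified against this example) would look like:

import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

    static String convertScanToString(Scan scan) throws IOException {
        // HBase 0.96+ serializes a Scan through its protobuf representation
        ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
        return Base64.encodeBytes(proto.toByteArray());
    }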

The output of a run:

0: 28528
11: 708
4: 28656
2: 36315
6: 23848
8: 19802
10: 6913
9: 15988
3: 31950
1: 38872
7: 21600
5: 27190
12: 17
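
Incidentally, Bytes.toInt on the read side implies that levelCode is stored as the 4-byte encoding of a Java int, not as a string. A minimal, hypothetical write-side sketch with the HBase 0.94 client API (table name, family and row key taken from the example above; the level value 3 is made up):

        HTable table = new HTable(conf, "usertable");
        Put put = new Put(Bytes.toBytes("195861-1035177490")); // a row key inside the scanned range
        // store level 3 in the 4-byte int encoding that Bytes.toInt expects
        put.add(Bytes.toBytes("info"), Bytes.toBytes("levelCode"), Bytes.toBytes(3));
        table.put(put);
        table.close();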
