Big Data in Practice: Setting Up an HBase Client Development Environment with Eclipse on Windows (Java Edition)
Author: Zhang Ziliang
All rights reserved. Please credit the source when reposting.
Introduction:
In big-data scenarios the advantages of NoSQL databases are obvious, yet most hands-on material on developing against them targets Linux and is often sketchy, and coverage of the Windows platform is scarcer still, with much of it simply not working. This article walks through setting up an HBase client development environment with Eclipse on Windows. A Thrift-based HBase client library for Windows has also been packaged; leave a comment if you need it.
1. Development Environment
Operating system: Windows XP SP3
IDE: Eclipse 3.6
Virtual machine: VMware
Server environment: Hadoop 1.1.0 + HBase 0.94
Note: Cygwin is not required to emulate a Linux environment on Windows.
2. Environment Configuration
2.1 System Settings
Edit the Windows hosts file C:\WINDOWS\system32\drivers\etc\hosts and add an entry for the server:
127.0.0.1 localhost
127.0.0.1 microsof-c2f4ea
192.168.230.133 hadoop1
2.2 Eclipse Settings
Launch Eclipse and create a new Java project named "HBaseClient". Right-click the project root, choose "Properties" -> "Java Build Path" -> "Libraries" -> "Add External JARs", and add hbase-0.94.0.jar and hbase-0.94.0-tests.jar from the root of the unpacked HBase distribution, plus every JAR under its lib subdirectory, to the project classpath. Also copy the server-side configuration file hbase-site.xml onto the project classpath. The configuration file is shown below:
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://192.168.230.133:9000/hbase</value>
    <description>The directory shared by region servers.</description>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
    <description>The mode the cluster will be in. Possible values are
      false: standalone and pseudo-distributed setups with managed Zookeeper
      true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
    </description>
  </property>
  <property>
    <name>hbase.master</name>
    <value>hdfs://192.168.230.133:60000</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>192.168.230.133</value>
    <description>Comma separated list of servers in the ZooKeeper Quorum. For example,
      "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com". By default this is set to
      localhost for local and pseudo-distributed modes of operation. For a fully-distributed
      setup, this should be set to a full list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK
      is set in hbase-env.sh this is the list of servers which we will start/stop ZooKeeper on.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/hadoop/log/zookeeper</value>
    <description>Property from ZooKeeper's config zoo.cfg.
      The directory where the snapshot is stored.
    </description>
  </property>
</configuration>
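With hbase-site.xml on the classpath, a quick way to confirm that the client can actually reach the cluster before writing any table code is to let HBaseConfiguration.create() pick the file up and ask the admin API whether HBase is available. The following is a minimal sketch against the HBase 0.94 client API; the class name ConnectivityCheck is just an illustrative choice.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
public class ConnectivityCheck {
    public static void main(String[] args) {
        // HBaseConfiguration.create() reads hbase-site.xml from the classpath,
        // so the ZooKeeper quorum configured above is picked up automatically.
        Configuration conf = HBaseConfiguration.create();
        try {
            // Throws MasterNotRunningException / ZooKeeperConnectionException
            // if the cluster cannot be reached.
            HBaseAdmin.checkHBaseAvailable(conf);
            System.out.println("HBase cluster is reachable.");
        } catch (Exception e) {
            System.out.println("Cannot connect to HBase: " + e);
        }
    }
}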
3. Program Source Code
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
public class HBaseTest {

    private static Configuration conf = null;

    /**
     * Initialize the client configuration.
     */
    static {
        Configuration HBASE_CONFIG = new Configuration();
        HBASE_CONFIG.set("hbase.master", "192.168.230.133:60000");
        // Must match hbase.zookeeper.quorum in hbase/conf/hbase-site.xml
        HBASE_CONFIG.set("hbase.zookeeper.quorum", "192.168.230.133");
        // Must match hbase.zookeeper.property.clientPort in hbase/conf/hbase-site.xml
        HBASE_CONFIG.set("hbase.zookeeper.property.clientPort", "2181");
        conf = HBaseConfiguration.create(HBASE_CONFIG);
    }
    /**
     * Create a table with the given column families.
     */
    public static void createTable(String tableName, String[] familys) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            System.out.println("table already exists!");
        } else {
            HTableDescriptor tableDesc = new HTableDescriptor(tableName);
            for (int i = 0; i < familys.length; i++) {
                tableDesc.addFamily(new HColumnDescriptor(familys[i]));
            }
            admin.createTable(tableDesc);
            System.out.println("create table " + tableName + " ok.");
        }
    }
    /**
     * Disable and delete a table.
     */
    public static void deleteTable(String tableName) throws Exception {
        try {
            HBaseAdmin admin = new HBaseAdmin(conf);
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
            System.out.println("delete table " + tableName + " ok.");
        } catch (MasterNotRunningException e) {
            e.printStackTrace();
        } catch (ZooKeeperConnectionException e) {
            e.printStackTrace();
        }
    }
    /**
     * Insert a single cell into a row.
     */
    public static void addRecord(String tableName, String rowKey, String family, String qualifier, String value)
            throws Exception {
        try {
            HTable table = new HTable(conf, tableName);
            Put put = new Put(Bytes.toBytes(rowKey));
            put.add(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(value));
            table.put(put);
            table.close();
            System.out.println("insert record " + rowKey + " to table " + tableName + " ok.");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Delete a single row.
     */
    public static void delRecord(String tableName, String rowKey) throws IOException {
        HTable table = new HTable(conf, tableName);
        List<Delete> list = new ArrayList<Delete>();
        Delete del = new Delete(Bytes.toBytes(rowKey));
        list.add(del);
        table.delete(list);
        table.close();
        System.out.println("del record " + rowKey + " ok.");
    }
    /**
     * Fetch and print a single row.
     */
    public static void getOneRecord(String tableName, String rowKey) throws IOException {
        HTable table = new HTable(conf, tableName);
        Get get = new Get(Bytes.toBytes(rowKey));
        Result rs = table.get(get);
        for (KeyValue kv : rs.raw()) {
            System.out.print(new String(kv.getRow()) + " ");
            System.out.print(new String(kv.getFamily()) + ":");
            System.out.print(new String(kv.getQualifier()) + " ");
            System.out.print(kv.getTimestamp() + " ");
            System.out.println(new String(kv.getValue()));
        }
        table.close();
    }
    /**
     * Scan and print every row in the table.
     */
    public static void getAllRecord(String tableName) {
        try {
            HTable table = new HTable(conf, tableName);
            Scan s = new Scan();
            ResultScanner ss = table.getScanner(s);
            for (Result r : ss) {
                for (KeyValue kv : r.raw()) {
                    System.out.print(new String(kv.getRow()) + " ");
                    System.out.print(new String(kv.getFamily()) + ":");
                    System.out.print(new String(kv.getQualifier()) + " ");
                    System.out.print(kv.getTimestamp() + " ");
                    System.out.println(new String(kv.getValue()));
                }
            }
            ss.close();
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    public static void main(String[] args) {
        try {
            String tablename = "scores";
            String[] familys = {"grade", "course"};
            HBaseTest.createTable(tablename, familys);
            // add record zkb
            HBaseTest.addRecord(tablename, "zkb", "grade", "", "5");
            HBaseTest.addRecord(tablename, "zkb", "course", "", "90");
            HBaseTest.addRecord(tablename, "zkb", "course", "math", "97");
            HBaseTest.addRecord(tablename, "zkb", "course", "art", "87");
            // add record baoniu
            HBaseTest.addRecord(tablename, "baoniu", "grade", "", "4");
            HBaseTest.addRecord(tablename, "baoniu", "course", "math", "89");
            System.out.println("===========get one record========");
            HBaseTest.getOneRecord(tablename, "zkb");
            System.out.println("===========show all record========");
            HBaseTest.getAllRecord(tablename);
            System.out.println("===========del one record========");
            HBaseTest.delRecord(tablename, "baoniu");
            System.out.println("===========show all record========");
            HBaseTest.getAllRecord(tablename);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
4. Runtime Screenshot
The HBase Web UI shows that the table scores has been created; its column-family structure is shown in the screenshot below:
5. Execution Results
Run the program directly in Eclipse; the console output is shown in the screenshot below:
6. About Put
The example above gets the basic operations working, but nothing has been done to optimize insert performance. A follow-up article will cover the factors that affect Put performance and the different ways of issuing Puts, as a dedicated HBase performance-tuning topic.
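As a small preview of that topic, the sketch below shows one commonly used lever in the 0.94 client: disabling auto-flush and batching Puts through the client-side write buffer so that many small mutations reach the region servers in fewer RPCs. It is only an illustrative sketch meant to be added to the HBaseTest class above (it reuses its static conf field); the method name addRecordsBatched and the buffer size are hypothetical example choices, not measured recommendations.
    /**
     * Illustrative sketch only: batch Puts through the client write buffer (HBase 0.94 API).
     */
    public static void addRecordsBatched(String tableName, List<Put> puts) throws IOException {
        HTable table = new HTable(conf, tableName);
        try {
            // Disable per-Put flushing so Puts accumulate in the client-side buffer.
            table.setAutoFlush(false);
            // Example buffer size (2 MB); tune to the actual workload.
            table.setWriteBufferSize(2 * 1024 * 1024);
            // Hand the whole batch to the client library in one call.
            table.put(puts);
            // Push anything still sitting in the buffer to the region servers.
            table.flushCommits();
        } finally {
            table.close();
        }
    }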
Author: Zhang Ziliang
Source: http://www.cnblogs.com/hadoopdev
The copyright of this article belongs to the author. Reposting is welcome, but without the author's consent this notice must be kept and a clearly visible link to the original article must be provided on the page; otherwise the author reserves the right to pursue legal liability.