Environment
Deployment
- Set HADOOP_HOME and HIVE_HOME for the hadoop user (append to ~/.bashrc rather than overwriting it)
su - hadoop
echo "export HADOOP_HOME=/home/hadoop/src/hadoop-2.7.2
export HIVE_HOME=/home/hadoop/src/apache-hive-1.2.1-bin" >> ~/.bashrc
source ~/.bashrc
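A quick sanity check that the variables resolve to the install directories above (paths assume the layout used in this guide):
echo $HADOOP_HOME    # expect /home/hadoop/src/hadoop-2.7.2
echo $HIVE_HOME      # expect /home/hadoop/src/apache-hive-1.2.1-bin
$HADOOP_HOME/bin/hadoop version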
- Install mysql-server to hold the Hive metastore
yum install mysql-server -y
service mysqld start
mysql -uroot -p -e "create database metastore_db;"
mysql -uroot -p -e "grant all on metastore_db.* to hadoop@'%' identified by 'redhat';"
mysql -uroot -p -e "flush privileges;"
Add the following properties inside the <configuration> element of $HIVE_HOME/conf/hive-site.xml.
<!-- metastore database username: hadoop -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hadoop</value>
<description>Username to use against metastore database</description>
</property>
<!-- metastore database password: redhat -->
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>redhat</value>
<description>password to use against metastore database</description>
</property>
<!-- JDBC URL: MySQL host [master.hdp.imdst.com], database [metastore_db] -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master.hdp.imdst.com:3306/metastore_db?characterEncoding=UTF-8</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<!-- HDFS scratch directory for Hive jobs (created automatically) -->
<property>
<name>hive.exec.scratchdir</name>
<value>hdfs://master.hdp.imdst.com:9000/hive/warehouse</value>
<description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an
HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, with ${hive.scratch.dir.permission}.</description>
</property>
<!-- Local scratch directory for Hive jobs (customizable) -->
<property>
<name>hive.exec.local.scratchdir</name>
<value>/data/hive/iotmp</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/data/hive/iotmp/${hive.session.id}_resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
<!-- JDBC driver class for the MySQL metastore -->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
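Two preparation steps before initializing the schema: create the local scratch directory configured above, and put a MySQL JDBC driver on Hive's classpath (the jar path below is a placeholder; use wherever your mysql-connector-java jar actually lives):
mkdir -p /data/hive/iotmp
cp /path/to/mysql-connector-java-*.jar $HIVE_HOME/lib/   # placeholder path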
- Initialize the metastore schema with schematool
su - hadoop
cd src/apache-hive-1.2.1-bin
./bin/schematool -dbType mysql -initSchema
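If initialization succeeded, the metastore tables (e.g. DBS, TBLS) should now exist in MySQL; a quick check:
mysql -uhadoop -predhat metastore_db -e "show tables;"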
- Start the HiveMetaStore and HiveServer2 services
nohup bin/hive --service metastore &
nohup bin/hive --service hiveserver2 &
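Both services should now be listening on their default ports (9083 for the metastore, 10000 for HiveServer2, the port Beeline connects to below); a quick check:
netstat -tnl | grep -E ':(9083|10000)'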
Quick test
[hadoop@master bin]$ ./beeline
Beeline version 1.2.1 by Apache Hive
beeline> !connect jdbc:hive2://master.hdp.imdst.com:10000
Connecting to jdbc:hive2://master.hdp.imdst.com:10000
Enter username for jdbc:hive2://master.hdp.imdst.com:10000:
Enter password for jdbc:hive2://master.hdp.imdst.com:10000:
Connected to: Apache Hive (version 1.2.1)
Driver: Hive JDBC (version 1.2.1)
Transaction isolation: TRANSACTION_REPEATABLE_READ
0: jdbc:hive2://master.hdp.imdst.com:10000> show databases;
+----------------+--+
| database_name |
+----------------+--+
| default |
| nimeia |
+----------------+--+
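The same check can be scripted as a non-interactive one-liner; -n passes the OS user name, adjust if HiveServer2 authentication is configured differently:
./beeline -u jdbc:hive2://master.hdp.imdst.com:10000 -n hadoop -e "show databases;"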