Using Spark SQL: Accessing the Thrift JDBC Server via JDBC Code


Start the Thrift JDBC Server:

cd $SPARK_HOME/sbin
./start-thriftserver.sh &
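
The server listens on port 10000 by default, which is the port the client code below connects to. The client goes through the Hive JDBC driver, so that driver has to be on the classpath. A minimal sketch for an sbt build (the artifact version is an assumption and should match your Hive/Spark installation):

libraryDependencies += "org.apache.hive" % "hive-jdbc" % "0.13.1"  // version is an assumption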

Code snippet for accessing the Thrift JDBC Server via JDBC:

package com.luogankun.spark.sql

import java.sql.{Connection, DriverManager, PreparedStatement}

/**
 * Accessing the Thrift JDBC Server via JDBC code
 * @author luogankun
 */
object JdbcThriftServer {
  def main(args: Array[String]) {
    // Load the HiveServer2 JDBC driver
    Class.forName("org.apache.hive.jdbc.HiveDriver")

    // Connect to the Thrift JDBC Server: jdbc:hive2://<host>:<port>/<database>, user, password
    val conn = DriverManager.getConnection("jdbc:hive2://Hadoop000:10000/default", "hadoop", "")
    val pstat = conn.prepareStatement("SELECT track_time, url, session_id, referer, ip, end_user_id, city_id FROM page_views WHERE city_id = -1000 limit 10")
    val rs = pstat.executeQuery()
    while (rs.next()) {
      println("track_time: " + rs.getString("track_time")
        + ", url: " + rs.getString("url")
        + ", session_id: " + rs.getString("session_id")
        + ", referer: " + rs.getString("referer")
        + ", ip: " + rs.getString("ip")
        + ", end_user_id: " + rs.getString("end_user_id")
        + ", city_id: " + rs.getString("city_id"))

    }
    rs.close()
    pstat.close()
    conn.close()
  }
}
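
For reference, the same query can also be written with a bound parameter instead of a hard-coded city_id, and with try/finally so the connection is always closed even if the query fails. This is only a sketch under the same assumptions as the example above (same table, columns, and connection URL); the object name JdbcThriftServerParam is made up for illustration:

package com.luogankun.spark.sql

import java.sql.DriverManager

object JdbcThriftServerParam {
  def main(args: Array[String]) {
    Class.forName("org.apache.hive.jdbc.HiveDriver")

    val conn = DriverManager.getConnection("jdbc:hive2://Hadoop000:10000/default", "hadoop", "")
    try {
      // Bind the city_id filter at run time instead of embedding it in the SQL text
      val pstat = conn.prepareStatement(
        "SELECT track_time, url, session_id FROM page_views WHERE city_id = ? limit 10")
      pstat.setInt(1, -1000)
      val rs = pstat.executeQuery()
      while (rs.next()) {
        println("track_time: " + rs.getString("track_time")
          + ", url: " + rs.getString("url")
          + ", session_id: " + rs.getString("session_id"))
      }
      rs.close()
      pstat.close()
    } finally {
      conn.close() // release the connection even when the query throws
    }
  }
}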
