I can't get data from a DataFrame in Spark into the HBase table I created. The problem seems to be that, going by the examples online, this approach only works with Datasets, but I have a DataFrame holding the information. Please help!
// HBase table (created in the HBase shell)
create 'weatherHB', 'STATIONID','OBSERVATIONTS','TEMPERATURE'
def catalog = s"""{
"table":{"namespace":"default", "name":"weatherHB"},
"rowkey":"key",
"columns":{
"RecordID":{"cf":"RecordID","col":"key","type":"string"},
"StationID":{"cf":"STATIONID","col":"stationID","type":"string"},
"ObservationTSMonth":{"cf":"OBSERVATIONTS","col":"observationTSMonth","type":"string"},
"ObservationTSDay":{"cf":"OBSERVATIONTS","col":"observationTSDay","type":"string"},
"ObservationTSHour":{"cf":"OBSERVATIONTS","col":"observationTSHour","type":"string"},
"Temperature":{"cf":"TEMPERATURE","col":"temp","type":"string"}
}
}""".stripMargin
case class TempHeader(
recordId: String,
station: String,
month: String,
date: String,
hour: String,
temperature: Double)
import spark.implicits._
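// Parse fixed-width weather records from the file "1902" into TempHeader rows;
// the division by 10 below suggests the raw temperature field holds tenths of a degree.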
val weatherDF = spark.sparkContext.textFile("1902").
map(
rec => List (
rec.substring(1,26).trim(),
rec.substring(4,10).trim(),
rec.substring(19,21).trim(),
rec.substring(21,23).trim(),
rec.substring(23,25).trim(),
rec.substring(87,92).trim()
) ).
map( att => TempHeader( att(0), att(1), att(2), att(3), att(4), (att(5).trim.toDouble)/10)).toDF()
weatherDF.printSchema()
weatherDF.createOrReplaceTempView("TEMP")
val query = spark.sql("""SELECT month, max(temperature), min(temperature), avg(temperature) FROM TEMP GROUP BY month ORDER BY month""".stripMargin)
query.show(10)
import org.apache.spark.sql.execution.datasources.hbase._
weatherDF.write.
  options(Map(HBaseTableCatalog.tableCatalog -> catalog, HBaseTableCatalog.newTable -> "6")).
  format("org.apache.spark.sql.execution.datasources.hbase").
  save()
1 Answer
It looks like your catalog has a couple of problems: the column family of RecordID should be rowkey, and the data type of the temp column should be double. The documentation of the Spark HBase connector gives a proper example. Your catalog should look something like this (I have not tested it):
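// untested sketch: the rowkey mapping must use the literal column family "rowkey",
// and temp is declared as double so it is written as a numeric value
def catalog = s"""{
  "table":{"namespace":"default", "name":"weatherHB"},
  "rowkey":"key",
  "columns":{
    "RecordID":{"cf":"rowkey","col":"key","type":"string"},
    "StationID":{"cf":"STATIONID","col":"stationID","type":"string"},
    "ObservationTSMonth":{"cf":"OBSERVATIONTS","col":"observationTSMonth","type":"string"},
    "ObservationTSDay":{"cf":"OBSERVATIONTS","col":"observationTSDay","type":"string"},
    "ObservationTSHour":{"cf":"OBSERVATIONTS","col":"observationTSHour","type":"string"},
    "Temperature":{"cf":"TEMPERATURE","col":"temp","type":"double"}
  }
}""".stripMargin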
In addition, your case class should match this catalog:
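// field names must equal the Spark-side column names declared in the catalog,
// and Temperature becomes a Double to match the "double" type above
case class TempHeader(
  RecordID: String,
  StationID: String,
  ObservationTSMonth: String,
  ObservationTSDay: String,
  ObservationTSHour: String,
  Temperature: Double)

The connector maps DataFrame columns to HBase cells by these names, so the DataFrame you write must carry exactly the column names listed under "columns". Note that after this rename, your temp-view query would also need to reference ObservationTSMonth and Temperature instead of month and temperature.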