我创建了这样一个 Hive 表:
-- External table over pre-existing Parquet data.
-- NOTE(review): Hive resolves Parquet columns against this declared schema;
-- if the files at LOCATION were written with different column types/order,
-- reads fail at fetch time (e.g. "Cannot inspect ... DoubleWritable") — verify
-- the declared types against the actual footer schema of the files.
CREATE EXTERNAL TABLE table_df (
    v1  STRING,
    v2  STRING,
    v3  STRING,
    v4  STRING,
    v5  STRING,
    v6  STRING,
    v7  STRING,
    v8  STRING,
    v9  STRING,
    v10 STRING,
    v11 STRING,
    v12 STRING,
    v13 STRING,
    v14 STRING,
    v15 STRING,
    v16 STRING,
    v17 STRING,
    v18 STRING,
    v19 STRING,
    v20 STRING,
    v21 STRING,
    v22 STRING,
    v23 STRING,
    v24 STRING,
    v25 STRING,
    v26 STRING,
    v27 STRING,
    v28 STRING,
    v29 STRING,
    v30 STRING,
    v31 STRING,
    v32 DOUBLE,
    v33 INT,
    v34 INT,
    v35 INT
)
STORED AS PARQUET
LOCATION '/data/test/table_df.parquet';
Parquet 文件的 schema 如下:
root
|-- v1: string (nullable = true)
|-- v2: string (nullable = true)
|-- v3: string (nullable = true)
|-- v4: string (nullable = true)
|-- v5: string (nullable = true)
|-- v6: string (nullable = true)
|-- v7: string (nullable = true)
|-- v8: string (nullable = true)
|-- v9: string (nullable = true)
|-- v10: string (nullable = true)
|-- v11: string (nullable = true)
|-- v12: string (nullable = true)
|-- v13: string (nullable = true)
|-- v14: string (nullable = true)
|-- v15: string (nullable = true)
|-- v16: string (nullable = true)
|-- v17: string (nullable = true)
|-- v18: string (nullable = true)
|-- v19: string (nullable = true)
|-- v20: string (nullable = true)
|-- v21: string (nullable = true)
|-- v22: string (nullable = true)
|-- v23: string (nullable = true)
|-- v24: string (nullable = true)
|-- v25: string (nullable = true)
|-- v26: string (nullable = true)
|-- v27: string (nullable = true)
|-- v28: string (nullable = true)
|-- v29: string (nullable = true)
|-- v30: string (nullable = true)
|-- v31: string (nullable = true)
|-- v32: double (nullable = true)
|-- v33: integer (nullable = true)
|-- v34: integer (nullable = true)
|-- v35: integer (nullable = true)
执行下面这条查询时出现问题:
select * from table_df
得到以下错误信息:
Bad status for request TFetchResultsReq(fetchType=0, operationHandle=TOperationHandle(hasResultSet=True, modifiedRowCount=None, operationType=0, operationId=THandleIdentifier(secret='b`a!2RA\xb7\x85\xb5u\xb5\x06\xe4,\x16', guid='\xcf\xbde\xc0\xc7%C\xe1\x9c\xf2\x10\x8d\xc1\xb2=\xec')), orientation=4, maxRows=100): TFetchResultsResp(status=TStatus(errorCode=0, errorMessage='java.io.IOException: org.apache.hadoop.hive.ql.metadata.HiveException: java.lang.UnsupportedOperationException: Cannot inspect org.apache.hadoop.hive.serde2.io.DoubleWritable', sqlState=None, infoMessages=['*org.apache.hive.service.cli.HiveSQLException:java.io.IOException: org.apache.hadoop.hive.ql.metadata.HiveException: java.lang.UnsupportedOperationException: Cannot inspect org.apache.hadoop.hive.serde2.io.DoubleWritable:14:13', 'org.apache.hive.service.cli.operation.SQLOperation:getNextRowSet:SQLOperation.java:415', 'org.apache.hive.service.cli.operation.OperationManager:getOperationNextRowSet:OperationManager.java:233', 'org.apache.hive.service.cli.session.HiveSessionImpl:fetchResults:HiveSessionImpl.java:780', 'org.apache.hive.service.cli.CLIService:fetchResults:CLIService.java:478', 'org.apache.hive.service.cli.thrift.ThriftCLIService:FetchResults:ThriftCLIService.java:692', 'org.apache.hive.service.cli.thrift.TCLIService$Processor$FetchResults:getResult:TCLIService.java:1557', 'org.apache.hive.service.cli.thrift.TCLIService$Processor$FetchResults:getResult:TCLIService.java:1542', 'org.apache.thrift.ProcessFunction:process:ProcessFunction.java:39', 'org.apache.thrift.TBaseProcessor:process:TBaseProcessor.java:39', 'org.apache.hive.service.auth.TSetIpAddressProcessor:process:TSetIpAddressProcessor.java:56', 'org.apache.thrift.server.TThreadPoolServer$WorkerProcess:run:TThreadPoolServer.java:286', 'java.util.concurrent.ThreadPoolExecutor:runWorker:ThreadPoolExecutor.java:1142', 'java.util.concurrent.ThreadPoolExecutor$Worker:run:ThreadPoolExecutor.java:617', 
'java.lang.Thread:run:Thread.java:745', '*java.io.IOException:org.apache.hadoop.hive.ql.metadata.HiveException: java.lang.UnsupportedOperationException: Cannot inspect org.apache.hadoop.hive.serde2.io.DoubleWritable:16:2', 'org.apache.hadoop.hive.ql.exec.FetchTask:fetch:FetchTask.java:164', 'org.apache.hadoop.hive.ql.Driver:getResults:Driver.java:1762', 'org.apache.hive.service.cli.operation.SQLOperation:getNextRowSet:SQLOperation.java:410', '*org.apache.hadoop.hive.ql.metadata.HiveException:java.lang.UnsupportedOperationException: Cannot inspect org.apache.hadoop.hive.serde2.io.DoubleWritable:23:7', 'org.apache.hadoop.hive.ql.exec.ListSinkOperator:process:ListSinkOperator.java:93', 'org.apache.hadoop.hive.ql.exec.Operator:forward:Operator.java:838', 'org.apache.hadoop.hive.ql.exec.SelectOperator:process:SelectOperator.java:88', 'org.apache.hadoop.hive.ql.exec.Operator:forward:Operator.java:838', 'org.apache.hadoop.hive.ql.exec.TableScanOperator:process:TableScanOperator.java:133', 'org.apache.hadoop.hive.ql.exec.FetchOperator:pushRow:FetchOperator.java:437', 'org.apache.hadoop.hive.ql.exec.FetchOperator:pushRow:FetchOperator.java:429', 'org.apache.hadoop.hive.ql.exec.FetchTask:fetch:FetchTask.java:146', '*java.lang.UnsupportedOperationException:Cannot inspect org.apache.hadoop.hive.serde2.io.DoubleWritable:28:5', 'org.apache.hadoop.hive.ql.io.parquet.serde.primitive.ParquetStringInspector:getPrimitiveJavaObject:ParquetStringInspector.java:77', 'org.apache.hadoop.hive.ql.io.parquet.serde.primitive.ParquetStringInspector:getPrimitiveJavaObject:ParquetStringInspector.java:28', 'org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils:copyToStandardObject:ObjectInspectorUtils.java:305', 'org.apache.hadoop.hive.serde2.SerDeUtils:toThriftPayload:SerDeUtils.java:168', 'org.apache.hadoop.hive.ql.exec.FetchFormatter$ThriftFormatter:convert:FetchFormatter.java:61', 'org.apache.hadoop.hive.ql.exec.ListSinkOperator:process:ListSinkOperator.java:90'], statusCode=3), 
results=None, hasMoreRows=None)
而执行下面这条查询则没有任何问题:
select v1 from table_df
有人知道是什么原因吗?
1条答案
按热度按时间c6ubokkw1#
这是因为此表指向的文件与表定义的结构不一致。使用 `SHOW CREATE TABLE table_name` 检查表属性,
并确保hdfs文件(该表所指向的)满足这些属性,包括列数和列类型。