... 19/02/20 17:51:11 ERROR tool.ImportTool: Import failed: java.io.IOException: Hive does not support the SQL type for column C
	at org.apache.sqoop.hive.TableDefWriter.getCreateTableStmt(TableDefWriter.java:181)
	at org.apache.sqoop.hive.HiveImport.importTable(HiveImport.java:189)
	at org.apache.sqoop.tool.ImportTool.importTable(ImportTool.java:530)
	at org.apache.sqoop.tool.ImportTool.run(ImportTool.java:621)
	at org.apache.sqoop.Sqoop.run(Sqoop.java:147)
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
	at org.apache.sqoop.Sqoop.runSqoop(Sqoop.java:183)
	at org.apache.sqoop.Sqoop.runTool(Sqoop.java:234)
	at org.apache.sqoop.Sqoop.runTool(Sqoop.java:243)
	at org.apache.sqoop.Sqoop.main(Sqoop.java:252)
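The import fails because Sqoop's Hive table generator has no Hive type mapping for the LOB column C. A common workaround is to drop --hive-import, land the rows in an HDFS directory, and then point an external Hive table at that directory (as shown below). The following command is only a sketch: the JDBC connection string, credentials, and source table name T_LOB are assumptions, not values from the original job.

# land the data in HDFS instead of importing straight into Hive
# (connection string, credentials, and table name are placeholders)
sqoop import \
  --connect jdbc:oracle:thin:@//dbhost:1521/ORCL \
  --username scott --password tiger \
  --table T_LOB \
  --fields-terminated-by ',' \
  --target-dir /tmp/t_lob \
  -m 1

With the data in /tmp/t_lob, the external table definition below exposes it to Hive, and a simple query confirms the LOB column arrived intact.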
create external table t_blob(a string, b string, c string) row format delimited fields terminated by ',' location '/tmp/t_lob';

hive> select a,b,length(c) from t_blob;
Query ID = hdfs_20190220174141_292e9bc8-55de-4a42-ad46-ef507252e3ca
Total jobs = 1
Launching Job 1 out of 1
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1550649972092_0003, Tracking URL = http://p13.esgyncn.local:8088/proxy/application_1550649972092_0003/
Kill Command = /opt/cloudera/parcels/CDH-5.13.0-1.cdh5.13.0.p0.29/lib/hadoop/bin/hadoop job -kill job_1550649972092_0003
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2019-02-20 17:41:24,240 Stage-1 map = 0%, reduce = 0%
2019-02-20 17:41:30,471 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.71 sec
MapReduce Total cumulative CPU time: 3 seconds 710 msec
Ended Job = job_1550649972092_0003
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1   Cumulative CPU: 3.71 sec   HDFS Read: 133288   HDFS Write: 13   SUCCESS
Total MapReduce CPU Time Spent: 3 seconds 710 msec
OK
1	ABC	129230
Time taken: 14.027 seconds, Fetched: 1 row(s)