\connect bigsql

-- Recreate the target table from scratch so the script is idempotent.
drop table if exists stack.issue2;

-- Hadoop table stored as Parquet files in HDFS.
create hadoop table if not exists stack.issue2 (
f1 integer,
f2 integer,
f3 varchar(200),
f4 integer
)
stored as parquetfile;

-- Load sample rows with a single multi-row INSERT: one statement (and one
-- underlying Parquet write) instead of five separate single-row inserts,
-- each of which would create its own small file in HDFS.
insert into stack.issue2 (f1, f2, f3, f4) values
(0, 0, 'Detroit',      0),
(1, 1, 'Mt. Pleasant', 1),
(2, 2, 'Marysville',   2),
(3, 3, 'St. Clair',    3),
(4, 4, 'Port Huron',   4);

-- Verify the load; explicit column list instead of SELECT *.
select f1, f2, f3, f4 from stack.issue2;

-- Export the table as a delimited (DEL/CSV) file. ADMIN_CMD runs EXPORT on
-- the server, so /tmp/t1.unl is written on the Big SQL head node's local
-- filesystem, not on the client machine.
{ call sysproc.admin_cmd('export to /tmp/t1.unl of del select f1, f2, f3, f4 from stack.issue2') };

\quit
[bigsql@myhost ~]$ db2 "create hadoop table test1 ( i int, i2 int , i3 int)"
DB20000I The SQL command completed successfully.
[bigsql@myhost ~]$ db2 "insert into test1 values (1,2,3), (4,5,6),(7,8,9),(0,1,2)"
DB20000I The SQL command completed successfully.
[bigsql@myhost ~]$ db2 "export to output.del of del select * from test1"
SQL3104N The Export utility is beginning to export data to file "output.del".
SQL3105N The Export utility has finished exporting "4" rows.
Number of rows exported: 4
[bigsql@myhost ~]$ cat output.del
1,2,3
4,5,6
7,8,9
0,1,2
3条答案
按热度 / 按时间排序 — 回答 iqxoj9l91#
另一种通过 SQL 提取数据（本例中输出为 CSV 格式）的方法如下:
然后可以从hdfs获取文件。
x8diyxa72#
根据生成的数据文件的大小，可以使用 export 命令将数据导出到一个文本文件中。导出的结果文件最终会位于单个节点上。
我使用以下脚本作为示例:
运行脚本:
产量:
以及导出的文件:
uujelgoq3#
bigsql 的妙处在于，您可以像连接常规 db2 数据库一样进行连接，然后调用 export 导出数据。