java—即使在ApacheFlink中实现了appendtablestream接口,也会出现以下错误

arknldoa  于 2021-06-25  发布在  Flink
关注(0)|答案(1)|浏览(326)

我已经编写了一个简单的示例来接收表,但是即使在实现了AppendTableLink接口之后,在ApacheFlink中也出现了这个异常。

package com.cc.flink.functionUtils;

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Iterator;

    import org.apache.flink.api.common.functions.IterationRuntimeContext;
    import org.apache.flink.api.common.functions.MapFunction;
    import org.apache.flink.api.common.functions.RichFunction;
    import org.apache.flink.api.common.io.OutputFormat;
    import org.apache.flink.api.common.typeinfo.TypeInformation;
    import org.apache.flink.api.java.io.LocalCollectionOutputFormat;
    import org.apache.flink.api.java.tuple.Tuple2;
    import org.apache.flink.api.java.typeutils.TupleTypeInfo;
    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.contrib.streaming.DataStreamUtils;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.datastream.DataStreamSink;
    import org.apache.flink.streaming.api.datastream.DataStreamSource;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.api.functions.sink.SinkFunction;
    import org.apache.flink.streaming.connectors.rabbitmq.RMQSource;
    import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;
    import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
    import org.apache.flink.table.api.Table;
    import org.apache.flink.table.api.TableEnvironment;
    import org.apache.flink.table.api.java.StreamTableEnvironment;
    import org.apache.flink.table.sinks.AppendStreamTableSink;
    import org.apache.flink.table.sinks.RetractStreamTableSink;
    import org.apache.flink.table.sinks.TableSink;
    import org.apache.flink.types.Row;

    public class MyTable implements AppendStreamTableSink<Row>{

        @Override
        public TableSink<Row> configure(String[] arg0, TypeInformation<?>[] arg1) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public String[] getFieldNames() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public TypeInformation<?>[] getFieldTypes() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public TypeInformation<Row> getOutputType() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public void emitDataStream(DataStream<Row> arg0) {
            // TODO Auto-generated method stub
            arg0.print();

        }

        public static void main(String[] args) throws Exception {

            final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            final RMQConnectionConfig connectionConfig = new RMQConnectionConfig.Builder()
                    .setHost("localhost")
                    .setVirtualHost("/")
                    .setUserName("guest")
                    .setPassword("guest")
                    .setPort(5672)
                    .build();

            final DataStream<String> stream = env
                    .addSource(new RMQSource<String>(
                            connectionConfig,            // config for the RabbitMQ connection
                            "test",                 // name of the RabbitMQ queue to consume
                            true,                        // use correlation ids; can be false if only at-least-once is required
                            new SimpleStringSchema()))   // deserialization schema to turn messages into Java objects
                    .setParallelism(1);   

            final ArrayList<String> values = new ArrayList<>();
            StreamTableEnvironment StreamTableEnv = TableEnvironment.getTableEnvironment(env);
            Table fromDataStream = StreamTableEnv.fromDataStream(stream,
                    "member_id");
            StreamTableEnv.registerTable("emp1",fromDataStream);
            Table output =StreamTableEnv.sql("select count(*) from emp1 where member_id Like '%test%'");
            fromDataStream.writeToSink(new MyTable() );
            env.execute();

        }

    }

log4j:warn找不到logger的appender(org.apache.calcite.sql.parser)。log4j:warn请正确初始化log4j系统。log4j:请参阅http://logging.apache.org/log4j/1.2/faq.html#noconfig 更多信息
线程“main”org.apache.flink.table.api.tableexception中出现异常:流表只能由AppendStreamTableLink、retractstreamtable发出
在org.apache.flink.table.api.streamtableenvironment.writetosink(streamtableenvironment。scala:219)
在org.apache.flink.table.api.table.writetosink(表。scala:800)
在org.apache.flink.table.api.table.writetosink(表。scala:773)
在com.cc.flink.functionutils.mytable.main(mytable。java:103)

xqkwcwgp

xqkwcwgp1#

您的示例中的问题是您试图使用 AppendTableSink 但你的查询产生了撤回。这是由于 COUNT(*) 在你的陈述中。每当新行到达时,旧的发出计数就不再有效,需要收回。
如果只是一个 SELECT * ,则每个传入行将只生成一个不影响前一行的输出行。

相关问题