This article collects code examples for the Java method org.apache.flink.util.Preconditions.checkNotNull(), showing how Preconditions.checkNotNull() is used in practice. The examples are extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they make solid references. Details of the Preconditions.checkNotNull() method:
Package: org.apache.flink.util
Class: Preconditions
Method: checkNotNull
Description: Ensures that the given object reference is not null. Upon violation, a NullPointerException with no message is thrown.
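For quick orientation before the project snippets, here is a minimal usage sketch. The ServerConfig class and its fields are invented for illustration; the one- and two-argument overloads are the ones that appear in the snippets below:

import static org.apache.flink.util.Preconditions.checkNotNull;

// Hypothetical class, for illustration only.
public class ServerConfig {
    private final String hostname;
    private final String endpoint;

    public ServerConfig(String hostname, String endpoint) {
        // No-message overload: a bare NullPointerException on violation.
        this.hostname = checkNotNull(hostname);
        // Message overload: the exception message names the bad argument.
        this.endpoint = checkNotNull(endpoint, "endpoint must not be null");
    }
}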
Code example source: apache/flink

private GatherSumApplyIteration(GatherFunction<VV, EV, M> gather, SumFunction<VV, EV, M> sum,
        ApplyFunction<K, VV, M> apply, DataSet<Edge<K, EV>> edges, int maximumNumberOfIterations) {
    Preconditions.checkNotNull(gather);
    Preconditions.checkNotNull(sum);
    Preconditions.checkNotNull(apply);
    Preconditions.checkNotNull(edges);
    Preconditions.checkArgument(maximumNumberOfIterations > 0, "The maximum number of iterations must be at least one.");
    this.gather = gather;
    this.sum = sum;
    this.apply = apply;
    this.edgeDataSet = edges;
    this.maximumNumberOfIterations = maximumNumberOfIterations;
}
代码示例来源:origin: apache/flink
PeriodicOffsetCommitter(ZookeeperOffsetHandler offsetHandler,
List<KafkaTopicPartitionState<TopicAndPartition>> partitionStates,
ExceptionProxy errorHandler,
long commitInterval) {
this.offsetHandler = checkNotNull(offsetHandler);
this.partitionStates = checkNotNull(partitionStates);
this.errorHandler = checkNotNull(errorHandler);
this.commitInterval = commitInterval;
checkArgument(commitInterval > 0);
}
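Note the two idioms so far: the GatherSumApplyIteration constructor calls checkNotNull purely for its side effect, while this one assigns the result, since checkNotNull returns the reference it checked. A minimal sketch of the assignment idiom (the Job class here is invented for illustration):

import static org.apache.flink.util.Preconditions.checkNotNull;

public class Job {
    private final String name;

    public Job(String name) {
        // checkNotNull returns its argument, so validation and
        // assignment fit into a single statement.
        this.name = checkNotNull(name, "name");
    }
}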
Code example source: apache/flink

public AggregatingUdf(AggregationFunction<Object>[] aggFunctions, int[] fieldPositions) {
    Preconditions.checkNotNull(aggFunctions);
    Preconditions.checkNotNull(fieldPositions);
    Preconditions.checkArgument(aggFunctions.length == fieldPositions.length);
    this.aggFunctions = aggFunctions;
    this.fieldPositions = fieldPositions;
}
代码示例来源:origin: apache/flink
protected PartFileWriter(
final BucketID bucketId,
final RecoverableFsDataOutputStream currentPartStream,
final long creationTime) {
Preconditions.checkArgument(creationTime >= 0L);
this.bucketId = Preconditions.checkNotNull(bucketId);
this.currentPartStream = Preconditions.checkNotNull(currentPartStream);
this.creationTime = creationTime;
this.lastUpdateTime = creationTime;
}
Code example source: apache/flink

/**
 * Creates a resumable for the given file at the given position.
 *
 * @param targetFile The file to resume.
 * @param tempFile The temporary file holding the data written so far.
 * @param offset The position to resume from.
 */
HadoopFsRecoverable(Path targetFile, Path tempFile, long offset) {
    checkArgument(offset >= 0, "offset must be >= 0");
    this.targetFile = checkNotNull(targetFile, "targetFile");
    this.tempFile = checkNotNull(tempFile, "tempFile");
    this.offset = offset;
}
Code example source: apache/flink

/**
 * Creates a resumable for the given file at the given position.
 *
 * @param targetFile The file to resume.
 * @param tempFile The temporary file holding the data written so far.
 * @param offset The position to resume from.
 */
LocalRecoverable(File targetFile, File tempFile, long offset) {
    checkArgument(offset >= 0, "offset must be >= 0");
    this.targetFile = checkNotNull(targetFile, "targetFile");
    this.tempFile = checkNotNull(tempFile, "tempFile");
    this.offset = offset;
}
Code example source: apache/flink

/**
 * Creates a new {@code StreamingFileSink} that writes files to the given base directory.
 */
private StreamingFileSink(
        final StreamingFileSink.BucketsBuilder<IN, ?> bucketsBuilder,
        final long bucketCheckInterval) {
    Preconditions.checkArgument(bucketCheckInterval > 0L);
    this.bucketsBuilder = Preconditions.checkNotNull(bucketsBuilder);
    this.bucketCheckInterval = bucketCheckInterval;
}
Code example source: apache/flink

/**
 * Creates a new named {@code OutputTag} with the given id and output {@link TypeInformation}.
 *
 * @param id The id of the created {@code OutputTag}.
 * @param typeInfo The {@code TypeInformation} for the side output.
 */
public OutputTag(String id, TypeInformation<T> typeInfo) {
    Preconditions.checkNotNull(id, "OutputTag id cannot be null.");
    Preconditions.checkArgument(!id.isEmpty(), "OutputTag id must not be empty.");
    this.id = id;
    this.typeInfo = Preconditions.checkNotNull(typeInfo, "TypeInformation cannot be null.");
}
Code example source: apache/flink

public AsyncWaitOperator(
        AsyncFunction<IN, OUT> asyncFunction,
        long timeout,
        int capacity,
        AsyncDataStream.OutputMode outputMode) {
    super(asyncFunction);
    chainingStrategy = ChainingStrategy.ALWAYS;
    Preconditions.checkArgument(capacity > 0, "The number of concurrent async operations should be greater than 0.");
    this.capacity = capacity;
    this.outputMode = Preconditions.checkNotNull(outputMode, "outputMode");
    this.timeout = timeout;
}
Code example source: apache/flink

public ChunkedByteBuf(ByteBuf buf, int chunkSize) {
    this.buf = Preconditions.checkNotNull(buf, "Buffer");
    Preconditions.checkArgument(chunkSize > 0, "Non-positive chunk size");
    this.chunkSize = chunkSize;
}
Code example source: apache/flink

public KeyGroupStreamPartitioner(KeySelector<T, K> keySelector, int maxParallelism) {
    Preconditions.checkArgument(maxParallelism > 0, "Number of key-groups must be > 0!");
    this.keySelector = Preconditions.checkNotNull(keySelector);
    this.maxParallelism = maxParallelism;
}
Code example source: apache/flink

public BackPressuringExecutor(Executor delegate, int numConcurrentExecutions) {
    checkArgument(numConcurrentExecutions > 0, "numConcurrentExecutions must be > 0");
    this.delegate = checkNotNull(delegate, "delegate");
    this.permits = new Semaphore(numConcurrentExecutions, true);
}
Code example source: apache/flink

Elasticsearch6ApiCallBridge(List<HttpHost> httpHosts, RestClientFactory restClientFactory) {
    Preconditions.checkArgument(httpHosts != null && !httpHosts.isEmpty());
    this.httpHosts = httpHosts;
    this.restClientFactory = Preconditions.checkNotNull(restClientFactory);
}
Code example source: apache/flink

public AutoClosablePath(final Path path) {
    Preconditions.checkNotNull(path, "Path must not be null.");
    Preconditions.checkArgument(path.isAbsolute(), "Path must be absolute.");
    this.path = path;
}
Code example source: apache/flink

public CassandraOutputFormatBase(String insertQuery, ClusterBuilder builder) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(insertQuery), "Query cannot be null or empty");
    Preconditions.checkNotNull(builder, "Builder cannot be null");
    this.insertQuery = insertQuery;
    this.builder = builder;
}
Code example source: apache/flink

@Override
public CassandraAppendTableSink configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    CassandraAppendTableSink cassandraTableSink = new CassandraAppendTableSink(this.builder, this.cql, this.properties);
    cassandraTableSink.fieldNames = Preconditions.checkNotNull(fieldNames, "Field names must not be null.");
    cassandraTableSink.fieldTypes = Preconditions.checkNotNull(fieldTypes, "Field types must not be null.");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return cassandraTableSink;
}
Code example source: apache/flink

public OrderedStreamElementQueue(
        int capacity,
        Executor executor,
        OperatorActions operatorActions) {
    Preconditions.checkArgument(capacity > 0, "The capacity must be larger than 0.");
    this.capacity = capacity;
    this.executor = Preconditions.checkNotNull(executor, "executor");
    this.operatorActions = Preconditions.checkNotNull(operatorActions, "operatorActions");
    this.lock = new ReentrantLock(false);
    this.headIsCompleted = lock.newCondition();
    this.notFull = lock.newCondition();
    this.queue = new ArrayDeque<>(capacity);
}
Code example source: apache/flink

/**
 * Sets the path of the ORC file(s).
 * If the path specifies a directory, it will be recursively enumerated.
 *
 * @param path The path of the ORC file(s).
 * @return The builder.
 */
public Builder path(String path) {
    Preconditions.checkNotNull(path, "Path must not be null.");
    Preconditions.checkArgument(!path.isEmpty(), "Path must not be empty.");
    this.path = path;
    return this;
}
Code example source: apache/flink

public SocketTextStreamFunction(String hostname, int port, String delimiter, long maxNumRetries, long delayBetweenRetries) {
    checkArgument(port > 0 && port < 65536, "port is out of range");
    checkArgument(maxNumRetries >= -1, "maxNumRetries must be zero or larger (num retries), or -1 (infinite retries)");
    checkArgument(delayBetweenRetries >= 0, "delayBetweenRetries must be zero or positive");
    this.hostname = checkNotNull(hostname, "hostname must not be null");
    this.port = port;
    this.delimiter = delimiter;
    this.maxNumRetries = maxNumRetries;
    this.delayBetweenRetries = delayBetweenRetries;
}
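Across these examples the two checks divide the work: checkNotNull guards object references and throws a NullPointerException, while checkArgument guards value constraints (ports, intervals, capacities) and throws an IllegalArgumentException. A minimal sketch of the difference, assuming the two-argument overloads shown in the snippets above:

import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;

public final class PreconditionsDemo {
    public static void main(String[] args) {
        String hostname = null;
        try {
            checkNotNull(hostname, "hostname must not be null");
        } catch (NullPointerException e) {
            // Reference violation -> NullPointerException.
            System.out.println("checkNotNull threw: " + e.getMessage());
        }

        long maxNumRetries = -2;
        try {
            checkArgument(maxNumRetries >= -1, "maxNumRetries must be >= -1");
        } catch (IllegalArgumentException e) {
            // Value violation -> IllegalArgumentException.
            System.out.println("checkArgument threw: " + e.getMessage());
        }
    }
}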
Code example source: apache/flink

private VertexCentricIteration(ComputeFunction<K, VV, EV, Message> cf,
        DataSet<Edge<K, EV>> edgesWithValue, MessageCombiner<K, Message> mc,
        int maximumNumberOfIterations) {
    Preconditions.checkNotNull(cf);
    Preconditions.checkNotNull(edgesWithValue);
    Preconditions.checkArgument(maximumNumberOfIterations > 0,
        "The maximum number of iterations must be at least one.");
    this.computeFunction = cf;
    this.edgesWithValue = edgesWithValue;
    this.combineFunction = mc;
    this.maximumNumberOfIterations = maximumNumberOfIterations;
    this.messageType = getMessageType(cf);
}
This content was collected from the web; if it infringes your rights, please contact the author to have it removed.