This article collects code examples of the Java method org.apache.kafka.connect.errors.ConnectException.<init>() and shows how ConnectException.<init>() is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references.

Details of the ConnectException.<init>() method:

Package: org.apache.kafka.connect.errors.ConnectException
Class: ConnectException
Method: <init>
Description: none provided
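
For context, ConnectException exposes three constructor overloads, mirroring its parent KafkaException: message only, message plus cause, and cause only. The minimal sketch below (a hypothetical demo class, not taken from any of the projects cited in this article) exercises all three:

import org.apache.kafka.connect.errors.ConnectException;

public class ConnectExceptionDemo {
    public static void main(String[] args) {
        // 1. Message-only constructor
        ConnectException byMessage = new ConnectException("configuration is invalid");

        // 2. Message-and-cause constructor, the most common form in the examples below
        Throwable cause = new IllegalStateException("underlying failure");
        ConnectException wrapped = new ConnectException("task failed", cause);

        // 3. Cause-only constructor
        ConnectException byCause = new ConnectException(cause);

        System.out.println(byMessage.getMessage());   // configuration is invalid
        System.out.println(wrapped.getCause());       // java.lang.IllegalStateException: underlying failure
        System.out.println(byCause.getMessage());     // message derived from the cause's toString()
    }
}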
Code example source: debezium/debezium
private void throwProducerFailureIfPresent() {
    if (producerFailure != null) {
        throw new ConnectException("An exception occurred in the change event producer. This connector will be stopped.", producerFailure);
    }
}
Code example source: debezium/debezium
private <T> T format(final String pattern, final String s, final Supplier<T> value) {
    try {
        return value.get();
    } catch (final DateTimeParseException e) {
        LOGGER.error("Cannot parse time/date value '{}', expected format '{}'", s, pattern);
        throw new ConnectException(e);
    }
}
Code example source: debezium/debezium
private long longOffsetValue(Map<String, ?> values, String key) {
    Object obj = values.get(key);
    if (obj == null) return 0L;
    if (obj instanceof Number) return ((Number) obj).longValue();
    try {
        return Long.parseLong(obj.toString());
    } catch (NumberFormatException e) {
        throw new ConnectException("Source offset '" + key + "' parameter value " + obj + " could not be converted to a long");
    }
}
Code example source: debezium/debezium
private static long longOffsetValue(Map<String, ?> values, String key) {
    Object obj = values.get(key);
    if (obj == null) return 0;
    if (obj instanceof Number) return ((Number) obj).longValue();
    try {
        return Long.parseLong(obj.toString());
    } catch (NumberFormatException e) {
        throw new ConnectException("Source offset '" + key + "' parameter value " + obj + " could not be converted to a long");
    }
}
Code example source: debezium/debezium
private static int intOffsetValue(Map<String, ?> values, String key) {
    Object obj = values.get(key);
    if (obj == null) return 0;
    if (obj instanceof Number) return ((Number) obj).intValue();
    try {
        return Integer.parseInt(obj.toString());
    } catch (NumberFormatException e) {
        throw new ConnectException("Source offset '" + key + "' parameter value " + obj + " could not be converted to an integer");
    }
}
Code example source: debezium/debezium
private PreparedStatement createPreparedStatement(String preparedQueryString) {
    return statementCache.computeIfAbsent(preparedQueryString, query -> {
        try {
            LOGGER.trace("Inserting prepared statement '{}' removed from the cache", query);
            return connection().prepareStatement(query);
        }
        catch (SQLException e) {
            throw new ConnectException(e);
        }
    });
}
Code example source: debezium/debezium
protected static ColumnMapper instantiateMapper(Class<ColumnMapper> clazz, Configuration config) {
    try {
        ColumnMapper mapper = clazz.newInstance();
        if (config != null) {
            mapper.initialize(config);
        }
        return mapper;
    } catch (InstantiationException e) {
        throw new ConnectException("Unable to instantiate column mapper class " + clazz.getName() + ": " + e.getMessage(), e);
    } catch (IllegalAccessException e) {
        throw new ConnectException("Unable to access column mapper class " + clazz.getName() + ": " + e.getMessage(), e);
    } catch (Throwable e) {
        throw new ConnectException("Unable to initialize the column mapper class " + clazz.getName() + ": " + e.getMessage(), e);
    }
}
Code example source: debezium/debezium
private String parseType(String columnName, String typeWithModifiers) {
    Matcher m = AbstractReplicationMessageColumn.TypeMetadataImpl.TYPE_PATTERN.matcher(typeWithModifiers);
    if (!m.matches()) {
        LOGGER.error("Failed to parse columnType for {} '{}'", columnName, typeWithModifiers);
        throw new ConnectException(String.format("Failed to parse columnType '%s' for column %s", typeWithModifiers, columnName));
    }
    String baseType = m.group("base").trim();
    final String suffix = m.group("suffix");
    if (suffix != null) {
        baseType += suffix;
    }
    baseType = TypeRegistry.normalizeTypeName(baseType);
    if (m.group("array") != null) {
        baseType = "_" + baseType;
    }
    return baseType;
}
Code example source: debezium/debezium
/**
 * Create a stateful Avro fullname adjuster that logs a warning the first time an invalid fullname is seen and replaced
 * with a valid fullname, and throws an error if the replacement conflicts with that of a different original. This method
 * replaces all invalid characters with the underscore character ('_').
 *
 * @param logger the logger to use; may not be null
 * @return the validator; never null
 */
public static SchemaNameAdjuster create(Logger logger) {
    return create(logger, (original, replacement, conflict) -> {
        String msg = "The Kafka Connect schema name '" + original +
                "' is not a valid Avro schema name and its replacement '" + replacement +
                "' conflicts with another different schema '" + conflict + "'";
        throw new ConnectException(msg);
    });
}
Code example source: debezium/debezium
/**
 * Determine the available GTID set for MySQL.
 *
 * @return the string representation of MySQL's GTID sets; never null but an empty string if the server does not use GTIDs
 */
public String knownGtidSet() {
    AtomicReference<String> gtidSetStr = new AtomicReference<String>();
    try {
        jdbc.query("SHOW MASTER STATUS", rs -> {
            if (rs.next() && rs.getMetaData().getColumnCount() > 4) {
                gtidSetStr.set(rs.getString(5)); // GTID set, may be null, blank, or contain a GTID set
            }
        });
    } catch (SQLException e) {
        throw new ConnectException("Unexpected error while connecting to MySQL and looking at GTID mode: ", e);
    }
    String result = gtidSetStr.get();
    return result != null ? result : "";
}
Code example source: debezium/debezium
/**
 * Wraps the specified exception in a {@link ConnectException}, ensuring that all useful state is captured inside
 * the new exception's message.
 *
 * @param error the exception; may not be null
 * @return the wrapped Kafka Connect exception
 */
protected ConnectException wrap(Throwable error) {
    assert error != null;
    String msg = error.getMessage();
    if (error instanceof ServerException) {
        ServerException e = (ServerException) error;
        msg = msg + " Error code: " + e.getErrorCode() + "; SQLSTATE: " + e.getSqlState() + ".";
    } else if (error instanceof SQLException) {
        SQLException e = (SQLException) error;
        msg = e.getMessage() + " Error code: " + e.getErrorCode() + "; SQLSTATE: " + e.getSQLState() + ".";
    }
    return new ConnectException(msg, error);
}
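
A typical call site for such a wrapper (a hypothetical sketch, not taken from the Debezium sources quoted here) looks like this:

try {
    client.connect(); // any binlog/JDBC operation that can fail
} catch (Throwable t) {
    // Re-throw with the driver-specific error code and SQLSTATE folded into the message
    throw wrap(t);
}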
Code example source: apache/ignite
/**
 * A sink lifecycle method. Validates grid-specific sink properties.
 *
 * @param props Sink properties.
 */
@Override public void start(Map<String, String> props) {
    configProps = props;
    try {
        A.notNullOrEmpty(configProps.get(SinkConnector.TOPICS_CONFIG), "topics");
        A.notNullOrEmpty(configProps.get(IgniteSinkConstants.CACHE_NAME), "cache name");
        A.notNullOrEmpty(configProps.get(IgniteSinkConstants.CACHE_CFG_PATH), "path to cache config file");
    }
    catch (IllegalArgumentException e) {
        throw new ConnectException("Cannot start IgniteSinkConnector due to configuration error", e);
    }
}
Code example source: apache/ignite
/** {@inheritDoc} */
@Override public void start(Map<String, String> props) {
    try {
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_NAME), "cache name");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_CFG_PATH), "path to cache config file");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.CACHE_EVENTS), "Registered cache events");
        A.notNullOrEmpty(props.get(IgniteSourceConstants.TOPIC_NAMES), "Kafka topics");
    }
    catch (IllegalArgumentException e) {
        throw new ConnectException("Cannot start IgniteSourceConnector due to configuration error", e);
    }
    configProps = props;
}
Code example source: debezium/debezium
@Override
public void configure(Configuration config, HistoryRecordComparator comparator) {
    if (!config.validateAndRecord(ALL_FIELDS, logger::error)) {
        throw new ConnectException(
                "Error configuring an instance of " + getClass().getSimpleName() + "; check the logs for details");
    }
    if (running.get()) {
        throw new IllegalStateException("Database history file already initialized to " + path);
    }
    super.configure(config, comparator);
    path = Paths.get(config.getString(FILE_PATH));
}
Code example source: debezium/debezium
@Override
public List<SourceRecord> poll() throws InterruptedException {
    if (replicatorError != null) {
        throw new ConnectException("Failing connector task, at least one of the replicators has failed");
    }
    List<SourceRecord> records = queue.poll();
    recordSummarizer.accept(records);
    return records;
}
Code example source: debezium/debezium
@Override
public final void start(Map<String, String> props) {
    if (context == null) {
        throw new ConnectException("Unexpected null context");
    }
    Configuration config = Configuration.from(props);
    if (!config.validateAndRecord(getAllConfigurationFields(), LOGGER::error)) {
        throw new ConnectException("Error configuring an instance of " + getClass().getSimpleName() + "; check the logs for details");
    }
    LOGGER.info("Starting " + getClass().getSimpleName() + " with configuration:");
    config.withMaskedPasswords().forEach((propName, propValue) -> {
        LOGGER.info(" {} = {}", propName, propValue);
    });
    start(config);
}
Code example source: debezium/debezium
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
    final Collection<Node> nodes = admin.describeCluster().nodes().get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (nodes.isEmpty()) {
        throw new ConnectException("No brokers available to obtain default settings");
    }
    String nodeId = nodes.iterator().next().idString();
    Set<ConfigResource> resources = Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, nodeId));
    final Map<ConfigResource, Config> configs = admin.describeConfigs(resources).all().get(
            KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (configs.isEmpty()) {
        throw new ConnectException("No configs have been received");
    }
    return configs.values().iterator().next();
}
Code example source: debezium/debezium
/**
 * Creates a Postgres connection using the supplied configuration.
 *
 * @param config {@link Configuration} instance, may not be null.
 */
public PostgresConnection(Configuration config) {
    super(config, FACTORY, PostgresConnection::validateServerVersion, PostgresConnection::defaultSettings);
    try {
        typeRegistry = initTypeRegistry(connection());
    }
    catch (SQLException e) {
        throw new ConnectException("Could not initialize type registry", e);
    }
    databaseCharset = determineDatabaseCharset();
}
Code example source: debezium/debezium
@Override
public void initializeStorage() {
    super.initializeStorage();
    try (AdminClient admin = AdminClient.create(this.producerConfig.asProperties())) {
        // Find default replication factor
        Config brokerConfig = getKafkaBrokerConfig(admin);
        final short replicationFactor = Short.parseShort(brokerConfig.get(DEFAULT_TOPIC_REPLICATION_FACTOR_PROP_NAME).value());

        // Create topic
        final NewTopic topic = new NewTopic(topicName, (short) 1, replicationFactor);
        topic.configs(Collect.hashMapOf("cleanup.policy", "delete", "retention.ms", Long.toString(Long.MAX_VALUE), "retention.bytes", "-1"));
        admin.createTopics(Collections.singleton(topic));
        logger.info("Database history topic '{}' created", topic);
    }
    catch (Exception e) {
        throw new ConnectException("Creation of database history topic failed, please create the topic manually", e);
    }
}
Code example source: debezium/debezium
/**
 * Creates new producer instance for the given task context
 *
 * @param taskContext a {@link PostgresTaskContext}, never null
 * @param sourceInfo a {@link SourceInfo} instance to track stored offsets
 */
public RecordsStreamProducer(PostgresTaskContext taskContext,
                             SourceInfo sourceInfo) {
    super(taskContext, sourceInfo);
    executorService = Threads.newSingleThreadExecutor(PostgresConnector.class, taskContext.config().getLogicalName(), CONTEXT_NAME);
    this.replicationStream = new AtomicReference<>();
    try {
        this.replicationConnection = taskContext.createReplicationConnection();
    } catch (SQLException e) {
        throw new ConnectException(e);
    }
    heartbeat = Heartbeat.create(taskContext.config().getConfig(), taskContext.topicSelector().getHeartbeatTopic(),
            taskContext.config().getLogicalName());
}