This article collects code examples for the Java method org.elasticsearch.hadoop.util.Assert.notNull() and shows how Assert.notNull() is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow, and Maven, extracted from selected open-source projects, so they make useful references. Details of the Assert.notNull() method:

Package path: org.elasticsearch.hadoop.util.Assert
Class name: Assert
Method name: notNull
Description: none available
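
Before the sourced snippets, here is a minimal usage sketch of the guard pattern these examples all follow. The class and field names below (AssertNotNullDemo, Config, endpoint) are hypothetical, invented for illustration; the only real API used is Assert.notNull(). In elasticsearch-hadoop a failed check throws an unchecked exception, but treat the exact exception type as version-dependent.

import org.elasticsearch.hadoop.util.Assert;

public class AssertNotNullDemo {
    // Hypothetical holder class, used only to demonstrate the fail-fast guard.
    static class Config {
        final String endpoint;

        Config(String endpoint) {
            // Reject null arguments immediately, with a descriptive message.
            Assert.notNull(endpoint, "non-null endpoint expected");
            this.endpoint = endpoint;
        }
    }

    public static void main(String[] args) {
        new Config("http://localhost:9200"); // passes the check
        new Config(null);                    // throws on the null argument
    }
}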
Code example source: elastic/elasticsearch-hadoop

private ContentBuilder(Generator generator, ValueWriter writer) {
    Assert.notNull(generator);
    this.generator = generator;
    this.writer = writer;
}
Code example source: elastic/elasticsearch-hadoop

public SparkSettings(SparkConf cfg) {
    Assert.notNull(cfg, "non-null spark configuration expected");
    this.cfg = cfg;
}
Code example source: elastic/elasticsearch-hadoop

public PropertiesSettings(Properties props) {
    Assert.notNull(props, "Non-null properties expected");
    this.props = props;
}
Code example source: elastic/elasticsearch-hadoop

public HadoopSettings(Configuration cfg) {
    Assert.notNull(cfg, "Non-null properties expected");
    this.cfg = cfg;
}
Code example source: elastic/elasticsearch-hadoop

public static void notNull(Object object) {
    notNull(object, "[Assertion failed] - this argument is required; it must not be null");
}
Code example source: elastic/elasticsearch-hadoop

@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    Method m = ReflectionUtils.findMethod(target.getClass(), method.getName(), method.getParameterTypes());
    // toString on target seems to lead to weird effects, so use the class name instead
    Assert.notNull(m, String.format("Cannot find method %s on target %s", method, target.getClass()));
    return m.invoke(target, args);
}
Code example source: elastic/elasticsearch-hadoop

public static Field findField(Class<?> clazz, String name, Class<?> type) {
    Assert.notNull(clazz, "Class must not be null");
    Assert.isTrue(name != null || type != null, "Either name or type of the field must be specified");
    Class<?> searchType = clazz;
    while (!Object.class.equals(searchType) && searchType != null) {
        Field[] fields = searchType.getDeclaredFields();
        for (Field field : fields) {
            if ((name == null || name.equals(field.getName())) && (type == null || type.equals(field.getType()))) {
                return field;
            }
        }
        searchType = searchType.getSuperclass();
    }
    return null;
}
Code example source: elastic/elasticsearch-hadoop

@Override
protected Object preProcess(Object object, BytesArray storage) {
    // serialize the json early on and copy it to storage
    Assert.notNull(object, "Empty/null JSON document given...");
    jsonWriter.convert(object, storage);
    if (log.isTraceEnabled()) {
        log.trace(String.format("About to extract information from [%s]", storage));
    }
    jsonExtractors.process(storage);
    return storage;
}
Code example source: elastic/elasticsearch-hadoop

private KeystoreWrapper(InputStream inputStream, String type, String password) throws EsHadoopSecurityException, IOException {
    Assert.notNull(password, "Password should not be null");
    try {
        char[] pwd = password.toCharArray();
        protection = new KeyStore.PasswordProtection(pwd);
        keyStore = KeyStore.getInstance(type);
        keyStore.load(inputStream, pwd);
    } catch (CertificateException e) {
        throw new EsHadoopSecurityException("Could not create keystore", e);
    } catch (NoSuchAlgorithmException e) {
        throw new EsHadoopSecurityException("Could not create keystore", e);
    } catch (KeyStoreException e) {
        throw new EsHadoopSecurityException("Could not create keystore", e);
    }
}
Code example source: elastic/elasticsearch-hadoop

@Override
protected Object preProcess(Object object, BytesArray storage) {
    // serialize the json early on and copy it to storage
    Assert.notNull(object, "Empty/null JSON document given...");
    BytesArray ba = null;
    if (ConfigurationOptions.ES_OPERATION_UPSERT.equals(settings.getOperation())) {
        ba = storage;
    }
    else {
        scratchPad.reset();
        ba = scratchPad;
    }
    // write the doc to a temporary space
    jsonWriter.convert(object, ba);
    if (log.isTraceEnabled()) {
        log.trace(String.format("About to extract information from [%s]", ba));
    }
    jsonExtractors.process(ba);
    return storage;
}
Code example source: elastic/elasticsearch-hadoop

public NetworkClient(Settings settings, TransportFactory transportFactory) {
    this.settings = settings.copy();
    this.secureSettings = new SecureSettings(settings);
    this.nodes = SettingsUtils.discoveredOrDeclaredNodes(settings);
    this.transportFactory = transportFactory;
    // shuffle the list of nodes so that, in case of failures, the fallback is spread
    Collections.shuffle(nodes);
    if (SettingsUtils.hasPinnedNode(settings)) {
        // move the pinned node to the front so it is selected (only once)
        String pinnedNode = SettingsUtils.getPinnedNode(settings);
        if (log.isDebugEnabled()) {
            log.debug("Opening (pinned) network client to " + pinnedNode);
        }
        nodes.remove(pinnedNode);
        nodes.add(0, pinnedNode);
    }
    selectNextNode();
    Assert.notNull(currentTransport, "no node information provided");
}
Code example source: elastic/elasticsearch-hadoop

/**
 * Writes the object to the index.
 *
 * @param object object to add to the index
 */
public void writeToIndex(Object object) {
    Assert.notNull(object, "no object data given");
    lazyInitWriting();
    BytesRef serialized = bulkEntryWriter.writeBulkEntry(object);
    if (serialized != null) {
        doWriteToIndex(serialized);
    }
}
Code example source: elastic/elasticsearch-hadoop

public Scroll read(InputStream content) throws IOException {
    Assert.notNull(content);
    // copy content
    BytesArray copy = IOUtils.asBytes(content);
    content = new FastByteArrayInputStream(copy);
    if (log.isTraceEnabled()) {
        log.trace("About to parse scroll content " + copy);
    }
    Parser parser = new JacksonJsonParser(content);
    try {
        return read(parser, copy);
    } finally {
        parser.close();
    }
}
Code example source: elastic/elasticsearch-hadoop

@Override
public BulkOutputGenerator addSuccess(String operation, int status) {
    Assert.notNull(resource);
    items.add(getSuccess()
            .replace(OP, operation)
            .replace(IDX, resource.index())
            .replace(TYPE, resource.type())
            .replace(ID, UUID.randomUUID().toString())
            .replace(VER, "1")
            .replace(STAT, "201")
    );
    return this;
}
Code example source: elastic/elasticsearch-hadoop

@Override
public BulkOutputGenerator addFailure(String operation, int status, String type, String errorMessage) {
    Assert.notNull(resource);
    errors = true;
    items.add(getFailure()
            .replace(OP, operation)
            .replace(IDX, resource.index())
            .replace(TYPE, resource.type())
            .replace(ID, UUID.randomUUID().toString())
            .replace(STAT, Integer.toString(status))
            .replace(ETYPE, type)
            .replace(EMESG, errorMessage)
    );
    return this;
}
Code example source: elastic/elasticsearch-hadoop

@Override
public BulkOutputGenerator addRejection(String operation) {
    Assert.notNull(resource);
    errors = true;
    items.add(getFailure()
            .replace(OP, operation)
            .replace(IDX, resource.index())
            .replace(TYPE, resource.type())
            .replace(ID, UUID.randomUUID().toString())
            .replace(STAT, Integer.toString(getRejectedStatus()))
            .replace(ETYPE, getRejectionType())
            .replace(EMESG, getRejectionMsg())
    );
    return this;
}
Code example source: elastic/elasticsearch-hadoop

@Override
public List<E> loadHandlers() {
    Assert.notNull(settings, "No settings are present in the handler loader!");
    // ...
Code example source: elastic/elasticsearch-hadoop

HeartBeat(final Progressable progressable, Configuration cfg, TimeValue lead, final Log log) {
    Assert.notNull(progressable, "a valid progressable is required to report status to Hadoop");
    TimeValue tv = HadoopCfgUtils.getTaskTimeout(cfg);
    Assert.isTrue(tv.getSeconds() <= 0 || tv.getSeconds() > lead.getSeconds(), "Hadoop timeout is shorter than the heartbeat");
    this.progressable = progressable;
    long cfgMillis = (tv.getMillis() > 0 ? tv.getMillis() : 0);
    // the task is simple, hence delay = timeout - lead, i.e. start the notification right before the timeout
    this.delay = new TimeValue(Math.abs(cfgMillis - lead.getMillis()), TimeUnit.MILLISECONDS);
    this.log = log;
    String taskId;
    TaskID taskID = HadoopCfgUtils.getTaskID(cfg);
    if (taskID == null) {
        log.warn("Cannot determine task id...");
        taskId = "<unknown>";
        if (log.isTraceEnabled()) {
            log.trace("Current configuration is " + HadoopCfgUtils.asProperties(cfg));
        }
    }
    else {
        taskId = "" + taskID;
    }
    id = taskId;
}
Code example source: elastic/elasticsearch-hadoop

/**
 * Writes pre-processed data to the index.
 *
 * @param ba the data as a bytes array
 */
public void writeProcessedToIndex(BytesArray ba) {
    Assert.notNull(ba, "no data given");
    Assert.isTrue(ba.length() > 0, "no data given");
    lazyInitWriting();
    trivialBytesRef.reset();
    trivialBytesRef.add(ba);
    doWriteToIndex(trivialBytesRef);
}
Content is sourced from the Internet. If there is any infringement, please contact the author for removal.