本文整理了Java中org.rakam.collection.Event.schema()
方法的一些代码示例,展示了Event.schema()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台，是从一些精选项目中提取出来的代码，具有较强的参考意义，能在一定程度上帮助到你。Event.schema()
方法的具体详情如下:
包路径:org.rakam.collection.Event
类名称:Event
方法名:schema
暂无
代码示例来源:origin: rakam-io/rakam
/**
 * Returns the event whose schema contains the most fields; ties keep the
 * earliest occurrence. Assumes the list is non-empty (get(0) would throw otherwise).
 */
private Event getLastEvent(List<Event> eventsForCollection) {
    Event widest = eventsForCollection.get(0);
    for (Event candidate : eventsForCollection.subList(1, eventsForCollection.size())) {
        if (candidate.schema().size() > widest.schema().size()) {
            widest = candidate;
        }
    }
    return widest;
}
代码示例来源:origin: rakam-io/rakam
generator.writeString(event.collection());
List<SchemaField> fields = event.schema();
for (SchemaField field : fields) {
generator.writeFieldName("properties." + field.getName());
代码示例来源:origin: rakam-io/rakam
/**
 * Persists a single event into its project/collection table via JDBC.
 *
 * On a PostgreSQL 10 check_violation (SQLState 23514) — which here signals a
 * missing partition — the missing partitions are created and the insert is
 * retried exactly once (partitionCheckDone guards against infinite recursion).
 *
 * @param event              the event to store; its Avro schema drives the generated INSERT
 * @param partitionCheckDone true when this call is already the retry after partition creation
 * @throws RuntimeException wrapping any SQL or binding failure
 */
public void store(Event event, boolean partitionCheckDone) {
    GenericRecord record = event.properties();
    try (Connection connection = connectionPool.getConnection()) {
        Schema schema = event.properties().getSchema();
        // try-with-resources so the statement is always closed; the original leaked it.
        try (PreparedStatement ps = connection.prepareStatement(getQuery(event.project(), event.collection(), schema))) {
            bindParam(connection, ps, event.schema(), record);
            ps.executeUpdate();
        }
    } catch (SQLException e) {
        // check_violation -> https://www.postgresql.org/docs/8.2/static/errcodes-appendix.html
        if (version.getVersion() == PG10 && !partitionCheckDone && "23514".equals(e.getSQLState())) {
            generateMissingPartitions(event.project(), event.collection(), ImmutableList.of(event), 0);
            store(event, true);
        } else {
            throw new RuntimeException(e);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
代码示例来源:origin: rakam-io/rakam
Event event = eventsForCollection.get(i);
GenericRecord properties = event.properties();
bindParam(connection, ps, lastEvent.schema(), properties);
ps.addBatch();
if (i > 0 && i % 5000 == 0) {
代码示例来源:origin: rakam-io/rakam
/**
 * Bulk-inserts the given events, grouped by collection, using a single
 * INSERT ... (SELECT ... UNION ALL ...) statement per collection.
 *
 * On failure the thread backs off for 5 minutes (presumably to throttle the
 * caller's retry loop — TODO confirm intent) and then surfaces the error.
 *
 * @param events events to store; all are assumed to share one project (project
 *               is taken from events.get(0) — verify against callers)
 * @return EventStore.SUCCESSFUL_BATCH when every collection insert succeeded
 * @throws IllegalStateException when any collection's insert fails
 */
@Override
public int[] storeBatch(List<Event> events) {
    for (Map.Entry<String, List<Event>> collection : events.stream().collect(Collectors.groupingBy(e -> e.collection())).entrySet()) {
        QueryResult join = queryExecutor.executeRawStatement(String.format("INSERT INTO %s.%s.%s (_shard_time, %s) (%s)",
                config.getColdStorageConnector(), checkProject(events.get(0).project(), '"'),
                checkCollection(collection.getKey()),
                collection.getValue().get(0).schema().stream().map(e -> ValidationUtil.checkCollection(e.getName()))
                        .collect(Collectors.joining(", ")),
                collection.getValue().stream()
                        .map(e -> buildValues(e.properties(), e.schema()))
                        .collect(Collectors.joining(" union all "))))
                .getResult().join();
        if (join.isFailed()) {
            try {
                Thread.sleep(300000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag instead of swallowing it; the original
                // only printed the stack trace, losing the interruption signal.
                Thread.currentThread().interrupt();
            }
            throw new IllegalStateException(join.getError().message);
        }
    }
    return EventStore.SUCCESSFUL_BATCH;
}
代码示例来源:origin: rakam-io/rakam
/**
 * Inserts a single event into cold storage.
 *
 * Identifiers are validated/quoted via checkProject/checkCollection, matching
 * storeBatch(); the original interpolated event.project() and event.collection()
 * raw into the SQL string, which risked injection through identifier names.
 */
@Override
public void store(Event event) {
    queryExecutor.executeRawStatement(String.format("INSERT INTO %s.%s.%s VALUES %s",
            config.getColdStorageConnector(), checkProject(event.project(), '"'),
            checkCollection(event.collection()), buildValues(event.properties(), event.schema())));
}
代码示例来源:origin: org.rakam/rakam-clickhouse
@Inject
public ClickHouseEventStore(ClickHouseConfig config)
{
// Events are buffered per (project, collection) and flushed once per second
// by a single-threaded scheduler; each flush drains the queue map and sends
// one batched request per key.
// NOTE(review): the scheduled executor is never shut down — presumably this
// store lives for the whole process; confirm there is no shutdown path.
this.config = config;
queuedEvents = new ConcurrentHashMap<>();
currentFutureSingle = new ConcurrentHashMap<>();
Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(() -> {
try {
Iterator<Map.Entry<ProjectCollection, List<Event>>> iterator = queuedEvents.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<ProjectCollection, List<Event>> next = iterator.next();
List<Event> value = next.getValue();
// Remove the entry before sending so concurrent producers start a fresh batch.
iterator.remove();
// The future registered for this key is completed by executeRequest when the flush finishes.
CompletableFuture<Void> remove = currentFutureSingle.remove(next.getKey());
// All events of one batch are assumed to share the schema of the first event.
List<SchemaField> schema = value.get(0).schema();
executeRequest(next.getKey(), schema, next.getValue(), remove, false);
}
}
catch (Exception e) {
// Swallow to keep the scheduled task alive; a thrown exception would cancel
// all future runs of scheduleWithFixedDelay.
e.printStackTrace();
}
}, 1, 1, TimeUnit.SECONDS);
}
代码示例来源:origin: org.rakam/rakam-clickhouse
/**
 * Serializes one event into a little-endian binary buffer for ClickHouse:
 * a varint field count, the _time value converted from epoch-millis to a
 * day number (0 when absent), then each property value in schema order.
 *
 * @param event event whose Avro properties are serialized
 * @return the populated buffer as a ByteBuffer
 * @throws RuntimeException wrapping any IOException from the output stream
 */
private ByteBuffer getBuffer(Event event)
{
    SharedByteArrayOutputStream buffer = new SharedByteArrayOutputStream(event.properties().getSchema().getFields().size() * 8);
    LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(buffer);
    GenericRecord record = event.properties();
    Object time = record.get("_time");
    try {
        // Hoisted: the original called event.schema() again on every loop iteration.
        List<SchemaField> schema = event.schema();
        int size = schema.size();
        writeVarInt(size, out);
        // _time is epoch-millis; 86400000 ms per day yields the DATE day number.
        writeValue(time == null ? 0 : ((int) (((long) time) / 86400000)), DATE, out);
        for (int i = 0; i < size; i++) {
            writeValue(record.get(i), schema.get(i).getType(), out);
        }
    }
    catch (IOException e) {
        throw Throwables.propagate(e);
    }
    return buffer.toByteBuffer();
}
内容来源于网络,如有侵权,请联系作者删除!