Recently, I upgraded my cloud application from Java 11 to Java 17, along with the corresponding dependencies. The application runs fine, and even the test cases pass. I also upgraded my Apache Beam version from 2.35.0 to 2.49.0. However, a custom class, RedisWriteIO, needed some changes, and its tests no longer pass the new code-coverage check.

RedisWriteIO:
package com.example.dataflow.io.redis;

import com.google.auto.value.AutoValue;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.PTransform;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PDone;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions;
import org.checkerframework.checker.nullness.qual.Nullable;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Transaction;

public class RedisWriteIO {

    public static Write write() {
        return (new AutoValue_RedisWriteIO_Write.Builder())
                .setConnectionConfiguration(CustomRedisConfigurations.create()).build();
    }

    @AutoValue
    public abstract static class Write extends PTransform<PCollection<KV<String, String>>, PDone> {

        public Write() {
        }

        @Nullable
        abstract CustomRedisConfigurations connectionConfiguration();

        @Nullable
        abstract Long expireTime();

        abstract Builder toBuilder();

        public Write withEndpoint(String host, int port) {
            Preconditions.checkArgument(host != null, "host can not be null");
            Preconditions.checkArgument(port > 0, "port can not be negative or 0");
            return this.toBuilder()
                    .setConnectionConfiguration(this.connectionConfiguration().withHost(host).withPort(port))
                    .build();
        }

        public Write withAuth(String auth) {
            Preconditions.checkArgument(auth != null, "auth can not be null");
            return this.toBuilder()
                    .setConnectionConfiguration(this.connectionConfiguration().withAuth(auth))
                    .build();
        }

        public Write withTimeout(int timeout) {
            Preconditions.checkArgument(timeout >= 0, "timeout can not be negative");
            return this.toBuilder()
                    .setConnectionConfiguration(this.connectionConfiguration().withTimeout(timeout))
                    .build();
        }

        public Write withConnectionConfiguration(CustomRedisConfigurations connection) {
            Preconditions.checkArgument(connection != null, "connection can not be null");
            return this.toBuilder().setConnectionConfiguration(connection).build();
        }

        public Write withExpireTime(Long expireTimeMillis) {
            Preconditions.checkArgument(expireTimeMillis != null, "expireTimeMillis can not be null");
            Preconditions.checkArgument(expireTimeMillis > 0L, "expireTimeMillis can not be negative or 0");
            return this.toBuilder().setExpireTime(expireTimeMillis).build();
        }

        @Override
        public PDone expand(PCollection<KV<String, String>> input) {
            Preconditions.checkArgument(
                    this.connectionConfiguration() != null, "withConnectionConfiguration() is required");
            input.apply(ParDo.of(new WriteFn(this)));
            return PDone.in(input.getPipeline());
        }

        private static class WriteFn extends DoFn<KV<String, String>, Void> {

            private static final int DEFAULT_BATCH_SIZE = 1000;

            private final RedisWriteIO.Write spec;
            private transient Jedis jedis;
            private transient @Nullable Transaction transaction;
            private int batchCount;

            public WriteFn(RedisWriteIO.Write spec) {
                this.spec = spec;
            }

            @Setup
            public void setup() {
                jedis = spec.connectionConfiguration().connect();
            }

            @StartBundle
            public void startBundle() {
                transaction = jedis.multi();
                batchCount = 0;
            }

            @ProcessElement
            public void processElement(DoFn<KV<String, String>, Void>.ProcessContext c) {
                KV<String, String> record = c.element();
                String fieldKey = record.getKey();
                String fieldValue = record.getValue();
                transaction.sadd(fieldKey, fieldValue);
                batchCount++;
                // Flush the open MULTI block every DEFAULT_BATCH_SIZE records and start a new one.
                if (batchCount >= DEFAULT_BATCH_SIZE) {
                    transaction.exec();
                    transaction.multi();
                    batchCount = 0;
                }
            }

            @FinishBundle
            public void finishBundle() {
                // Execute whatever is left of the last partial batch, then close the transaction.
                if (batchCount > 0) {
                    transaction.exec();
                }
                if (transaction != null) {
                    transaction.close();
                }
                transaction = null;
                batchCount = 0;
            }

            @Teardown
            public void teardown() {
                jedis.close();
            }
        }

        @AutoValue.Builder
        abstract static class Builder {

            Builder() {
            }

            abstract Builder setConnectionConfiguration(CustomRedisConfigurations connectionConfiguration);

            abstract Builder setExpireTime(Long expireTimeMillis);

            abstract Write build();
        }
    }
}
The test class is as follows:
package com.example.dataflow.io.redis;

import com.github.fppt.jedismock.RedisServer;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.Wait;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PDone;
import org.junit.*;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Transaction;

import javax.net.ssl.SSLSocketFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.*;

public class RedisWriteIOTest {

    private static final String REDIS_HOST = "localhost";
    private static final String[] INPUT_DATA = new String[]{
            "123456789",
            "Bruce",
            "Wayne"
    };

    @Mock
    static SSLSocketFactory socketFactory;

    private static RedisServer server;
    private static int port;

    @Mock
    private static Jedis jedis;

    @Mock
    private Transaction transaction;

    private int batchCount;

    @Rule
    public TestPipeline pipeline = TestPipeline.create();

    @Mock
    CustomRedisConfigurations connection;

    @Mock
    DoFn.OutputReceiver<KV<String, String>> out;

    @Before
    public void setUp() {
        MockitoAnnotations.openMocks(this);
        when(connection.connect()).thenReturn(jedis);
        when(jedis.multi()).thenReturn(transaction);
        batchCount = 0;
    }

    @BeforeClass
    public static void beforeClass() throws Exception {
        server = RedisServer.newRedisServer(8000);
        server.start();
        port = server.getBindPort();
        jedis = new Jedis(server.getHost(), server.getBindPort());
    }

    @AfterClass
    public static void afterClass() throws IOException {
        jedis.close();
        server.stop();
    }

    @Test
    public void WriteMemoryStoreWithEmptyAuth() {
        RedisWriteIO.write()
                .withEndpoint(REDIS_HOST, port).withAuth("");
    }

    @Test
    public void WriteMemoryStoreWithAuth() {
        RedisWriteIO.write()
                .withAuth("AuthString");
    }

    @Test
    public void WriteTimeOut() {
        RedisWriteIO.write()
                .withTimeout(10);
    }

    @Test
    public void WriteMemoryStoreWithExpireTime() {
        RedisWriteIO.Write write = RedisWriteIO.write();
        write = write.withExpireTime(1000L);
        assertNotNull(write);
    }

    @Test(expected = IllegalArgumentException.class)
    public void WriteMemoryStoreWithoutExpireTime() {
        RedisWriteIO.write()
                .withExpireTime(0L);
    }

    @Test(expected = IllegalArgumentException.class)
    public void WriteMemoryStoreWithNegativeExpireTime() {
        RedisWriteIO.write()
                .withExpireTime(-10L);
    }

    @Test
    public void WriteMemoryStoreWithConnectionConfiguration() {
        connection = CustomRedisConfigurations.create().withHost(REDIS_HOST).withPort(port);
        RedisWriteIO.Write write = RedisWriteIO.write()
                .withConnectionConfiguration(connection);
        assertNotNull(write);
    }

    @Test(expected = IllegalArgumentException.class)
    public void WriteMemoryStoreWithNullConnectionConfiguration() {
        RedisWriteIO.Write write = RedisWriteIO.write()
                .withConnectionConfiguration(null);
    }

    @Test
    public void testBatchProcessingWithTransactionExecuted() {
        RedisWriteIO.Write spec = RedisWriteIO.write().withConnectionConfiguration(connection);
        PCollection<String> flushFlag = pipeline.apply("Read File",
                TextIO.read().from("files/fileHavingFiveThousandRecords.txt"));
        List<KV<String, String>> recordEntries = new ArrayList<>();
        // add 10,000 unique entries
        for (int i = 0; i < 10000; i++) {
            recordEntries.add(KV.of("Bruce:Wayne" + i, "123456789" + i));
        }
        // outputData will be written to Redis (Memorystore)
        PCollection<KV<String, String>> outputData = pipeline.apply(Create.of(recordEntries));
        outputData.apply("Waiting until clearing Redis database", Wait.on(flushFlag))
                .apply("Writing the data into Redis database", RedisWriteIO.write()
                        .withConnectionConfiguration(CustomRedisConfigurations
                                .create(REDIS_HOST, port)
                                .withTimeout(100)
                                .withAuth("credentials")
                                .enableSSL()));
        pipeline.run();
    }
}
RedisWriteIO is a utility class that writes data from a file into a Redis database. It works as expected, and the test cases written for it also pass. However, SonarQube reports that the following block is not covered:
if (batchCount >= DEFAULT_BATCH_SIZE) {
    transaction.exec();
    transaction.multi();
    batchCount = 0;
}
The block above should execute whenever the input contains more than 1000 records, yet it is never reached from the test class. I tried to cover it from the testBatchProcessingWithTransactionExecuted() method using a test file with 5000 records, but the block still never executes.

I need help writing a test case that covers it.
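For reference, the cycle that the uncovered block implements looks like this when driven directly with plain Jedis against the embedded jedis-mock server from the test class. This is a minimal sketch only: the 2,500-record bound is arbitrary, and it assumes jedis-mock accepts MULTI/EXEC and that a Jedis Transaction can be re-armed with multi() after exec(), which is exactly what WriteFn itself relies on.

try (Jedis client = new Jedis(REDIS_HOST, port)) {
    Transaction tx = client.multi();
    int batchCount = 0;
    for (int i = 0; i < 2500; i++) {
        tx.sadd("Bruce:Wayne" + i, "123456789" + i);
        batchCount++;
        if (batchCount >= 1000) { // mirrors DEFAULT_BATCH_SIZE
            tx.exec();   // flush the queued commands...
            tx.multi();  // ...and open a fresh MULTI block
            batchCount = 0;
        }
    }
    if (batchCount > 0) {
        tx.exec();       // final partial batch, as in finishBundle()
    }
}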
1 Answer
I was able to get the test to cover all the lines. I simply increased the size of the list to 20,000; with that input, the RedisWriteIO class handles the larger dataset as expected. The batch count of 1000 specified by DEFAULT_BATCH_SIZE acts as the threshold: each time it is reached, the transaction is executed (transaction.exec()) and a new one is started (transaction.multi()). Note that the counter is reset at every bundle boundary, so a single bundle must contain more than 1000 elements for the branch to fire; a larger input most likely gives the runner bundles big enough to cross that threshold.
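A minimal sketch of what the adjusted test can look like, reusing the embedded jedis-mock server and the CustomRedisConfigurations factory from the test class above. The method name and the spot-check key are illustrative, and it assumes jedis-mock supports MULTI/EXEC and SISMEMBER; auth and SSL are left off because the mock server does not provide them.

@Test
public void testBatchProcessingCoversBatchFlush() {
    // 20,000 unique entries: large enough that each bundle should exceed
    // DEFAULT_BATCH_SIZE (1000) and trigger the batch-flush branch.
    List<KV<String, String>> recordEntries = new ArrayList<>();
    for (int i = 0; i < 20000; i++) {
        recordEntries.add(KV.of("Bruce:Wayne" + i, "123456789" + i));
    }

    pipeline
            .apply(Create.of(recordEntries))
            .apply("Write to embedded Redis", RedisWriteIO.write()
                    .withConnectionConfiguration(CustomRedisConfigurations
                            .create(REDIS_HOST, port)));

    pipeline.run().waitUntilFinish();

    // Spot-check that the writes reached the mock server (WriteFn uses SADD).
    try (Jedis client = new Jedis(REDIS_HOST, port)) {
        Assert.assertTrue(client.sismember("Bruce:Wayne0", "1234567890"));
    }
}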