I run Kafka in Docker, with the data stored on a volume. I set up some source connectors, and the topics were auto-created with cleanup.policy set to delete. Using Kafka Manager, I then changed the policy to compact.
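For reference, the same change can also be made from the command line instead of Kafka Manager. This is a minimal sketch, assuming the Debezium image keeps the Kafka tools under /kafka/bin (adjust if your image differs); my_topic is a placeholder topic name:

# Set cleanup.policy=compact as a per-topic override (stored in ZooKeeper)
docker-compose exec kafka /kafka/bin/kafka-configs.sh \
  --zookeeper zookeeper:2181 \
  --entity-type topics --entity-name my_topic \
  --alter --add-config cleanup.policy=compact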
The problem:
After stopping and starting docker-compose, the topics are still there, but cleanup.policy has reverted to delete.
The question:
How can I persist the topic configuration across restarts?
Additional information
I restart the Kafka Docker setup with: rm /kafka/data/1/meta.properties; docker-compose down && docker-compose up -d --no-recreate
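One way to see whether the override survives the restart (a sketch under the same assumptions as above; my_topic is a placeholder) is to describe the topic's dynamic configuration before and after:

# cleanup.policy=compact should be listed among the overrides if it persisted
docker-compose exec kafka /kafka/bin/kafka-configs.sh \
  --zookeeper zookeeper:2181 \
  --entity-type topics --entity-name my_topic --describe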
docker-compose.yml:
version: '2'
services:
  zookeeper:
    image: debezium/zookeeper:${DEBEZIUM_VERSION}
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /kafka/zookeeper_data:/zookeeper/data
      - /kafka/zookeeper_logs:/zookeeper/logs
      - /kafka/zookeeper_conf:/zookeeper/conf
  kafka:
    image: debezium/kafka:${DEBEZIUM_VERSION}
    ports:
      - 9092:9092
    links:
      - zookeeper
    environment:
      - ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_BROKER_ID=1
      - ADVERTISED_HOST_NAME=172.16.10.187
    volumes:
      - /kafka/data:/kafka/data
      - /kafka/config:/kafka/config
  schema-registry:
    image: confluentinc/cp-schema-registry
    ports:
      - 8181:8181
      - 8081:8081
    environment:
      - SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL=zookeeper:2181
      - SCHEMA_REGISTRY_HOST_NAME=schema-registry
      - SCHEMA_REGISTRY_LISTENERS=http://schema-registry:8081
    links:
      - zookeeper
  connect3:
    build:
      context: debezium-jdbc-es
    ports:
      - 8083:8083
    links:
      - kafka
    environment:
      - BOOTSTRAP_SERVERS=kafka:9092
      - GROUP_ID=1
      - CONFIG_STORAGE_TOPIC=my_connect_configs
      - OFFSET_STORAGE_TOPIC=my_connect_offsets
      - STATUS_STORAGE_TOPIC=my_connect_statuses
      - KEY_CONVERTER=io.confluent.connect.avro.AvroConverter
      - VALUE_CONVERTER=io.confluent.connect.avro.AvroConverter
      - INTERNAL_KEY_CONVERTER=org.apache.kafka.connect.json.JsonConverter
      - INTERNAL_VALUE_CONVERTER=org.apache.kafka.connect.json.JsonConverter
      - CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL=http://schema-registry:8081
      - CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL=http://schema-registry:8081
      - KAFKA_OPTS=-javaagent:/kafka/jmx_prometheus_javaagent.jar=8080:/kafka/config.yml
      - CONNECT_REST_ADVERTISED_HOST_NAME=connect3
      - JMX_PORT=1976
  prometheus:
    build:
      context: debezium-prometheus
    ports:
      - 9090:9090
    links:
      - connect3
  grafana:
    build:
      context: debezium-grafana
    ports:
      - 3000:3000
    links:
      - prometheus
    environment:
      - DS_PROMETHEUS=prometheus
  restproxy:
    image: confluentinc/cp-kafka-rest
    environment:
      KAFKA_REST_BOOTSTRAP_SERVERS: "kafka:9092"
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
      KAFKA_REST_HOST_NAME: restproxy
      KAFKA_REST_DEBUG: "true"
      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
    ports:
      - 8082:8082
  kafka-ui:
    image: landoop/kafka-connect-ui:latest
    ports:
      - 8000:8000
    links:
      - connect3
      - schema-registry
      - zookeeper
    environment:
      - CONNECT_URL=http://connect3:8083/
  kafka-topic-ui:
    image: landoop/kafka-topics-ui
    links:
      - connect3
    ports:
      - 8001:8000
    environment:
      - KAFKA_REST_PROXY_URL=http://restproxy:8082
      - PROXY=true
  kafka_manager:
    image: hlebalbau/kafka-manager:stable
    ports:
      - "9000:9000"
    environment:
      ZK_HOSTS: "zookeeper:2181"
    links:
      - connect3
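As a quick sanity check against the volume mappings above (a hypothetical check, assuming ZooKeeper's usual on-disk layout), you can confirm on the Docker host that ZooKeeper is actually writing into the mounted folder; ZooKeeper keeps its snapshots and transaction logs in a version-2 subdirectory of its data directory:

# If the container's dataDir really resolves to the mounted volume,
# snapshot and log files should show up here on the host
ls /kafka/zookeeper_data/version-2/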
1 Answer
Kafka topic configuration is stored in ZooKeeper.
One potential issue that comes to mind is that the ZooKeeper data is not being persisted. I see your ZooKeeper container has three volumes mounted, but it may be worth checking whether the dataDir property in zookeeper.properties points to a persisted folder, or whether there is some other reason the ZooKeeper data is not surviving restarts.
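For example, a hedged way to check both points (paths assume the Debezium ZooKeeper image, which keeps the distribution under /zookeeper; the config file name varies by image, e.g. zoo.cfg or zookeeper.properties; my_topic is a placeholder):

# 1. Find the directory ZooKeeper actually persists to; it should resolve
#    to a path covered by one of the mounted volumes (e.g. /zookeeper/data)
docker-compose exec zookeeper grep dataDir /zookeeper/conf/zoo.cfg

# 2. Kafka keeps per-topic overrides under /config/topics/<topic> in ZooKeeper,
#    so the compact setting should be visible here if it was written
docker-compose exec zookeeper /zookeeper/bin/zkCli.sh -server localhost:2181 \
  get /config/topics/my_topic

If the override is present in ZooKeeper before the restart but gone afterwards, that points squarely at the ZooKeeper data directory not being persisted.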
Hope this helps.
Best regards.