当我启动kibana并查看管理选项卡时,它会显示一条消息
Couldn't find any Elasticsearch data
You'll need to index some data into Elasticsearch before you can create an index pattern
这些数据显示在我的 elasticsearch
通过浏览器进行群集
http://localhost:9200/类别/指数?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
green open .kibana-event-log-7.9.3-000001 JBL1C589TZWBEIjhe63f1w 1 0 3 0 16.2kb 16.2kb
green open .apm-custom-link nqIKOV7rR8OhzG4Y6UntSA 1 0 0 0 208b 208b
green open .kibana_task_manager_1 3RbGcb5nTrelAfjr8cQ8Gg 1 0 6 38 150.3kb 150.3kb
green open .apm-agent-configuration llIcAZAGTWanNghptfymVQ 1 0 0 0 208b 208b
green open .kibana_1 tMQMj0UdRd-sCZPb631Y5g 1 0 23 9 10.4mb 10.4mb
我看不见 logstash
索引和日志存储在端口9600下运行
http://localhost:9600/
{"host":"DESKTOP","version":"7.9.3","http_address":"127.0.0.1:9600","id":"b92c8d86-6159-4821-9ace-01bd5328f6af","name":"DESKTOP-MTG14LM","ephemeral_id":"4332a47b-ad63-4e02-a02e-5c233d7a3773","status":"green","snapshot":false,"pipeline":{"workers":4,"batch_size":125,"batch_delay":50},"build_date":"2020-10-16T12:25:47Z","build_sha":"d296f0087bdce367c37596241d5a1f00c9279193","build_snapshot":false}
logstash-sample.conf文件
input {
file{
type => "syslog"
path => ["D:\Spring Boot Project\demo-gradle\another-log.log"]
}
output {
stdout{
codec => rubydebug
}
elasticsearch {
hosts => ["http://localhost:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
#user => "elastic"
#password => "changeme"
}
}
请支持我解决这个问题
http://localhost:9600/\u节点/stats/?漂亮
{
"host" : "DESKTOP",
"version" : "7.9.3",
"http_address" : "127.0.0.1:9600",
"id" : "b92c8d86-6159-4821-9ace-01bd5328f6af",
"name" : "DESKTOP",
"ephemeral_id" : "15e38fba-b37a-4b7d-9e58-6a89e2082799",
"status" : "green",
"snapshot" : false,
"pipeline" : {
"workers" : 4,
"batch_size" : 125,
"batch_delay" : 50
},
"jvm" : {
"threads" : {
"count" : 30,
"peak_count" : 32
},
"mem" : {
"heap_used_percent" : 24,
"heap_committed_in_bytes" : 1038876672,
"heap_max_in_bytes" : 1038876672,
"heap_used_in_bytes" : 255060960,
"non_heap_used_in_bytes" : 174032152,
"non_heap_committed_in_bytes" : 196833280,
"pools" : {
"old" : {
"max_in_bytes" : 724828160,
"peak_max_in_bytes" : 724828160,
"used_in_bytes" : 125286040,
"committed_in_bytes" : 724828160,
"peak_used_in_bytes" : 226688920
},
"young" : {
"max_in_bytes" : 279183360,
"peak_max_in_bytes" : 279183360,
"used_in_bytes" : 102941904,
"committed_in_bytes" : 279183360,
"peak_used_in_bytes" : 279183360
},
"survivor" : {
"max_in_bytes" : 34865152,
"peak_max_in_bytes" : 34865152,
"used_in_bytes" : 26833016,
"committed_in_bytes" : 34865152,
"peak_used_in_bytes" : 34865144
}
}
},
"gc" : {
"collectors" : {
"old" : {
"collection_time_in_millis" : 713,
"collection_count" : 4
},
"young" : {
"collection_time_in_millis" : 501,
"collection_count" : 8
}
}
},
"uptime_in_millis" : 815971
},
"process" : {
"open_file_descriptors" : -1,
"peak_open_file_descriptors" : -1,
"max_file_descriptors" : -1,
"mem" : {
"total_virtual_in_bytes" : -1
},
"cpu" : {
"total_in_millis" : -1,
"percent" : -3,
"load_average" : null
}
},
"events" : {
"in" : 0,
"filtered" : 0,
"out" : 0,
"duration_in_millis" : 0,
"queue_push_duration_in_millis" : 0
},
"pipelines" : {
"main" : {
"events" : {
"queue_push_duration_in_millis" : 0,
"out" : 0,
"duration_in_millis" : 0,
"in" : 0,
"filtered" : 0
},
"plugins" : {
"inputs" : [ {
"id" : "09ae4aa0701a92b926aee6c9c0abef34b22fe75695ed89371fb40e0ce5666067",
"name" : "file",
"events" : {
"queue_push_duration_in_millis" : 0,
"out" : 0
}
} ],
"codecs" : [ {
"id" : "plain_09312af1-ced8-4a87-8be0-7425fe846651",
"name" : "plain",
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 0
},
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
}
}, {
"id" : "rubydebug_88397be3-dcbe-4553-a788-aa3d4474e141",
"name" : "rubydebug",
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 3
},
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
}
}, {
"id" : "plain_497bb40b-2eab-4852-a002-e2c7ee4d7ab3",
"name" : "plain",
"encode" : {
"writes_in" : 0,
"duration_in_millis" : 0
},
"decode" : {
"out" : 0,
"writes_in" : 0,
"duration_in_millis" : 0
}
} ],
"filters" : [ ],
"outputs" : [ {
"id" : "e48f703a97c1645df3afa1d1b8937faffe8a408694f8a6ba5be6bb23bed53001",
"name" : "stdout",
"events" : {
"out" : 0,
"in" : 0,
"duration_in_millis" : 33
}
}, {
"id" : "ad540803354821020198353da7d7314b73309c07babecea3df737a197017449a",
"name" : "elasticsearch",
"events" : {
"out" : 0,
"in" : 0,
"duration_in_millis" : 4
}
} ]
},
"reloads" : {
"failures" : 0,
"successes" : 0,
"last_success_timestamp" : null,
"last_error" : null,
"last_failure_timestamp" : null
},
"queue" : {
"type" : "memory",
"events_count" : 0,
"queue_size_in_bytes" : 0,
"max_queue_size_in_bytes" : 0
},
"hash" : "661080585b2691f01bac24b363c27f0cfc03a009fbb302424abe96cc1ae50fb5",
"ephemeral_id" : "faf3face-77dc-455f-8632-1ff2e1ebdd7c"
}
},
"reloads" : {
"failures" : 0,
"successes" : 0
},
"os" : { },
"queue" : {
"events_count" : 0
}
bin/logstash--log.level调试
[2020-11-16T21:59:20,627][DEBUG][logstash.runner ] monitoring.elasticsearch.hosts: ["http://localhost:9200"]
[2020-11-16T21:59:20,630][DEBUG][logstash.runner ] monitoring.collection.interval: #<LogStash::Util::TimeValue:0xa362681 @duration=10, @time_unit=:second>
[2020-11-16T21:59:20,635][DEBUG][logstash.runner ] monitoring.collection.timeout_interval: #<LogStash::Util::TimeValue:0x228ca300 @duration=10, @time_unit=:minute>
[2020-11-16T21:59:20,637][DEBUG][logstash.runner ] monitoring.elasticsearch.username: "logstash_system"
[2020-11-16T21:59:20,639][DEBUG][logstash.runner ] monitoring.elasticsearch.ssl.verification_mode: "certificate"
[2020-11-16T21:59:20,640][DEBUG][logstash.runner ] monitoring.elasticsearch.sniffing: false
[2020-11-16T21:59:20,641][DEBUG][logstash.runner ] monitoring.collection.pipeline.details.enabled: true
[2020-11-16T21:59:20,643][DEBUG][logstash.runner ] monitoring.collection.config.enabled: true
[2020-11-16T21:59:20,644][DEBUG][logstash.runner ] node.uuid: ""
[2020-11-16T21:59:20,645][DEBUG][logstash.runner ] --------------- Logstash Settings -------------------
[2020-11-16T21:59:20,711][DEBUG][logstash.config.source.multilocal] Reading pipeline configurations from YAML {:location=>"D:/ELK stack/logstash/config/pipelines.yml"}
ERROR: Pipelines YAML file is empty. Location: D:/ELK stack/logstash/config/pipelines.yml
usage:
bin/logstash -f CONFIG_PATH [-t] [-r] [] [-w COUNT] [-l LOG]
bin/logstash --modules MODULE_NAME [-M "MODULE_NAME.var.PLUGIN_TYPE.PLUGIN_NAME.VARIABLE_NAME=VALUE"] [-t] [-w COUNT] [-l LOG]
bin/logstash -e CONFIG_STR [-t] [--log.level fatal|error|warn|info|debug|trace] [-w COUNT] [-l LOG]
bin/logstash -i SHELL [--log.level fatal|error|warn|info|debug|trace]
bin/logstash -V [--log.level fatal|error|warn|info|debug|trace]
bin/logstash --help
[2020-11-16T21:59:20,755][ERROR][org.logstash.Logstash ] java.lang.IllegalStateException: Logstash stopped processing because of an error: (SystemExit) exit
bin/logstash--log.level debug-f config/logstash-sample.conf
[2020-11-16T22:11:31,227][DEBUG][filewatch.sincedbcollection][main][09ae4aa0701a92b926aee6c9c0abef34b22fe75695ed89371fb40e0ce5666067] writing sincedb (delta since last write = 15)
[2020-11-16T22:11:32,314][DEBUG][logstash.instrument.periodicpoller.cgroup] One or more required cgroup files or directories not found: /proc/self/cgroup, /sys/fs/cgroup/cpuacct, /sys/fs/cgroup/cpu
[2020-11-16T22:11:32,678][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2020-11-16T22:11:32,679][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2020-11-16T22:11:34,964][DEBUG][org.logstash.execution.PeriodicFlush][main] Pushing flush onto pipeline.
[2020-11-16T22:11:37,330][DEBUG][logstash.instrument.periodicpoller.cgroup] One or more required cgroup files or directories not found: /proc/self/cgroup, /sys/fs/cgroup/cpuacct, /sys/fs/cgroup/cpu
[2020-11-16T22:11:37,691][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2020-11-16T22:11:37,692][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2020-11-16T22:11:39,964][DEBUG][org.logstash.execution.PeriodicFlush][main] Pushing flush onto pipeline.
[2020-11-16T22:11:42,336][DEBUG][logstash.instrument.periodicpoller.cgroup] One or more required cgroup files or directories not found: /proc/self/cgroup, /sys/fs/cgroup/cpuacct, /sys/fs/cgroup/cpu
[2020-11-16T22:11:42,697][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ParNew"}
[2020-11-16T22:11:42,697][DEBUG][logstash.instrument.periodicpoller.jvm] collector name {:name=>"ConcurrentMarkSweep"}
[2020-11-16T22:11:44,960][DEBUG][org.logstash.execution.PeriodicFlush][main] Pushing flush onto pipeline.
2条答案
按热度按时间umuewwlo1#
谢谢你的回答。在我的例子中,windows被用作操作系统。因此,用于路由日志路径的反斜杠应替换为正斜杠。
logstash-sample.conf文件
ie3xauqp2#
你的
file
输入是从文件末尾读取的。尝试这样修改输入(即从头开始阅读):还要确保删除此文件: