After adding the Tika parser to StormCrawler, no content is extracted from the fetched documents and stored in Elasticsearch.
es-crawler.flux
includes:
  - resource: true
    file: "/crawler-default.yaml"
    override: false

  - resource: false
    file: "crawler-conf.yaml"
    override: true

  - resource: false
    file: "es-conf.yaml"
    override: true

spouts:
  - id: "spout"
    className: "com.digitalpebble.stormcrawler.elasticsearch.persistence.AggregationSpout"
    parallelism: 10

  - id: "filespout"
    className: "com.digitalpebble.stormcrawler.spout.FileSpout"
    parallelism: 1
    constructorArgs:
      - "."
      - "seeds.txt"
      - true

bolts:
  - id: "filter"
    className: "com.digitalpebble.stormcrawler.bolt.URLFilterBolt"
    parallelism: 1
  - id: "partitioner"
    className: "com.digitalpebble.stormcrawler.bolt.URLPartitionerBolt"
    parallelism: 1
  - id: "fetcher"
    className: "com.digitalpebble.stormcrawler.bolt.FetcherBolt"
    parallelism: 1
  - id: "sitemap"
    className: "com.digitalpebble.stormcrawler.bolt.SiteMapParserBolt"
    parallelism: 1
  - id: "parse"
    className: "com.digitalpebble.stormcrawler.bolt.JSoupParserBolt"
    parallelism: 1
  - id: "tika_redirection"
    className: "com.digitalpebble.stormcrawler.tika.RedirectionBolt"
    parallelism: 1
  - id: "tika_parser"
    className: "com.digitalpebble.stormcrawler.tika.ParserBolt"
    parallelism: 1
  - id: "index"
    className: "com.digitalpebble.stormcrawler.elasticsearch.bolt.IndexerBolt"
    parallelism: 1
  - id: "status"
    className: "com.digitalpebble.stormcrawler.elasticsearch.persistence.StatusUpdaterBolt"
    parallelism: 1
  - id: "status_metrics"
    className: "com.digitalpebble.stormcrawler.elasticsearch.metrics.StatusMetricsBolt"
    parallelism: 1

streams:
  - from: "spout"
    to: "partitioner"
    grouping:
      type: SHUFFLE

  - from: "spout"
    to: "status_metrics"
    grouping:
      type: SHUFFLE

  - from: "partitioner"
    to: "fetcher"
    grouping:
      type: FIELDS
      args: ["key"]

  - from: "fetcher"
    to: "sitemap"
    grouping:
      type: LOCAL_OR_SHUFFLE

  - from: "sitemap"
    to: "parse"
    grouping:
      type: LOCAL_OR_SHUFFLE

  - from: "parse"
    to: "index"
    grouping:
      type: LOCAL_OR_SHUFFLE

  - from: "fetcher"
    to: "status"
    grouping:
      type: FIELDS
      args: ["url"]
      streamId: "status"

  - from: "sitemap"
    to: "status"
    grouping:
      type: FIELDS
      args: ["url"]
      streamId: "status"

  - from: "parse"
    to: "status"
    grouping:
      type: FIELDS
      args: ["url"]
      streamId: "status"

  - from: "parse"
    to: "tika_redirection"
    grouping:
      type: LOCAL_OR_SHUFFLE

  - from: "tika_redirection"
    to: "tika_parser"
    grouping:
      type: LOCAL_OR_SHUFFLE
      streamId: "tika"

  - from: "tika_parser"
    to: "index"
    grouping:
      type: LOCAL_OR_SHUFFLE

  - from: "tika_parser"
    to: "status"
    grouping:
      type: FIELDS
      args: ["url"]
      streamId: "status"

  - from: "index"
    to: "status"
    grouping:
      type: FIELDS
      args: ["url"]
      streamId: "status"

  - from: "filespout"
    to: "filter"
    grouping:
      type: FIELDS
      args: ["url"]
      streamId: "status"

  - from: "filter"
    to: "status"
    grouping:
      streamId: "status"
      type: CUSTOM
      customClass:
        className: "com.digitalpebble.stormcrawler.util.URLStreamGrouping"
        constructorArgs:
          - "byDomain"
I have added the following settings to crawler-conf.yaml:
crawler-conf.yaml
parser.mimetype.whitelist:
  - application/.*pdf.*

jsoup.treat.non.html.as.error: false
In addition, when running the topology I see the following log entry:
16:27:29.867 [Thread-43-tika_parser-executor[22, 22]] INFO c.d.s.t.ParserBolt - skipped_trimmed -> http://cds.iisc.ac.in/wp-content/uploads/DS256.2017.Storm_.Tutorial.pdf
I would prefer to extract every possible field from the PDF and to store the page content in an array, so that in Elasticsearch each page becomes one element of that array.
1 Answer
See ParserBolt: if the document was trimmed during fetching, no parsing is done at all.
You can control this in the conf.
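The setting meant here is presumably the fetcher's content limit; a minimal sketch, assuming the goal is to stop large PDFs from being trimmed during fetching (http.content.limit is a standard StormCrawler property, and -1 removes the size cap):

http.content.limit: -1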
This should get the document parsed with Tika. The resulting metadata will carry the prefix "parse.". You may need to write a custom bolt to process the data into the format you want, i.e. one key per page in ES, as sketched below.
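As a rough illustration of that last suggestion, here is a minimal sketch of such a bolt, wired between "tika_parser" and "index". The class name PageSplitterBolt, the (url, content, metadata, text) tuple layout and the idea of splitting the extracted text on form feeds are assumptions made for illustration only; how pages are actually delimited depends on the Tika output, so treat this as a starting point rather than a drop-in solution.

import java.util.Map;

import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

import com.digitalpebble.stormcrawler.Metadata;

/**
 * Hypothetical bolt placed between "tika_parser" and "index" that reshapes
 * the metadata produced by the Tika parser before indexing. The field layout
 * (url, content, metadata, text) matches what the parser bolts emit on the
 * default stream.
 */
public class PageSplitterBolt extends BaseRichBolt {

    private OutputCollector collector;

    @Override
    public void prepare(Map<String, Object> conf, TopologyContext context,
            OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        String url = tuple.getStringByField("url");
        byte[] content = tuple.getBinaryByField("content");
        Metadata metadata = (Metadata) tuple.getValueByField("metadata");
        String text = tuple.getStringByField("text");

        // Placeholder logic: split the extracted text into "pages" and store
        // them under a multi-valued metadata key, which the indexer can map
        // to an array field in Elasticsearch. Splitting on form feeds is an
        // assumption; adjust to however your Tika output marks page breaks.
        String[] pages = text.split("\f");
        for (String page : pages) {
            metadata.addValue("parse.page", page.trim());
        }

        collector.emit(tuple, new Values(url, content, metadata, text));
        collector.ack(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("url", "content", "metadata", "text"));
    }
}

In the Flux file it would be declared like any other bolt and inserted into the stream in place of the direct tika_parser to index connection.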