Scrapy: download images and store them in separate files

Asked by w9apscun on 2022-11-09

I want to download images from the web and store each one in its own file, named after the image's title. I built a scraper that extracts the links to these images, but once I enable the FilesPipeline I cannot append .png to each downloaded image, nor rename the files from their SHA1 hash to the title I extracted.
Here is what I have so far:

import scrapy
from scrapy_playwright.page import PageCoroutine
from scrapy.item import Field
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst, MapCompose, Join
from bs4 import BeautifulSoup
import json
import re

from insta_vm.items import DownfilesItem  # used by parse_images (defined in items.py below)

headers = {
    'Connection': 'keep-alive',
    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="98", "Google Chrome";v="98"',
    'Accept': '*/*',
    'X-Requested-With': 'XMLHttpRequest',
    'sec-ch-ua-mobile': '?0',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
    'sec-ch-ua-platform': '"macOS"',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Dest': 'empty',
    'Referer': 'https://digital.library.pitt.edu/islandora/object/pitt%3A31735061815696/viewer',
    'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
}

class carnapItem(scrapy.Item):
    title = Field(input_processor = MapCompose(str.strip),
    output_processor  = Join())
    id_image = Field(output_processor = TakeFirst())

class carnapSpider(scrapy.Spider):
    name = 'carnap'
    start_urls = []
    for pages in range(1, 44):
        start_urls.append(f'https://digital.library.pitt.edu/collection/archives-scientific-philosophy?page={pages}&islandora_solr_search_navigation=0&f%5B0%5D=mods_relatedItem_host_titleInfo_title_ms%3A%22Rudolf%5C%20Carnap%5C%20Papers%22')

    custom_settings = {
        'USER_AGENT':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'
    }
    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(
                url=url, headers=headers,
                callback = self.parse
            )

    def parse(self, response):
        container = response.xpath("//div[@class='islandora islandora-solr-search-results']/div")
        for data in container:
            # Every fifth <a> inside a result block links to the object page.
            href_data = data.xpath('(.//a)[position() mod 5=1]//@href').get()
            # Insert /viewer before the URL fragment so we land on the viewer page.
            href_data = '/viewer#'.join(href_data.split("#"))
            links = response.urljoin(href_data)
            yield response.follow(url=links, callback=self.parse_carnap, headers=headers)

    def parse_carnap(self, response):
        soup = BeautifulSoup(response.body, 'lxml')
        # The viewer page embeds its bookreader configuration as JSON in the
        # 53rd typed element; pull the {...} payload out of that element.
        for b in soup.select("*[type]:nth-child(53)"):
            data_test = b.text[b.text.find('{'):b.text.rfind('}') + 1]
            data_test = json.loads(data_test)
            # The "info" value is an HTML table holding the title and object ID.
            test = BeautifulSoup(data_test['islandoraInternetArchiveBookReader']['info'], 'lxml')
            title = re.sub('Title', '', test.find('tr', {'class': 'odd'}).text)
            id_no = [str(test.select('.even')[1]).split('>')[4].split("<")[0]]
            page_count = data_test['islandoraInternetArchiveBookReader']['pageCount']
            for id_m in id_no:
                for pg in range(1, page_count + 1):
                    # POSTing here returns the image URI for one page (pitt:<id>-00NN).
                    another_str = f'https://digital.library.pitt.edu/internet_archive_bookreader_get_image_uri/pitt:{id_m}-00{str(pg).zfill(2)}'
                    yield scrapy.Request(
                        url=another_str,
                        method='POST',
                        headers=headers,
                        callback=self.parse_images,
                        cb_kwargs={'title': title},
                    )

    def parse_images(self, response, title):
        # The response body is the bare URL of the page image.
        file_url = response.text
        item = DownfilesItem()
        item['original_file_name'] = title
        item['file_urls'] = [file_url]
        yield item

Settings:

BOT_NAME = 'insta_vm'

SPIDER_MODULES = ['insta_vm.spiders']
NEWSPIDER_MODULE = 'insta_vm.spiders'

ROBOTSTXT_OBEY = False

ITEM_PIPELINES = {'scrapy.pipelines.files.FilesPipeline': 150}
FILES_STORE = "Files"

My items pipeline does not append .png to the downloaded files:

import scrapy
from itemloaders.processors import MapCompose

class DownfilesItem(scrapy.Item):

    # define the fields for your item here like:
    file_urls = scrapy.Field(input_processor = MapCompose(lambda x: x+'.png'))
    original_file_name = scrapy.Field()
    files = scrapy.Field()
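
As written, the MapCompose input processor on file_urls never runs: Scrapy applies input/output processors only when an item is populated through an ItemLoader, and parse_images assigns the fields directly. The usual way to control both the stored name and the extension is to subclass FilesPipeline and override file_path. A minimal sketch, assuming Scrapy 2.4+ (where file_path receives the item) and a hypothetical insta_vm/pipelines.py module:

import re

from scrapy.pipelines.files import FilesPipeline

class TitleNamedFilesPipeline(FilesPipeline):
    """Store each file as <title>.png instead of the default SHA1 name."""

    def file_path(self, request, response=None, info=None, *, item=None):
        # Strip characters that are unsafe in file names (assumed rule).
        title = re.sub(r'[^\w\- ]', '', item['original_file_name']).strip()
        # Caution: every page of a document carries the same title here, so
        # later pages would overwrite earlier ones; fold a page number into
        # original_file_name in the spider to keep them distinct.
        return f'{title}.png'

Then point ITEM_PIPELINES at the subclass instead of the stock pipeline:

ITEM_PIPELINES = {'insta_vm.pipelines.TitleNamedFilesPipeline': 150}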

Answer 1 (llew8vvj):

Oh! In Islandora 7 you wouldn't scrape it that way. Use the datastream URL and append the desired file name to the end: ${DOMAIN}/islandora/object/${PID}/datastream/OBJ/${desired_file_name}.png
Islandora names the file for you when you download it. What I've done in the past is use the PID as the file name and fetch the original object: ${DOMAIN}/islandora/object/${PID}/datastream/OBJ/${PID}.png
Make sure the original object really is a PNG. If you're not sure, query Solr or Fedora to check the original format: most universities prefer raw formats such as TIFF over PNG, and you may need to swap the "/OBJ/" part of the URL for a different datastream.
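
As a rough sketch of that suggestion applied to the question's spider, the inner loop of parse_carnap could emit items whose file_urls point straight at the OBJ datastream, letting FilesPipeline fetch the original object without the bookreader POST or the parse_images hop. The pitt:<id>-NNNN PID format is only inferred from the question's bookreader URL and may need adjusting:

# Hypothetical replacement for the inner loop of parse_carnap.
for pg in range(1, page_count + 1):
    pid = f'pitt:{id_m}-{str(pg).zfill(4)}'  # PID pattern inferred from the question
    item = DownfilesItem()
    item['original_file_name'] = f'{title}_{pg}'  # keep pages distinct
    item['file_urls'] = [
        f'https://digital.library.pitt.edu/islandora/object/{pid}/datastream/OBJ/{pid}.png'
    ]
    yield item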
