此问题已在此处有答案:
I can't encode my input(2个答案)
6天前关闭
几天前,接受this回答我的一个问题是正确的,但过了一段时间,我注意到,在一些网址,我得到了以下错误:
2023-05-29 19:22:20 [scrapy.core.scraper] ERROR: Spider error processing <POST https://www.rad.cvm.gov.br/ENET/frmExibirArquivoIPEExterno.aspx/ExibirPDF> (referer: https://www.rad.cvm.gov.br/ENET/frmExibirArquivoIPEExterno.aspx?NumeroProtocoloEntrega=1106380)
Traceback (most recent call last):
File "/home/higo/anaconda3/lib/python3.9/base64.py", line 37, in _bytes_from_decode_data
return s.encode('ascii')
UnicodeEncodeError: 'ascii' codec can't encode character '\xe3' in position 7: ordinal not in range(128)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/higo/anaconda3/lib/python3.9/site-packages/twisted/internet/defer.py", line 857, in _runCallbacks
current.result = callback( # type: ignore[misc]
File "/home/higo/Documentos/Doutorado/Artigo/scrape_fatos/scrape_fatos/spiders/fatos.py", line 63, in download_pdf
pdf = base64.b64decode(b64)
File "/home/higo/anaconda3/lib/python3.9/base64.py", line 80, in b64decode
s = _bytes_from_decode_data(s)
File "/home/higo/anaconda3/lib/python3.9/base64.py", line 39, in _bytes_from_decode_data
raise ValueError('string argument should contain only ASCII characters')
ValueError: string argument should contain only ASCII characters
这对我来说似乎很奇怪,甚至更奇怪,因为错误消息中的上述URL工作正常。
我尝试修改download_pdf
方法:
def download_pdf(self, response, protocol_num):
json_data = response.json()
b64 = json_data.get('d')
if b64:
# Filter out non-ASCII characters
filtered_b64 = re.sub(r'[^A-Za-z0-9+/=]', '', b64)
pdf = base64.b64decode(filtered_b64)
filename = f'{protocol_num}.pdf'
p = os.path.join(self.base_dir, filename)
if not os.path.isdir(self.base_dir):
os.mkdir(self.base_dir)
with open(p, 'wb') as f:
f.write(pdf)
self.log(f"Saved {filename} in {self.base_dir}")
else:
self.log("Couldn't download pdf", logging.ERROR)
但我没有成功:有了这个变化,只是所有保存的PDF文件被损坏。
经过一些小的修改,我的完整代码如下:
import base64
import logging
import os
import re
from urllib.parse import unquote
import scrapy
class FatosSpider(scrapy.Spider):
name = 'fatos'
allowed_domains = ['cvm.gov.br']
with open("urls.txt", "rt") as f:
start_urls = [url.strip() for url in f.readlines()]
base_dir = './pdf_downloads'
def parse(self, response):
id_ = self.get_parameter_by_name("ID", response.url)
if id_:
numeroProtocolo = id_
codInstituicao = 2
else:
numeroProtocolo = self.get_parameter_by_name("NumeroProtocoloEntrega", response.url)
codInstituicao = 1
dataValue = "{ codigoInstituicao: '" + str(codInstituicao) + "', numeroProtocolo: '" + str(numeroProtocolo) + "'"
token = response.xpath('//*[@id="hdnTokenB3"]/@value').get(default='')
versaoCaptcha = ''
if response.xpath('//*[@id="hdnHabilitaCaptcha"]/@value').get(default='') == 'S':
if not token:
versaoCaptcha = 'V3'
payload = dataValue + ", token: '" + token + "', versaoCaptcha: '" + versaoCaptcha + "'}"
url = 'https://www.rad.cvm.gov.br/ENET/frmExibirArquivoIPEExterno.aspx/ExibirPDF'
headers = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.5",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Type": "application/json; charset=utf-8",
"DNT": "1",
"Host": "www.rad.cvm.gov.br",
"Origin": "https://www.rad.cvm.gov.br",
"Pragma": "no-cache",
"Referer": f"https://www.rad.cvm.gov.br/ENET/frmExibirArquivoIPEExterno.aspx?NumeroProtocoloEntrega={numeroProtocolo}",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/113.0",
"X-Requested-With": "XMLHttpRequest"
}
yield scrapy.Request(url=url, headers=headers, body=payload, method='POST', callback=self.download_pdf, cb_kwargs={'protocol_num': numeroProtocolo})
def download_pdf(self, response, protocol_num):
json_data = response.json()
b64 = json_data.get('d')
if b64:
pdf = base64.b64decode(b64)
filename = f'{protocol_num}.pdf'
p = os.path.join(self.base_dir, filename)
if not os.path.isdir(self.base_dir):
os.mkdir(self.base_dir)
with open(p, 'wb') as f:
f.write(pdf)
self.log(f"Saved {filename} in {self.base_dir}")
else:
self.log("Couldn't download pdf", logging.ERROR)
@staticmethod
def get_parameter_by_name(name, url):
name = name.replace('[', '\\[').replace(']', '\\]')
results = re.search(r"[?&]" + name + r"(=([^&#]*)|&|#|$)", url)
if not results:
return None
if len(results.groups()) < 2 or not results[2]:
return ''
return unquote(results[2])
这种情况如何解决?
1条答案
按热度按时间ssgvzors1#
我只是在另一个线程中找到了基于this answer的解决方案。下面这行
修改为
现在,所有PDF文件都已正确下载。