我们假设用 Scrapy (Python) 将 Billboard Artist 100 榜单中的艺术家提取到一个 JSON 文件中，并抓取每个页面中的前 5 位，按字母顺序排序并删除重复项。然后，将它们加载到一个新的 Google 工作表中。这是我目前所做的：
import json
import scrapy
import datetime
from datetime import datetime
from datetime import timedelta, date
class BillboardWeeklySpider(scrapy.Spider):
    """Crawl the 4 most recent weekly Billboard Artist 100 charts.

    Starting from the current chart page, reads the chart date from the
    date picker, follows the URLs for that week and the 3 preceding weeks,
    and yields the top-5 artists of each chart as
    ``{'name': ..., 'rank': ..., 'date': <chart URL>}`` items.
    """

    name = 'billboard-weekly'
    allowed_domains = ['www.billboard.com']
    start_urls = ['https://www.billboard.com/charts/artist-100/']

    def __init__(self, *args, **kwargs):
        # Forward to scrapy.Spider so spider arguments (`scrapy crawl -a ...`)
        # and the standard name/start_urls wiring keep working. The original
        # override silently dropped them.
        super().__init__(*args, **kwargs)
        # ISO date string of the most recently scheduled week (set in parse()).
        self.last_week_str = ""

    def parse(self, response):
        """Schedule the current week's chart and the 3 weeks before it.

        The date extraction and parsing are loop-invariant, so they are
        done once instead of on every iteration as in the original.
        """
        string_date = response.css('#chart-date-picker::attr(data-date)').get()
        if string_date is None:
            # Page layout changed or selector missed — nothing to follow.
            return
        chart_date = datetime.strptime(string_date, '%Y-%m-%d')
        one_week = timedelta(weeks=1)
        for weeks_back in range(4):
            target_week = chart_date - weeks_back * one_week
            self.last_week_str = target_week.strftime('%Y-%m-%d')
            # The f-string is never empty, so no truthiness guard is needed.
            next_page = f"https://www.billboard.com/charts/artist-100/{self.last_week_str}"
            yield response.follow(next_page, callback=self.week_parse)

    def week_parse(self, response):
        """Yield the top-5 entries of one weekly chart page.

        Rows appear in rank order, so iteration stops at the first rank > 5.
        """
        for element in response.css('.o-chart-results-list-row-container'):
            name = element.css('#title-of-a-story::text').get()
            number = element.css(
                'span.c-label.a-font-primary-bold-l.u-font-size-32\@tablet.u-letter-spacing-0080\@tablet::text').get()
            if name is None or number is None:
                # Malformed row: calling .strip() on None would raise
                # AttributeError and kill the whole page parse. Skip it.
                continue
            clean_name = name.strip()
            clean_number = number.strip()
            if int(clean_number) > 5:
                break
            yield {
                'name': clean_name,
                'rank': clean_number,
                # NOTE(review): the key is 'date' but the value is the chart
                # URL (which embeds the date), matching the original behavior.
                'date': response.url
            }
1 条答案
（按热度 / 按时间排序）