Web scraping a JSON-loading site in Python

o8x7eapl · posted 2023-11-20 in Python
Follow (0) | Answers (1) | Views (175)

I'm trying to scrape a supermarket website. The site loads its items and the related information using JSON.
If I manually grab the URL of the JSON GET request from the Network tab in Chrome's dev tools, this works perfectly.
Two problems: every time I change the page or load more items, there is a new, different "endpoint"/JSON for those items, and I can't find a way to loop through those URLs.
And if I use just the site's URL (instead of the JSON one I get from the Network tab), I can loop through the pages, but I can't get the information, because the site loads its data with JSON.
This works:

import pandas as pd
import requests

url = "https://www.masonline.com.ar/_v/segment/graphql/v1?workspace=master&maxAge=short&appsEtag=remove&domain=store&locale=es-AR&__bindingId=d62c820d-5adb-43a2-9d15-25ff4f3a6d2f&operationName=productSearchV3&variables=%7B%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%2240b843ca1f7934d20d05d334916220a0c2cae3833d9f17bcb79cdd2185adceac%22%2C%22sender%22%3A%22vtex.store-resources%400.x%22%2C%22provider%22%3A%22vtex.search-graphql%400.x%22%7D%2C%22variables%22%3A%22eyJoaWRlVW5hdmFpbGFibGVJdGVtcyI6dHJ1ZSwic2t1c0ZpbHRlciI6IkFMTCIsInNpbXVsYXRpb25CZWhhdmlvciI6ImRlZmF1bHQiLCJpbnN0YWxsbWVudENyaXRlcmlhIjoiTUFYX1dJVEhPVVRfSU5URVJFU1QiLCJwcm9kdWN0T3JpZ2luVnRleCI6ZmFsc2UsIm1hcCI6InByb2R1Y3RDbHVzdGVySWRzIiwicXVlcnkiOiIyNjgiLCJvcmRlckJ5IjoiT3JkZXJCeVNjb3JlREVTQyIsImZyb20iOjAsInRvIjoyMywic2VsZWN0ZWRGYWNldHMiOlt7ImtleSI6InByb2R1Y3RDbHVzdGVySWRzIiwidmFsdWUiOiIyNjgifV0sImZhY2V0c0JlaGF2aW9yIjoiU3RhdGljIiwiY2F0ZWdvcnlUcmVlQmVoYXZpb3IiOiJkZWZhdWx0Iiwid2l0aEZhY2V0cyI6ZmFsc2UsInZhcmlhbnQiOiIifQ%3D%3D%22%7D"
r = requests.get(url)
data = r.json()  # the endpoint returns JSON directly, so no HTML parsing is needed

# The product list lives under data -> productSearch -> products
test = data['data']['productSearch']['products']

# Flatten the nested product dicts into a DataFrame, then drop everything
# except the name, brand, price and link columns
df = pd.json_normalize(test)
df.drop(df.columns[[0,1,2,4,5,7,9,10,11,12,13,14,15,16,17,18,19,20,22,23,24,25,26]], axis=1, inplace=True)

df.rename(columns={'productName': 'Nombre', 'brand': 'Marca', 'priceRange.sellingPrice.lowPrice': 'Precio'}, inplace=True)
df['link'] = 'https://www.masonline.com.ar/' + df['link']
print(df)

But since that is only the first JSON, it only loads the items from the first page.
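For context, the long variables parameter at the end of that URL is base64-encoded JSON, and decoding it shows the from/to fields that select the item range. A minimal sketch of the decoding, reusing the url variable from the snippet above:

import base64
import json
from urllib.parse import parse_qs, urlparse

# Pull the "extensions" query parameter out of the captured URL and
# decode the base64 "variables" blob inside it
qs = parse_qs(urlparse(url).query)
extensions = json.loads(qs["extensions"][0])
variables = json.loads(base64.b64decode(extensions["variables"]))
print(variables["from"], variables["to"])  # 0 23 -> only the first 24 items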
Alternatively, I can also find the data like this:

import requests
from bs4 import BeautifulSoup
url = "https://www.masonline.com.ar/268?map=productClusterIds&page=1"
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
# Take the 19th <script> tag and slice off the leading "<script>" (8 chars)
# and the trailing "</script>" (9 chars), leaving just the script body
script = str(soup.find_all('script')[18])[8:-9]
print(script)


I didn't pursue this last option because the data is not usable when I get it this way. With this option I also tried to get it closer to JSON format (that's why I strip 8 and 9 characters off the string), but it still doesn't work.
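A more robust way to pull embedded JSON out of a script tag is to look for a known marker and slice from the first { to the last }, instead of hard-coding the tag index. A sketch, assuming the page embeds its state under a __STATE__ marker (a guess based on how VTEX stores typically embed their data, so check the page source first):

import json
import requests
from bs4 import BeautifulSoup

url = "https://www.masonline.com.ar/268?map=productClusterIds&page=1"
soup = BeautifulSoup(requests.get(url).content, 'html.parser')

state = None
for script in soup.find_all('script'):
    body = script.string or ''
    if '__STATE__' in body:  # guessed marker; confirm it in the page source
        # Slice from the first "{" to the last "}" to drop any JS assignment
        # wrapped around the JSON object, then parse it
        state = json.loads(body[body.find('{'):body.rfind('}') + 1])
        break
print(type(state))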
So I think my best option is the first piece of code, but I don't know how to automatically get the JSON link for each page (the one I take from the Network tab in the dev tools) while looping.
Thanks in advance.

flvtvl50 1#

The payload is base64-encoded JSON, so you can do something like this (you may need to update/compute the sha256Hash once):

import base64
import json

import requests

def get_payload(from_, to_):
    # Build the "variables" JSON for the productSearchV3 query and return it
    # base64-encoded, which is how the persisted-query endpoint expects it
    payload = {
        "hideUnavailableItems": True,
        "skusFilter": "ALL",
        "simulationBehavior": "default",
        "installmentCriteria": "MAX_WITHOUT_INTEREST",
        "productOriginVtex": False,
        "map": "productClusterIds",
        "query": "268",
        "orderBy": "OrderByScoreDESC",
        "from": from_,
        "to": to_,
        "selectedFacets": [{"key": "productClusterIds", "value": "268"}],
        "operator": "and",
        "fuzzy": "0",
        "searchState": None,
        "facetsBehavior": "Static",
        "categoryTreeBehavior": "default",
        "withFacets": False,
        "variant": "",
    }

    json_data = json.dumps(payload)
    base64_data = base64.b64encode(json_data.encode("utf-8")).decode("utf-8")
    return base64_data

api_url = "https://www.masonline.com.ar/_v/segment/graphql/v1"

for page in range(0, 3):  # <-- increase page count here
    # "from"/"to" are inclusive indices, so request items 0-23, 24-47, ...
    # (matching the captured URL, which used "from": 0, "to": 23)
    base64_data = get_payload(page * 24, (page + 1) * 24 - 1)
    query = {
        "workspace": "master",
        "maxAge": "short",
        "appsEtag": "remove",
        "domain": "store",
        "locale": "es-AR",
        "operationName": "productSearchV3",
        "variables": "{}",
        "extensions": '{"persistedQuery":{"version":1,"sha256Hash":"40b843ca1f7934d20d05d334916220a0c2cae3833d9f17bcb79cdd2185adceac","sender":"vtex.store-resources@0.x","provider":"vtex.search-graphql@0.x"},"variables":"'
        + base64_data
        + '"}',
    }

    data = requests.get(api_url, params=query).json()
    for p in data["data"]["productSearch"]["products"]:
        print(p["productId"], p["productName"])

Prints:

...

188189 Mayonesa Hellmanns Liviana Doypack 475g
22375 Ketchup Natura 500 Gr
15791 Fideos Spaghetti Marolio 500 Gr
191757 Café Instantáneo Nescafé® Dolca® Suave 170g
166102 Caldo Verdura Maggi® X 12u.
14340 Galletita Pizza Saladix 100 Gr
135083 Harina Integral Pureza 100% X 1 Kg
176220 Conserva De Arvejas Aliada 300g
23745 Lentejas La Española 400 Gr
189853 Pan Lactal De Mesa 315g
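
If the total number of pages isn't known in advance, the same loop can run until the API returns no more products. A sketch reusing get_payload and api_url from above; treating an empty products list as the end of the catalog is an assumption about this API's behavior:

base_query = {
    "workspace": "master",
    "maxAge": "short",
    "appsEtag": "remove",
    "domain": "store",
    "locale": "es-AR",
    "operationName": "productSearchV3",
    "variables": "{}",
}
extensions_tmpl = (
    '{"persistedQuery":{"version":1,'
    '"sha256Hash":"40b843ca1f7934d20d05d334916220a0c2cae3833d9f17bcb79cdd2185adceac",'
    '"sender":"vtex.store-resources@0.x","provider":"vtex.search-graphql@0.x"},'
    '"variables":"%s"}'
)

page = 0
while True:
    query = dict(base_query, extensions=extensions_tmpl % get_payload(page * 24, (page + 1) * 24 - 1))
    products = requests.get(api_url, params=query).json()["data"]["productSearch"]["products"]
    if not products:  # assumption: an empty list means we're past the last page
        break
    for p in products:
        print(p["productId"], p["productName"])
    page += 1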
