How can Scrapy scrape company names one by one from a text file? My code only scrapes the last company name in the list

Asked by 5tmbdcev on 2022-11-09 in Other · 1 answer

I am trying to scrape company information from a company database. I have a list of companies in a text file, and I want Selenium to enter each name into the site's search box and scrape the required information one company at a time.
My problem is that, for some reason, it only enters the last name in the list. How can I tell Python to scrape the first company name in the list, then the next one, and so on?
Here is my code:


# -*- coding: utf-8 -*-

import scrapy
from scrapy.selector import Selector

# from scrapy_selenium import SeleniumRequest

from time import sleep
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from shutil import which

count = 0
file = open("cegek.txt", "r")
lines = file.readlines()
for line in lines:
    count += 1
# note: after this loop finishes, `line` is left holding only the last name in the file

# # cegek = "1000 Út Kft."

class HtSpiderSeleniumceg(scrapy.Spider):
    name = 'kamara'
    allowed_domains = ["www.ceginfo.hu"]
    start_urls = [
        'https://www.ceginfo.hu'
    ]

    def __init__(self):
        chrome_options = Options()
        # chrome_options.add_argument("--headless")

        # open the search page
        driver = webdriver.Chrome(executable_path="./chromedriver", options=chrome_options)
        driver.get("https://www.ceginfo.hu/")

        # `line` is the leftover loop variable from above, so only the last
        # company name is ever typed into the search box
        driver.find_element_by_xpath("//input[@type='search']").send_keys(line)
        sleep(2)
        # u'\ue007' is the unicode value of Keys.ENTER
        driver.find_element_by_xpath("//input[@type='search']").send_keys(u'\ue007')

        self.html = driver.page_source
        driver.close()

    # scrape the needed info
    def parse(self, response):
        resp = Selector(text=self.html)
        for ceg in resp.xpath("(//div[contains(@class, 'd-flex flex-column flex-sm-row justify-content-between align-items-center')])[1]"):
            yield {
                'cegnev': ceg.xpath("(//h2[contains(@class,'s-title heading')])[1]/text()").get(),
                'adoszam': ceg.xpath("(.//span[@class='text-uppercase c-border me-lg-3'])[1]/text()").get(),
                'cegjegy': ceg.xpath("(.//span[@class='c-border'])[1]/text()").get()
            }

Here is the exact format of the company name list:

SZIMIKRON Ipari Kft.
Tigra Computer- és Irodatechnikai Kft.
Tradeland Kft.
Török László EV Török Kulcsszervíz
Tungsram Operations Kft.
Tutti Élelmiszeripari Kft.
Water and Soil Kft.
Webkey Development Kft.
ZDMnet

With some help, it now searches for the first name in the list, but instead of scraping, the spider breaks with this error:

selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document
  (Session info: chrome=100.0.4896.60)

Here is my new code. The commented-out section at the bottom of it shows a suggested solution to my new problem, but I don't know how to implement it: I have tried putting it in different places and it did not work. I am also not sure what the 'your_element' part refers to. The solution was suggested in this thread: StaleElementReferenceException on Python Selenium


# -*- coding: utf-8 -*-

import scrapy
from scrapy.selector import Selector

# from scrapy_selenium import SeleniumRequest

from time import sleep
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.chrome.options import Options
from shutil import which
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException

# # cegek = "1000 Út Kft."

class HtSpiderSeleniumceg(scrapy.Spider):

    name = 'kamara'
    allowed_domains = ["www.ceginfo.hu"]
    start_urls = [
        'https://www.ceginfo.hu'
    ]

    # read the company names into a module-level list
    global names_to_search
    names_to_search = open("cegek.txt", "r").readlines()

    def __init__(self):
        chrome_options = Options()
        # chrome_options.add_argument("--headless")

        self.driver = webdriver.Chrome(executable_path="./chromedriver", options=chrome_options)
        self.driver.get("https://ceginfo.hu/ceglista/cegek")
        sleep(2)

        self.start_urls = [self.driver.current_url]
        sleep(2)

        # keep a reference to the search box and search for the first name;
        # this WebElement reference goes stale as soon as the results page loads
        global this_driver
        this_driver = self.driver.find_element_by_xpath("//input[@type='search']")
        this_driver.send_keys(names_to_search[0])
        sleep(2)
        this_driver.send_keys(u'\ue007')  # ENTER
        sleep(5)

    def parse(self, response):
        self.driver.get(response.url)
        print('this_driver')
        print(this_driver)
        print('names_to_search')
        print(names_to_search)

        for names in names_to_search:
            print('searching this names:')
            print(names)
            resp = Selector(text=self.driver.page_source)
            sleep(5)
            for ceg in resp.xpath("(//p[@class='mb-3 m-sm-0 meta d-flex flex-column flex-lg-row me-auto'])[1]"):
                yield {
                    'cegnev': ceg.xpath("(//h2[contains(@class,'s-title heading')])[1]/text()").get(),
                    'adoszam': ceg.xpath("(.//span[@class='text-uppercase c-border me-lg-3'])[1]/text()").get(),
                    'cegjegy': ceg.xpath("(.//span[@class='c-border'])[1]/text()").get()
                }
            try:
                # this_driver was located on a previous page, so reusing it here
                # raises StaleElementReferenceException after the first navigation
                print(this_driver)
                this_driver.send_keys(names)
                # driver.find_element_by_xpath("//input[@type='search']").send_keys(line)
                sleep(2)
                this_driver.send_keys(u'\ue007')
            except:
                print('exception - do not break')
        self.driver.close()

        # my_element_id = "(//p[@class='mb-3 m-sm-0 meta d-flex flex-column flex-lg-row me-auto'])[1]"
        # ignored_exceptions=(NoSuchElementException,StaleElementReferenceException,)
        # your_element = WebDriverWait(self.driver, 20,ignored_exceptions=ignored_exceptions)\
        #                         .until(expected_conditions.presence_of_element_located((By.XPATH, my_element_id)))
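For context on that commented-out block: the wait polls the page for the element, silently retrying through the listed exceptions, and 'your_element' is simply the fresh WebElement the wait returns. A minimal sketch of the pattern, assuming it runs inside parse where self.driver is available (the 20-second timeout, the XPath and the sample name are illustrative; the needed imports are already at the top of the spider):

ignored_exceptions = (NoSuchElementException, StaleElementReferenceException)
search_box_xpath = "//input[@type='search']"

# wait until the search box is present on the *current* page and return a
# freshly located WebElement instead of a stored, possibly stale one
your_element = WebDriverWait(self.driver, 20, ignored_exceptions=ignored_exceptions).until(
    expected_conditions.presence_of_element_located((By.XPATH, search_box_xpath))
)
your_element.send_keys("Tradeland Kft.")  # use it right away, before navigating again

The key point is that the element has to be re-acquired after every navigation rather than stored once in __init__, which is why the this_driver reference above goes stale.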
vxqlmq5t 1#

I can't fully reproduce your code without installing Selenium, the web driver and so on, but this is how you would implement the solution.
Write a function that reads the names from cegek.txt and appends them to a list:
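A minimal sketch of such a helper (the function name read_names_to_search is illustrative; cegek.txt is the file from the question):

def read_names_to_search(path="cegek.txt"):
    # one company name per line; strip newlines and skip blank lines
    names = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            name = line.strip()
            if name:
                names.append(name)
    return names

names_to_search = read_names_to_search()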
Then loop over names_to_search and pass each name to driver.find_element_by_xpath("//input[@type='search']").send_keys(name):

for name in names_to_search:
    driver.find_element_by_xpath("//input[@type='search']").send_keys(name)
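
Putting the two steps together, one way the loop could sit inside parse is sketched below. This is only an illustration: it assumes self.driver is the spider's Chrome instance, that the //input[@type='search'] box is still present on every result page, and it reuses the XPaths from the question; re-locating the search box on each iteration is what avoids the stale element reference.

    def parse(self, response):
        for name in names_to_search:
            # re-locate the search box on the current page every time, so the
            # element reference is never stale after the previous navigation
            search_box = self.driver.find_element_by_xpath("//input[@type='search']")
            search_box.clear()
            search_box.send_keys(name)
            sleep(2)
            search_box.send_keys(u'\ue007')  # ENTER
            sleep(5)

            # scrape the freshly loaded result page for this one company
            resp = Selector(text=self.driver.page_source)
            for ceg in resp.xpath("(//p[@class='mb-3 m-sm-0 meta d-flex flex-column flex-lg-row me-auto'])[1]"):
                yield {
                    'cegnev': ceg.xpath("(//h2[contains(@class,'s-title heading')])[1]/text()").get(),
                    'adoszam': ceg.xpath("(.//span[@class='text-uppercase c-border me-lg-3'])[1]/text()").get(),
                    'cegjegy': ceg.xpath("(.//span[@class='c-border'])[1]/text()").get()
                }
        self.driver.close()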
