Automatic crawling using Selenium

Time: 2019-02-20 10:47:31

Tags: python selenium web web-crawler

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

OUTPUT_FILE_NAME = 'output0.txt'
driver = webdriver.Chrome()
wait = WebDriverWait(driver, 10)

def get_text():
    driver.get("http://law.go.kr/precSc.do?tabMenuId=tab67")
    elem = wait.until(EC.visibility_of_element_located(
        (By.CSS_SELECTOR, "#viewHeightDiv > table > tbody > tr:nth-child(1) > td.s_tit > a")))

    title = elem.text.strip().split(" ")[0]
    elem.click()

    wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "#viewwrapCenter h2"), title))
    content = driver.find_element_by_css_selector("#viewwrapCenter").text
    return content

def main():
    open_output_file = open(OUTPUT_FILE_NAME, 'w')
    result_text = get_text()
    open_output_file.write(result_text)
    open_output_file.close()

main()

Based on this code, I want to crawl this site: have Selenium open the first link from the original url and save its text to a txt file, then go back to the original url, open the second link, and so on. The problem is that the css_selector value for the first link is #viewHeightDiv > table > tbody > tr:nth-child(1) > td.s_tit > a while for the second link it is #viewHeightDiv > table > tbody > tr:nth-child(3) > td.s_tit > a. The only difference between them is the number after nth-child, and there seems to be no rule to it, just 1, 3, 5, 9, ... so I'm stuck here...
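A minimal sketch of one way around the varying nth-child indices: match every result link with a selector that drops nth-child entirely, and revisit the list page by position. The LINKS_CSS selector and the per-item output naming are assumptions here, not tested against the site:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

LIST_URL = "http://law.go.kr/precSc.do?tabMenuId=tab67"
LINKS_CSS = "#viewHeightDiv td.s_tit > a"  # assumed: matches every row's link

driver = webdriver.Chrome()
wait = WebDriverWait(driver, 10)

driver.get(LIST_URL)
count = len(wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, LINKS_CSS))))

for i in range(count):
    # Re-locate the links on every pass; navigating makes old references stale
    links = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, LINKS_CSS)))
    title = links[i].text.strip().split(" ")[0]
    links[i].click()
    wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "#viewwrapCenter h2"), title))
    content = driver.find_element(By.CSS_SELECTOR, "#viewwrapCenter").text
    with open('output%d.txt' % i, 'w', encoding='utf-8') as f:
        f.write(content)
    driver.get(LIST_URL)  # back to the list for the next link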

2 answers:

Answer 0 (score: 1)

You don't need Selenium to scrape all the posts. You can do everything with the Requests and BeautifulSoup libraries:

import requests
from bs4 import BeautifulSoup

if __name__ == '__main__':

    # Use requests to get the first page's 50 items: pg=1 is the page number, outmax=50 is items per page
    response = requests.post(
        "http://law.go.kr/precScListR.do?q=*&section=evtNm&outmax=50&pg=1&fsort=21,10,30&precSeq=0&dtlYn=N")

    # Parse html using BeautifulSoup
    page = BeautifulSoup(response.text, "html.parser")

    # Find "go to last page" element and get "onclick" attribute, inside "onlick" attribute parse last page number
    # for "outmax=50" (used before)
    onclick = str(page.select(".paging > a:last-child")[0].attrs["onclick"])
    last_page_number = int(''.join([n for n in onclick if n.isdigit()]))

    # To test uncomment code below to get items only from first page
    # last_page_number = 1

    # Go through all pages and collect post numbers in items
    items = []
    for i in range(1, last_page_number + 1):
        if i > 1:
            # Go to next page
            response = requests.post(
                "http://law.go.kr/precScListR.do?q=*&section=evtNm&outmax=50&pg=%d&fsort=21,10,30&precSeq=0&dtlYn=N" % i)
            # Re-parse the html, otherwise the links from page 1 would be reused
            page = BeautifulSoup(response.text, "html.parser")

        # Get all links on the current page
        links = page.select("#viewHeightDiv .s_tit a")
        # Loop all links and collect post numbers
        for link in links:
            # Parse post number from "onclick" attribute
            items.append(''.join([n for n in link.attrs["onclick"] if n.isdigit()]))

    # Open each post and collect it into the posts list as a dict with keys: number, url and text
    posts = []
    for item in items:
        url = "http://law.go.kr/precInfoR.do?precSeq=%s&vSct=*" % item
        response = requests.get(url)
        t = BeautifulSoup(response.text, "html.parser").find('div', attrs={'id': 'contentBody'}).text
        posts.append({'number': item, 'url': url, 'text': t})

To save to a file, change the last part of the code to the version below, where /yourfullpath/ is replaced with your own path, e.g. "C://files/" or "/Users/myuser/files/":

# Open each post; collect number, url, text and title, and write each post's text to a file
posts = []
for item in items:
    url = "http://law.go.kr/precInfoR.do?precSeq=%s&vSct=*" % item
    response = requests.get(url)
    parsed = BeautifulSoup(response.text, "html.parser")
    text = parsed.find('div', attrs={'id': 'contentBody'}).text
    title = parsed.select_one("h2").text
    posts.append({'number': item, 'url': url, 'text': text, 'title': title})

    # utf-8 keeps the Korean text intact regardless of the platform default
    with open('/yourfullpath/' + title + '.txt', 'w', encoding='utf-8') as f:
        f.write(text)
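One caveat with title-based filenames: case titles can contain characters that are invalid on some filesystems, so the write can fail for certain posts. A small sketch of a guard (safe_filename is a made-up helper, not part of the answer above):

import re

def safe_filename(title, max_len=100):
    # Replace characters that are illegal in Windows/Unix filenames and cap the length
    return re.sub(r'[\\/:*?"<>|]', '_', title)[:max_len]

# Usage: open('/yourfullpath/' + safe_filename(title) + '.txt', 'w', encoding='utf-8')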

Answer 1 (score: 0)

You can use a locator like:

td.s_tit > a
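
That locator matches every result link on the page at once, so there is no need to guess the nth-child index. A minimal sketch of how it could be used with the question's driver (assumed, not verified against the site):

from selenium.webdriver.common.by import By

# Collect all matching links in one call instead of one row at a time
links = driver.find_elements(By.CSS_SELECTOR, "td.s_tit > a")
print("found %d links" % len(links))
# Note: these references go stale after a click; re-locate them after each navigation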