Scraping a news site with Python

Posted: 2019-03-13 11:15:32

Tags: python beautifulsoup scrape

I'm trying to scrape some news. I've picked out 3k articles from this site according to my criteria, so the list is fairly large, and (bearing in mind that I'm new to Python) I'm using this script to scrape them:

import pandas as pd
import bs4

from urllib.request import urlopen
from bs4 import BeautifulSoup

import csv
# get the URL list
list1 = []

a = 'https://www.dnes.bg/sofia/2019/03/13/borisov-se-pohvali-prihodite-ot-gorivata-sa-sys-7-poveche.404467'
b = 'https://www.dnes.bg/obshtestvo/2019/03/13/pazim-ezika-si-pravopis-pod-patronaja-na-radeva.404462'
c = 'https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091'
list1.append(a)
list1.append(b)
list1.append(c)
# define the variables
#url = "https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091"
list2 = list1 #[0:10]
#type(list2)

href = []
title = []
subtitle = []
time = []
article = []
art1 = []

#
#dd = soup.find("div", "art_author").text
#dd

filename = "scraped.csv"
f = open(filename, "w")
#headers = "href;title;subtitle;time;article\n"
headers = "title;subtitle;time;article\n"
f.write(headers)


for url in list2:
    html = urlopen(url)
    soup = BeautifulSoup(html, 'lxml').decode('windows-1251')

    href = url
    title = soup.find("h1", "title").string
    #title = soup.find("h1", "title").string
    #title.extend(soup.find("h1", "title").string) # the title string
    subtitle = soup.find("div", "descr").string
    #subtitle.extend(soup.find("div", "descr").string) # the subtitle string
    time = soup.find("div", "art_author").text
    #time.extend(soup.find("div", "art_author").text)
    #par = soup.find("div", id="art_start").find_all("p")
    art1.extend(soup.find("div", id="art_start").find_all("p"))

    for a in art1:
        #article.extend(art1.find_all("p"))
        article = ([a.text.strip()])
        break

    #href = "".join(href)    
    title = "".join(title)
    subtitle = "".join(subtitle)
    time = "".join(time)
    article = "".join(article)

    #f.write(href + ";" + title + ";" + subtitle + ";" + time + ";" + article + "\n")
    f.write(title + ";" + subtitle + ";" + time + ";" + article +"\n")
f.close()

The main problem at the moment is that I'm getting an error:

  File "<ipython-input-12-9a796b182a82>", line 24, in <module>
    title = soup.find("h1", "title").string
TypeError: slice indices must be integers or None or have an __index__ method

I really can't find a solution for it.

The second problem is that even when I do scrape a site successfully, some cells come out empty, which means I have to find a way to get at the content loaded via Ajax.

I'm using Anaconda release 2018.12.

2 Answers:

Answer 0 (score: 0)

OK. I've sorted out the problem where your soup object was being stored as a string, so that you can actually use bs4 to parse the HTML. Calling .decode() on the soup turns it into a plain str, and str.find(sub, start, end) expects integer slice indices, which is exactly what your TypeError is complaining about. I also went with pandas' .to_csv() because I'm more familiar with it, but it gets you the output you need:
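To see the failure in isolation, here is a minimal sketch (the markup is invented for the demo):

from bs4 import BeautifulSoup

s = BeautifulSoup("<h1 class='title'>demo</h1>", "html.parser").decode('windows-1251')
s.find("h1", "title")  # s is now a plain str, so this is str.find(sub, start, end)
                       # and "title" is used as a slice index -> your exact TypeError

The full, fixed script: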

import pandas as pd
from bs4 import BeautifulSoup
import requests


# get the URL list
list1 = [
    'https://www.dnes.bg/sofia/2019/03/13/borisov-se-pohvali-prihodite-ot-gorivata-sa-sys-7-poveche.404467',
    'https://www.dnes.bg/obshtestvo/2019/03/13/pazim-ezika-si-pravopis-pod-patronaja-na-radeva.404462',
    'https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091',
]
list2 = list1  # slice here (e.g. list1[0:10]) to test on a subset



results = pd.DataFrame()
for url in list2:

    html = requests.get(url)
    soup = BeautifulSoup(html.text, 'html.parser')

    title = soup.find("h1", "title").text
    subtitle = soup.find("div", "descr").text
    time = soup.find("div", "art_author").text
    art1 = soup.find("div", id="art_start").find_all("p")

    # skip the <p> tags that only hold inline googletag ad-loader scripts
    article = []
    for a in art1:
        if 'googletag.cmd.push' not in a.text:
            article.append(a.text.strip())
    article = ' '.join(article)



    temp_df = pd.DataFrame([[title, subtitle, time, article]], columns = ['title','subtitle','time','article'])
    results = results.append(temp_df).reset_index(drop=True)

results.to_csv("scraped.csv", index=False, encoding='utf-8-sig')
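One caveat on the requests call: requests guesses the character set from the HTTP headers, and dnes.bg serves Cyrillic text. If the scraped strings ever come out garbled, forcing the encoding before reading .text is worth a try (a sketch; apparent_encoding is requests' content-based guess):

import requests
from bs4 import BeautifulSoup

html = requests.get('https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091')
html.encoding = html.apparent_encoding  # or 'windows-1251' explicitly, which your original code assumed
soup = BeautifulSoup(html.text, 'html.parser')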

Output:

print(results.to_string())
                                               title                                           subtitle                                               time                                            article
0  Борисов се похвали: Приходите от горивата са с...  Мерките за изсветляване на сектора действат, к...  Обновена: 13 мар 2019 13:24 | 13 мар 2019 11:3...  Приходите от горивата са със 7% повече. Това с...
1  "Пазим езика си": Правопис под патронажа на Ра...  Грамотността зависи не само от училището, смят...  Обновена: 13 мар 2019 11:34 | 13 мар 2019 11:2...  За втора поредна година Сдружение "Живата вода...
2  Политиката – "неканен гост" на празничната нов...  Основателни ли бяха критиките на президента Ру...               3 яну 2019 10:45, Цветелин Димитров   Оказа ли се политиката "неканен гост" на празн...
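A side note on how the frame is built: DataFrame.append copies the accumulated rows on every loop iteration, and it has been removed in pandas 2.0+. If this script ever breaks on a newer pandas, collecting plain dicts and building the frame once is the drop-in replacement (a sketch with placeholder values):

import pandas as pd

rows = []  # append one dict per scraped article inside the loop
rows.append({'title': 't', 'subtitle': 's', 'time': '13 мар 2019', 'article': 'a'})
results = pd.DataFrame(rows, columns=['title', 'subtitle', 'time', 'article'])
results.to_csv("scraped.csv", index=False, encoding='utf-8-sig')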

Answer 1 (score: 0)

Something I stumbled upon (here: https://www.youtube.com/watch?v=FSH77vnOGqU) that renders the JavaScript before parsing, which should help with the Ajax problem:

import bs4 as bs
import sys
from PyQt5.QtWebEngineWidgets import QWebEnginePage
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QUrl


class Page(QWebEnginePage):
    """Load a URL in a headless QtWebEngine page and capture the HTML
    after the page's JavaScript has run."""

    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebEnginePage.__init__(self)
        self.html = ''
        self.loadFinished.connect(self._on_load_finished)
        self.load(QUrl(url))
        self.app.exec_()  # block here until Callable() quits the event loop

    def _on_load_finished(self):
        # toHtml() is asynchronous: it delivers the HTML to the callback
        self.toHtml(self.Callable)
        print('Load finished')

    def Callable(self, html_str):
        self.html = html_str
        self.app.quit()



def main():
    page = Page('https://pythonprogramming.net/parsememcparseface/')
    soup = bs.BeautifulSoup(page.html, 'html.parser')
    js_test = soup.find('p', class_='jstest')
    print(js_test.text)

if __name__ == '__main__': main()
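Applied to the question, the rendered HTML can then be fed through the same BeautifulSoup calls as before. A sketch reusing the Page class above (untested against dnes.bg; QApplication is meant to be created once per process, so this handles a single URL):

# assumes the imports and the Page class from the block above
page = Page('https://www.dnes.bg/politika/2019/01/03/politikata-nekanen-gost-na-praznichnata-novogodishna-trapeza.398091')
soup = bs.BeautifulSoup(page.html, 'html.parser')
print(soup.find('h1', 'title').text)  # same selectors as in the question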