Scraping internal links with Beautiful Soup

Time: 2013-10-03 20:02:48

Tags: python beautifulsoup web-crawler

I have written Python code that fetches the web page corresponding to a given URL and parses all the links on that page into a repository of links. Next, it fetches the contents of any URL from the repository it just built, parses the links from this new content into the repository, and continues this process for all links in the repository until it is stopped or a given number of links has been fetched.

Here is the code:

import BeautifulSoup
import urllib2
import itertools
import random


class Crawler(object):
    """docstring for Crawler"""

    def __init__(self):

        self.soup = None                                        # Beautiful Soup object
        self.current_page   = "http://www.python.org/"          # Current page's address
        self.links          = set()                             # Queue with every links fetched
        self.visited_links  = set()

        self.counter = 0 # Simple counter for debug purpose

    def open(self):

        # Open url
        print self.counter , ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every links
        self.soup = BeautifulSoup.BeautifulSoup(html_code)

        page_links = []
        try :
            page_links = itertools.ifilter(  # Only deal with absolute links
                            lambda href: 'http://' in href,
                            ( a.get('href') for a in self.soup.findAll('a') )  )
        except Exception: # Magnificent exception handling
            pass

        # Update links
        self.links = self.links.union( set(page_links) )

        # Choose a random url from non-visited set
        self.current_page = random.sample( self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):

        # Crawl 3 webpages (or stop if all url has been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()

        for link in self.links:
            print link


if __name__ == '__main__':

    C = Crawler()
    C.run()

This code does not fetch internal links (only absolutely formed hyperlinks).

How can I fetch internal links that start with "/", "#", or "."?

2 answers:

Answer 0 (score: 7)

Well, your code already tells you what is going on. In your lambda you only grab absolute links that start with http:// (and you are not grabbing https, FWIW). You should grab all of the links and check whether they start with http or not. If they don't, they are relative links, and since you know what current_page is, you can use it to build an absolute link.

Here is a modification of your code. Excuse my Python, it is a little rusty, but I ran it and it works for me in Python 2.7. You will want to clean it up and add some edge/error checking, but you get the gist:

#!/usr/bin/python

from bs4 import BeautifulSoup
import urllib2
import itertools
import random
import urlparse


class Crawler(object):
    """docstring for Crawler"""

    def __init__(self):
        self.soup = None                                        # Beautiful Soup object
        self.current_page   = "http://www.python.org/"          # Current page's address
        self.links          = set()                             # Queue with every links fetched
        self.visited_links  = set()

        self.counter = 0 # Simple counter for debug purpose

    def open(self):

        # Open url
        print self.counter , ":", self.current_page
        res = urllib2.urlopen(self.current_page)
        html_code = res.read()
        self.visited_links.add(self.current_page)

        # Fetch every links
        self.soup = BeautifulSoup(html_code)

        page_links = []
        try :
            for link in [h.get('href') for h in self.soup.find_all('a')]:
                print "Found link: '" + link + "'"
                if link.startswith('http'):
                    page_links.append(link)
                    print "Adding link" + link + "\n"
                elif link.startswith('/'):
                    parts = urlparse.urlparse(self.current_page)
                    page_links.append(parts.scheme + '://' + parts.netloc + link)
                    print "Adding link " + parts.scheme + '://' + parts.netloc + link + "\n"
                else:
                    page_links.append(self.current_page + link)
                    print "Adding link " + self.current_page + link + "\n"

        except Exception, ex: # Magnificent exception handling
            print ex

        # Update links
        self.links = self.links.union( set(page_links) )

        # Choose a random url from non-visited set
        self.current_page = random.sample( self.links.difference(self.visited_links), 1)[0]
        self.counter += 1

    def run(self):

        # Crawl 3 webpages (or stop if all url has been fetched)
        while len(self.visited_links) < 3 or (self.visited_links == self.links):
            self.open()

        for link in self.links:
            print link


if __name__ == '__main__':
    C = Crawler()
    C.run()
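As an aside (my addition, not part of the original answer): the manual scheme + netloc reconstruction above can also be done with urlparse.urljoin from the standard library (already imported in the code above), which resolves "/", "#" and "./" style hrefs against the page they came from in a single call. A minimal sketch of what the loop body could look like under that assumption:

for link in [h.get('href') for h in self.soup.find_all('a')]:
    if not link:
        continue  # skip <a> tags without an href attribute
    # urljoin resolves absolute, root-relative ('/...'), fragment ('#...')
    # and relative ('./...') hrefs against the current page in one step
    page_links.append(urlparse.urljoin(self.current_page, link))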

Answer 1 (score: 1)

Change the condition in the lambda:

page_links = itertools.ifilter(  # Only deal with absolute links
                lambda href: 'http://' in href or href.startswith('/') or href.startswith('#') or href.startswith('.'),
                ( a.get('href') for a in self.soup.findAll('a') )  )
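Note (my addition, not part of this answer): this filter only lets the relative hrefs through; before they can be passed to urllib2.urlopen they still have to be resolved against the page they were found on, for example with urlparse.urljoin. A minimal sketch, assuming it replaces the union step in open():

import urlparse  # standard library on Python 2 (urllib.parse on Python 3)

# Turn every collected href, absolute or relative, into an absolute URL
self.links = self.links.union(
    urlparse.urljoin(self.current_page, href) for href in page_links
)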