使用 Python 和 BeautifulSoup 获取 href

时间:2019-05-20 07:59:39

标签: python web-scraping beautifulsoup location-href

我正在尝试抓取一个房地产网站,但不知道如何获取每个房产详情页的 URL(即链接的 href)。

这是我现在拥有的代码:

我还是编码的新手,我搜索了其他类似的主题,但是找不到最适合我的问题的答案。


"""Scrape apartment listings from immoweb.be with Selenium + BeautifulSoup.

Collects title, address, price, surface, description and the per-property
link from every results page, then writes the data to ``immo_a.csv``.
"""
import time

import pandas as pd
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Fixed window size plus a randomized user agent to look less like a bot.
options = Options()
options.add_argument("window-size=1400,600")
user_agent = UserAgent().random
print(user_agent)
options.add_argument(f'user-agent={user_agent}')

driver = webdriver.Chrome('/Users/raduulea/Documents/chromedriver', options=options)
driver.get('https://www.immoweb.be/fr/recherche/appartement/a-vendre')
time.sleep(10)  # let the dynamically rendered listings finish loading

Title = []
address = []
price = []
surface = []
desc = []
link = []
page = 2  # page 1 is already loaded; the first navigation goes to page 2

try:
    while True:
        time.sleep(10)  # throttle requests and give each page time to render
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # Listing cards come in three size variants (xl / l / m); match any tag
        # carrying one of those classes.
        results = soup.find_all(True, {"class": ["result-xl", "result-l", "result-m"]})
        for result in results:
            Title.append(result.find("div", {"class": "title-bar-left"}).get_text().strip())
            # NOTE: "result-adress" is the site's own (misspelled) class name.
            address.append(result.find("span", {"result-adress"}).get_text().strip())
            price.append(result.find("div", {"class": ["xl-price rangePrice", "l-price rangePrice", "m-price rangePrice", "xl-price-promotion rangePrice"]}).get_text().strip())
            surface.append(result.find("div", {"class": ["xl-surface-ch", "l-surface-ch", "m-surface-ch"]}).get_text().strip())
            desc.append(result.find("div", {"class": ["xl-desc", "l-desc", "m-desc"]}).get_text().strip())
            # The per-property URL is the href of the first anchor in the card
            # (the <a target="IWEB_MAIN" href="..."> element).
            anchor = result.find("a", href=True)
            link.append(anchor["href"] if anchor else None)
        # Keep paginating only while a "next" control exists on the page.
        if driver.find_elements_by_css_selector("a.next"):
            driver.get("https://www.immoweb.be/fr/recherche/appartement/a-vendre/?page={}".format(page))
            page += 1
        else:
            break
finally:
    driver.quit()  # always release the browser, even if scraping raises

df = pd.DataFrame({"Title": Title, "Address": address, "Price:": price, "Surface": surface, "Description": desc, "Link": link})
df.to_csv("immo_a.csv")

这是可从中获取链接的HTML:

<a href="https://www.immoweb.be/fr/annonce/immeuble-a-appartements/a-vendre/hoboken/2660/id8135041" title="Immeuble à appartements de 2 façades à vendre à 2660 Hoboken au prix de 545.000 € - (8135041)" target="IWEB_MAIN" xpath="1"></a>

预先感谢您的帮助! :)

2 个答案:

答案 0 :(得分:0)

如果我对你的问题理解正确,下面的代码片段应该对你有帮助。我使用的是 BeautifulSoup。前提是这些链接都带有 target="IWEB_MAIN" 这个共同的属性。

from bs4 import BeautifulSoup

# Example anchor copied from the listing page; in the real scraper this would
# be driver.page_source.  The original snippet opened the literal with four
# quotes (''''<a ...) which put a stray apostrophe at the start of the markup.
data = '''<a href="https://www.immoweb.be/fr/annonce/immeuble-a-appartements/a-vendre/hoboken/2660/id8135041" title="Immeuble à appartements de 2 façades à vendre à 2660 Hoboken au prix de 545.000 € - (8135041)" target="IWEB_MAIN" xpath="1"></a>'''
# Pass the parser explicitly: it silences bs4's "no parser" warning and makes
# the result independent of which parsers happen to be installed.
soup = BeautifulSoup(data, 'html.parser')
for links in soup.find_all('a', {'target': 'IWEB_MAIN'}):
    href = links.get('href')
    title = links.get('title')
    print(href, title)

答案 1 :(得分:0)

尝试一下:

__init__

哪里

"""Selenium + BeautifulSoup scraper that also collects each property's link.

Fixes relative to the original answer: pagination is moved OUT of the
per-card loop (it used to navigate away after the first card, and its
``break`` only exited the ``for``), an outer ``while`` actually walks every
page, all three card size variants (xl/l/m) are accepted, and the driver is
always quit via ``finally``.
"""
from selenium import webdriver
import time
from bs4 import BeautifulSoup
from bs4.element import Tag
import pandas as pd
import traceback
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent


options = Options()
options.add_argument("window-size=1400,600")
user_agent = UserAgent().random
options.add_argument(f'user-agent={user_agent}')
driver = webdriver.Chrome('/Users/raduulea/Documents/chromedriver', options=options)

driver.get('https://www.immoweb.be/fr/recherche/appartement/a-vendre')
time.sleep(4)  # allow the dynamically rendered results to appear

title = []
address = []
price = []
surface = []
desc = []
link = []

page = 2  # page 1 is already open; the first navigation goes to page 2

try:
    while True:
        # Re-parse the page source on every iteration so each new results
        # page is actually scraped (the original parsed only once).
        soup = BeautifulSoup(driver.page_source, 'lxml')
        companies = soup.find("div", {"id": "result"})
        if companies is None:
            break  # layout changed or page failed to load; stop cleanly

        for tag in companies:
            if not isinstance(tag, Tag):
                continue  # skip NavigableString whitespace between cards

            _class = tag.get('class')
            # Accept every card size variant, not just "result-xl".
            if _class is None or not any(
                c.startswith(("result-xl", "result-l", "result-m")) for c in _class
            ):
                continue

            title.append(tag.find("div", {"class": "title-bar-left"}).get_text().strip())
            # NOTE: "result-adress" is the site's own (misspelled) class name.
            address.append(tag.find("span", {"result-adress"}).get_text().strip())
            price.append(tag.find("div", {"class": ["xl-price rangePrice", "l-price rangePrice", "m-price rangePrice", "xl-price-promotion rangePrice"]}).get_text().strip())
            surface.append(tag.find("div", {"class": ["xl-surface-ch", "l-surface-ch", "m-surface-ch"]}).get_text().strip())
            desc.append(tag.find("div", {"class": ["xl-desc", "l-desc", "m-desc"]}).get_text().strip())

            # The property's URL is the href of the first anchor in the card.
            links = tag.find("a", href=True)
            link.append(links['href'] if links else None)

        # Pagination belongs outside the per-card loop: navigate once per
        # page, after every card on the current page has been recorded.
        if driver.find_elements_by_css_selector("a.next"):
            driver.get("https://www.immoweb.be/fr/recherche/appartement/a-vendre/?page={}".format(page))
            page += 1
            time.sleep(4)  # let the next page render before re-parsing
        else:
            break

except Exception:
    # traceback.print_exc() already writes to stderr; wrapping it in print()
    # (as the original did) just prints a spurious "None".
    traceback.print_exc()
finally:
    driver.quit()  # always release the browser

df = pd.DataFrame({"Title": title, "Address": address, "Price:": price, "Surface": surface, "Description": desc, "Link": link})
df.to_csv("immo_a.csv")

这样即可抓取每个房产的链接

csv 文件输出(关键代码如下):

links = tag.find("a",href=True)
link.append(links['href'])