Retrieve all car links from a dynamic page

Date: 2019-03-14 06:34:24

Tags: python web-scraping

import time
from bs4 import BeautifulSoup
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument("--user-agent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'")
#options.add_argument("headless")
driver = webdriver.Chrome(executable_path="/home/timmy/Python/chromedriver", chrome_options=options)

url="https://turo.com/search?country=US&defaultZoomLevel=7&endDate=03%2F20%2F2019&endTime=10%3A00&international=true&isMapSearch=false&itemsPerPage=200&location=Colorado%2C%20USA&locationType=City&maximumDistanceInMiles=30&northEastLatitude=41.0034439&northEastLongitude=-102.040878&region=CO&sortType=RELEVANCE&southWestLatitude=36.992424&southWestLongitude=-109.060256&startDate=03%2F15%2F2019&startTime=10%3A00"
driver.get(url)


list_of_all_car_links = []
x = 0
while True:
    # Parse the current page source and collect every car detail link.
    html = driver.page_source
    soup = BeautifulSoup(html, "html.parser")
    for i in soup.find_all("a", href=True):
        if i['href'].startswith("/rentals") and len(i['href']) > 31:
            link2 = "https://turo.com" + i['href']
            list_of_all_car_links.append(link2)
    try:
        # Scroll further down; scrolldown raises KeyError at the end of the page.
        x = scrolldown(last_height=x)
    except KeyError:
        #driver.close()
        break

I tried scrolling down and then finding the links, but I only get part of them. This is my scroll-down function:

def scrolldown(last_height=0, SCROLL_PAUSE_TIME=3, num_tries=2):
    # Scroll down to the bottom of the page
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")

    # Wait for the page to load
    time.sleep(SCROLL_PAUSE_TIME)

    new_height = driver.execute_script("return document.body.scrollHeight")

    # Break condition: the height did not change after scrolling
    if last_height == new_height:
        num_tries -= 1
        if num_tries == 0:
            print("Reached end of page")
            raise KeyError
        else:
            # Retry, and return whatever height the retry reaches
            return scrolldown(last_height=last_height, SCROLL_PAUSE_TIME=2, num_tries=num_tries)

    return new_height

I also tried converting the html to BeautifulSoup after every scroll and then finding the links, but I still don't get all of them.

What I want is to get every car link on that page.
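
Since every pass over the page source re-parses links that were already collected, the list built by the loop above ends up with duplicates. Below is a minimal sketch of de-duplicating it while keeping the order in which links were first seen; it assumes the list_of_all_car_links built above:

# Drop duplicate links while preserving first-seen order.
seen = set()
unique_car_links = []
for link in list_of_all_car_links:
    if link not in seen:
        seen.add(link)
        unique_car_links.append(link)
print(len(unique_car_links), "unique car links")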

1 Answer:

Answer 0 (score: 1)

I would use requests and the API that shows up in the XHR list of the browser's dev tools. Note the items-per-page parameter in the query string, itemsPerPage=200. You can try changing it to get a larger result set.

import requests
url = 'https://turo.com/api/search?country=US&defaultZoomLevel=7&endDate=03%2F20%2F2019&endTime=10%3A00&international=true&isMapSearch=false&itemsPerPage=200&location=Colorado%2C%20USA&locationType=City&maximumDistanceInMiles=30&northEastLatitude=41.0034439&northEastLongitude=-102.040878&region=CO&sortType=RELEVANCE&southWestLatitude=36.992424&southWestLongitude=-109.060256&startDate=03%2F15%2F2019&startTime=10%3A00'
baseUrl = 'https://turo.com'
headers = {'Referer' : 'https://turo.com/search?country=US&defaultZoomLevel=7&endDate=03%2F20%2F2019&endTime=10%3A00&international=true&isMapSearch=false&itemsPerPage=200&location=Colorado%2C%20USA&locationType=City&maximumDistanceInMiles=30&northEastLatitude=41.0034439&northEastLongitude=-102.040878&region=CO&sortType=RELEVANCE&southWestLatitude=36.992424&southWestLongitude=-109.060256&startDate=03%2F15%2F2019&startTime=10%3A00',
           'User-Agent' : 'Mozilla/5.0'}

r = requests.get(url, headers=headers).json()
results = []

# Each entry in the JSON 'list' holds a vehicle with a relative detail URL.
for item in r['list']:
    results.append(baseUrl + item['vehicle']['url'])
print(results)
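
As a follow-up on itemsPerPage, here is a minimal sketch of bumping that parameter without hand-editing the long query string. It reuses the url, headers and baseUrl variables from the snippet above, and whether the API accepts values above 200 is an assumption you would need to verify:

from urllib.parse import urlparse, parse_qs, urlencode

# Rebuild the API URL with a larger page size; every other parameter stays the same.
parts = urlparse(url)
query = parse_qs(parts.query)
query['itemsPerPage'] = ['500']   # untested: the API may cap or ignore larger values
bigger_url = parts._replace(query=urlencode(query, doseq=True)).geturl()

r = requests.get(bigger_url, headers=headers).json()
results = [baseUrl + item['vehicle']['url'] for item in r['list']]
print(len(results), 'links')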