Trying to export data from a web crawl to a CSV file

Date: 2017-02-03 10:25:38

Tags: python python-2.7 beautifulsoup

I found this code online and would like to use it, but I can't figure out how to export the collected data to a CSV file.

import urllib
from bs4 import BeautifulSoup


url = "http://www.straitstimes.com/tags/malaysia-crimes"

html = urllib.urlopen(url).read()          # Python 2 urllib
soup = BeautifulSoup(html, "html.parser")  # name a parser explicitly

# kill all script and style elements
for script in soup(["script", "style"]):
    script.extract()    # rip it out

# get text
text = soup.body.get_text()

# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split("    "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)

print(text)

1 Answer:

Answer 0: (score: 0)

The following seems to do what I think you want:

I used the xlwt package to create, write, and save a workbook, then looped through each line of the text and wrote it to a new row. Note that xlwt writes Excel's binary .xls format, so the workbook is saved as testing.xls; giving it a .csv extension would only mislabel the file.

import urllib
from bs4 import BeautifulSoup
from xlwt import Workbook


url = "http://www.straitstimes.com/tags/malaysia-crimes"

html = urllib.urlopen(url).read()          # Python 2 urllib
soup = BeautifulSoup(html, "html.parser")  # name a parser explicitly

# create excel workbook
wb = Workbook()
sheet1 = wb.add_sheet('Sheet 1')

# kill all script and style elements
for script in soup(["script", "style"]):
    script.extract()    # rip it out

# get text
text = soup.body.get_text()

# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split("    "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)

print(text)

# write each line of text to a new row in the first column of the sheet
for row, text_to_write in enumerate(text.splitlines()):
    sheet1.write(row, 0, text_to_write)

wb.save('testing.xls')    # xlwt can only produce .xls, not .csv
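
Since the question asks for an actual CSV file, here is a minimal sketch that skips xlwt entirely and uses only the standard-library csv module; the filename testing.csv and the UTF-8 encoding are my assumptions, and it reuses the text variable built above:

import csv

# write each cleaned line of text as a one-column CSV row
with open('testing.csv', 'wb') as f:   # binary mode for the Python 2 csv module
    writer = csv.writer(f)
    for line in text.splitlines():
        # the Python 2 csv module cannot write unicode directly,
        # so encode each line to UTF-8 first
        writer.writerow([line.encode('utf-8')])

Each headline ends up on its own row, and the resulting file opens directly in any spreadsheet program.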