CSV file with labels

Time: 2018-03-14 17:17:08

Tags: python csv export-to-csv tf-idf sklearn-pandas

As suggested in Python Tf idf algorithm, I use this code to get the word frequencies over a set of documents.

import pandas as pd
import csv
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk import word_tokenize
from nltk.stem.porter import PorterStemmer
import codecs

def tokenize(text):
    # stem every token with a single PorterStemmer instance
    stemmer = PorterStemmer()
    return [stemmer.stem(token) for token in word_tokenize(text)]

with codecs.open("book1.txt",'r','utf-8') as i1,\
        codecs.open("book2.txt",'r','utf-8') as i2,\
        codecs.open("book3.txt",'r','utf-8') as i3:
    # your corpus
    t1=i1.read().replace('\n',' ')
    t2=i2.read().replace('\n',' ')
    t3=i3.read().replace('\n',' ')

    text = [t1,t2,t3]
    # word tokenize and stem
    text = [" ".join(tokenize(txt.lower())) for txt in text]
    vectorizer = TfidfVectorizer()
    matrix = vectorizer.fit_transform(text).todense()
    # transform the matrix to a pandas df
    matrix = pd.DataFrame(matrix, columns=vectorizer.get_feature_names())
    # sum over each document (axis=0)
    top_words = matrix.sum(axis=0).sort_values(ascending=False)

    top_words.to_csv('dict.csv', index=True, float_format="%f",encoding="utf-8")

With the last line, I create a csv file that lists all the words with their frequencies. Is there a way to label them, to see whether a word belongs only to the third file or to all of them? My goal is to remove from the csv file all the words that appear only in the 3rd document (book3.txt).

1 Answer:

Answer 0 (score: 1)

You can use the isin() method to filter the top_words of the third book out of the top_words of the whole corpus.
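
As a minimal, self-contained sketch of the isin() idea (the words and values here are made up, not taken from the books):

import pandas as pd

corpus_words = pd.Series([5.0, 3.0, 2.0], index=['appl', 'banana', 'cherri'])
book3_words = pd.Series([2.0], index=['cherri'])

# keep only the index labels that do NOT occur in book3_words
mask = ~corpus_words.index.isin(book3_words.index)
print(corpus_words[mask])  # 'cherri' is dropped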

(For the full example below I downloaded three random books from http://www.gutenberg.org/)

import codecs
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
# import nltk
# nltk.download('punkt')
from nltk import word_tokenize
from nltk.stem.porter import PorterStemmer

def tokenize(text):
    # stem every token with a single PorterStemmer instance
    stemmer = PorterStemmer()
    return [stemmer.stem(token) for token in word_tokenize(text)]

with codecs.open("56732-0.txt",'r','utf-8') as i1,\
        codecs.open("56734-0.txt",'r','utf-8') as i2,\
        codecs.open("56736-0.txt",'r','utf-8') as i3:
    # your corpus
    t1=i1.read().replace('\n',' ')
    t2=i2.read().replace('\n',' ')
    t3=i3.read().replace('\n',' ')

text = [t1,t2,t3]
# word tokenize and stem
text = [" ".join(tokenize(txt.lower())) for txt in text]
vectorizer = TfidfVectorizer()
matrix = vectorizer.fit_transform(text).todense()
# transform the matrix to a pandas df
matrix = pd.DataFrame(matrix, columns=vectorizer.get_feature_names())
# sum over each document (axis=0)
top_words = matrix.sum(axis=0).sort_values(ascending=False)

# top_words for the 3rd book alone
text = [" ".join(tokenize(t3.lower()))]
matrix = vectorizer.fit_transform(text).todense()
matrix = pd.DataFrame(matrix, columns=vectorizer.get_feature_names())
top_words3 = matrix.sum(axis=0).sort_values(ascending=False)

# Mask out every word that appears in t3 (shared words included)
mask = ~top_words.index.isin(top_words3.index)
# Filter those words from top_words
top_words = top_words[mask]

top_words.to_csv('dict.csv', index=True, float_format="%f",encoding="utf-8")
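
Note that the mask above removes every word that occurs in book 3, including words book 3 shares with books 1 and 2. If the goal is strictly to drop the words that appear only in book 3, one possible sketch, reusing tokenize, t1, t2, top_words and top_words3 from above and applied in place of the mask/filter step (get_feature_names() matches the older scikit-learn used here; newer releases call it get_feature_names_out()):

# vocabulary of books 1 and 2 combined
vec12 = TfidfVectorizer()
vec12.fit([" ".join(tokenize(t.lower())) for t in (t1, t2)])
vocab12 = vec12.get_feature_names()

# a word is exclusive to book 3 if it occurs there but not in books 1/2
exclusive_to_3 = top_words3.index[~top_words3.index.isin(vocab12)]

# drop only those exclusive words from the whole-corpus ranking
top_words = top_words[~top_words.index.isin(exclusive_to_3)]

The resulting top_words can then be written out with the same to_csv call.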