尝试运行cronjob来构建Twitter用户ID列表

时间:2018-11-06 12:32:06

标签: r cron rtweet

我正在尝试建立一个Twitter用户ID列表(还收集生物描述,位置和其他一些参数)。

我想设置一个cronjob,以使该代码每天运行一个月并从Twitter API收集信息。

但是,我担心使用当前代码时,每次运行都会收集一组新的Twitter用户,却不会把这组新数据追加到我的数据框中——数据框里将只剩下最近一次收集到的Twitter用户。

这是相关代码->

# Create an OAuth token to authenticate with the Twitter API via rtweet.
# All credentials are masked with "*" placeholders in this post; fill in
# your own app name, consumer key/secret and access token/secret.
token <- create_token(app="**", consumer_key ="*",
consumer_secret = "*", access_token ="*", access_secret = "*")


# Run one geocoded search per keyword (queries and API keys are masked as
# "**"/"*" in this post).  Each call pulls up to n recent tweets matching
# the keyword within the looked-up coordinates; keyword11 requests 5000
# instead of 2700.
# NOTE(review): these eleven near-identical calls could be collapsed into a
# single lapply over a vector of queries, but the rest of the script refers
# to keyword1..keyword11 by name, so the individual assignments are kept.
# NOTE(review): search_tweets() is rate-limited; for a daily cron job
# consider retryonratelimit = TRUE -- TODO confirm against rtweet docs.
keyword1 <- search_tweets("**", geocode= lookup_coords("**", apikey =
"*"), n = 2700)
keyword2 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword3 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword4 <-search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword5 <-search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700) 
keyword6 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword7 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword8 <- search_tweets ("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword9 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword10 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 2700)
keyword11 <- search_tweets("**", geocode= lookup_coords("**", apikey = 
"**"), n = 5000)

#creating dataframe with relevant columns from data

# Gather the per-keyword result frames once, then concatenate each column
# of interest across all of them.  This replaces six hand-written 11-way
# c(keyword1$col, ..., keyword11$col) calls with a single helper.
keyword_results <- list(keyword1, keyword2, keyword3, keyword4, keyword5,
                        keyword6, keyword7, keyword8, keyword9, keyword10,
                        keyword11)

# pull_col(col): concatenate column `col` across every result set.
# do.call(c, ...) is exactly equivalent to c(k1$col, ..., k11$col) and,
# unlike unlist(), also preserves list-columns such as `hashtags`.
pull_col <- function(col) {
  do.call(c, lapply(keyword_results, `[[`, col))
}

users          <- pull_col("user_id")
screen         <- pull_col("screen_name")
followers      <- pull_col("followers_count")
place          <- pull_col("location")
tweet_hashtags <- pull_col("hashtags")
descript       <- pull_col("description")

# Assemble the collected vectors into data frames and drop duplicate rows.
# `frame` carries the core user fields; `frame2` additionally carries the
# profile description used by the text cleaning below.
frame <- data.frame(users, screen, followers, place)
unique_frame <- unique(frame)

# cbind() on a data frame appends the vector as a column named "descript",
# identical to listing it inside data.frame().
frame2 <- cbind(frame, descript)
unique_frame2 <- unique(frame2)

# Convenience handle on the deduplicated descriptions.
frame2_descr <- unique_frame2$descript

#replace and replace with spaces- cleaning up description
# Clean the profile descriptions before text analysis.
remove1 <- gsub("[[:punct:]]", " ", unique_frame2$descript)  # strip punctuation
# BUG FIX: the original pattern "[[digit:]]" is NOT a POSIX class -- it is a
# literal character set matching "[", "d", "i", "g", "t" and ":", so it was
# deleting those letters from every description.  "[[:digit:]]" matches digits.
remove2 <- gsub("[[:digit:]]", " ", remove1)                 # strip digits
# Transliterate to ASCII, replacing unconvertible symbols with a space.
cleaned <- iconv(remove2, from = "latin1", to = "ASCII", sub = " ")

# Stop words that appear frequently in the descriptions but carry no
# analytic value.  unique() drops the accidental duplicates in the
# hand-written list ("fan", "mor").
words <- unique(c("the", "com", "https", "gmail", "bio", "just", "don", "live", "can", "real", "things", "best", "you", "follow", "everything", "believe", "get", "trying", "day", "for", "mor", "first", "born", "hate", "good", "great", "high", "rself", "back", "time", "always", "tweet", "say", "anything", "tweets", "think", "never", "know", "see", "guy", "will", "making", "now", "twitter", "free", "make", "doesn", "one", "chelseafc", "got", "views", "hard", "south", "world", "self", "around", "fan", "addict", "not", "thing", "when", "far", "want", "give", "hop", "host", "boy", "life", "god", "official", "alumni", "email", "new", "king", "like", "living", "change", "ing", "going", "jesus"))

# BUG FIX: the original bare alternation matched *substrings*, so e.g.
# "the" also deleted the middle of "weather" and "one" gutted "money".
# Wrapping the alternation in \b word-boundary anchors restricts each
# alternative to whole words only.
cleaned <- gsub(paste0("\\b(", paste(words, collapse = "|"), ")\\b"), "",
                cleaned, ignore.case = TRUE)

# Attach the cleaned description column to the deduplicated frame.
unique_frame_df <- cbind(unique_frame2, cleaned)
screenName <- unique_frame_df$screen

# Persist the result locally.
# BUG FIX: the original file name contained leaked markdown
# ("twitter_list*emphasized text*.csv"); row.names = FALSE keeps the CSV
# free of the auto-generated row-number column so appended runs line up.
write.csv(unique_frame_df, file = "twitter_list.csv", row.names = FALSE)

只需重申一下期望的结果就是能够运行cron作业,并将新数据附加到我现有的数据框中

1 个答案:

答案 0 :(得分:0)

您的代码只会用一个新的csv覆盖现有的csv。您可以使用 append = TRUE 和 col.names = FALSE 参数(write.table 没有 header 参数)把新数据追加到现有的csv文件:

write.table(unique_frame_df,file ="twitter_list.csv", header=FALSE, append=TRUE`, sep=',')

请注意:write.csv 被有意设计得不够灵活,以确保生成的是有效的 csv 文件,因此追加时您需要改用 write.table。

您可能还想添加一个日期时间戳(timestamp)列,以指示每条观察值是在什么时候收集的。