#' searchscrap
#'
#' Use twitterscraper to scrape tweets over a period of time.
#'
#' Shells out to the external \code{twitterscraper} command-line tool, writes
#' its CSV output to a randomly named file in the working directory, reads it
#' back with \code{readr}, and reshapes the result to an rtweet-like layout.
#'
#' @importFrom stringi stri_rand_strings
#' @import dplyr
#' @import readr
#' @param request The query corresponding to the data to search for. Must be
#'   a character string.
#' @param startday The beginning of the desired period, as "YYYY-MM-DD".
#'   Default (\code{""}) falls back to the launch of Twitter (2006-03-21).
#' @param endday The end of the desired period. Default is today's date.
#' @return A tibble of the retrieved tweets with columns \code{status_id},
#'   \code{screen_name}, \code{created_at}, \code{text},
#'   \code{favorite_count}, \code{retweet_count}, \code{reply_count},
#'   \code{is_retweet}, \code{name} and \code{url}.
#' @export
searchscrap <- function(request,
                        startday = "",
                        endday = Sys.Date()
                        ) {
  # Scalar condition: plain if/else, not ifelse() (which is for vectors).
  begin_flag <- if (startday == "") {
    " -bd 2006-03-21"
  } else {
    paste0(" -bd ", startday)
  }
  # Random basename for the intermediate CSV written by twitterscraper.
  tempfile_name <- stringi::stri_rand_strings(1, 28, "[a-z]")
  csv_path <- paste0(tempfile_name, ".csv")
  # Ensure the intermediate file is removed even if parsing below fails.
  on.exit(if (file.exists(csv_path)) file.remove(csv_path), add = TRUE)
  # shQuote() guards against shell metacharacters in the user-supplied query
  # (the previous hand-rolled \"...\" quoting allowed shell injection).
  system(paste0("twitterscraper ", shQuote(request), begin_flag,
                " -ed ", as.character(endday), " -c -o ", csv_path))
  # twitterscraper writes no file at all when nothing matched.
  if (!file.exists(csv_path)) {
    stop("No tweet retrieved.", call. = FALSE)
  }
  # Explicit col_types: keep tweet-id as character (too large for integer)
  # and parse the timestamp as a datetime.
  read_csv(csv_path, col_types = cols(
    user = col_character(),
    fullname = col_character(),
    `tweet-id` = col_character(),
    timestamp = col_datetime(format = ""),
    url = col_character(),
    likes = col_integer(),
    replies = col_integer(),
    retweets = col_integer(),
    text = col_character(),
    html = col_character()
  )) %>%
    # Rename columns to match the rtweet naming convention.
    rename(status_id = `tweet-id`,
           screen_name = user,
           name = fullname,
           created_at = timestamp,
           favorite_count = likes,
           retweet_count = retweets,
           reply_count = replies) %>%
    # twitterscraper returns relative URLs; search results never include
    # retweets, hence the constant FALSE.
    mutate(url = paste0("https://twitter.com", url),
           is_retweet = FALSE) %>%
    select(status_id, screen_name, created_at, text, favorite_count,
           retweet_count, reply_count, is_retweet, name, url)
}
# Add the following code to your website.
# For more information on customizing the embed code, read Embedding Snippets.