---- PURPOSE ---- given a channel_id, return a list of all of that channel's playlists and save them to data/


knitr::opts_chunk$set(echo     = FALSE,
                      comment  = "      ##",
                      error    = TRUE,
                      include  = FALSE,
                      collapse = TRUE)
load_all()   # devtools::load_all(): load this package's helpers (initialize, query_params, get_json, ...)
## initialize() takes no args and returns a list of params used throughout this file
global_params <- initialize()
global_params
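
initialize() is a package helper, so its exact return value lives in the package source. Based only on how global_params is used below (api$api_key and config), its shape is roughly the sketch that follows; the env-var name and everything else here are assumptions, not the package's implementation.

## Hypothetical sketch only -- NOT the package's initialize():
initialize_sketch <- function() {
  list(
    api    = list(api_key = Sys.getenv("YT_API_KEY")),  # assumed env var holding the API key
    config = httr::config()                             # placeholder httr config (e.g. OAuth token)
  )
}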
#----------------
## For playlists, Google wants:
#----------------
{
    base_url <- "https://www.googleapis.com/youtube/v3/playlists"
    part  <-  "snippet"
    fields=paste(sep=",", 
                   "nextPageToken",
                  "items(id,snippet(title,description,publishedAt))"
                  )
    class  <- "playlists"

    query_params  <- query_params(url = base_url, part = part, fields = fields,
                                  class = class)
    query_params
}
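query_params() is also a package helper. Judging from how its result is unpacked below (url, part, fields, and a defaults list holding channelId and maxResults), it returns something like the sketch below; the example channelId and the maxResults value are assumptions, not values from the package.

## Hypothetical sketch of the query_params() return value (not the real helper):
query_params_sketch <- list(
    url      = base_url,
    part     = part,
    fields   = fields,
    class    = class,
    defaults = list(channelId  = "UC_x5XG1OV2P6uZZ5FSM9Ttw",   # example channel id only
                    maxResults = 50)                            # 50 is the API maximum per page
)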

Assemble the query list for httr::GET()

## from global_params

    api  <- global_params$api 
    key  <- api$api_key
    config  <-  global_params$config


## from query_params
{
    url        <- query_params$url
    part       <- query_params$part
    fields     <- query_params$fields
    defaults   <- query_params$defaults
    channelId  <- defaults$channelId
    # videoId    <- defaults$videoId
    # playlistId <- defaults$playlistId
    maxResults <- defaults$maxResults
    key        <- api$api_key
}

## last chance to change video/playlist
# spare:
# videoId  <- "Mec9sw1cJk8"  # carter
#playlistId  <- "PLbcglKxZP5PMZ7afIT7E2o9NwQIzqTI5l"

    query  <- list(part       = part,
                   channelId  = channelId,
                   maxResults = maxResults,
                   fields     = fields,
                   url        = url,
                   key        = key,
                   pageToken  = NULL)   # pageToken is filled in later, when paging

    query
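
As an optional sanity check, one way to eyeball the assembled request is to build the full URL with httr::modify_url(), dropping the NULL pageToken first to be safe. The printed URL contains the API key, so do not commit the output.

## optional: preview the full request URL (contains the key -- do not commit)
httr::modify_url(url, query = Filter(Negate(is.null), query))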

HELPER:

get_batch <- function(url = NULL, query = NULL, config = NULL) {
  res <- httr::GET(url = url, query = query, config = config) %>%
    httr::stop_for_status()   # abort with an informative error on any 4xx/5xx
  res                         # return the httr response object
}

From r, process the playlists with a helper function: for each batch, extract the playlistId and the snippet fields, then rbind to the existing playlists.

process_playlists <- function(r, playlists = tibble::tibble()) {
  json_content <- get_json(r)
  # each item contributes its id plus the snippet fields (title, description, publishedAt)
  next_playlists <- cbind(playlistId = json_content$items$id,
                          json_content$items$snippet)
  next_playlists <- tibble::as_tibble(next_playlists)
  playlists <- rbind(playlists, next_playlists)
  playlists   # return the accumulated tibble
}
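
get_json() is another package helper. If you need a stand-in, a minimal sketch (an assumption, not the package's implementation) is to parse the response body with jsonlite; for this endpoint, fromJSON() yields items as a data frame with an id column and a nested snippet data frame, which is the shape process_playlists() expects.

## Hypothetical stand-in for get_json() -- not the package's version:
get_json_sketch <- function(r) {
  httr::content(r, as = "text", encoding = "UTF-8") %>%
    jsonlite::fromJSON()
}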

get_batch() is the only place this file touches the network; it is called once per page below.

SCOPE? auth_token? Something fails with the auth token.
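
One hedged workaround, if the OAuth token is what fails: listing public playlists by channelId needs only the API key, so the request can be retried with an empty httr config in place of the token.

## if config (the token) is the problem, public playlist listing works with just the key:
# r <- get_batch(url = url, query = query, config = httr::config())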

expect > 100 playlists

Obtain the first batch (up to 50 items, the API maximum per page) and put it into the tibble playlists.

r  <- get_batch(url = url, query = query, config = config)
playlists  <- process_playlists(r)
playlists
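
Optional: a quick look at the structure of the first batch before looping.

dplyr::glimpse(playlists)   # columns: playlistId, title, description, publishedAt
nrow(playlists)             # should be <= maxResults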

If more batches remain, the response carries a nextPageToken; keep requesting pages until it is gone.

httr::content(r)$nextPageToken   # peek at the token returned with the last batch

while (!is.null(httr::content(r)$nextPageToken)) {
    query$pageToken <- httr::content(r)$nextPageToken
    r               <- get_batch(url = url, query = query, config = config)
    playlists       <- process_playlists(r, playlists)
} # end loop
playlists
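
Since we expect > 100 playlists, a quick sanity check that pagination actually pulled more than one page and introduced no duplicates:

nrow(playlists)                            # expect > 100
anyDuplicated(playlists$playlistId) == 0   # TRUE means every playlistId is unique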

Save the playlists.

BE SURE data/ exists!
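
If data/ might not exist yet, one way to guard against that:

## create data/ under the project root if it is missing
if (!dir.exists(here::here("data"))) dir.create(here::here("data"))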

saveRDS(playlists, file = here::here("data", "playlists.RDS"))

Read the playlists back.

x  <- readRDS(here::here("data", "playlists.RDS"))
x

Prettify the date: convert publishedAt (an ISO 8601 string) to a plain Date column.

# Google stores dates as ISO 8601 strings.
# Two lubridate steps are needed: as_datetime() parses the ISO 8601 string into a
# POSIXct timestamp, then as_date() drops the time-of-day, leaving a Date.
playlists <- playlists %>% 
    dplyr::mutate(date= lubridate::as_date(
                                    lubridate::as_datetime(publishedAt))) %>% 
    dplyr::select(-c(publishedAt))
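
A tiny illustration of the two-step conversion on a made-up ISO 8601 value:

iso <- "2021-03-01T12:34:56Z"                     # illustrative value only
lubridate::as_datetime(iso)                       # POSIXct, UTC
lubridate::as_date(lubridate::as_datetime(iso))   # Date: 2021-03-01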

playlists
# example playlistId  
playlists$playlistId[[2]]

Move to data.table.

library(data.table)
dt  <- as.data.table(playlists)
dt
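
With the playlists in a data.table, filtering uses the dt[i, j, by] syntax. Two illustrative queries (the search term is made up; title and date are the columns created above):

dt[grepl("music", title, ignore.case = TRUE)]   # playlists whose title mentions "music"
dt[date >= as.Date("2022-01-01")]               # playlists created in 2022 or later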

library(knitr)
dt %>% knitr::kable()
saveRDS(dt, file = here::here("data", "dt_playlists.RDS"))
x  <- readRDS(here::here("data", "dt_playlists.RDS"))
identical(dt,x)
x %>% knitr::kable()
dt %>% knitr::kable()
print(playlists[,2:4], n=200)
{
file <- "/home/jim/code/youtube_api/rmd/021_LIST_all_playlists.Rmd"
file  <- here::here("rmd", basename(file))   # resolve against the project root
file
}


rmarkdown::render(file,
                  #output_format="html_document")
                  output_format="pdf_document")

