# R/datapipeline_service.R

# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL

#' AWS Data Pipeline
#'
#' @description
#' AWS Data Pipeline configures and manages a data-driven workflow called a
#' pipeline. AWS Data Pipeline handles the details of scheduling and
#' ensuring that data dependencies are met so that your application can
#' focus on processing the data.
#' 
#' AWS Data Pipeline provides a JAR implementation of a task runner called
#' AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides
#' logic for common data management scenarios, such as performing database
#' queries and running data analysis using Amazon Elastic MapReduce (Amazon
#' EMR). You can use AWS Data Pipeline Task Runner as your task runner, or
#' you can write your own task runner to provide custom data management.
#' 
#' AWS Data Pipeline implements two main sets of functionality. Use the
#' first set to create a pipeline and define data sources, schedules,
#' dependencies, and the transforms to be performed on the data. Use the
#' second set in your task runner application to receive the next task
#' ready for processing. The logic for performing the task, such as
#' querying the data, running data analysis, or converting the data from
#' one format to another, is contained within the task runner. The task
#' runner performs the task assigned to it by the web service, reporting
#' progress to the web service as it does so. When the task is done, the
#' task runner reports the final success or failure of the task to the web
#' service.
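#'
#' As a rough, illustrative sketch (not part of the generated reference;
#' the pipeline objects, worker group name, and task handling below are
#' placeholders), the two sets of operations could be combined like this:
#'
#' ```
#' svc <- datapipeline()
#'
#' # First set: create a pipeline, upload a definition, and activate it.
#' pipeline <- svc$create_pipeline(name = "my-pipeline", uniqueId = "my-unique-token")
#' svc$put_pipeline_definition(
#'   pipelineId = pipeline$pipelineId,
#'   pipelineObjects = list(
#'     list(
#'       id = "Default",
#'       name = "Default",
#'       fields = list(
#'         list(key = "workerGroup", stringValue = "my-worker-group")
#'       )
#'     )
#'   )
#' )
#' svc$activate_pipeline(pipelineId = pipeline$pipelineId)
#'
#' # Second set: a task runner polls for work, reports progress, and
#' # reports the final status back to the web service.
#' repeat {
#'   task <- svc$poll_for_task(workerGroup = "my-worker-group")$taskObject
#'   if (is.null(task$taskId)) next
#'   svc$report_task_progress(taskId = task$taskId)
#'   # ... perform the work described by task$objects here ...
#'   svc$set_task_status(taskId = task$taskId, taskStatus = "FINISHED")
#' }
#' ```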
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter.
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for the complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for the AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- datapipeline(
#'   config = list(
#'     credentials = list(
#'       creds = list(
#'         access_key_id = "string",
#'         secret_access_key = "string",
#'         session_token = "string"
#'       ),
#'       profile = "string",
#'       anonymous = "logical"
#'     ),
#'     endpoint = "string",
#'     region = "string",
#'     close_connection = "logical",
#'     timeout = "numeric",
#'     s3_force_path_style = "logical",
#'     sts_regional_endpoint = "string"
#'   ),
#'   credentials = list(
#'     creds = list(
#'       access_key_id = "string",
#'       secret_access_key = "string",
#'       session_token = "string"
#'     ),
#'     profile = "string",
#'     anonymous = "logical"
#'   ),
#'   endpoint = "string",
#'   region = "string"
#' )
#' ```
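#'
#' As an illustrative note (the profile name and region below are
#' placeholders), the `credentials`, `endpoint`, and `region` shorthand
#' arguments are merged into `config`, so the following two calls select
#' the same profile and region:
#'
#' ```
#' svc <- datapipeline(
#'   config = list(
#'     credentials = list(profile = "my-profile"),
#'     region = "us-east-1"
#'   )
#' )
#' svc <- datapipeline(
#'   credentials = list(profile = "my-profile"),
#'   region = "us-east-1"
#' )
#' ```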
#'
#' @examples
#' \dontrun{
#' svc <- datapipeline()
#' svc$activate_pipeline(
#'   pipelineId = "string"
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#'  \link[=datapipeline_activate_pipeline]{activate_pipeline} \tab Validates the specified pipeline and starts processing pipeline tasks\cr
#'  \link[=datapipeline_add_tags]{add_tags} \tab Adds or modifies tags for the specified pipeline\cr
#'  \link[=datapipeline_create_pipeline]{create_pipeline} \tab Creates a new, empty pipeline\cr
#'  \link[=datapipeline_deactivate_pipeline]{deactivate_pipeline} \tab Deactivates the specified running pipeline\cr
#'  \link[=datapipeline_delete_pipeline]{delete_pipeline} \tab Deletes a pipeline, its pipeline definition, and its run history\cr
#'  \link[=datapipeline_describe_objects]{describe_objects} \tab Gets the object definitions for a set of objects associated with the pipeline\cr
#'  \link[=datapipeline_describe_pipelines]{describe_pipelines} \tab Retrieves metadata about one or more pipelines\cr
#'  \link[=datapipeline_evaluate_expression]{evaluate_expression} \tab Task runners call EvaluateExpression to evaluate a string in the context of the specified object\cr
#'  \link[=datapipeline_get_pipeline_definition]{get_pipeline_definition} \tab Gets the definition of the specified pipeline\cr
#'  \link[=datapipeline_list_pipelines]{list_pipelines} \tab Lists the pipeline identifiers for all active pipelines that you have permission to access\cr
#'  \link[=datapipeline_poll_for_task]{poll_for_task} \tab Task runners call PollForTask to receive a task to perform from AWS Data Pipeline\cr
#'  \link[=datapipeline_put_pipeline_definition]{put_pipeline_definition} \tab Adds tasks, schedules, and preconditions to the specified pipeline\cr
#'  \link[=datapipeline_query_objects]{query_objects} \tab Queries the specified pipeline for the names of objects that match the specified set of conditions\cr
#'  \link[=datapipeline_remove_tags]{remove_tags} \tab Removes existing tags from the specified pipeline\cr
#'  \link[=datapipeline_report_task_progress]{report_task_progress} \tab Task runners call ReportTaskProgress when assigned a task to acknowledge that it has the task\cr
#'  \link[=datapipeline_report_task_runner_heartbeat]{report_task_runner_heartbeat} \tab Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational\cr
#'  \link[=datapipeline_set_status]{set_status} \tab Requests that the status of the specified physical or logical pipeline objects be updated in the specified pipeline\cr
#'  \link[=datapipeline_set_task_status]{set_task_status} \tab Task runners call SetTaskStatus to notify AWS Data Pipeline that a task is completed and provide information about the final status\cr
#'  \link[=datapipeline_validate_pipeline_definition]{validate_pipeline_definition} \tab Validates the specified pipeline definition to ensure that it is well formed and can be run without error
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname datapipeline
#' @export
datapipeline <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Merge the credentials/endpoint/region shorthand arguments into the
  # configuration list.
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  # Attach the merged configuration to the service's operation functions.
  svc <- .datapipeline$operations
  svc <- set_config(svc, config)
  return(svc)
}

# Private API objects: metadata, handlers, interfaces, etc.
.datapipeline <- list()

.datapipeline$operations <- list()

.datapipeline$metadata <- list(
  service_name = "datapipeline",
  # "{region}" in each endpoint template is replaced with the client's
  # configured region.
  endpoints = list(
    "*" = list(endpoint = "datapipeline.{region}.amazonaws.com", global = FALSE),
    "cn-*" = list(endpoint = "datapipeline.{region}.amazonaws.com.cn", global = FALSE),
    "us-iso-*" = list(endpoint = "datapipeline.{region}.c2s.ic.gov", global = FALSE),
    "us-isob-*" = list(endpoint = "datapipeline.{region}.sc2s.sgov.gov", global = FALSE)
  ),
  service_id = "Data Pipeline",
  api_version = "2012-10-29",
  signing_name = "datapipeline",
  json_version = "1.1",
  target_prefix = "DataPipeline"
)

.datapipeline$service <- function(config = list()) {
  # AWS Data Pipeline is a JSON-RPC service whose requests are signed with
  # AWS Signature Version 4.
  handlers <- new_handlers("jsonrpc", "v4")
  new_service(.datapipeline$metadata, handlers, config)
}
