Get Job Run

emrserverless_get_job_run R Documentation

Displays detailed information about a job run

Description

Displays detailed information about a job run.

Usage

emrserverless_get_job_run(applicationId, jobRunId, attempt)

Arguments

applicationId

[required] The ID of the application to which the job run was submitted.

jobRunId

[required] The ID of the job run.

attempt

An optional parameter that indicates the attempt number of the job run. If not specified, this value defaults to the latest attempt of the job run.
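
For example, omitting attempt retrieves the latest attempt, while passing a number selects a specific one. A minimal sketch; the IDs below are placeholders:

# Latest attempt of the job run
svc$get_job_run(
  applicationId = "00fabcdexample",
  jobRunId = "00fabcdjobrun"
)

# A specific earlier attempt
svc$get_job_run(
  applicationId = "00fabcdexample",
  jobRunId = "00fabcdjobrun",
  attempt = 2
)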

Value

A list with the following syntax:

list(
  jobRun = list(
    applicationId = "string",
    jobRunId = "string",
    name = "string",
    arn = "string",
    createdBy = "string",
    createdAt = as.POSIXct(
      "2015-01-01"
    ),
    updatedAt = as.POSIXct(
      "2015-01-01"
    ),
    executionRole = "string",
    state = "SUBMITTED"|"PENDING"|"SCHEDULED"|"RUNNING"|"SUCCESS"|"FAILED"|"CANCELLING"|"CANCELLED",
    stateDetails = "string",
    releaseLabel = "string",
    configurationOverrides = list(
      applicationConfiguration = list(
        list(
          classification = "string",
          properties = list(
            "string"
          ),
          configurations = list()
        )
      ),
      monitoringConfiguration = list(
        s3MonitoringConfiguration = list(
          logUri = "string",
          encryptionKeyArn = "string"
        ),
        managedPersistenceMonitoringConfiguration = list(
          enabled = TRUE|FALSE,
          encryptionKeyArn = "string"
        ),
        cloudWatchLoggingConfiguration = list(
          enabled = TRUE|FALSE,
          logGroupName = "string",
          logStreamNamePrefix = "string",
          encryptionKeyArn = "string",
          logTypes = list(
            list(
              "string"
            )
          )
        ),
        prometheusMonitoringConfiguration = list(
          remoteWriteUrl = "string"
        )
      )
    ),
    jobDriver = list(
      sparkSubmit = list(
        entryPoint = "string",
        entryPointArguments = list(
          "string"
        ),
        sparkSubmitParameters = "string"
      ),
      hive = list(
        query = "string",
        initQueryFile = "string",
        parameters = "string"
      )
    ),
    tags = list(
      "string"
    ),
    totalResourceUtilization = list(
      vCPUHour = 123.0,
      memoryGBHour = 123.0,
      storageGBHour = 123.0
    ),
    networkConfiguration = list(
      subnetIds = list(
        "string"
      ),
      securityGroupIds = list(
        "string"
      )
    ),
    totalExecutionDurationSeconds = 123,
    executionTimeoutMinutes = 123,
    billedResourceUtilization = list(
      vCPUHour = 123.0,
      memoryGBHour = 123.0,
      storageGBHour = 123.0
    ),
    mode = "BATCH"|"STREAMING",
    retryPolicy = list(
      maxAttempts = 123,
      maxFailedAttemptsPerHour = 123
    ),
    attempt = 123,
    attemptCreatedAt = as.POSIXct(
      "2015-01-01"
    ),
    attemptUpdatedAt = as.POSIXct(
      "2015-01-01"
    )
  )
)
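
The returned object is a nested named list, so individual fields can be read with $. A minimal sketch, assuming resp holds the result of a successful call:

resp$jobRun$state                                  # e.g. "SUCCESS" or "FAILED"
resp$jobRun$billedResourceUtilization$vCPUHour     # billed vCPU-hours for the run
resp$jobRun$attempt                                # which attempt this result describes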

Request syntax

svc$get_job_run(
  applicationId = "string",
  jobRunId = "string",
  attempt = 123
)
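
Examples

A minimal sketch, not a definitive example: it assumes svc was created with the paws::emrserverless() client constructor, and the application and job run IDs are placeholders.

## Not run:
svc <- paws::emrserverless()
resp <- svc$get_job_run(
  applicationId = "00fabcdexample",
  jobRunId = "00fabcdjobrun"
)
resp$jobRun$state
## End(Not run)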