glue

package module
v0.26.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 30, 2020 License: Apache-2.0 Imports: 27 Imported by: 0

Documentation

Index

Constants

View Source
const ServiceAPIVersion = "2017-03-31"
View Source
const ServiceID = "Glue"

Variables

This section is empty.

Functions

func AddResolveEndpointMiddleware

func AddResolveEndpointMiddleware(stack *middleware.Stack, options ResolveEndpointMiddlewareOptions)

func NewDefaultEndpointResolver

func NewDefaultEndpointResolver() *internalendpoints.Resolver

NewDefaultEndpointResolver constructs a new service endpoint resolver

func RemoveResolveEndpointMiddleware

func RemoveResolveEndpointMiddleware(stack *middleware.Stack) error

Types

type BatchCreatePartitionInput

type BatchCreatePartitionInput struct {
	// The ID of the catalog in which the partition is to be created. Currently, this
	// should be the AWS account ID.
	CatalogId *string
	// The name of the metadata database in which the partition is to be created.
	DatabaseName *string
	// The name of the metadata table in which the partition is to be created.
	TableName *string
	// A list of PartitionInput structures that define the partitions to be created.
	PartitionInputList []*types.PartitionInput
}

type BatchCreatePartitionOutput

type BatchCreatePartitionOutput struct {
	// The errors encountered when trying to create the requested partitions.
	Errors []*types.PartitionError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchDeleteConnectionInput

type BatchDeleteConnectionInput struct {
	// A list of names of the connections to delete.
	ConnectionNameList []*string
	// The ID of the Data Catalog in which the connections reside. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
}

type BatchDeleteConnectionOutput

type BatchDeleteConnectionOutput struct {
	// A map of the names of connections that were not successfully deleted to error
	// details.
	Errors map[string]*types.ErrorDetail
	// A list of names of the connection definitions that were successfully deleted.
	Succeeded []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchDeletePartitionInput

type BatchDeletePartitionInput struct {
	// The name of the catalog database in which the table in question resides.
	DatabaseName *string
	// The name of the table that contains the partitions to be deleted.
	TableName *string
	// A list of PartitionInput structures that define the partitions to be deleted.
	PartitionsToDelete []*types.PartitionValueList
	// The ID of the Data Catalog where the partition to be deleted resides. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
}

type BatchDeletePartitionOutput

type BatchDeletePartitionOutput struct {
	// The errors encountered when trying to delete the requested partitions.
	Errors []*types.PartitionError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchDeleteTableInput

type BatchDeleteTableInput struct {
	// A list of the tables to delete.
	TablesToDelete []*string
	// The name of the catalog database in which the tables to delete reside. For Hive
	// compatibility, this name is entirely lowercase.
	DatabaseName *string
	// The ID of the Data Catalog where the table resides. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
}

type BatchDeleteTableOutput

type BatchDeleteTableOutput struct {
	// A list of errors encountered in attempting to delete the specified tables.
	Errors []*types.TableError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchDeleteTableVersionInput

type BatchDeleteTableVersionInput struct {
	// The database in the catalog in which the table resides. For Hive compatibility,
	// this name is entirely lowercase.
	DatabaseName *string
	// A list of the IDs of versions to be deleted. A VersionId is a string
	// representation of an integer. Each version is incremented by 1.
	VersionIds []*string
	// The ID of the Data Catalog where the tables reside. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// The name of the table. For Hive compatibility, this name is entirely lowercase.
	TableName *string
}

type BatchDeleteTableVersionOutput

type BatchDeleteTableVersionOutput struct {
	// A list of errors encountered while trying to delete the specified table
	// versions.
	Errors []*types.TableVersionError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchGetCrawlersInput

type BatchGetCrawlersInput struct {
	// A list of crawler names, which might be the names returned from the ListCrawlers
	// operation.
	CrawlerNames []*string
}

type BatchGetCrawlersOutput

type BatchGetCrawlersOutput struct {
	// A list of crawler definitions.
	Crawlers []*types.Crawler
	// A list of names of crawlers that were not found.
	CrawlersNotFound []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchGetDevEndpointsInput

type BatchGetDevEndpointsInput struct {
	// The list of DevEndpoint names, which might be the names returned from the
	// ListDevEndpoints operation.
	DevEndpointNames []*string
}

type BatchGetDevEndpointsOutput

type BatchGetDevEndpointsOutput struct {
	// A list of DevEndpoint definitions.
	DevEndpoints []*types.DevEndpoint
	// A list of DevEndpoints not found.
	DevEndpointsNotFound []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchGetJobsInput

type BatchGetJobsInput struct {
	// A list of job names, which might be the names returned from the ListJobs
	// operation.
	JobNames []*string
}

type BatchGetJobsOutput

type BatchGetJobsOutput struct {
	// A list of names of jobs not found.
	JobsNotFound []*string
	// A list of job definitions.
	Jobs []*types.Job

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchGetPartitionInput

type BatchGetPartitionInput struct {
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
	// A list of partition values identifying the partitions to retrieve.
	PartitionsToGet []*types.PartitionValueList
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// The name of the partitions' table.
	TableName *string
}

type BatchGetPartitionOutput

type BatchGetPartitionOutput struct {
	// A list of the partition values in the request for which partitions were not
	// returned.
	UnprocessedKeys []*types.PartitionValueList
	// A list of the requested partitions.
	Partitions []*types.Partition

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchGetTriggersInput

type BatchGetTriggersInput struct {
	// A list of trigger names, which may be the names returned from the ListTriggers
	// operation.
	TriggerNames []*string
}

type BatchGetTriggersOutput

type BatchGetTriggersOutput struct {
	// A list of trigger definitions.
	Triggers []*types.Trigger
	// A list of names of triggers not found.
	TriggersNotFound []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchGetWorkflowsInput

type BatchGetWorkflowsInput struct {
	// A list of workflow names, which may be the names returned from the ListWorkflows
	// operation.
	Names []*string
	// Specifies whether to include a graph when returning the workflow resource
	// metadata.
	IncludeGraph *bool
}

type BatchGetWorkflowsOutput

type BatchGetWorkflowsOutput struct {
	// A list of workflow resource metadata.
	Workflows []*types.Workflow
	// A list of names of workflows not found.
	MissingWorkflows []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type BatchStopJobRunInput

type BatchStopJobRunInput struct {
	// The name of the job definition for which to stop job runs.
	JobName *string
	// A list of the JobRunIds that should be stopped for that job definition.
	JobRunIds []*string
}

type BatchStopJobRunOutput

type BatchStopJobRunOutput struct {
	// A list of the errors that were encountered in trying to stop JobRuns, including
	// the JobRunId for which each error was encountered and details about the error.
	Errors []*types.BatchStopJobRunError
	// A list of the JobRuns that were successfully submitted for stopping.
	SuccessfulSubmissions []*types.BatchStopJobRunSuccessfulSubmission

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CancelMLTaskRunInput

type CancelMLTaskRunInput struct {
	// The unique identifier of the machine learning transform.
	TransformId *string
	// A unique identifier for the task run.
	TaskRunId *string
}

type CancelMLTaskRunOutput

type CancelMLTaskRunOutput struct {
	// The unique identifier of the machine learning transform.
	TransformId *string
	// The unique identifier for the task run.
	TaskRunId *string
	// The status for this run.
	Status types.TaskStatusType

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type Client

type Client struct {
	// contains filtered or unexported fields
}

AWS Glue Defines the public endpoint for the AWS Glue service.

func New

func New(options Options, optFns ...func(*Options)) *Client

New returns an initialized Client based on the functional options. Provide additional functional options to further configure the behavior of the client, such as changing the client's endpoint or adding custom middleware behavior.

func NewFromConfig

func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client

NewFromConfig returns a new client from the provided config.

func (*Client) BatchCreatePartition

func (c *Client) BatchCreatePartition(ctx context.Context, params *BatchCreatePartitionInput, optFns ...func(*Options)) (*BatchCreatePartitionOutput, error)

Creates one or more partitions in a batch operation.

func (*Client) BatchDeleteConnection

func (c *Client) BatchDeleteConnection(ctx context.Context, params *BatchDeleteConnectionInput, optFns ...func(*Options)) (*BatchDeleteConnectionOutput, error)

Deletes a list of connection definitions from the Data Catalog.

func (*Client) BatchDeletePartition

func (c *Client) BatchDeletePartition(ctx context.Context, params *BatchDeletePartitionInput, optFns ...func(*Options)) (*BatchDeletePartitionOutput, error)

Deletes one or more partitions in a batch operation.

func (*Client) BatchDeleteTable

func (c *Client) BatchDeleteTable(ctx context.Context, params *BatchDeleteTableInput, optFns ...func(*Options)) (*BatchDeleteTableOutput, error)

Deletes multiple tables at once. After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service. To ensure the immediate deletion of all related resources, before calling BatchDeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

func (*Client) BatchDeleteTableVersion

func (c *Client) BatchDeleteTableVersion(ctx context.Context, params *BatchDeleteTableVersionInput, optFns ...func(*Options)) (*BatchDeleteTableVersionOutput, error)

Deletes a specified batch of versions of a table.

func (*Client) BatchGetCrawlers

func (c *Client) BatchGetCrawlers(ctx context.Context, params *BatchGetCrawlersInput, optFns ...func(*Options)) (*BatchGetCrawlersOutput, error)

Returns a list of resource metadata for a given list of crawler names. After calling the ListCrawlers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

func (*Client) BatchGetDevEndpoints

func (c *Client) BatchGetDevEndpoints(ctx context.Context, params *BatchGetDevEndpointsInput, optFns ...func(*Options)) (*BatchGetDevEndpointsOutput, error)

Returns a list of resource metadata for a given list of development endpoint names. After calling the ListDevEndpoints operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

func (*Client) BatchGetJobs

func (c *Client) BatchGetJobs(ctx context.Context, params *BatchGetJobsInput, optFns ...func(*Options)) (*BatchGetJobsOutput, error)

Returns a list of resource metadata for a given list of job names. After calling the ListJobs operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

func (*Client) BatchGetPartition

func (c *Client) BatchGetPartition(ctx context.Context, params *BatchGetPartitionInput, optFns ...func(*Options)) (*BatchGetPartitionOutput, error)

Retrieves partitions in a batch request.

func (*Client) BatchGetTriggers

func (c *Client) BatchGetTriggers(ctx context.Context, params *BatchGetTriggersInput, optFns ...func(*Options)) (*BatchGetTriggersOutput, error)

Returns a list of resource metadata for a given list of trigger names. After calling the ListTriggers operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

func (*Client) BatchGetWorkflows

func (c *Client) BatchGetWorkflows(ctx context.Context, params *BatchGetWorkflowsInput, optFns ...func(*Options)) (*BatchGetWorkflowsOutput, error)

Returns a list of resource metadata for a given list of workflow names. After calling the ListWorkflows operation, you can call this operation to access the data to which you have been granted permissions. This operation supports all IAM permissions, including permission conditions that use tags.

func (*Client) BatchStopJobRun

func (c *Client) BatchStopJobRun(ctx context.Context, params *BatchStopJobRunInput, optFns ...func(*Options)) (*BatchStopJobRunOutput, error)

Stops one or more job runs for a specified job definition.

func (*Client) CancelMLTaskRun

func (c *Client) CancelMLTaskRun(ctx context.Context, params *CancelMLTaskRunInput, optFns ...func(*Options)) (*CancelMLTaskRunOutput, error)

Cancels (stops) a task run. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can cancel a machine learning task run at any time by calling CancelMLTaskRun with a task run's parent transform's TransformID and the task run's TaskRunId.

func (*Client) CreateClassifier

func (c *Client) CreateClassifier(ctx context.Context, params *CreateClassifierInput, optFns ...func(*Options)) (*CreateClassifierOutput, error)

Creates a classifier in the user's account. This can be a GrokClassifier, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field of the request is present.

func (*Client) CreateConnection

func (c *Client) CreateConnection(ctx context.Context, params *CreateConnectionInput, optFns ...func(*Options)) (*CreateConnectionOutput, error)

Creates a connection definition in the Data Catalog.

func (*Client) CreateCrawler

func (c *Client) CreateCrawler(ctx context.Context, params *CreateCrawlerInput, optFns ...func(*Options)) (*CreateCrawlerOutput, error)

Creates a new crawler with specified targets, role, configuration, and optional schedule. At least one crawl target must be specified, in the s3Targets field, the jdbcTargets field, or the DynamoDBTargets field.

func (*Client) CreateDatabase

func (c *Client) CreateDatabase(ctx context.Context, params *CreateDatabaseInput, optFns ...func(*Options)) (*CreateDatabaseOutput, error)

Creates a new database in a Data Catalog.

func (*Client) CreateDevEndpoint

func (c *Client) CreateDevEndpoint(ctx context.Context, params *CreateDevEndpointInput, optFns ...func(*Options)) (*CreateDevEndpointOutput, error)

Creates a new development endpoint.

func (*Client) CreateJob

func (c *Client) CreateJob(ctx context.Context, params *CreateJobInput, optFns ...func(*Options)) (*CreateJobOutput, error)

Creates a new job definition.

func (*Client) CreateMLTransform

func (c *Client) CreateMLTransform(ctx context.Context, params *CreateMLTransformInput, optFns ...func(*Options)) (*CreateMLTransformOutput, error)

Creates an AWS Glue machine learning transform. This operation creates the transform and all the necessary parameters to train it. Call this operation as the first step in the process of using a machine learning transform (such as the FindMatches transform) for deduplicating data. You can provide an optional Description, in addition to the parameters that you want to use for your algorithm. You must also specify certain parameters for the tasks that AWS Glue runs on your behalf as part of learning from your data and creating a high-quality machine learning transform. These parameters include Role, and optionally, AllocatedCapacity, Timeout, and MaxRetries. For more information, see Jobs (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-jobs-job.html).

func (*Client) CreatePartition

func (c *Client) CreatePartition(ctx context.Context, params *CreatePartitionInput, optFns ...func(*Options)) (*CreatePartitionOutput, error)

Creates a new partition.

func (*Client) CreateScript

func (c *Client) CreateScript(ctx context.Context, params *CreateScriptInput, optFns ...func(*Options)) (*CreateScriptOutput, error)

Transforms a directed acyclic graph (DAG) into code.

func (*Client) CreateSecurityConfiguration

func (c *Client) CreateSecurityConfiguration(ctx context.Context, params *CreateSecurityConfigurationInput, optFns ...func(*Options)) (*CreateSecurityConfigurationOutput, error)

Creates a new security configuration. A security configuration is a set of security properties that can be used by AWS Glue. You can use a security configuration to encrypt data at rest. For information about using security configurations in AWS Glue, see Encrypting Data Written by Crawlers, Jobs, and Development Endpoints (https://docs.aws.amazon.com/glue/latest/dg/encryption-security-configuration.html).

func (*Client) CreateTable

func (c *Client) CreateTable(ctx context.Context, params *CreateTableInput, optFns ...func(*Options)) (*CreateTableOutput, error)

Creates a new table definition in the Data Catalog.

func (*Client) CreateTrigger

func (c *Client) CreateTrigger(ctx context.Context, params *CreateTriggerInput, optFns ...func(*Options)) (*CreateTriggerOutput, error)

Creates a new trigger.

func (*Client) CreateUserDefinedFunction

func (c *Client) CreateUserDefinedFunction(ctx context.Context, params *CreateUserDefinedFunctionInput, optFns ...func(*Options)) (*CreateUserDefinedFunctionOutput, error)

Creates a new function definition in the Data Catalog.

func (*Client) CreateWorkflow

func (c *Client) CreateWorkflow(ctx context.Context, params *CreateWorkflowInput, optFns ...func(*Options)) (*CreateWorkflowOutput, error)

Creates a new workflow.

func (*Client) DeleteClassifier

func (c *Client) DeleteClassifier(ctx context.Context, params *DeleteClassifierInput, optFns ...func(*Options)) (*DeleteClassifierOutput, error)

Removes a classifier from the Data Catalog.

func (*Client) DeleteColumnStatisticsForPartition

func (c *Client) DeleteColumnStatisticsForPartition(ctx context.Context, params *DeleteColumnStatisticsForPartitionInput, optFns ...func(*Options)) (*DeleteColumnStatisticsForPartitionOutput, error)

Deletes the partition column statistics of a column.

func (*Client) DeleteColumnStatisticsForTable

func (c *Client) DeleteColumnStatisticsForTable(ctx context.Context, params *DeleteColumnStatisticsForTableInput, optFns ...func(*Options)) (*DeleteColumnStatisticsForTableOutput, error)

Deletes table statistics of columns.

func (*Client) DeleteConnection

func (c *Client) DeleteConnection(ctx context.Context, params *DeleteConnectionInput, optFns ...func(*Options)) (*DeleteConnectionOutput, error)

Deletes a connection from the Data Catalog.

func (*Client) DeleteCrawler

func (c *Client) DeleteCrawler(ctx context.Context, params *DeleteCrawlerInput, optFns ...func(*Options)) (*DeleteCrawlerOutput, error)

Removes a specified crawler from the AWS Glue Data Catalog, unless the crawler state is RUNNING.

func (*Client) DeleteDatabase

func (c *Client) DeleteDatabase(ctx context.Context, params *DeleteDatabaseInput, optFns ...func(*Options)) (*DeleteDatabaseOutput, error)

Removes a specified database from a Data Catalog. After completing this operation, you no longer have access to the tables (and all table versions and partitions that might belong to the tables) and the user-defined functions in the deleted database. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service. To ensure the immediate deletion of all related resources, before calling DeleteDatabase, use DeleteTableVersion or BatchDeleteTableVersion, DeletePartition or BatchDeletePartition, DeleteUserDefinedFunction, and DeleteTable or BatchDeleteTable, to delete any resources that belong to the database.

func (*Client) DeleteDevEndpoint

func (c *Client) DeleteDevEndpoint(ctx context.Context, params *DeleteDevEndpointInput, optFns ...func(*Options)) (*DeleteDevEndpointOutput, error)

Deletes a specified development endpoint.

func (*Client) DeleteJob

func (c *Client) DeleteJob(ctx context.Context, params *DeleteJobInput, optFns ...func(*Options)) (*DeleteJobOutput, error)

Deletes a specified job definition. If the job definition is not found, no exception is thrown.

func (*Client) DeleteMLTransform

func (c *Client) DeleteMLTransform(ctx context.Context, params *DeleteMLTransformInput, optFns ...func(*Options)) (*DeleteMLTransformOutput, error)

Deletes an AWS Glue machine learning transform. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. If you no longer need a transform, you can delete it by calling DeleteMLTransforms. However, any AWS Glue jobs that still reference the deleted transform will no longer succeed.

func (*Client) DeletePartition

func (c *Client) DeletePartition(ctx context.Context, params *DeletePartitionInput, optFns ...func(*Options)) (*DeletePartitionOutput, error)

Deletes a specified partition.

func (*Client) DeleteResourcePolicy

func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error)

Deletes a specified policy.

func (*Client) DeleteSecurityConfiguration

func (c *Client) DeleteSecurityConfiguration(ctx context.Context, params *DeleteSecurityConfigurationInput, optFns ...func(*Options)) (*DeleteSecurityConfigurationOutput, error)

Deletes a specified security configuration.

func (*Client) DeleteTable

func (c *Client) DeleteTable(ctx context.Context, params *DeleteTableInput, optFns ...func(*Options)) (*DeleteTableOutput, error)

Removes a table definition from the Data Catalog. After completing this operation, you no longer have access to the table versions and partitions that belong to the deleted table. AWS Glue deletes these "orphaned" resources asynchronously in a timely manner, at the discretion of the service. To ensure the immediate deletion of all related resources, before calling DeleteTable, use DeleteTableVersion or BatchDeleteTableVersion, and DeletePartition or BatchDeletePartition, to delete any resources that belong to the table.

func (*Client) DeleteTableVersion

func (c *Client) DeleteTableVersion(ctx context.Context, params *DeleteTableVersionInput, optFns ...func(*Options)) (*DeleteTableVersionOutput, error)

Deletes a specified version of a table.

func (*Client) DeleteTrigger

func (c *Client) DeleteTrigger(ctx context.Context, params *DeleteTriggerInput, optFns ...func(*Options)) (*DeleteTriggerOutput, error)

Deletes a specified trigger. If the trigger is not found, no exception is thrown.

func (*Client) DeleteUserDefinedFunction

func (c *Client) DeleteUserDefinedFunction(ctx context.Context, params *DeleteUserDefinedFunctionInput, optFns ...func(*Options)) (*DeleteUserDefinedFunctionOutput, error)

Deletes an existing function definition from the Data Catalog.

func (*Client) DeleteWorkflow

func (c *Client) DeleteWorkflow(ctx context.Context, params *DeleteWorkflowInput, optFns ...func(*Options)) (*DeleteWorkflowOutput, error)

Deletes a workflow.

func (*Client) GetCatalogImportStatus

func (c *Client) GetCatalogImportStatus(ctx context.Context, params *GetCatalogImportStatusInput, optFns ...func(*Options)) (*GetCatalogImportStatusOutput, error)

Retrieves the status of a migration operation.

func (*Client) GetClassifier

func (c *Client) GetClassifier(ctx context.Context, params *GetClassifierInput, optFns ...func(*Options)) (*GetClassifierOutput, error)

Retrieves a classifier by name.

func (*Client) GetClassifiers

func (c *Client) GetClassifiers(ctx context.Context, params *GetClassifiersInput, optFns ...func(*Options)) (*GetClassifiersOutput, error)

Lists all classifier objects in the Data Catalog.

func (*Client) GetColumnStatisticsForPartition

func (c *Client) GetColumnStatisticsForPartition(ctx context.Context, params *GetColumnStatisticsForPartitionInput, optFns ...func(*Options)) (*GetColumnStatisticsForPartitionOutput, error)

Retrieves partition statistics of columns.

func (*Client) GetColumnStatisticsForTable

func (c *Client) GetColumnStatisticsForTable(ctx context.Context, params *GetColumnStatisticsForTableInput, optFns ...func(*Options)) (*GetColumnStatisticsForTableOutput, error)

Retrieves table statistics of columns.

func (*Client) GetConnection

func (c *Client) GetConnection(ctx context.Context, params *GetConnectionInput, optFns ...func(*Options)) (*GetConnectionOutput, error)

Retrieves a connection definition from the Data Catalog.

func (*Client) GetConnections

func (c *Client) GetConnections(ctx context.Context, params *GetConnectionsInput, optFns ...func(*Options)) (*GetConnectionsOutput, error)

Retrieves a list of connection definitions from the Data Catalog.

func (*Client) GetCrawler

func (c *Client) GetCrawler(ctx context.Context, params *GetCrawlerInput, optFns ...func(*Options)) (*GetCrawlerOutput, error)

Retrieves metadata for a specified crawler.

func (*Client) GetCrawlerMetrics

func (c *Client) GetCrawlerMetrics(ctx context.Context, params *GetCrawlerMetricsInput, optFns ...func(*Options)) (*GetCrawlerMetricsOutput, error)

Retrieves metrics about specified crawlers.

func (*Client) GetCrawlers

func (c *Client) GetCrawlers(ctx context.Context, params *GetCrawlersInput, optFns ...func(*Options)) (*GetCrawlersOutput, error)

Retrieves metadata for all crawlers defined in the customer account.

func (*Client) GetDataCatalogEncryptionSettings

func (c *Client) GetDataCatalogEncryptionSettings(ctx context.Context, params *GetDataCatalogEncryptionSettingsInput, optFns ...func(*Options)) (*GetDataCatalogEncryptionSettingsOutput, error)

Retrieves the security configuration for a specified catalog.

func (*Client) GetDatabase

func (c *Client) GetDatabase(ctx context.Context, params *GetDatabaseInput, optFns ...func(*Options)) (*GetDatabaseOutput, error)

Retrieves the definition of a specified database.

func (*Client) GetDatabases

func (c *Client) GetDatabases(ctx context.Context, params *GetDatabasesInput, optFns ...func(*Options)) (*GetDatabasesOutput, error)

Retrieves all databases defined in a given Data Catalog.

func (*Client) GetDataflowGraph

func (c *Client) GetDataflowGraph(ctx context.Context, params *GetDataflowGraphInput, optFns ...func(*Options)) (*GetDataflowGraphOutput, error)

Transforms a Python script into a directed acyclic graph (DAG).

func (*Client) GetDevEndpoint

func (c *Client) GetDevEndpoint(ctx context.Context, params *GetDevEndpointInput, optFns ...func(*Options)) (*GetDevEndpointOutput, error)

Retrieves information about a specified development endpoint. When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address, and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

func (*Client) GetDevEndpoints

func (c *Client) GetDevEndpoints(ctx context.Context, params *GetDevEndpointsInput, optFns ...func(*Options)) (*GetDevEndpointsOutput, error)

Retrieves all the development endpoints in this AWS account. When you create a development endpoint in a virtual private cloud (VPC), AWS Glue returns only a private IP address and the public IP address field is not populated. When you create a non-VPC development endpoint, AWS Glue returns only a public IP address.

func (*Client) GetJob

func (c *Client) GetJob(ctx context.Context, params *GetJobInput, optFns ...func(*Options)) (*GetJobOutput, error)

Retrieves an existing job definition.

func (*Client) GetJobBookmark

func (c *Client) GetJobBookmark(ctx context.Context, params *GetJobBookmarkInput, optFns ...func(*Options)) (*GetJobBookmarkOutput, error)

Returns information on a job bookmark entry.

func (*Client) GetJobRun

func (c *Client) GetJobRun(ctx context.Context, params *GetJobRunInput, optFns ...func(*Options)) (*GetJobRunOutput, error)

Retrieves the metadata for a given job run.

func (*Client) GetJobRuns

func (c *Client) GetJobRuns(ctx context.Context, params *GetJobRunsInput, optFns ...func(*Options)) (*GetJobRunsOutput, error)

Retrieves metadata for all runs of a given job definition.

func (*Client) GetJobs

func (c *Client) GetJobs(ctx context.Context, params *GetJobsInput, optFns ...func(*Options)) (*GetJobsOutput, error)

Retrieves all current job definitions.

func (*Client) GetMLTaskRun

func (c *Client) GetMLTaskRun(ctx context.Context, params *GetMLTaskRunInput, optFns ...func(*Options)) (*GetMLTaskRunOutput, error)

Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can check the status of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform's TransformID.

func (*Client) GetMLTaskRuns

func (c *Client) GetMLTaskRuns(ctx context.Context, params *GetMLTaskRunsInput, optFns ...func(*Options)) (*GetMLTaskRunsOutput, error)

Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that AWS Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform's TransformID and other optional parameters as documented in this section. This operation returns a list of historic runs and must be paginated.

func (*Client) GetMLTransform

func (c *Client) GetMLTransform(ctx context.Context, params *GetMLTransformInput, optFns ...func(*Options)) (*GetMLTransformOutput, error)

Gets an AWS Glue machine learning transform artifact and all its corresponding metadata. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue. You can retrieve their metadata by calling GetMLTransform.

func (*Client) GetMLTransforms

func (c *Client) GetMLTransforms(ctx context.Context, params *GetMLTransformsInput, optFns ...func(*Options)) (*GetMLTransformsOutput, error)

Gets a sortable, filterable list of existing AWS Glue machine learning transforms. Machine learning transforms are a special type of transform that use machine learning to learn the details of the transformation to be performed by learning from examples provided by humans. These transformations are then saved by AWS Glue, and you can retrieve their metadata by calling GetMLTransforms.

func (*Client) GetMapping

func (c *Client) GetMapping(ctx context.Context, params *GetMappingInput, optFns ...func(*Options)) (*GetMappingOutput, error)

Creates mappings.

func (*Client) GetPartition

func (c *Client) GetPartition(ctx context.Context, params *GetPartitionInput, optFns ...func(*Options)) (*GetPartitionOutput, error)

Retrieves information about a specified partition.

func (*Client) GetPartitions

func (c *Client) GetPartitions(ctx context.Context, params *GetPartitionsInput, optFns ...func(*Options)) (*GetPartitionsOutput, error)

Retrieves information about the partitions in a table.

func (*Client) GetPlan

func (c *Client) GetPlan(ctx context.Context, params *GetPlanInput, optFns ...func(*Options)) (*GetPlanOutput, error)

Gets code to perform a specified mapping.

func (*Client) GetResourcePolicies

func (c *Client) GetResourcePolicies(ctx context.Context, params *GetResourcePoliciesInput, optFns ...func(*Options)) (*GetResourcePoliciesOutput, error)

Retrieves the security configurations for the resource policies set on individual resources, and also the account-level policy. This operation also returns the Data Catalog resource policy. However, if you enabled metadata encryption in Data Catalog settings, and you do not have permission on the AWS KMS key, the operation can't return the Data Catalog resource policy.

func (*Client) GetResourcePolicy

func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error)

Retrieves a specified resource policy.

func (*Client) GetSecurityConfiguration

func (c *Client) GetSecurityConfiguration(ctx context.Context, params *GetSecurityConfigurationInput, optFns ...func(*Options)) (*GetSecurityConfigurationOutput, error)

Retrieves a specified security configuration.

func (*Client) GetSecurityConfigurations

func (c *Client) GetSecurityConfigurations(ctx context.Context, params *GetSecurityConfigurationsInput, optFns ...func(*Options)) (*GetSecurityConfigurationsOutput, error)

Retrieves a list of all security configurations.

func (*Client) GetTable

func (c *Client) GetTable(ctx context.Context, params *GetTableInput, optFns ...func(*Options)) (*GetTableOutput, error)

Retrieves the Table definition in a Data Catalog for a specified table.

func (*Client) GetTableVersion

func (c *Client) GetTableVersion(ctx context.Context, params *GetTableVersionInput, optFns ...func(*Options)) (*GetTableVersionOutput, error)

Retrieves a specified version of a table.

func (*Client) GetTableVersions

func (c *Client) GetTableVersions(ctx context.Context, params *GetTableVersionsInput, optFns ...func(*Options)) (*GetTableVersionsOutput, error)

Retrieves a list of strings that identify available versions of a specified table.

func (*Client) GetTables

func (c *Client) GetTables(ctx context.Context, params *GetTablesInput, optFns ...func(*Options)) (*GetTablesOutput, error)

Retrieves the definitions of some or all of the tables in a given Database.

func (*Client) GetTags

func (c *Client) GetTags(ctx context.Context, params *GetTagsInput, optFns ...func(*Options)) (*GetTagsOutput, error)

Retrieves a list of tags associated with a resource.

func (*Client) GetTrigger

func (c *Client) GetTrigger(ctx context.Context, params *GetTriggerInput, optFns ...func(*Options)) (*GetTriggerOutput, error)

Retrieves the definition of a trigger.

func (*Client) GetTriggers

func (c *Client) GetTriggers(ctx context.Context, params *GetTriggersInput, optFns ...func(*Options)) (*GetTriggersOutput, error)

Gets all the triggers associated with a job.

func (*Client) GetUserDefinedFunction

func (c *Client) GetUserDefinedFunction(ctx context.Context, params *GetUserDefinedFunctionInput, optFns ...func(*Options)) (*GetUserDefinedFunctionOutput, error)

Retrieves a specified function definition from the Data Catalog.

func (*Client) GetUserDefinedFunctions

func (c *Client) GetUserDefinedFunctions(ctx context.Context, params *GetUserDefinedFunctionsInput, optFns ...func(*Options)) (*GetUserDefinedFunctionsOutput, error)

Retrieves multiple function definitions from the Data Catalog.

func (*Client) GetWorkflow

func (c *Client) GetWorkflow(ctx context.Context, params *GetWorkflowInput, optFns ...func(*Options)) (*GetWorkflowOutput, error)

Retrieves resource metadata for a workflow.

func (*Client) GetWorkflowRun

func (c *Client) GetWorkflowRun(ctx context.Context, params *GetWorkflowRunInput, optFns ...func(*Options)) (*GetWorkflowRunOutput, error)

Retrieves the metadata for a given workflow run.

func (*Client) GetWorkflowRunProperties

func (c *Client) GetWorkflowRunProperties(ctx context.Context, params *GetWorkflowRunPropertiesInput, optFns ...func(*Options)) (*GetWorkflowRunPropertiesOutput, error)

Retrieves the workflow run properties which were set during the run.

func (*Client) GetWorkflowRuns

func (c *Client) GetWorkflowRuns(ctx context.Context, params *GetWorkflowRunsInput, optFns ...func(*Options)) (*GetWorkflowRunsOutput, error)

Retrieves metadata for all runs of a given workflow.

func (*Client) ImportCatalogToGlue

func (c *Client) ImportCatalogToGlue(ctx context.Context, params *ImportCatalogToGlueInput, optFns ...func(*Options)) (*ImportCatalogToGlueOutput, error)

Imports an existing Amazon Athena Data Catalog to AWS Glue.

func (*Client) ListCrawlers

func (c *Client) ListCrawlers(ctx context.Context, params *ListCrawlersInput, optFns ...func(*Options)) (*ListCrawlersOutput, error)

Retrieves the names of all crawler resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names. This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

func (*Client) ListDevEndpoints

func (c *Client) ListDevEndpoints(ctx context.Context, params *ListDevEndpointsInput, optFns ...func(*Options)) (*ListDevEndpointsOutput, error)

Retrieves the names of all DevEndpoint resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names. This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

func (*Client) ListJobs

func (c *Client) ListJobs(ctx context.Context, params *ListJobsInput, optFns ...func(*Options)) (*ListJobsOutput, error)

Retrieves the names of all job resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names. This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

func (*Client) ListMLTransforms

func (c *Client) ListMLTransforms(ctx context.Context, params *ListMLTransformsInput, optFns ...func(*Options)) (*ListMLTransformsOutput, error)

Retrieves a sortable, filterable list of existing AWS Glue machine learning transforms in this AWS account, or the resources with the specified tag. This operation takes the optional Tags field, which you can use as a filter of the responses so that tagged resources can be retrieved as a group. If you choose to use tag filtering, only resources with the tags are retrieved.

func (*Client) ListTriggers

func (c *Client) ListTriggers(ctx context.Context, params *ListTriggersInput, optFns ...func(*Options)) (*ListTriggersOutput, error)

Retrieves the names of all trigger resources in this AWS account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names. This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

func (*Client) ListWorkflows

func (c *Client) ListWorkflows(ctx context.Context, params *ListWorkflowsInput, optFns ...func(*Options)) (*ListWorkflowsOutput, error)

Lists names of workflows created in the account.

func (*Client) PutDataCatalogEncryptionSettings

func (c *Client) PutDataCatalogEncryptionSettings(ctx context.Context, params *PutDataCatalogEncryptionSettingsInput, optFns ...func(*Options)) (*PutDataCatalogEncryptionSettingsOutput, error)

Sets the security configuration for a specified catalog. After the configuration has been set, the specified encryption is applied to every catalog write thereafter.

func (*Client) PutResourcePolicy

func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error)

Sets the Data Catalog resource policy for access control.

func (*Client) PutWorkflowRunProperties

func (c *Client) PutWorkflowRunProperties(ctx context.Context, params *PutWorkflowRunPropertiesInput, optFns ...func(*Options)) (*PutWorkflowRunPropertiesOutput, error)

Puts the specified workflow run properties for the given workflow run. If a property already exists for the specified run, then it overrides the value; otherwise, it adds the property to the existing properties.

func (*Client) ResetJobBookmark

func (c *Client) ResetJobBookmark(ctx context.Context, params *ResetJobBookmarkInput, optFns ...func(*Options)) (*ResetJobBookmarkOutput, error)

Resets a bookmark entry.

func (*Client) ResumeWorkflowRun

func (c *Client) ResumeWorkflowRun(ctx context.Context, params *ResumeWorkflowRunInput, optFns ...func(*Options)) (*ResumeWorkflowRunOutput, error)

Restarts any completed nodes in a workflow run and resumes the run execution.

func (*Client) SearchTables

func (c *Client) SearchTables(ctx context.Context, params *SearchTablesInput, optFns ...func(*Options)) (*SearchTablesOutput, error)

Searches a set of tables based on properties in the table metadata as well as on the parent database. You can search against text or filter conditions. You can only get tables that you have access to based on the security policies defined in Lake Formation. You need at least a read-only access to the table for it to be returned. If you do not have access to all the columns in the table, these columns will not be searched against when returning the list of tables back to you. If you have access to the columns but not the data in the columns, those columns and the associated metadata for those columns will be included in the search.

func (*Client) StartCrawler

func (c *Client) StartCrawler(ctx context.Context, params *StartCrawlerInput, optFns ...func(*Options)) (*StartCrawlerOutput, error)

Starts a crawl using the specified crawler, regardless of what is scheduled. If the crawler is already running, returns a CrawlerRunningException (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-exceptions.html#aws-glue-api-exceptions-CrawlerRunningException).

func (*Client) StartCrawlerSchedule

func (c *Client) StartCrawlerSchedule(ctx context.Context, params *StartCrawlerScheduleInput, optFns ...func(*Options)) (*StartCrawlerScheduleOutput, error)

Changes the schedule state of the specified crawler to SCHEDULED, unless the crawler is already running or the schedule state is already SCHEDULED.

func (*Client) StartExportLabelsTaskRun

func (c *Client) StartExportLabelsTaskRun(ctx context.Context, params *StartExportLabelsTaskRunInput, optFns ...func(*Options)) (*StartExportLabelsTaskRunOutput, error)

Begins an asynchronous task to export all labeled data for a particular transform. This task is the only label-related API call that is not part of the typical active learning workflow. You typically use StartExportLabelsTaskRun when you want to work with all of your existing labels at the same time, such as when you want to remove or change labels that were previously submitted as truth. This API operation accepts the TransformId whose labels you want to export and an Amazon Simple Storage Service (Amazon S3) path to export the labels to. The operation returns a TaskRunId. You can check on the status of your task run by calling the GetMLTaskRun API.

func (*Client) StartImportLabelsTaskRun

func (c *Client) StartImportLabelsTaskRun(ctx context.Context, params *StartImportLabelsTaskRunInput, optFns ...func(*Options)) (*StartImportLabelsTaskRunOutput, error)

Enables you to provide additional labels (examples of truth) to be used to teach the machine learning transform and improve its quality. This API operation is generally used as part of the active learning workflow that starts with the StartMLLabelingSetGenerationTaskRun call and that ultimately results in improving the quality of your machine learning transform. After the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue machine learning will have generated a series of questions for humans to answer. (Answering these questions is often called 'labeling' in the machine learning workflows). In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?" After the labeling process is finished, users upload their answers/labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform use the new and improved labels and perform a higher-quality transformation. By default, StartMLLabelingSetGenerationTaskRun continually learns from and combines all labels that you upload unless you set Replace to true. If you set Replace to true, StartImportLabelsTaskRun deletes and forgets all previously uploaded labels and learns only from the exact set that you upload. Replacing labels can be helpful if you realize that you previously uploaded incorrect labels, and you believe that they are having a negative effect on your transform quality. You can check on the status of your task run by calling the GetMLTaskRun operation.

func (*Client) StartJobRun

func (c *Client) StartJobRun(ctx context.Context, params *StartJobRunInput, optFns ...func(*Options)) (*StartJobRunOutput, error)

Starts a job run using a job definition.

func (*Client) StartMLEvaluationTaskRun

func (c *Client) StartMLEvaluationTaskRun(ctx context.Context, params *StartMLEvaluationTaskRunInput, optFns ...func(*Options)) (*StartMLEvaluationTaskRunOutput, error)

Starts a task to estimate the quality of the transform. When you provide label sets as examples of truth, AWS Glue machine learning uses some of those examples to learn from them. The rest of the labels are used as a test to estimate quality. Returns a unique identifier for the run. You can call GetMLTaskRun to get more information about the status of the EvaluationTaskRun.

func (*Client) StartMLLabelingSetGenerationTaskRun

func (c *Client) StartMLLabelingSetGenerationTaskRun(ctx context.Context, params *StartMLLabelingSetGenerationTaskRunInput, optFns ...func(*Options)) (*StartMLLabelingSetGenerationTaskRunOutput, error)

Starts the active learning workflow for your machine learning transform to improve the transform's quality by generating label sets and adding labels. When the StartMLLabelingSetGenerationTaskRun finishes, AWS Glue will have generated a "labeling set" or a set of questions for humans to answer. In the case of the FindMatches transform, these questions are of the form, "What is the correct way to group these rows together into groups composed entirely of matching records?" After the labeling process is finished, you can upload your labels with a call to StartImportLabelsTaskRun. After StartImportLabelsTaskRun finishes, all future runs of the machine learning transform will use the new and improved labels and perform a higher-quality transformation.

func (*Client) StartTrigger

func (c *Client) StartTrigger(ctx context.Context, params *StartTriggerInput, optFns ...func(*Options)) (*StartTriggerOutput, error)

Starts an existing trigger. See Triggering Jobs (https://docs.aws.amazon.com/glue/latest/dg/trigger-job.html) for information about how different types of trigger are started.

func (*Client) StartWorkflowRun

func (c *Client) StartWorkflowRun(ctx context.Context, params *StartWorkflowRunInput, optFns ...func(*Options)) (*StartWorkflowRunOutput, error)

Starts a new run of the specified workflow.

func (*Client) StopCrawler

func (c *Client) StopCrawler(ctx context.Context, params *StopCrawlerInput, optFns ...func(*Options)) (*StopCrawlerOutput, error)

If the specified crawler is running, stops the crawl.

func (*Client) StopCrawlerSchedule

func (c *Client) StopCrawlerSchedule(ctx context.Context, params *StopCrawlerScheduleInput, optFns ...func(*Options)) (*StopCrawlerScheduleOutput, error)

Sets the schedule state of the specified crawler to NOT_SCHEDULED, but does not stop the crawler if it is already running.

func (*Client) StopTrigger

func (c *Client) StopTrigger(ctx context.Context, params *StopTriggerInput, optFns ...func(*Options)) (*StopTriggerOutput, error)

Stops a specified trigger.

func (*Client) StopWorkflowRun

func (c *Client) StopWorkflowRun(ctx context.Context, params *StopWorkflowRunInput, optFns ...func(*Options)) (*StopWorkflowRunOutput, error)

Stops the execution of the specified workflow run.

func (*Client) TagResource

func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error)

Adds tags to a resource. A tag is a label you can assign to an AWS resource. In AWS Glue, you can tag only certain resources. For information about what resources you can tag, see AWS Tags in AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html).

func (*Client) UntagResource

func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error)

Removes tags from a resource.

func (*Client) UpdateClassifier

func (c *Client) UpdateClassifier(ctx context.Context, params *UpdateClassifierInput, optFns ...func(*Options)) (*UpdateClassifierOutput, error)

Modifies an existing classifier (a GrokClassifier, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field is present).

func (*Client) UpdateColumnStatisticsForPartition

func (c *Client) UpdateColumnStatisticsForPartition(ctx context.Context, params *UpdateColumnStatisticsForPartitionInput, optFns ...func(*Options)) (*UpdateColumnStatisticsForPartitionOutput, error)

Creates or updates partition statistics of columns.

func (*Client) UpdateColumnStatisticsForTable

func (c *Client) UpdateColumnStatisticsForTable(ctx context.Context, params *UpdateColumnStatisticsForTableInput, optFns ...func(*Options)) (*UpdateColumnStatisticsForTableOutput, error)

Creates or updates table statistics of columns.

func (*Client) UpdateConnection

func (c *Client) UpdateConnection(ctx context.Context, params *UpdateConnectionInput, optFns ...func(*Options)) (*UpdateConnectionOutput, error)

Updates a connection definition in the Data Catalog.

func (*Client) UpdateCrawler

func (c *Client) UpdateCrawler(ctx context.Context, params *UpdateCrawlerInput, optFns ...func(*Options)) (*UpdateCrawlerOutput, error)

Updates a crawler. If a crawler is running, you must stop it using StopCrawler before updating it.

func (*Client) UpdateCrawlerSchedule

func (c *Client) UpdateCrawlerSchedule(ctx context.Context, params *UpdateCrawlerScheduleInput, optFns ...func(*Options)) (*UpdateCrawlerScheduleOutput, error)

Updates the schedule of a crawler using a cron expression.

func (*Client) UpdateDatabase

func (c *Client) UpdateDatabase(ctx context.Context, params *UpdateDatabaseInput, optFns ...func(*Options)) (*UpdateDatabaseOutput, error)

Updates an existing database definition in a Data Catalog.

func (*Client) UpdateDevEndpoint

func (c *Client) UpdateDevEndpoint(ctx context.Context, params *UpdateDevEndpointInput, optFns ...func(*Options)) (*UpdateDevEndpointOutput, error)

Updates a specified development endpoint.

func (*Client) UpdateJob

func (c *Client) UpdateJob(ctx context.Context, params *UpdateJobInput, optFns ...func(*Options)) (*UpdateJobOutput, error)

Updates an existing job definition.

func (*Client) UpdateMLTransform

func (c *Client) UpdateMLTransform(ctx context.Context, params *UpdateMLTransformInput, optFns ...func(*Options)) (*UpdateMLTransformOutput, error)

Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results. After calling this operation, you can call the StartMLEvaluationTaskRun operation to assess how well your new parameters achieved your goals (such as improving the quality of your machine learning transform, or making it more cost-effective).

func (*Client) UpdatePartition

func (c *Client) UpdatePartition(ctx context.Context, params *UpdatePartitionInput, optFns ...func(*Options)) (*UpdatePartitionOutput, error)

Updates a partition.

func (*Client) UpdateTable

func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optFns ...func(*Options)) (*UpdateTableOutput, error)

Updates a metadata table in the Data Catalog.

func (*Client) UpdateTrigger

func (c *Client) UpdateTrigger(ctx context.Context, params *UpdateTriggerInput, optFns ...func(*Options)) (*UpdateTriggerOutput, error)

Updates a trigger definition.

func (*Client) UpdateUserDefinedFunction

func (c *Client) UpdateUserDefinedFunction(ctx context.Context, params *UpdateUserDefinedFunctionInput, optFns ...func(*Options)) (*UpdateUserDefinedFunctionOutput, error)

Updates an existing function definition in the Data Catalog.

func (*Client) UpdateWorkflow

func (c *Client) UpdateWorkflow(ctx context.Context, params *UpdateWorkflowInput, optFns ...func(*Options)) (*UpdateWorkflowOutput, error)

Updates an existing workflow.

type CreateClassifierInput

// CreateClassifierInput carries the request fields for the CreateClassifier
// operation. Each field supplies one kind of classifier definition to create.
type CreateClassifierInput struct {
	// A JsonClassifier object specifying the classifier to create.
	JsonClassifier *types.CreateJsonClassifierRequest
	// A CsvClassifier object specifying the classifier to create.
	CsvClassifier *types.CreateCsvClassifierRequest
	// An XMLClassifier object specifying the classifier to create.
	XMLClassifier *types.CreateXMLClassifierRequest
	// A GrokClassifier object specifying the classifier to create.
	GrokClassifier *types.CreateGrokClassifierRequest
}

type CreateClassifierOutput

// CreateClassifierOutput is the response returned by the CreateClassifier
// operation.
type CreateClassifierOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateConnectionInput

// CreateConnectionInput carries the request fields for the CreateConnection
// operation.
type CreateConnectionInput struct {
	// The ID of the Data Catalog in which to create the connection. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
	// A ConnectionInput object defining the connection to create.
	ConnectionInput *types.ConnectionInput
}

type CreateConnectionOutput

// CreateConnectionOutput is the response returned by the CreateConnection
// operation.
type CreateConnectionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateCrawlerInput

// CreateCrawlerInput carries the request fields for the CreateCrawler
// operation.
type CreateCrawlerInput struct {
	// The IAM role or Amazon Resource Name (ARN) of an IAM role used by the new
	// crawler to access customer resources.
	Role *string
	// The tags to use with this crawler request. You may use tags to limit access to
	// the crawler. For more information about tags in AWS Glue, see AWS Tags in AWS
	// Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the
	// developer guide.
	Tags map[string]*string
	// A list of custom classifiers that the user has registered. By default, all
	// built-in classifiers are included in a crawl, but these custom classifiers
	// always override the default classifiers for a given classification.
	Classifiers []*string
	// The table prefix used for catalog tables that are created.
	TablePrefix *string
	// The name of the SecurityConfiguration structure to be used by this crawler.
	CrawlerSecurityConfiguration *string
	// The AWS Glue database where results are written, such as:
	// arn:aws:daylight:us-east-1::database/sometable/*.
	DatabaseName *string
	// A cron expression used to specify the schedule (see Time-Based Schedules for
	// Jobs and Crawlers
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)).
	// For example, to run something every day at 12:15 UTC, you would specify: cron(15
	// 12 * * ? *).
	Schedule *string
	// Name of the new crawler.
	Name *string
	// A description of the new crawler.
	Description *string
	// Crawler configuration information. This versioned JSON string allows users to
	// specify aspects of a crawler's behavior. For more information, see Configuring a
	// Crawler (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html).
	Configuration *string
	// A collection of targets to crawl.
	Targets *types.CrawlerTargets
	// The policy for the crawler's update and deletion behavior.
	SchemaChangePolicy *types.SchemaChangePolicy
}

type CreateCrawlerOutput

// CreateCrawlerOutput is the response returned by the CreateCrawler operation.
type CreateCrawlerOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateDatabaseInput

// CreateDatabaseInput carries the request fields for the CreateDatabase
// operation.
type CreateDatabaseInput struct {
	// The metadata for the database.
	DatabaseInput *types.DatabaseInput
	// The ID of the Data Catalog in which to create the database. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
}

type CreateDatabaseOutput

// CreateDatabaseOutput is the response returned by the CreateDatabase
// operation.
type CreateDatabaseOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateDevEndpointInput

// CreateDevEndpointInput carries the request fields for the CreateDevEndpoint
// operation.
type CreateDevEndpointInput struct {
	// The subnet ID for the new DevEndpoint to use.
	SubnetId *string
	// The name of the SecurityConfiguration structure to be used with this
	// DevEndpoint.
	SecurityConfiguration *string
	// The public key to be used by this DevEndpoint for authentication. This attribute
	// is provided for backward compatibility because the recommended attribute to use
	// is public keys.
	PublicKey *string
	// The name to be assigned to the new DevEndpoint.
	EndpointName *string
	// A list of public keys to be used by the development endpoints for
	// authentication. The use of this attribute is preferred over a single public key
	// because the public keys allow you to have a different private key per client.
	// Note: if you previously created an endpoint with a public key, you must remove
	// that key to be able to set a list of public keys. Call the UpdateDevEndpoint
	// API with the public key content in the deletePublicKeys attribute, and the list
	// of new keys in the addPublicKeys attribute.
	PublicKeys []*string
	// The IAM role for the DevEndpoint.
	RoleArn *string
	// The number of AWS Glue Data Processing Units (DPUs) to allocate to this
	// DevEndpoint.
	NumberOfNodes *int32
	// Security group IDs for the security groups to be used by the new DevEndpoint.
	SecurityGroupIds []*string
	// The tags to use with this DevEndpoint. You may use tags to limit access to the
	// DevEndpoint. For more information about tags in AWS Glue, see AWS Tags in AWS
	// Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the
	// developer guide.
	Tags map[string]*string
	// A map of arguments used to configure the DevEndpoint.
	Arguments map[string]*string
	// Glue version determines the versions of Apache Spark and Python that AWS Glue
	// supports. The Python version indicates the version supported for running your
	// ETL scripts on development endpoints. For more information about the available
	// AWS Glue versions and corresponding Spark and Python versions, see Glue version
	// (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer
	// guide. Development endpoints that are created without specifying a Glue version
	// default to Glue 0.9. You can specify a version of Python support for
	// development endpoints by using the Arguments parameter in the CreateDevEndpoint
	// or UpdateDevEndpoint APIs. If no arguments are provided, the version defaults
	// to Python 2.
	GlueVersion *string
	// The paths to one or more Python libraries in an Amazon S3 bucket that should be
	// loaded in your DevEndpoint. Multiple values must be complete paths separated by
	// a comma. You can only use pure Python libraries with a DevEndpoint. Libraries
	// that rely on C extensions, such as the pandas (http://pandas.pydata.org/) Python
	// data analysis library, are not yet supported.
	ExtraPythonLibsS3Path *string
	// The type of predefined worker that is allocated to the development endpoint.
	// Accepts a value of Standard, G.1X, or G.2X.
	//
	//     * For the Standard worker type,
	// each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors
	// per worker.
	//
	//     * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU,
	// 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend
	// this worker type for memory-intensive jobs.
	//
	//     * For the G.2X worker type,
	// each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1
	// executor per worker. We recommend this worker type for memory-intensive
	// jobs.
	//
	// Known issue: when a development endpoint is created with the
	// G.2XWorkerType configuration, the Spark drivers for the development endpoint
	// will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
	WorkerType types.WorkerType
	// The path to one or more Java .jar files in an S3 bucket that should be loaded in
	// your DevEndpoint.
	ExtraJarsS3Path *string
	// The number of workers of a defined workerType that are allocated to the
	// development endpoint. The maximum number of workers you can define are 299 for
	// G.1X, and 149 for G.2X.
	NumberOfWorkers *int32
}

type CreateDevEndpointOutput

type CreateDevEndpointOutput struct {
	// The AWS Availability Zone where this DevEndpoint is located.
	AvailabilityZone *string
	// The number of AWS Glue Data Processing Units (DPUs) allocated to this
	// DevEndpoint.
	NumberOfNodes *int32
	// The Apache Zeppelin port for the remote Apache Spark interpreter.
	ZeppelinRemoteSparkInterpreterPort *int32
	// The reason for a current failure in this DevEndpoint.
	FailureReason *string
	// The paths to one or more Python libraries in an S3 bucket that will be loaded in
	// your DevEndpoint.
	ExtraPythonLibsS3Path *string
	// The current status of the new DevEndpoint.
	Status *string
	// The map of arguments used to configure this DevEndpoint. Valid arguments are:
	//
	//     * "--enable-glue-datacatalog": ""
	//
	//     * "GLUE_PYTHON_VERSION": "3"
	//
	//     * "GLUE_PYTHON_VERSION": "2"
	//
	// You can specify a version of Python support for development endpoints by using
	// the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If
	// no arguments are provided, the version defaults to Python 2.
	Arguments map[string]*string
	// The ID of the virtual private cloud (VPC) used by this DevEndpoint.
	VpcId *string
	// The point in time at which this DevEndpoint was created.
	CreatedTimestamp *time.Time
	// Glue version determines the versions of Apache Spark and Python that AWS Glue
	// supports. The Python version indicates the version supported for running your
	// ETL scripts on development endpoints.
	GlueVersion *string
	// The number of workers of a defined workerType that are allocated to the
	// development endpoint.
	NumberOfWorkers *int32
	// Path to one or more Java .jar files in an S3 bucket that will be loaded in your
	// DevEndpoint.
	ExtraJarsS3Path *string
	// The name of the SecurityConfiguration structure being used with this
	// DevEndpoint.
	SecurityConfiguration *string
	// The address of the YARN endpoint used by this DevEndpoint.
	YarnEndpointAddress *string
	// The type of predefined worker that is allocated to the development endpoint. May
	// be a value of Standard, G.1X, or G.2X.
	WorkerType types.WorkerType
	// The name assigned to the new DevEndpoint.
	EndpointName *string
	// The Amazon Resource Name (ARN) of the role assigned to the new DevEndpoint.
	RoleArn *string
	// The subnet ID assigned to the new DevEndpoint.
	SubnetId *string
	// The security groups assigned to the new DevEndpoint.
	SecurityGroupIds []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateJobInput

type CreateJobInput struct {
	// The tags to use with this job. You may use tags to limit access to the job. For
	// more information about tags in AWS Glue, see AWS Tags in AWS Glue
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the developer
	// guide.
	Tags map[string]*string
	// Glue version determines the versions of Apache Spark and Python that AWS Glue
	// supports. The Python version indicates the version supported for jobs of type
	// Spark. For more information about the available AWS Glue versions and
	// corresponding Spark and Python versions, see Glue version
	// (https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer
	// guide. Jobs that are created without specifying a Glue version default to Glue
	// 0.9.
	GlueVersion *string
	// The number of workers of a defined workerType that are allocated when a job
	// runs. The maximum number of workers you can define are 299 for G.1X, and 149
	// for G.2X.
	NumberOfWorkers *int32
	// The JobCommand that executes this job.
	Command *types.JobCommand
	// The default arguments for this job. You can specify arguments here that your own
	// job-execution script consumes, as well as arguments that AWS Glue itself
	// consumes. For information about how to specify and consume your own Job
	// arguments, see the Calling AWS Glue APIs in Python
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
	// topic in the developer guide. For information about the key-value pairs that AWS
	// Glue consumes to set up your job, see the Special Parameters Used by AWS Glue
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
	// topic in the developer guide.
	DefaultArguments map[string]*string
	// The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
	Role *string
	// The job timeout in minutes. This is the maximum time that a job run can consume
	// resources before it is terminated and enters TIMEOUT status. The default is
	// 2,880 minutes (48 hours).
	Timeout *int32
	// An ExecutionProperty specifying the maximum number of concurrent runs allowed
	// for this job.
	ExecutionProperty *types.ExecutionProperty
	// The number of AWS Glue data processing units (DPUs) to allocate to this Job.
	// You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative
	// measure of processing power that consists of 4 vCPUs of compute capacity and 16
	// GB of memory. For more information, see the AWS Glue pricing page
	// (https://aws.amazon.com/glue/pricing/).
	//
	// Deprecated: This parameter is deprecated. Use MaxCapacity instead.
	AllocatedCapacity *int32
	// The type of predefined worker that is allocated when a job runs. Accepts a value
	// of Standard, G.1X, or G.2X.
	//
	//     * For the Standard worker type, each worker provides 4 vCPU, 16 GB of
	// memory and a 50GB disk, and 2 executors per worker.
	//
	//     * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of
	// memory, 64 GB disk), and provides 1 executor per worker. We recommend this
	// worker type for memory-intensive jobs.
	//
	//     * For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of
	// memory, 128 GB disk), and provides 1 executor per worker. We recommend this
	// worker type for memory-intensive jobs.
	WorkerType types.WorkerType
	// The maximum number of times to retry this job if it fails.
	MaxRetries *int32
	// This field is reserved for future use.
	LogUri *string
	// The name of the SecurityConfiguration structure to be used with this job.
	SecurityConfiguration *string
	// The connections used for this job.
	Connections *types.ConnectionsList
	// Non-overridable arguments for this job, specified as name-value pairs.
	NonOverridableArguments map[string]*string
	// The number of AWS Glue data processing units (DPUs) that can be allocated when
	// this job runs. A DPU is a relative measure of processing power that consists of
	// 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
	// AWS Glue pricing page (https://aws.amazon.com/glue/pricing/).
	//
	// Do not set Max Capacity if using WorkerType and NumberOfWorkers. The value that
	// can be allocated for MaxCapacity depends on whether you are running a Python
	// shell job or an Apache Spark ETL job:
	//
	//     * When you specify a Python shell job (JobCommand.Name="pythonshell"), you
	// can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
	//
	//     * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or
	// Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can
	// allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have
	// a fractional DPU allocation.
	MaxCapacity *float64
	// Description of the job being defined.
	Description *string
	// The name you assign to this job definition. It must be unique in your account.
	Name *string
	// Specifies configuration properties of a job notification.
	NotificationProperty *types.NotificationProperty
}

type CreateJobOutput

type CreateJobOutput struct {
	// The unique name that was provided for this job definition.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateMLTransformInput

type CreateMLTransformInput struct {
	// The maximum number of times to retry a task for this transform after a task run
	// fails.
	MaxRetries *int32
	// The name or Amazon Resource Name (ARN) of the IAM role with the required
	// permissions. The required permissions include both AWS Glue service role
	// permissions to AWS Glue resources, and Amazon S3 permissions required by the
	// transform.
	//
	//     * This role needs AWS Glue service role permissions to allow access to
	// resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue
	// (https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html).
	//
	//     * This role needs permission to your Amazon Simple Storage Service (Amazon
	// S3) sources, targets, temporary directory, scripts, and any libraries used by
	// the task run for this transform.
	Role *string
	// The type of predefined worker that is allocated when this task runs. Accepts a
	// value of Standard, G.1X, or G.2X.
	//
	//     * For the Standard worker type, each worker provides 4 vCPU, 16 GB of
	// memory and a 50GB disk, and 2 executors per worker.
	//
	//     * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory
	// and a 64GB disk, and 1 executor per worker.
	//
	//     * For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory
	// and a 128GB disk, and 1 executor per worker.
	//
	// MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.
	//
	//     * If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot
	// be set.
	//
	//     * If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be
	// set.
	//
	//     * If WorkerType is set, then NumberOfWorkers is required (and vice versa).
	//
	//     * MaxCapacity and NumberOfWorkers must both be at least 1.
	WorkerType types.WorkerType
	// A description of the machine learning transform that is being defined. The
	// default is an empty string.
	Description *string
	// The algorithmic parameters that are specific to the transform type used.
	// Conditionally dependent on the transform type.
	Parameters *types.TransformParameters
	// The number of workers of a defined workerType that are allocated when this task
	// runs. If WorkerType is set, then NumberOfWorkers is required (and vice versa).
	NumberOfWorkers *int32
	// A list of AWS Glue table definitions used by the transform.
	InputRecordTables []*types.GlueTable
	// The number of AWS Glue data processing units (DPUs) that are allocated to task
	// runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10.
	// A DPU is a relative measure of processing power that consists of 4 vCPUs of
	// compute capacity and 16 GB of memory. For more information, see the AWS Glue
	// pricing page (https://aws.amazon.com/glue/pricing/).
	//
	// MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.
	//
	//     * If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot
	// be set.
	//
	//     * If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be
	// set.
	//
	//     * If WorkerType is set, then NumberOfWorkers is required (and vice versa).
	//
	//     * MaxCapacity and NumberOfWorkers must both be at least 1.
	//
	// When the WorkerType field is set to a value other than Standard, the
	// MaxCapacity field is set automatically and becomes read-only.
	MaxCapacity *float64
	// The tags to use with this machine learning transform. You may use tags to limit
	// access to the machine learning transform. For more information about tags in AWS
	// Glue, see AWS Tags in AWS Glue
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the developer
	// guide.
	Tags map[string]*string
	// The unique name that you give the transform when you create it.
	Name *string
	// The timeout of the task run for this transform in minutes. This is the maximum
	// time that a task run for this transform can consume resources before it is
	// terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
	Timeout *int32
	// This value determines which version of AWS Glue this machine learning transform
	// is compatible with. Glue 1.0 is recommended for most customers. If the value is
	// not set, the Glue compatibility defaults to Glue 0.9. For more information, see
	// AWS Glue Versions
	// (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions)
	// in the developer guide.
	GlueVersion *string
}

type CreateMLTransformOutput

type CreateMLTransformOutput struct {
	// A unique identifier that is generated for the transform.
	TransformId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreatePartitionInput

type CreatePartitionInput struct {
	// The name of the metadata table in which the partition is to be created.
	TableName *string
	// The name of the metadata database in which the partition is to be created.
	DatabaseName *string
	// The AWS account ID of the catalog in which the partition is to be created.
	CatalogId *string
	// A PartitionInput structure defining the partition to be created.
	PartitionInput *types.PartitionInput
}

type CreatePartitionOutput

type CreatePartitionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateScriptInput

type CreateScriptInput struct {
	// A list of the nodes in the DAG.
	DagNodes []*types.CodeGenNode
	// The programming language of the resulting code from the DAG.
	Language types.Language
	// A list of the edges in the DAG.
	DagEdges []*types.CodeGenEdge
}

type CreateScriptOutput

type CreateScriptOutput struct {
	// The Scala code generated from the DAG.
	ScalaCode *string
	// The Python script generated from the DAG.
	PythonScript *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateSecurityConfigurationInput

type CreateSecurityConfigurationInput struct {
	// The name for the new security configuration.
	Name *string
	// The encryption configuration for the new security configuration.
	EncryptionConfiguration *types.EncryptionConfiguration
}

type CreateSecurityConfigurationOutput

type CreateSecurityConfigurationOutput struct {
	// The time at which the new security configuration was created.
	CreatedTimestamp *time.Time
	// The name assigned to the new security configuration.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateTableInput

type CreateTableInput struct {
	// The ID of the Data Catalog in which to create the Table. If none is supplied,
	// the AWS account ID is used by default.
	CatalogId *string
	// The catalog database in which to create the new table. For Hive compatibility,
	// this name is entirely lowercase.
	DatabaseName *string
	// The TableInput object that defines the metadata table to create in the catalog.
	TableInput *types.TableInput
}

type CreateTableOutput

type CreateTableOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateTriggerInput

type CreateTriggerInput struct {
	// A description of the new trigger.
	Description *string
	// Set to true to start SCHEDULED and CONDITIONAL triggers when created. True is
	// not supported for ON_DEMAND triggers.
	StartOnCreation *bool
	// A cron expression used to specify the schedule (see Time-Based Schedules for
	// Jobs and Crawlers
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)).
	// For example, to run something every day at 12:15 UTC, you would specify: cron(15
	// 12 * * ? *). This field is required when the trigger type is SCHEDULED.
	Schedule *string
	// The name of the workflow associated with the trigger.
	WorkflowName *string
	// The actions initiated by this trigger when it fires.
	Actions []*types.Action
	// The type of the new trigger.
	Type types.TriggerType
	// The tags to use with this trigger. You may use tags to limit access to the
	// trigger. For more information about tags in AWS Glue, see AWS Tags in AWS Glue
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) in the developer
	// guide.
	Tags map[string]*string
	// The name of the trigger.
	Name *string
	// A predicate to specify when the new trigger should fire. This field is required
	// when the trigger type is CONDITIONAL.
	Predicate *types.Predicate
}

type CreateTriggerOutput

type CreateTriggerOutput struct {
	// The name of the trigger.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateUserDefinedFunctionInput

type CreateUserDefinedFunctionInput struct {
	// The name of the catalog database in which to create the function.
	DatabaseName *string
	// A FunctionInput object that defines the function to create in the Data Catalog.
	FunctionInput *types.UserDefinedFunctionInput
	// The ID of the Data Catalog in which to create the function. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
}

type CreateUserDefinedFunctionOutput

type CreateUserDefinedFunctionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type CreateWorkflowInput

type CreateWorkflowInput struct {
	// The name to be assigned to the workflow. It should be unique within your
	// account.
	Name *string
	// The tags to be used with this workflow.
	Tags map[string]*string
	// A description of the workflow.
	Description *string
	// A collection of properties to be used as part of each execution of the workflow.
	DefaultRunProperties map[string]*string
}

type CreateWorkflowOutput

type CreateWorkflowOutput struct {
	// The name of the workflow which was provided as part of the request.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteClassifierInput

type DeleteClassifierInput struct {
	// Name of the classifier to remove.
	Name *string
}

type DeleteClassifierOutput

type DeleteClassifierOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteColumnStatisticsForPartitionInput

type DeleteColumnStatisticsForPartitionInput struct {
	// The name of the partitions' table.
	TableName *string
	// Name of the column.
	ColumnName *string
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
	// A list of partition values identifying the partition.
	PartitionValues []*string
}

type DeleteColumnStatisticsForPartitionOutput

type DeleteColumnStatisticsForPartitionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteColumnStatisticsForTableInput

type DeleteColumnStatisticsForTableInput struct {
	// The name of the partitions' table.
	TableName *string
	// The name of the column.
	ColumnName *string
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
}

type DeleteColumnStatisticsForTableOutput

type DeleteColumnStatisticsForTableOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteConnectionInput

type DeleteConnectionInput struct {
	// The name of the connection to delete.
	ConnectionName *string
	// The ID of the Data Catalog in which the connection resides. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
}

type DeleteConnectionOutput

type DeleteConnectionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteCrawlerInput

type DeleteCrawlerInput struct {
	// The name of the crawler to remove.
	Name *string
}

type DeleteCrawlerOutput

type DeleteCrawlerOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteDatabaseInput

type DeleteDatabaseInput struct {
	// The ID of the Data Catalog in which the database resides. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
	// The name of the database to delete. For Hive compatibility, this must be all
	// lowercase.
	Name *string
}

type DeleteDatabaseOutput

type DeleteDatabaseOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteDevEndpointInput

type DeleteDevEndpointInput struct {
	// The name of the DevEndpoint.
	EndpointName *string
}

type DeleteDevEndpointOutput

type DeleteDevEndpointOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteJobInput

type DeleteJobInput struct {
	// The name of the job definition to delete.
	JobName *string
}

type DeleteJobOutput

type DeleteJobOutput struct {
	// The name of the job definition that was deleted.
	JobName *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteMLTransformInput

type DeleteMLTransformInput struct {
	// The unique identifier of the transform to delete.
	TransformId *string
}

type DeleteMLTransformOutput

type DeleteMLTransformOutput struct {
	// The unique identifier of the transform that was deleted.
	TransformId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeletePartitionInput

type DeletePartitionInput struct {
	// The name of the table that contains the partition to be deleted.
	TableName *string
	// The values that define the partition.
	PartitionValues []*string
	// The name of the catalog database in which the table in question resides.
	DatabaseName *string
	// The ID of the Data Catalog where the partition to be deleted resides. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
}

type DeletePartitionOutput

type DeletePartitionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteResourcePolicyInput

type DeleteResourcePolicyInput struct {
	// The ARN of the AWS Glue resource for the resource policy to be deleted.
	ResourceArn *string
	// The hash value returned when this policy was set.
	PolicyHashCondition *string
}

type DeleteResourcePolicyOutput

type DeleteResourcePolicyOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteSecurityConfigurationInput

type DeleteSecurityConfigurationInput struct {
	// The name of the security configuration to delete.
	Name *string
}

type DeleteSecurityConfigurationOutput

type DeleteSecurityConfigurationOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteTableInput

type DeleteTableInput struct {
	// The ID of the Data Catalog where the table resides. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// The name of the table to be deleted. For Hive compatibility, this name is
	// entirely lowercase.
	Name *string
	// The name of the catalog database in which the table resides. For Hive
	// compatibility, this name is entirely lowercase.
	DatabaseName *string
}

type DeleteTableOutput

type DeleteTableOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteTableVersionInput

type DeleteTableVersionInput struct {
	// The name of the table. For Hive compatibility, this name is entirely lowercase.
	TableName *string
	// The ID of the Data Catalog where the tables reside. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// The ID of the table version to be deleted. A VersionID is a string
	// representation of an integer. Each version is incremented by 1.
	VersionId *string
	// The database in the catalog in which the table resides. For Hive compatibility,
	// this name is entirely lowercase.
	DatabaseName *string
}

type DeleteTableVersionOutput

type DeleteTableVersionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteTriggerInput

type DeleteTriggerInput struct {
	// The name of the trigger to delete.
	Name *string
}

type DeleteTriggerOutput

type DeleteTriggerOutput struct {
	// The name of the trigger that was deleted.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteUserDefinedFunctionInput

type DeleteUserDefinedFunctionInput struct {
	// The ID of the Data Catalog where the function to be deleted is located. If none
	// is supplied, the AWS account ID is used by default.
	CatalogId *string
	// The name of the function definition to be deleted.
	FunctionName *string
	// The name of the catalog database where the function is located.
	DatabaseName *string
}

type DeleteUserDefinedFunctionOutput

type DeleteUserDefinedFunctionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type DeleteWorkflowInput

type DeleteWorkflowInput struct {
	// Name of the workflow to be deleted.
	Name *string
}

type DeleteWorkflowOutput

type DeleteWorkflowOutput struct {
	// Name of the workflow specified in input.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type EndpointResolver

type EndpointResolver interface {
	ResolveEndpoint(region string, options ResolverOptions) (aws.Endpoint, error)
}

EndpointResolver interface for resolving service endpoints.

func WithEndpointResolver

func WithEndpointResolver(awsResolver aws.EndpointResolver, fallbackResolver EndpointResolver) EndpointResolver

WithEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided fallbackResolver for resolution. awsResolver and fallbackResolver must not be nil.

type EndpointResolverFunc

type EndpointResolverFunc func(region string, options ResolverOptions) (aws.Endpoint, error)

EndpointResolverFunc is a helper utility that wraps a function so it satisfies the EndpointResolver interface. This is useful when you want to add additional endpoint resolving logic, or stub out specific endpoints with custom values.

func (EndpointResolverFunc) ResolveEndpoint

func (fn EndpointResolverFunc) ResolveEndpoint(region string, options ResolverOptions) (endpoint aws.Endpoint, err error)

type GetCatalogImportStatusInput

type GetCatalogImportStatusInput struct {
	// The ID of the catalog to migrate. Currently, this should be the AWS account ID.
	CatalogId *string
}

type GetCatalogImportStatusOutput

type GetCatalogImportStatusOutput struct {
	// The status of the specified catalog migration.
	ImportStatus *types.CatalogImportStatus

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetClassifierInput

type GetClassifierInput struct {
	// Name of the classifier to retrieve.
	Name *string
}

type GetClassifierOutput

type GetClassifierOutput struct {
	// The requested classifier.
	Classifier *types.Classifier

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetClassifiersInput

type GetClassifiersInput struct {
	// An optional continuation token.
	NextToken *string
	// The size of the list to return (optional).
	MaxResults *int32
}

type GetClassifiersOutput

type GetClassifiersOutput struct {
	// The requested list of classifier objects.
	Classifiers []*types.Classifier
	// A continuation token.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetColumnStatisticsForPartitionInput

type GetColumnStatisticsForPartitionInput struct {
	// The name of the partitions' table.
	TableName *string
	// A list of the column names.
	ColumnNames []*string
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// A list of partition values identifying the partition.
	PartitionValues []*string
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
}

type GetColumnStatisticsForPartitionOutput

type GetColumnStatisticsForPartitionOutput struct {
	// List of ColumnStatistics that were successfully retrieved. Columns that could
	// not be retrieved are reported in Errors.
	ColumnStatisticsList []*types.ColumnStatistics
	// Errors encountered while retrieving column statistics data.
	Errors []*types.ColumnError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetColumnStatisticsForTableInput

type GetColumnStatisticsForTableInput struct {
	// The name of the partitions' table.
	TableName *string
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// A list of the column names.
	ColumnNames []*string
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
}

type GetColumnStatisticsForTableOutput

type GetColumnStatisticsForTableOutput struct {
	// List of ColumnStatistics that failed to be retrieved.
	Errors []*types.ColumnError
	// List of ColumnStatistics that were successfully retrieved. Columns that could
	// not be retrieved are reported in Errors.
	ColumnStatisticsList []*types.ColumnStatistics

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetConnectionInput

type GetConnectionInput struct {
	// The name of the connection definition to retrieve.
	Name *string
	// The ID of the Data Catalog in which the connection resides. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
	// Allows you to retrieve the connection metadata without returning the password.
	// For instance, the AWS Glue console uses this flag to retrieve the connection,
	// and does not display the password. Set this parameter when the caller might not
	// have permission to use the AWS KMS key to decrypt the password, but it does have
	// permission to access the rest of the connection properties.
	HidePassword *bool
}

type GetConnectionOutput

type GetConnectionOutput struct {
	// The requested connection definition.
	Connection *types.Connection

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetConnectionsInput

type GetConnectionsInput struct {
	// Allows you to retrieve the connection metadata without returning the password.
	// For instance, the AWS Glue console uses this flag to retrieve the connection,
	// and does not display the password. Set this parameter when the caller might not
	// have permission to use the AWS KMS key to decrypt the password, but it does have
	// permission to access the rest of the connection properties.
	HidePassword *bool
	// A continuation token, if this is a continuation call.
	NextToken *string
	// The maximum number of connections to return in one response.
	MaxResults *int32
	// A filter that controls which connections are returned.
	Filter *types.GetConnectionsFilter
	// The ID of the Data Catalog in which the connections reside. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
}

type GetConnectionsOutput

type GetConnectionsOutput struct {
	// A continuation token, if the list of connections returned does not include the
	// last of the filtered connections.
	NextToken *string
	// A list of requested connection definitions.
	ConnectionList []*types.Connection

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetCrawlerInput

type GetCrawlerInput struct {
	// The name of the crawler to retrieve metadata for.
	Name *string
}

type GetCrawlerMetricsInput

type GetCrawlerMetricsInput struct {
	// A list of the names of crawlers about which to retrieve metrics.
	CrawlerNameList []*string
	// The maximum size of a list to return.
	MaxResults *int32
	// A continuation token, if this is a continuation call.
	NextToken *string
}

type GetCrawlerMetricsOutput

type GetCrawlerMetricsOutput struct {
	// A continuation token, if the returned list does not contain the last metric
	// available.
	NextToken *string
	// A list of metrics for the specified crawler.
	CrawlerMetricsList []*types.CrawlerMetrics

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetCrawlerOutput

type GetCrawlerOutput struct {
	// The metadata for the specified crawler.
	Crawler *types.Crawler

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetCrawlersInput

type GetCrawlersInput struct {
	// The number of crawlers to return on each call.
	MaxResults *int32
	// A continuation token, if this is a continuation request.
	NextToken *string
}

type GetCrawlersOutput

type GetCrawlersOutput struct {
	// A continuation token, if the returned list has not reached the end of those
	// defined in this customer account.
	NextToken *string
	// A list of crawler metadata.
	Crawlers []*types.Crawler

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetDataCatalogEncryptionSettingsInput

type GetDataCatalogEncryptionSettingsInput struct {
	// The ID of the Data Catalog to retrieve the security configuration for. If none
	// is provided, the AWS account ID is used by default.
	CatalogId *string
}

type GetDataCatalogEncryptionSettingsOutput

type GetDataCatalogEncryptionSettingsOutput struct {
	// The requested security configuration.
	DataCatalogEncryptionSettings *types.DataCatalogEncryptionSettings

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetDatabaseInput

type GetDatabaseInput struct {
	// The name of the database to retrieve. For Hive compatibility, this should be all
	// lowercase.
	Name *string
	// The ID of the Data Catalog in which the database resides. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
}

type GetDatabaseOutput

type GetDatabaseOutput struct {
	// The definition of the specified database in the Data Catalog.
	Database *types.Database

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetDatabasesInput

type GetDatabasesInput struct {
	// A continuation token, if this is a continuation call.
	NextToken *string
	// Allows you to specify that you want to list the databases shared with your
	// account. The allowable values are FOREIGN or ALL.
	//
	//     * If set to FOREIGN, will list the databases shared with your account.
	//
	//     * If set to ALL, will list the databases shared with your account, as
	// well as the databases in your local account.
	ResourceShareType types.ResourceShareType
	// The maximum number of databases to return in one response.
	MaxResults *int32
	// The ID of the Data Catalog from which to retrieve Databases. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
}

type GetDatabasesOutput

type GetDatabasesOutput struct {
	// A list of Database objects from the specified catalog.
	DatabaseList []*types.Database
	// A continuation token for paginating the returned list of tokens, returned if the
	// current segment of the list is not the last.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetDataflowGraphInput

type GetDataflowGraphInput struct {
	// The Python script to transform.
	PythonScript *string
}

type GetDataflowGraphOutput

type GetDataflowGraphOutput struct {
	// A list of the edges in the resulting DAG.
	DagEdges []*types.CodeGenEdge
	// A list of the nodes in the resulting DAG.
	DagNodes []*types.CodeGenNode

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetDevEndpointInput

type GetDevEndpointInput struct {
	// Name of the DevEndpoint to retrieve information for.
	EndpointName *string
}

type GetDevEndpointOutput

type GetDevEndpointOutput struct {
	// A DevEndpoint definition.
	DevEndpoint *types.DevEndpoint

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetDevEndpointsInput

type GetDevEndpointsInput struct {
	// The maximum size of information to return.
	MaxResults *int32
	// A continuation token, if this is a continuation call.
	NextToken *string
}

type GetDevEndpointsOutput

type GetDevEndpointsOutput struct {
	// A list of DevEndpoint definitions.
	DevEndpoints []*types.DevEndpoint
	// A continuation token, if not all DevEndpoint definitions have yet been returned.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetJobBookmarkInput

type GetJobBookmarkInput struct {
	// The name of the job in question.
	JobName *string
	// The unique run identifier associated with this job run.
	RunId *string
}

type GetJobBookmarkOutput

type GetJobBookmarkOutput struct {
	// A structure that defines a point that a job can resume processing.
	JobBookmarkEntry *types.JobBookmarkEntry

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetJobInput

type GetJobInput struct {
	// The name of the job definition to retrieve.
	JobName *string
}

type GetJobOutput

type GetJobOutput struct {
	// The requested job definition.
	Job *types.Job

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetJobRunInput

type GetJobRunInput struct {
	// The ID of the job run.
	RunId *string
	// Name of the job definition being run.
	JobName *string
	// True if a list of predecessor runs should be returned.
	PredecessorsIncluded *bool
}

type GetJobRunOutput

type GetJobRunOutput struct {
	// The requested job-run metadata.
	JobRun *types.JobRun

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetJobRunsInput

type GetJobRunsInput struct {
	// The name of the job definition for which to retrieve all job runs.
	JobName *string
	// The maximum size of the response.
	MaxResults *int32
	// A continuation token, if this is a continuation call.
	NextToken *string
}

type GetJobRunsOutput

type GetJobRunsOutput struct {
	// A continuation token, if not all requested job runs have been returned.
	NextToken *string
	// A list of job-run metadata objects.
	JobRuns []*types.JobRun

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetJobsInput

type GetJobsInput struct {
	// A continuation token, if this is a continuation call.
	NextToken *string
	// The maximum size of the response.
	MaxResults *int32
}

type GetJobsOutput

type GetJobsOutput struct {
	// A list of job definitions.
	Jobs []*types.Job
	// A continuation token, if not all job definitions have yet been returned.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetMLTaskRunInput

type GetMLTaskRunInput struct {
	// The unique identifier of the machine learning transform.
	TransformId *string
	// The unique identifier of the task run.
	TaskRunId *string
}

type GetMLTaskRunOutput

type GetMLTaskRunOutput struct {
	// The amount of time (in seconds) that the task run consumed resources.
	ExecutionTime *int32
	// The status for this task run.
	Status types.TaskStatusType
	// The list of properties that are associated with the task run.
	Properties *types.TaskRunProperties
	// The unique run identifier associated with this run.
	TaskRunId *string
	// The date and time when this task run was last modified.
	LastModifiedOn *time.Time
	// The name of the log group that is associated with the task run.
	LogGroupName *string
	// The date and time when this task run was completed.
	CompletedOn *time.Time
	// The unique identifier of the machine learning transform.
	TransformId *string
	// The error string that is associated with the task run.
	ErrorString *string
	// The date and time when this task run started.
	StartedOn *time.Time

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetMLTaskRunsInput

type GetMLTaskRunsInput struct {
	// The sorting criteria, in the TaskRunSortCriteria structure, for the task run.
	Sort *types.TaskRunSortCriteria
	// A token for pagination of the results. The default is empty.
	NextToken *string
	// The maximum number of results to return.
	MaxResults *int32
	// The filter criteria, in the TaskRunFilterCriteria structure, for the task run.
	Filter *types.TaskRunFilterCriteria
	// The unique identifier of the machine learning transform.
	TransformId *string
}

type GetMLTaskRunsOutput

type GetMLTaskRunsOutput struct {
	// A pagination token, if more results are available.
	NextToken *string
	// A list of task runs that are associated with the transform.
	TaskRuns []*types.TaskRun

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetMLTransformInput

type GetMLTransformInput struct {
	// The unique identifier of the transform, generated at the time that the transform
	// was created.
	TransformId *string
}

type GetMLTransformOutput

type GetMLTransformOutput struct {
	// The unique name given to the transform when it was created.
	Name *string
	// The date and time when the transform was last modified.
	LastModifiedOn *time.Time
	// The number of labels available for this transform.
	LabelCount *int32
	// The date and time when the transform was created.
	CreatedOn *time.Time
	// The configuration parameters that are specific to the algorithm used.
	Parameters *types.TransformParameters
	// The maximum number of times to retry a task for this transform after a task run
	// fails.
	MaxRetries *int32
	// The last known status of the transform (to indicate whether it can be used or
	// not). One of "NOT_READY", "READY", or "DELETING".
	Status types.TransformStatusType
	// The type of predefined worker that is allocated when this task runs. Accepts a
	// value of Standard, G.1X, or G.2X.
	//
	//     * For the Standard worker type, each
	// worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per
	// worker.
	//
	//     * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of
	// memory and a 64GB disk, and 1 executor per worker.
	//
	//     * For the G.2X worker
	// type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1
	// executor per worker.
	WorkerType types.WorkerType
	// This value determines which version of AWS Glue this machine learning transform
	// is compatible with. Glue 1.0 is recommended for most customers. If the value is
	// not set, the Glue compatibility defaults to Glue 0.9. For more information, see
	// AWS Glue Versions
	// (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions)
	// in the developer guide.
	GlueVersion *string
	// The number of AWS Glue data processing units (DPUs) that are allocated to task
	// runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10.
	// A DPU is a relative measure of processing power that consists of 4 vCPUs of
	// compute capacity and 16 GB of memory. For more information, see the AWS Glue
	// pricing page (https://aws.amazon.com/glue/pricing/). When the WorkerType
	// field is set to a value other than Standard, the MaxCapacity field is set
	// automatically and becomes read-only.
	MaxCapacity *float64
	// A description of the transform.
	Description *string
	// The unique identifier of the transform, generated at the time that the transform
	// was created.
	TransformId *string
	// A list of AWS Glue table definitions used by the transform.
	InputRecordTables []*types.GlueTable
	// The number of workers of a defined workerType that are allocated when this task
	// runs.
	NumberOfWorkers *int32
	// The name or Amazon Resource Name (ARN) of the IAM role with the required
	// permissions.
	Role *string
	// The timeout for a task run for this transform in minutes. This is the maximum
	// time that a task run for this transform can consume resources before it is
	// terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
	Timeout *int32
	// The Map object that represents the schema that this transform accepts. Has an
	// upper bound of 100 columns.
	Schema []*types.SchemaColumn
	// The latest evaluation metrics.
	EvaluationMetrics *types.EvaluationMetrics

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetMLTransformsInput

type GetMLTransformsInput struct {
	// The maximum number of results to return.
	MaxResults *int32
	// The filter transformation criteria.
	Filter *types.TransformFilterCriteria
	// A paginated token to offset the results.
	NextToken *string
	// The sorting criteria.
	Sort *types.TransformSortCriteria
}

type GetMLTransformsOutput

type GetMLTransformsOutput struct {
	// A list of machine learning transforms.
	Transforms []*types.MLTransform
	// A pagination token, if more results are available.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetMappingInput

type GetMappingInput struct {
	// A list of target tables.
	Sinks []*types.CatalogEntry
	// Specifies the source table.
	Source *types.CatalogEntry
	// Parameters for the mapping.
	Location *types.Location
}

type GetMappingOutput

type GetMappingOutput struct {
	// A list of mappings to the specified targets.
	Mapping []*types.MappingEntry

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetPartitionInput

type GetPartitionInput struct {
	// The values that define the partition.
	PartitionValues []*string
	// The ID of the Data Catalog where the partition in question resides. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
	// The name of the catalog database where the partition resides.
	DatabaseName *string
	// The name of the partition's table.
	TableName *string
}

type GetPartitionOutput

type GetPartitionOutput struct {
	// The requested information, in the form of a Partition object.
	Partition *types.Partition

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetPartitionsInput

type GetPartitionsInput struct {
	// A continuation token, if this is not the first call to retrieve these
	// partitions.
	NextToken *string
	// The segment of the table's partitions to scan in this request.
	Segment *types.Segment
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// The name of the partitions' table.
	TableName *string
	// The maximum number of partitions to return in a single response.
	MaxResults *int32
	// An expression that filters the partitions to be returned. The expression uses
	// SQL syntax similar to the SQL WHERE filter clause. The SQL statement parser
	// JSQLParser (http://jsqlparser.sourceforge.net/home.php) parses the expression.
	// Operators: The following are the operators that you can use in the Expression
	// API call: = Checks whether the values of the two operands are equal; if yes,
	// then the condition becomes true. Example: Assume 'variable a' holds 10 and
	// 'variable b' holds 20. (a = b) is not true. < > Checks whether the values of two
	// operands are equal; if the values are not equal, then the condition becomes
	// true. Example: (a < > b) is true. > Checks whether the value of the left operand
	// is greater than the value of the right operand; if yes, then the condition
	// becomes true. Example: (a > b) is not true. < Checks whether the value of the
	// left operand is less than the value of the right operand; if yes, then the
	// condition becomes true. Example: (a < b) is true. >= Checks whether the value of
	// the left operand is greater than or equal to the value of the right operand; if
	// yes, then the condition becomes true. Example: (a >= b) is not true. <= Checks
	// whether the value of the left operand is less than or equal to the value of the
	// right operand; if yes, then the condition becomes true. Example: (a <= b) is
	// true. AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL Logical operators. Supported
	// Partition Key Types: The following are the supported partition keys.
	//
	//     *
	// string
	//
	//     * date
	//
	//     * timestamp
	//
	//     * int
	//
	//     * bigint
	//
	//     * long
	//
	//     *
	// tinyint
	//
	//     * smallint
	//
	//     * decimal
	//
	// If an invalid type is encountered, an
	// exception is thrown. The following list shows the valid operators on each type.
	// When you define a crawler, the partitionKey type is created as a STRING, to be
	// compatible with the catalog partitions.
	Expression *string
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
}

type GetPartitionsOutput

type GetPartitionsOutput struct {
	// A continuation token, if the returned list of partitions does not include the
	// last one.
	NextToken *string
	// A list of requested partitions.
	Partitions []*types.Partition

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetPlanInput

type GetPlanInput struct {
	// The list of mappings from a source table to target tables.
	Mapping []*types.MappingEntry
	// The programming language of the code to perform the mapping.
	Language types.Language
	// The target tables.
	Sinks []*types.CatalogEntry
	// The source table.
	Source *types.CatalogEntry
	// The parameters for the mapping.
	Location *types.Location
}

type GetPlanOutput

type GetPlanOutput struct {
	// A Python script to perform the mapping.
	PythonScript *string
	// The Scala code to perform the mapping.
	ScalaCode *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetResourcePoliciesInput

type GetResourcePoliciesInput struct {
	// A continuation token, if this is a continuation request.
	NextToken *string
	// The maximum size of a list to return.
	MaxResults *int32
}

type GetResourcePoliciesOutput

type GetResourcePoliciesOutput struct {
	// A list of the individual resource policies and the account-level resource
	// policy.
	GetResourcePoliciesResponseList []*types.GluePolicy
	// A continuation token, if the returned list does not contain the last resource
	// policy available.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetResourcePolicyInput

type GetResourcePolicyInput struct {
	// The ARN of the AWS Glue resource for the resource policy to be retrieved. For
	// more information about AWS Glue resource ARNs, see the AWS Glue ARN string
	// pattern
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id)
	ResourceArn *string
}

type GetResourcePolicyOutput

type GetResourcePolicyOutput struct {
	// Contains the hash value associated with this policy.
	PolicyHash *string
	// The date and time at which the policy was last updated.
	UpdateTime *time.Time
	// Contains the requested policy document, in JSON format.
	PolicyInJson *string
	// The date and time at which the policy was created.
	CreateTime *time.Time

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetSecurityConfigurationInput

type GetSecurityConfigurationInput struct {
	// The name of the security configuration to retrieve.
	Name *string
}

type GetSecurityConfigurationOutput

type GetSecurityConfigurationOutput struct {
	// The requested security configuration.
	SecurityConfiguration *types.SecurityConfiguration

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetSecurityConfigurationsInput

type GetSecurityConfigurationsInput struct {
	// A continuation token, if this is a continuation call.
	NextToken *string
	// The maximum number of results to return.
	MaxResults *int32
}

type GetSecurityConfigurationsOutput

type GetSecurityConfigurationsOutput struct {
	// A list of security configurations.
	SecurityConfigurations []*types.SecurityConfiguration
	// A continuation token, if there are more security configurations to return.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTableInput

type GetTableInput struct {
	// The ID of the Data Catalog where the table resides. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// The name of the table for which to retrieve the definition. For Hive
	// compatibility, this name is entirely lowercase.
	Name *string
	// The name of the database in the catalog in which the table resides. For Hive
	// compatibility, this name is entirely lowercase.
	DatabaseName *string
}

type GetTableOutput

type GetTableOutput struct {
	// The Table object that defines the specified table.
	Table *types.Table

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTableVersionInput

type GetTableVersionInput struct {
	// The database in the catalog in which the table resides. For Hive compatibility,
	// this name is entirely lowercase.
	DatabaseName *string
	// The name of the table. For Hive compatibility, this name is entirely lowercase.
	TableName *string
	// The ID value of the table version to be retrieved. A VersionID is a string
	// representation of an integer. Each version is incremented by 1.
	VersionId *string
	// The ID of the Data Catalog where the tables reside. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
}

type GetTableVersionOutput

type GetTableVersionOutput struct {
	// The requested table version.
	TableVersion *types.TableVersion

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTableVersionsInput

type GetTableVersionsInput struct {
	// The ID of the Data Catalog where the tables reside. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// The maximum number of table versions to return in one response.
	MaxResults *int32
	// The database in the catalog in which the table resides. For Hive compatibility,
	// this name is entirely lowercase.
	DatabaseName *string
	// The name of the table. For Hive compatibility, this name is entirely lowercase.
	TableName *string
	// A continuation token, if this is not the first call.
	NextToken *string
}

type GetTableVersionsOutput

type GetTableVersionsOutput struct {
	// A continuation token, if the list of available versions does not include the
	// last one.
	NextToken *string
	// A list of strings identifying available versions of the specified table.
	TableVersions []*types.TableVersion

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTablesInput

type GetTablesInput struct {
	// The ID of the Data Catalog where the tables reside. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// The database in the catalog whose tables to list. For Hive compatibility, this
	// name is entirely lowercase.
	DatabaseName *string
	// A continuation token, included if this is a continuation call.
	NextToken *string
	// The maximum number of tables to return in a single response.
	MaxResults *int32
	// A regular expression pattern. If present, only those tables whose names match
	// the pattern are returned.
	Expression *string
}

type GetTablesOutput

type GetTablesOutput struct {
	// A list of the requested Table objects.
	TableList []*types.Table
	// A continuation token, present if the current list segment is not the last.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTagsInput

type GetTagsInput struct {
	// The Amazon Resource Name (ARN) of the resource for which to retrieve tags.
	ResourceArn *string
}

type GetTagsOutput

type GetTagsOutput struct {
	// The requested tags.
	Tags map[string]*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTriggerInput

type GetTriggerInput struct {
	// The name of the trigger to retrieve.
	Name *string
}

type GetTriggerOutput

type GetTriggerOutput struct {
	// The requested trigger definition.
	Trigger *types.Trigger

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetTriggersInput

type GetTriggersInput struct {
	// The name of the job to retrieve triggers for. The trigger that can start this
	// job is returned, and if there is no such trigger, all triggers are returned.
	DependentJobName *string
	// The maximum size of the response.
	MaxResults *int32
	// A continuation token, if this is a continuation call.
	NextToken *string
}

type GetTriggersOutput

type GetTriggersOutput struct {
	// A list of triggers for the specified job.
	Triggers []*types.Trigger
	// A continuation token, if not all the requested triggers have yet been returned.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetUserDefinedFunctionInput

type GetUserDefinedFunctionInput struct {
	// The name of the catalog database where the function is located.
	DatabaseName *string
	// The name of the function.
	FunctionName *string
	// The ID of the Data Catalog where the function to be retrieved is located. If
	// none is provided, the AWS account ID is used by default.
	CatalogId *string
}

type GetUserDefinedFunctionOutput

type GetUserDefinedFunctionOutput struct {
	// The requested function definition.
	UserDefinedFunction *types.UserDefinedFunction

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetUserDefinedFunctionsInput

type GetUserDefinedFunctionsInput struct {
	// The name of the catalog database where the functions are located. If none is
	// provided, functions from all the databases across the catalog will be returned.
	DatabaseName *string
	// A continuation token, if this is a continuation call.
	NextToken *string
	// The maximum number of functions to return in one response.
	MaxResults *int32
	// The ID of the Data Catalog where the functions to be retrieved are located. If
	// none is provided, the AWS account ID is used by default.
	CatalogId *string
	// An optional function-name pattern string that filters the function definitions
	// returned.
	Pattern *string
}

type GetUserDefinedFunctionsOutput

type GetUserDefinedFunctionsOutput struct {
	// A list of requested function definitions.
	UserDefinedFunctions []*types.UserDefinedFunction
	// A continuation token, if the list of functions returned does not include the
	// last requested function.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetWorkflowInput

type GetWorkflowInput struct {
	// Specifies whether to include a graph when returning the workflow resource
	// metadata.
	IncludeGraph *bool
	// The name of the workflow to retrieve.
	Name *string
}

type GetWorkflowOutput

type GetWorkflowOutput struct {
	// The resource metadata for the workflow.
	Workflow *types.Workflow

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetWorkflowRunInput

type GetWorkflowRunInput struct {
	// The ID of the workflow run.
	RunId *string
	// Specifies whether to include the workflow graph in response or not.
	IncludeGraph *bool
	// Name of the workflow being run.
	Name *string
}

type GetWorkflowRunOutput

type GetWorkflowRunOutput struct {
	// The requested workflow run metadata.
	Run *types.WorkflowRun

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetWorkflowRunPropertiesInput

type GetWorkflowRunPropertiesInput struct {
	// Name of the workflow which was run.
	Name *string
	// The ID of the workflow run whose run properties should be returned.
	RunId *string
}

type GetWorkflowRunPropertiesOutput

type GetWorkflowRunPropertiesOutput struct {
	// The workflow run properties which were set during the specified run.
	RunProperties map[string]*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type GetWorkflowRunsInput

type GetWorkflowRunsInput struct {
	// Name of the workflow whose metadata of runs should be returned.
	Name *string
	// A continuation token, if this is a continuation request.
	NextToken *string
	// Specifies whether to include the workflow graph in response or not.
	IncludeGraph *bool
	// The maximum number of workflow runs to be included in the response.
	MaxResults *int32
}

type GetWorkflowRunsOutput

type GetWorkflowRunsOutput struct {
	// A continuation token, if not all requested workflow runs have been returned.
	NextToken *string
	// A list of workflow run metadata objects.
	Runs []*types.WorkflowRun

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type HTTPClient

type HTTPClient interface {
	Do(*http.Request) (*http.Response, error)
}

type HTTPSignerV4

type HTTPSignerV4 interface {
	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time) error
}

type ImportCatalogToGlueInput

type ImportCatalogToGlueInput struct {
	// The ID of the catalog to import. Currently, this should be the AWS account ID.
	CatalogId *string
}

type ImportCatalogToGlueOutput

type ImportCatalogToGlueOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ListCrawlersInput

type ListCrawlersInput struct {
	// The maximum size of a list to return.
	MaxResults *int32
	// A continuation token, if this is a continuation request.
	NextToken *string
	// Specifies to return only these tagged resources.
	Tags map[string]*string
}

type ListCrawlersOutput

type ListCrawlersOutput struct {
	// A continuation token, if the returned list does not contain the last metric
	// available.
	NextToken *string
	// The names of all crawlers in the account, or the crawlers with the specified
	// tags.
	CrawlerNames []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ListDevEndpointsInput

type ListDevEndpointsInput struct {
	// The maximum size of a list to return.
	MaxResults *int32
	// Specifies to return only these tagged resources.
	Tags map[string]*string
	// A continuation token, if this is a continuation request.
	NextToken *string
}

type ListDevEndpointsOutput

type ListDevEndpointsOutput struct {
	// The names of all the DevEndpoints in the account, or the DevEndpoints with the
	// specified tags.
	DevEndpointNames []*string
	// A continuation token, if the returned list does not contain the last metric
	// available.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ListJobsInput

type ListJobsInput struct {
	// A continuation token, if this is a continuation request.
	NextToken *string
	// Specifies to return only these tagged resources.
	Tags map[string]*string
	// The maximum size of a list to return.
	MaxResults *int32
}

type ListJobsOutput

type ListJobsOutput struct {
	// The names of all jobs in the account, or the jobs with the specified tags.
	JobNames []*string
	// A continuation token, if the returned list does not contain the last metric
	// available.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ListMLTransformsInput

type ListMLTransformsInput struct {
	// A TransformFilterCriteria used to filter the machine learning transforms.
	Filter *types.TransformFilterCriteria
	// The maximum size of a list to return.
	MaxResults *int32
	// Specifies to return only these tagged resources.
	Tags map[string]*string
	// A continuation token, if this is a continuation request.
	NextToken *string
	// A TransformSortCriteria used to sort the machine learning transforms.
	Sort *types.TransformSortCriteria
}

type ListMLTransformsOutput

type ListMLTransformsOutput struct {
	// The identifiers of all the machine learning transforms in the account, or the
	// machine learning transforms with the specified tags.
	TransformIds []*string
	// A continuation token, if the returned list does not contain the last metric
	// available.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ListTriggersInput

type ListTriggersInput struct {
	// The name of the job for which to retrieve triggers. The trigger that can start
	// this job is returned. If there is no such trigger, all triggers are returned.
	DependentJobName *string
	// The maximum size of a list to return.
	MaxResults *int32
	// Specifies to return only these tagged resources.
	Tags map[string]*string
	// A continuation token, if this is a continuation request.
	NextToken *string
}

type ListTriggersOutput

type ListTriggersOutput struct {
	// The names of all triggers in the account, or the triggers with the specified
	// tags.
	TriggerNames []*string
	// A continuation token, if the returned list does not contain the last metric
	// available.
	NextToken *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ListWorkflowsInput

type ListWorkflowsInput struct {
	// The maximum size of a list to return.
	MaxResults *int32
	// A continuation token, if this is a continuation request.
	NextToken *string
}

type ListWorkflowsOutput

type ListWorkflowsOutput struct {
	// A continuation token, if not all workflow names have been returned.
	NextToken *string
	// List of names of workflows in the account.
	Workflows []*string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type Options

type Options struct {
	// Set of options to modify how an operation is invoked. These apply to all
	// operations invoked for this client. Use functional options on operation call to
	// modify this list for per operation behavior.
	APIOptions []func(*middleware.Stack) error

	// The credentials object to use when signing requests.
	Credentials aws.CredentialsProvider

	// The endpoint options to be used when attempting to resolve an endpoint.
	EndpointOptions ResolverOptions

	// The service endpoint resolver.
	EndpointResolver EndpointResolver

	// Signature Version 4 (SigV4) Signer
	HTTPSignerV4 HTTPSignerV4

	// The region to send requests to. (Required)
	Region string

	// Retryer guides how HTTP requests should be retried in case of recoverable
	// failures. When nil the API client will use a default retryer.
	Retryer retry.Retryer

	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
	// implementation if nil.
	HTTPClient HTTPClient
}

func (Options) Copy

func (o Options) Copy() Options

Copy creates a clone where the APIOptions list is deep copied.

func (Options) GetCredentials

func (o Options) GetCredentials() aws.CredentialsProvider

func (Options) GetEndpointOptions

func (o Options) GetEndpointOptions() ResolverOptions

func (Options) GetEndpointResolver

func (o Options) GetEndpointResolver() EndpointResolver

func (Options) GetHTTPSignerV4

func (o Options) GetHTTPSignerV4() HTTPSignerV4

func (Options) GetRegion

func (o Options) GetRegion() string

func (Options) GetRetryer

func (o Options) GetRetryer() retry.Retryer

type PutDataCatalogEncryptionSettingsInput

type PutDataCatalogEncryptionSettingsInput struct {
	// The security configuration to set.
	DataCatalogEncryptionSettings *types.DataCatalogEncryptionSettings
	// The ID of the Data Catalog to set the security configuration for. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
}

type PutDataCatalogEncryptionSettingsOutput

type PutDataCatalogEncryptionSettingsOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type PutResourcePolicyInput

type PutResourcePolicyInput struct {
	// The hash value returned when the previous policy was set using
	// PutResourcePolicy. Its purpose is to prevent concurrent modifications of a
	// policy. Do not use this parameter if no previous policy has been set.
	PolicyHashCondition *string
	// Allows you to specify if you want to use both resource-level and
	// account/catalog-level resource policies. A resource-level policy is a policy
	// attached to an individual resource such as a database or a table.  <p>The
	// default value of <code>NO</code> indicates that resource-level policies cannot
	// co-exist with an account-level policy. A value of <code>YES</code> means the use
	// of both resource-level and account/catalog-level resource policies is
	// allowed.</p>
	EnableHybrid types.EnableHybridValues
	// Contains the policy document to set, in JSON format.
	PolicyInJson *string
	// A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is used
	// to create a new policy. If a value of NONE or a null value is used, the call
	// will not depend on the existence of a policy.
	PolicyExistsCondition types.ExistCondition
	// The ARN of the AWS Glue resource for the resource policy to be set. For more
	// information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id)
	ResourceArn *string
}

type PutResourcePolicyOutput

type PutResourcePolicyOutput struct {
	// A hash of the policy that has just been set. This must be included in a
	// subsequent call that overwrites or updates this policy.
	PolicyHash *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type PutWorkflowRunPropertiesInput

type PutWorkflowRunPropertiesInput struct {
	// Name of the workflow which was run.
	Name *string
	// The ID of the workflow run for which the run properties should be updated.
	RunId *string
	// The properties to put for the specified run.
	RunProperties map[string]*string
}

type PutWorkflowRunPropertiesOutput

type PutWorkflowRunPropertiesOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ResetJobBookmarkInput

type ResetJobBookmarkInput struct {
	// The unique run identifier associated with this job run.
	RunId *string
	// The name of the job in question.
	JobName *string
}

type ResetJobBookmarkOutput

type ResetJobBookmarkOutput struct {
	// The reset bookmark entry.
	JobBookmarkEntry *types.JobBookmarkEntry

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type ResolveEndpoint

type ResolveEndpoint struct {
	Resolver EndpointResolver
	Options  ResolverOptions
}

func (*ResolveEndpoint) HandleSerialize

func (*ResolveEndpoint) ID

func (*ResolveEndpoint) ID() string

type ResolveEndpointMiddlewareOptions

type ResolveEndpointMiddlewareOptions interface {
	GetEndpointResolver() EndpointResolver
	GetEndpointOptions() ResolverOptions
}

type ResolverOptions

type ResolverOptions = internalendpoints.Options

ResolverOptions is the service endpoint resolver options

type ResumeWorkflowRunInput

type ResumeWorkflowRunInput struct {
	// The name of the workflow to resume.
	Name *string
	// The ID of the workflow run to resume.
	RunId *string
	// A list of the node IDs for the nodes you want to restart. The nodes that are to
	// be restarted must have an execution attempt in the original run.
	NodeIds []*string
}

type ResumeWorkflowRunOutput

type ResumeWorkflowRunOutput struct {
	// A list of the node IDs for the nodes that were actually restarted.
	NodeIds []*string
	// The new ID assigned to the resumed workflow run. Each resume of a workflow run
	// will have a new run ID.
	RunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type SearchTablesInput

type SearchTablesInput struct {
	// A list of criteria for sorting the results by a field name, in an ascending or
	// descending order.
	SortCriteria []*types.SortCriterion
	// A list of key-value pairs, and a comparator used to filter the search results.
	// Returns all entities matching the predicate.  <p>The <code>Comparator</code>
	// member of the <code>PropertyPredicate</code> struct is used only for time
	// fields, and can be omitted for other field types. Also, when comparing string
	// values, such as when <code>Key=Name</code>, a fuzzy match algorithm is used. The
	// <code>Key</code> field (for example, the value of the <code>Name</code> field)
	// is split on certain punctuation characters, for example, -, :, #, etc. into
	// tokens. Then each token is exact-match compared with the <code>Value</code>
	// member of <code>PropertyPredicate</code>. For example, if <code>Key=Name</code>
	// and <code>Value=link</code>, tables named <code>customer-link</code> and
	// <code>xx-link-yy</code> are returned, but <code>xxlinkyy</code> is not
	// returned.</p>
	Filters []*types.PropertyPredicate
	// A unique identifier, consisting of account_id.
	CatalogId *string
	// Allows you to specify that you want to search the tables shared with your
	// account. The allowable values are FOREIGN or ALL.  <ul> <li> <p>If set to
	// <code>FOREIGN</code>, will search the tables shared with your account. </p>
	// </li> <li> <p>If set to <code>ALL</code>, will search the tables shared with
	// your account, as well as the tables in your local account. </p> </li> </ul>
	ResourceShareType types.ResourceShareType
	// The maximum number of tables to return in a single response.
	MaxResults *int32
	// A continuation token, included if this is a continuation call.
	NextToken *string
	// A string used for a text search. Specifying a value in quotes filters based on
	// an exact match to the value.
	SearchText *string
}

type SearchTablesOutput

type SearchTablesOutput struct {
	// A continuation token, present if the current list segment is not the last.
	NextToken *string
	// A list of the requested Table objects. The SearchTables response returns only
	// the tables that you have access to.
	TableList []*types.Table

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartCrawlerInput

type StartCrawlerInput struct {
	// Name of the crawler to start.
	Name *string
}

type StartCrawlerOutput

type StartCrawlerOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartCrawlerScheduleInput

type StartCrawlerScheduleInput struct {
	// Name of the crawler to schedule.
	CrawlerName *string
}

type StartCrawlerScheduleOutput

type StartCrawlerScheduleOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartExportLabelsTaskRunInput

type StartExportLabelsTaskRunInput struct {
	// The Amazon S3 path where you export the labels.
	OutputS3Path *string
	// The unique identifier of the machine learning transform.
	TransformId *string
}

type StartExportLabelsTaskRunOutput

type StartExportLabelsTaskRunOutput struct {
	// The unique identifier for the task run.
	TaskRunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartImportLabelsTaskRunInput

type StartImportLabelsTaskRunInput struct {
	// Indicates whether to overwrite your existing labels.
	ReplaceAllLabels *bool
	// The Amazon Simple Storage Service (Amazon S3) path from where you import the
	// labels.
	InputS3Path *string
	// The unique identifier of the machine learning transform.
	TransformId *string
}

type StartImportLabelsTaskRunOutput

type StartImportLabelsTaskRunOutput struct {
	// The unique identifier for the task run.
	TaskRunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartJobRunInput

type StartJobRunInput struct {
	// The name of the job definition to use.
	JobName *string
	// The ID of a previous JobRun to retry.
	JobRunId *string
	// The name of the SecurityConfiguration structure to be used with this job run.
	SecurityConfiguration *string
	// The JobRun timeout in minutes. This is the maximum time that a job run can
	// consume resources before it is terminated and enters TIMEOUT status. The default
	// is 2,880 minutes (48 hours). This overrides the timeout value set in the parent
	// job.
	Timeout *int32
	// This field is deprecated. Use MaxCapacity instead.  <p>The number of AWS Glue
	// data processing units (DPUs) to allocate to this JobRun. From 2 to 100 DPUs can
	// be allocated; the default is 10. A DPU is a relative measure of processing power
	// that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
	// information, see the <a
	// href="https://aws.amazon.com/glue/pricing/">AWS Glue
	// pricing page</a>.</p>
	AllocatedCapacity *int32
	// The job arguments specifically for this run. For this job run, they replace the
	// default arguments set in the job definition itself. You can specify arguments
	// here that your own job-execution script consumes, as well as arguments that AWS
	// Glue itself consumes. For information about how to specify and consume your own
	// Job arguments, see the Calling AWS Glue APIs in Python
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
	// topic in the developer guide. For information about the key-value pairs that AWS
	// Glue consumes to set up your job, see the Special Parameters Used by AWS Glue
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
	// topic in the developer guide.
	Arguments map[string]*string
	// The number of AWS Glue data processing units (DPUs) that can be allocated when
	// this job runs. A DPU is a relative measure of processing power that consists of
	// 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the
	// AWS Glue pricing page
	// (https://aws.amazon.com/glue/pricing/).  <p>Do not
	// set <code>Max Capacity</code> if using <code>WorkerType</code> and
	// <code>NumberOfWorkers</code>.</p> <p>The value that can be allocated for
	// <code>MaxCapacity</code> depends on whether you are running a Python shell job,
	// or an Apache Spark ETL job:</p> <ul> <li> <p>When you specify a Python shell job
	// (<code>JobCommand.Name</code>="pythonshell"), you can allocate either 0.0625 or
	// 1 DPU. The default is 0.0625 DPU.</p> </li> <li> <p>When you specify an Apache
	// Spark ETL job (<code>JobCommand.Name</code>="glueetl"), you can allocate from 2
	// to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU
	// allocation.</p> </li> </ul>
	MaxCapacity *float64
	// Specifies configuration properties of a job run notification.
	NotificationProperty *types.NotificationProperty
	// The type of predefined worker that is allocated when a job runs. Accepts a value
	// of Standard, G.1X, or G.2X.
	//
	//     * For the Standard worker type, each worker
	// provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
	//
	//
	// * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a
	// 64GB disk, and 1 executor per worker.
	//
	//     * For the G.2X worker type, each
	// worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per
	// worker.
	WorkerType types.WorkerType
	// The number of workers of a defined workerType that are allocated when a job
	// runs.  <p>The maximum number of workers you can define are 299 for
	// <code>G.1X</code>, and 149 for <code>G.2X</code>. </p>
	NumberOfWorkers *int32
}

type StartJobRunOutput

type StartJobRunOutput struct {
	// The ID assigned to this job run.
	JobRunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartMLEvaluationTaskRunInput

type StartMLEvaluationTaskRunInput struct {
	// The unique identifier of the machine learning transform.
	TransformId *string
}

type StartMLEvaluationTaskRunOutput

type StartMLEvaluationTaskRunOutput struct {
	// The unique identifier associated with this run.
	TaskRunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartMLLabelingSetGenerationTaskRunInput

type StartMLLabelingSetGenerationTaskRunInput struct {
	// The unique identifier of the machine learning transform.
	TransformId *string
	// The Amazon Simple Storage Service (Amazon S3) path where you generate the
	// labeling set.
	OutputS3Path *string
}

type StartMLLabelingSetGenerationTaskRunOutput

type StartMLLabelingSetGenerationTaskRunOutput struct {
	// The unique run identifier that is associated with this task run.
	TaskRunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartTriggerInput

type StartTriggerInput struct {
	// The name of the trigger to start.
	Name *string
}

type StartTriggerOutput

type StartTriggerOutput struct {
	// The name of the trigger that was started.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StartWorkflowRunInput

type StartWorkflowRunInput struct {
	// The name of the workflow to start.
	Name *string
}

type StartWorkflowRunOutput

type StartWorkflowRunOutput struct {
	// An Id for the new run.
	RunId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StopCrawlerInput

type StopCrawlerInput struct {
	// Name of the crawler to stop.
	Name *string
}

type StopCrawlerOutput

type StopCrawlerOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StopCrawlerScheduleInput

type StopCrawlerScheduleInput struct {
	// Name of the crawler whose schedule state to set.
	CrawlerName *string
}

type StopCrawlerScheduleOutput

type StopCrawlerScheduleOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StopTriggerInput

type StopTriggerInput struct {
	// The name of the trigger to stop.
	Name *string
}

type StopTriggerOutput

type StopTriggerOutput struct {
	// The name of the trigger that was stopped.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type StopWorkflowRunInput

type StopWorkflowRunInput struct {
	// The ID of the workflow run to stop.
	RunId *string
	// The name of the workflow to stop.
	Name *string
}

type StopWorkflowRunOutput

type StopWorkflowRunOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type TagResourceInput

type TagResourceInput struct {
	// Tags to add to this resource.
	TagsToAdd map[string]*string
	// The ARN of the AWS Glue resource to which to add the tags. For more information
	// about AWS Glue resource ARNs, see the AWS Glue ARN string pattern
	// (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id).
	ResourceArn *string
}

type TagResourceOutput

type TagResourceOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UntagResourceInput

type UntagResourceInput struct {
	// Tags to remove from this resource.
	TagsToRemove []*string
	// The Amazon Resource Name (ARN) of the resource from which to remove the tags.
	ResourceArn *string
}

type UntagResourceOutput

type UntagResourceOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateClassifierInput

type UpdateClassifierInput struct {
	// A GrokClassifier object with updated fields.
	GrokClassifier *types.UpdateGrokClassifierRequest
	// A CsvClassifier object with updated fields.
	CsvClassifier *types.UpdateCsvClassifierRequest
	// An XMLClassifier object with updated fields.
	XMLClassifier *types.UpdateXMLClassifierRequest
	// A JsonClassifier object with updated fields.
	JsonClassifier *types.UpdateJsonClassifierRequest
}

type UpdateClassifierOutput

type UpdateClassifierOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateColumnStatisticsForPartitionInput

type UpdateColumnStatisticsForPartitionInput struct {
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// A list of the column statistics.
	ColumnStatisticsList []*types.ColumnStatistics
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
	// A list of partition values identifying the partition.
	PartitionValues []*string
	// The name of the partitions' table.
	TableName *string
}

type UpdateColumnStatisticsForPartitionOutput

type UpdateColumnStatisticsForPartitionOutput struct {
	// Error occurred during updating column statistics data.
	Errors []*types.ColumnStatisticsError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateColumnStatisticsForTableInput

type UpdateColumnStatisticsForTableInput struct {
	// The name of the catalog database where the partitions reside.
	DatabaseName *string
	// The ID of the Data Catalog where the partitions in question reside. If none is
	// supplied, the AWS account ID is used by default.
	CatalogId *string
	// The name of the partitions' table.
	TableName *string
	// A list of the column statistics.
	ColumnStatisticsList []*types.ColumnStatistics
}

type UpdateColumnStatisticsForTableOutput

type UpdateColumnStatisticsForTableOutput struct {
	// List of ColumnStatisticsErrors.
	Errors []*types.ColumnStatisticsError

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateConnectionInput

type UpdateConnectionInput struct {
	// The ID of the Data Catalog in which the connection resides. If none is provided,
	// the AWS account ID is used by default.
	CatalogId *string
	// The name of the connection definition to update.
	Name *string
	// A ConnectionInput object that redefines the connection in question.
	ConnectionInput *types.ConnectionInput
}

type UpdateConnectionOutput

type UpdateConnectionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateCrawlerInput

type UpdateCrawlerInput struct {
	// The name of the SecurityConfiguration structure to be used by this crawler.
	CrawlerSecurityConfiguration *string
	// Name of the new crawler.
	Name *string
	// A cron expression used to specify the schedule (see Time-Based Schedules for
	// Jobs and Crawlers
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html).
	// For example, to run something every day at 12:15 UTC, you would specify: cron(15
	// 12 * * ? *).
	Schedule *string
	// Crawler configuration information. This versioned JSON string allows users to
	// specify aspects of a crawler's behavior. For more information, see Configuring a
	// Crawler (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html).
	Configuration *string
	// The AWS Glue database where results are stored, such as:
	// arn:aws:daylight:us-east-1::database/sometable/*.
	DatabaseName *string
	// A description of the new crawler.
	Description *string
	// A list of custom classifiers that the user has registered. By default, all
	// built-in classifiers are included in a crawl, but these custom classifiers
	// always override the default classifiers for a given classification.
	Classifiers []*string
	// The table prefix used for catalog tables that are created.
	TablePrefix *string
	// A list of targets to crawl.
	Targets *types.CrawlerTargets
	// The IAM role or Amazon Resource Name (ARN) of an IAM role that is used by the
	// new crawler to access customer resources.
	Role *string
	// The policy for the crawler's update and deletion behavior.
	SchemaChangePolicy *types.SchemaChangePolicy
}

type UpdateCrawlerOutput

type UpdateCrawlerOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateCrawlerScheduleInput

type UpdateCrawlerScheduleInput struct {
	// The name of the crawler whose schedule to update.
	CrawlerName *string
	// The updated cron expression used to specify the schedule (see Time-Based
	// Schedules for Jobs and Crawlers
	// (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html).
	// For example, to run something every day at 12:15 UTC, you would specify: cron(15
	// 12 * * ? *).
	Schedule *string
}

type UpdateCrawlerScheduleOutput

type UpdateCrawlerScheduleOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateDatabaseInput

type UpdateDatabaseInput struct {
	// The ID of the Data Catalog in which the metadata database resides. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
	// A DatabaseInput object specifying the new definition of the metadata database in
	// the catalog.
	DatabaseInput *types.DatabaseInput
	// The name of the database to update in the catalog. For Hive compatibility, this
	// is folded to lowercase.
	Name *string
}

type UpdateDatabaseOutput

type UpdateDatabaseOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateDevEndpointInput

type UpdateDevEndpointInput struct {
	// The name of the DevEndpoint to be updated.
	EndpointName *string
	// The list of public keys to be deleted from the DevEndpoint.
	DeletePublicKeys []*string
	// The list of argument keys to be deleted from the map of arguments used to
	// configure the DevEndpoint.
	DeleteArguments []*string
	// The list of public keys for the DevEndpoint to use.
	AddPublicKeys []*string
	// The public key for the DevEndpoint to use.
	PublicKey *string
	// The map of arguments to add the map of arguments used to configure the
	// DevEndpoint.  <p>Valid arguments are:</p> <ul> <li> <p>
	// <code>"--enable-glue-datacatalog": ""</code> </p> </li> <li> <p>
	// <code>"GLUE_PYTHON_VERSION": "3"</code> </p> </li> <li> <p>
	// <code>"GLUE_PYTHON_VERSION": "2"</code> </p> </li> </ul> <p>You can specify a
	// version of Python support for development endpoints by using the
	// <code>Arguments</code> parameter in the <code>CreateDevEndpoint</code> or
	// <code>UpdateDevEndpoint</code> APIs. If no arguments are provided, the version
	// defaults to Python 2.</p>
	AddArguments map[string]*string
	// Custom Python or Java libraries to be loaded in the DevEndpoint.
	CustomLibraries *types.DevEndpointCustomLibraries
	// True if the list of custom libraries to be loaded in the development endpoint
	// needs to be updated, or False if otherwise.
	UpdateEtlLibraries *bool
}

type UpdateDevEndpointOutput

type UpdateDevEndpointOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateJobInput

type UpdateJobInput struct {
	// Specifies the values with which to update the job definition.
	JobUpdate *types.JobUpdate
	// The name of the job definition to update.
	JobName *string
}

type UpdateJobOutput

type UpdateJobOutput struct {
	// Returns the name of the updated job definition.
	JobName *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateMLTransformInput

type UpdateMLTransformInput struct {
	// The unique name that you gave the transform when you created it.
	Name *string
	// The timeout for a task run for this transform in minutes. This is the maximum
	// time that a task run for this transform can consume resources before it is
	// terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
	Timeout *int32
	// A description of the transform. The default is an empty string.
	Description *string
	// The number of AWS Glue data processing units (DPUs) that are allocated to task
	// runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10.
	// A DPU is a relative measure of processing power that consists of 4 vCPUs of
	// compute capacity and 16 GB of memory. For more information, see the AWS Glue
	// pricing page (https://aws.amazon.com/glue/pricing/).  <p>When the
	// <code>WorkerType</code> field is set to a value other than
	// <code>Standard</code>, the <code>MaxCapacity</code> field is set automatically
	// and becomes read-only.</p>
	MaxCapacity *float64
	// The number of workers of a defined workerType that are allocated when this task
	// runs.
	NumberOfWorkers *int32
	// The name or Amazon Resource Name (ARN) of the IAM role with the required
	// permissions.
	Role *string
	// The maximum number of times to retry a task for this transform after a task run
	// fails.
	MaxRetries *int32
	// The configuration parameters that are specific to the transform type (algorithm)
	// used. Conditionally dependent on the transform type.
	Parameters *types.TransformParameters
	// This value determines which version of AWS Glue this machine learning transform
	// is compatible with. Glue 1.0 is recommended for most customers. If the value is
	// not set, the Glue compatibility defaults to Glue 0.9. For more information, see
	// AWS Glue Versions
	// (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions)
	// in the developer guide.
	GlueVersion *string
	// The type of predefined worker that is allocated when this task runs. Accepts a
	// value of Standard, G.1X, or G.2X.
	//
	//     * For the Standard worker type, each
	// worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per
	// worker.
	//
	//     * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of
	// memory and a 64GB disk, and 1 executor per worker.
	//
	//     * For the G.2X worker
	// type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1
	// executor per worker.
	WorkerType types.WorkerType
	// A unique identifier that was generated when the transform was created.
	TransformId *string
}

type UpdateMLTransformOutput

// UpdateMLTransformOutput is the output type for the UpdateMLTransform
// operation.
type UpdateMLTransformOutput struct {
	// The unique identifier for the transform that was updated.
	TransformId *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdatePartitionInput

// UpdatePartitionInput is the input type for the UpdatePartition operation.
type UpdatePartitionInput struct {
	// List of partition key values that define the partition to update.
	PartitionValueList []*string
	// The ID of the Data Catalog where the partition to be updated resides. If none is
	// provided, the AWS account ID is used by default.
	CatalogId *string
	// The name of the catalog database in which the table in question resides.
	DatabaseName *string
	// The name of the table in which the partition to be updated is located.
	TableName *string
	// The new partition object to update the partition to. The Values property
	// can't be changed. If you want to change the partition key values for a
	// partition, delete and recreate the partition.
	PartitionInput *types.PartitionInput
}

type UpdatePartitionOutput

// UpdatePartitionOutput is the output type for the UpdatePartition operation.
// It carries no data fields beyond the operation metadata.
type UpdatePartitionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateTableInput

// UpdateTableInput is the input type for the UpdateTable operation.
type UpdateTableInput struct {
	// By default, UpdateTable always creates an archived version of the table before
	// updating it. However, if skipArchive is set to true, UpdateTable does not create
	// the archived version.
	SkipArchive *bool
	// The name of the catalog database in which the table resides. For Hive
	// compatibility, this name is entirely lowercase.
	DatabaseName *string
	// The ID of the Data Catalog where the table resides. If none is provided, the AWS
	// account ID is used by default.
	CatalogId *string
	// An updated TableInput object to define the metadata table in the catalog.
	TableInput *types.TableInput
}

type UpdateTableOutput

// UpdateTableOutput is the output type for the UpdateTable operation.
// It carries no data fields beyond the operation metadata.
type UpdateTableOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateTriggerInput

// UpdateTriggerInput is the input type for the UpdateTrigger operation.
type UpdateTriggerInput struct {
	// The new values with which to update the trigger.
	TriggerUpdate *types.TriggerUpdate
	// The name of the trigger to update.
	Name *string
}

type UpdateTriggerOutput

// UpdateTriggerOutput is the output type for the UpdateTrigger operation.
type UpdateTriggerOutput struct {
	// The resulting trigger definition.
	Trigger *types.Trigger

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateUserDefinedFunctionInput

// UpdateUserDefinedFunctionInput is the input type for the
// UpdateUserDefinedFunction operation.
type UpdateUserDefinedFunctionInput struct {
	// A FunctionInput object that redefines the function in the Data Catalog.
	FunctionInput *types.UserDefinedFunctionInput
	// The ID of the Data Catalog where the function to be updated is located. If none
	// is provided, the AWS account ID is used by default.
	CatalogId *string
	// The name of the catalog database where the function to be updated is located.
	DatabaseName *string
	// The name of the function.
	FunctionName *string
}

type UpdateUserDefinedFunctionOutput

// UpdateUserDefinedFunctionOutput is the output type for the
// UpdateUserDefinedFunction operation. It carries no data fields beyond the
// operation metadata.
type UpdateUserDefinedFunctionOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

type UpdateWorkflowInput

// UpdateWorkflowInput is the input type for the UpdateWorkflow operation.
type UpdateWorkflowInput struct {
	// A collection of properties to be used as part of each execution of the workflow.
	DefaultRunProperties map[string]*string
	// The description of the workflow.
	Description *string
	// Name of the workflow to be updated.
	Name *string
}

type UpdateWorkflowOutput

// UpdateWorkflowOutput is the output type for the UpdateWorkflow operation.
type UpdateWorkflowOutput struct {
	// The name of the workflow which was specified in input.
	Name *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

Source Files

Directories

Path Synopsis
internal

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL