package types

v1.80.0 · Published: Apr 19, 2024 · License: Apache-2.0 · Imports: 4 · Imported by: 27

Documentation


Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AccessDeniedException

type AccessDeniedException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

Access to a resource was denied.

func (*AccessDeniedException) Error

func (e *AccessDeniedException) Error() string

func (*AccessDeniedException) ErrorCode

func (e *AccessDeniedException) ErrorCode() string

func (*AccessDeniedException) ErrorFault

func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault

func (*AccessDeniedException) ErrorMessage

func (e *AccessDeniedException) ErrorMessage() string

type Action

type Action struct {

	// The job arguments used when this trigger fires. For this job run, they replace
	// the default arguments set in the job definition itself. You can specify
	// arguments here that your own job-execution script consumes, as well as arguments
	// that Glue itself consumes. For information about how to specify and consume your
	// own Job arguments, see the Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
	// topic in the developer guide. For information about the key-value pairs that
	// Glue consumes to set up your job, see the Special Parameters Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
	// topic in the developer guide.
	Arguments map[string]string

	// The name of the crawler to be used with this action.
	CrawlerName *string

	// The name of a job to be run.
	JobName *string

	// Specifies configuration properties of a job run notification.
	NotificationProperty *NotificationProperty

	// The name of the SecurityConfiguration structure to be used with this action.
	SecurityConfiguration *string

	// The JobRun timeout in minutes. This is the maximum time that a job run can
	// consume resources before it is terminated and enters TIMEOUT status. The
	// default is 2,880 minutes (48 hours). This overrides the timeout value set in the
	// parent job.
	Timeout *int32
	// contains filtered or unexported fields
}

Defines an action to be initiated by a trigger.
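
A minimal sketch of constructing an Action for a trigger, assuming the aws helper package from github.com/aws/aws-sdk-go-v2/aws; the job name and argument values are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// A trigger action that runs a hypothetical job with one override
	// argument and a 60-minute timeout (overriding the parent job).
	action := types.Action{
		JobName:   aws.String("example-etl-job"), // placeholder job name
		Arguments: map[string]string{"--my_arg": "some-value"},
		Timeout:   aws.Int32(60),
	}
	fmt.Println(aws.ToString(action.JobName))
}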

type AdditionalOptionKeys added in v1.49.0

type AdditionalOptionKeys string
const (
	AdditionalOptionKeysCacheOption        AdditionalOptionKeys = "performanceTuning.caching"
	AdditionalOptionKeysObservationsOption AdditionalOptionKeys = "observations.scope"
)

Enum values for AdditionalOptionKeys

func (AdditionalOptionKeys) Values added in v1.49.0

func (AdditionalOptionKeys) Values() []AdditionalOptionKeys

Values returns all known values for AdditionalOptionKeys. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
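
A sketch of using Values to check a string against the enum values known to this client version; the helper function name is illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// isKnownOptionKey reports whether s matches a value known to this
// client version; newer service versions may add values.
func isKnownOptionKey(s string) bool {
	for _, v := range types.AdditionalOptionKeys("").Values() {
		if string(v) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownOptionKey("performanceTuning.caching")) // true
}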

type AggFunction added in v1.25.0

type AggFunction string
const (
	AggFunctionAvg           AggFunction = "avg"
	AggFunctionCountDistinct AggFunction = "countDistinct"
	AggFunctionCount         AggFunction = "count"
	AggFunctionFirst         AggFunction = "first"
	AggFunctionLast          AggFunction = "last"
	AggFunctionKurtosis      AggFunction = "kurtosis"
	AggFunctionMax           AggFunction = "max"
	AggFunctionMin           AggFunction = "min"
	AggFunctionSkewness      AggFunction = "skewness"
	AggFunctionStddevSamp    AggFunction = "stddev_samp"
	AggFunctionStddevPop     AggFunction = "stddev_pop"
	AggFunctionSum           AggFunction = "sum"
	AggFunctionSumDistinct   AggFunction = "sumDistinct"
	AggFunctionVarSamp       AggFunction = "var_samp"
	AggFunctionVarPop        AggFunction = "var_pop"
)

Enum values for AggFunction

func (AggFunction) Values added in v1.25.0

func (AggFunction) Values() []AggFunction

Values returns all known values for AggFunction. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Aggregate added in v1.25.0

type Aggregate struct {

	// Specifies the aggregate functions to be performed on specified fields.
	//
	// This member is required.
	Aggs []AggregateOperation

	// Specifies the fields to group by.
	//
	// This member is required.
	Groups [][]string

	// Specifies the fields and rows to use as inputs for the aggregate transform.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string
	// contains filtered or unexported fields
}

Specifies a transform that groups rows by chosen fields and computes the aggregated value by specified function.
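
A sketch of an Aggregate node that computes sum(amount) grouped by region; node, input, and column names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// sum(amount) grouped by region.
	agg := types.Aggregate{
		Name:   aws.String("AggregateNode"),
		Inputs: []string{"source-node-id"},
		Groups: [][]string{{"region"}},
		Aggs: []types.AggregateOperation{
			{Column: []string{"amount"}, AggFunc: types.AggFunctionSum},
		},
	}
	fmt.Println(len(agg.Aggs))
}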

type AggregateOperation added in v1.25.0

type AggregateOperation struct {

	// Specifies the aggregation function to apply. Possible aggregation functions
	// include: avg countDistinct, count, first, last, kurtosis, max, min, skewness,
	// stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop
	//
	// This member is required.
	AggFunc AggFunction

	// Specifies the column on the data set on which the aggregation function will be
	// applied.
	//
	// This member is required.
	Column []string
	// contains filtered or unexported fields
}

Specifies the set of parameters needed to perform aggregation in the aggregate transform.

type AlreadyExistsException

type AlreadyExistsException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

A resource to be created or added already exists.

func (*AlreadyExistsException) Error

func (e *AlreadyExistsException) Error() string

func (*AlreadyExistsException) ErrorCode

func (e *AlreadyExistsException) ErrorCode() string

func (*AlreadyExistsException) ErrorFault

func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault

func (*AlreadyExistsException) ErrorMessage

func (e *AlreadyExistsException) ErrorMessage() string

type AmazonRedshiftAdvancedOption added in v1.47.0

type AmazonRedshiftAdvancedOption struct {

	// The key for the additional connection option.
	Key *string

	// The value for the additional connection option.
	Value *string
	// contains filtered or unexported fields
}

Specifies an optional value when connecting to the Redshift cluster.

type AmazonRedshiftNodeData added in v1.47.0

type AmazonRedshiftNodeData struct {

	// The access type for the Redshift connection. Can be a direct connection or
	// a catalog connection.
	AccessType *string

	// Specifies how writing to a Redshift cluster will occur.
	Action *string

	// Optional values when connecting to the Redshift cluster.
	AdvancedOptions []AmazonRedshiftAdvancedOption

	// The name of the Glue Data Catalog database when working with a data catalog.
	CatalogDatabase *Option

	// The Redshift schema name when working with a data catalog.
	CatalogRedshiftSchema *string

	// The database table to read from.
	CatalogRedshiftTable *string

	// The Glue Data Catalog table name when working with a data catalog.
	CatalogTable *Option

	// The Glue connection to the Redshift cluster.
	Connection *Option

	// Specifies the name of the connection that is associated with the catalog table
	// used.
	CrawlerConnection *string

	// Optional. The role name to use when connecting to S3. The IAM role will
	// default to the role on the job when left blank.
	IamRole *Option

	// The action used to determine how a MERGE in a Redshift sink will be handled.
	MergeAction *string

	// The SQL used in a custom merge to deal with matching records.
	MergeClause *string

	// The action used to determine how a MERGE in a Redshift sink will be handled
	// when an existing record matches a new record.
	MergeWhenMatched *string

	// The action used to determine how a MERGE in a Redshift sink will be handled
	// when an existing record doesn't match a new record.
	MergeWhenNotMatched *string

	// The SQL used after a MERGE or APPEND with upsert is run.
	PostAction *string

	// The SQL used before a MERGE or APPEND with upsert is run.
	PreAction *string

	// The SQL used to fetch the data from a Redshift source when the SourceType is
	// 'query'.
	SampleQuery *string

	// The Redshift schema name when working with a direct connection.
	Schema *Option

	// The list of column names used to determine a matching record when doing a MERGE
	// or APPEND with upsert.
	SelectedColumns []Option

	// The source type to specify whether a specific table is the source or a custom
	// query.
	SourceType *string

	// The name of the temporary staging table that is used when doing a MERGE or
	// APPEND with upsert.
	StagingTable *string

	// The Redshift table name when working with a direct connection.
	Table *Option

	// Specifies the prefix to a table.
	TablePrefix *string

	// The array of schema output for a given node.
	TableSchema []Option

	// The Amazon S3 path where temporary data can be staged when copying out of the
	// database.
	TempDir *string

	// The action used on Redshift sinks when doing an APPEND.
	Upsert bool
	// contains filtered or unexported fields
}

Specifies an Amazon Redshift node.

type AmazonRedshiftSource added in v1.47.0

type AmazonRedshiftSource struct {

	// Specifies the data of the Amazon Redshift source node.
	Data *AmazonRedshiftNodeData

	// The name of the Amazon Redshift source.
	Name *string
	// contains filtered or unexported fields
}

Specifies an Amazon Redshift source.

type AmazonRedshiftTarget added in v1.47.0

type AmazonRedshiftTarget struct {

	// Specifies the data of the Amazon Redshift target node.
	Data *AmazonRedshiftNodeData

	// The nodes that are inputs to the data target.
	Inputs []string

	// The name of the Amazon Redshift target.
	Name *string
	// contains filtered or unexported fields
}

Specifies an Amazon Redshift target.
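
A sketch of wiring an AmazonRedshiftTarget with a minimal AmazonRedshiftNodeData. The string values for AccessType, Action, and SourceType are illustrative (the accepted values are not listed in this excerpt), and Option's Value field is assumed from this package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// A direct-connection target that appends rows to a table.
	target := types.AmazonRedshiftTarget{
		Name:   aws.String("WriteToRedshift"),
		Inputs: []string{"transform-node-id"},
		Data: &types.AmazonRedshiftNodeData{
			AccessType: aws.String("direct"), // illustrative value
			Action:     aws.String("append"), // illustrative value
			SourceType: aws.String("table"),  // illustrative value
			Table:      &types.Option{Value: aws.String("public.orders")},
		},
	}
	fmt.Println(aws.ToString(target.Name))
}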

type ApplyMapping added in v1.25.0

type ApplyMapping struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// Specifies the mapping of data property keys in the data source to data property
	// keys in the data target.
	//
	// This member is required.
	Mapping []Mapping

	// The name of the transform node.
	//
	// This member is required.
	Name *string
	// contains filtered or unexported fields
}

Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.
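
A sketch of an ApplyMapping node that renames a column and changes its type; the Mapping fields used here (FromPath, FromType, ToKey, ToType) are assumed from this package's Mapping type, which is not shown in this excerpt:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Rename "id" to "user_id" and widen it from int to long;
	// node and column names are illustrative.
	node := types.ApplyMapping{
		Name:   aws.String("MapColumns"),
		Inputs: []string{"source-node-id"},
		Mapping: []types.Mapping{
			{
				FromPath: []string{"id"},
				FromType: aws.String("int"),
				ToKey:    aws.String("user_id"),
				ToType:   aws.String("long"),
			},
		},
	}
	fmt.Println(aws.ToString(node.Name))
}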

type AthenaConnectorSource added in v1.25.0

type AthenaConnectorSource struct {

	// The name of the connection that is associated with the connector.
	//
	// This member is required.
	ConnectionName *string

	// The type of connection, such as marketplace.athena or custom.athena,
	// designating a connection to an Amazon Athena data store.
	//
	// This member is required.
	ConnectionType *string

	// The name of a connector that assists with accessing the data store in Glue
	// Studio.
	//
	// This member is required.
	ConnectorName *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the Cloudwatch log group to read from. For example,
	// /aws-glue/jobs/output .
	//
	// This member is required.
	SchemaName *string

	// The name of the table in the data source.
	ConnectionTable *string

	// Specifies the data schema for the custom Athena source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a connector to an Amazon Athena data source.

type AuditContext added in v1.18.0

type AuditContext struct {

	// A string containing the additional audit context information.
	AdditionalAuditContext *string

	// Whether all columns are requested for audit.
	AllColumnsRequested *bool

	// The requested columns for audit.
	RequestedColumns []string
	// contains filtered or unexported fields
}

A structure containing the Lake Formation audit context.

type BackfillError added in v0.31.0

type BackfillError struct {

	// The error code for an error that occurred when registering partition indexes
	// for an existing table.
	Code BackfillErrorCode

	// A list of a limited number of partitions in the response.
	Partitions []PartitionValueList
	// contains filtered or unexported fields
}

A list of errors that can occur when registering partition indexes for an existing table. These errors give the details about why an index registration failed and provide a limited number of partitions in the response, so that you can fix the partitions at fault and try registering the index again. The most common set of errors that can occur are categorized as follows:

  • EncryptedPartitionError: The partitions are encrypted.
  • InvalidPartitionTypeDataError: The partition value doesn't match the data type for that partition column.
  • MissingPartitionValueError: The partition value is missing.
  • UnsupportedPartitionCharacterError: Characters inside the partition value are not supported. For example: U+0000, U+0001, U+0002.
  • InternalError: Any error which does not belong to other error codes.
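
A sketch of mapping a BackfillError to a short diagnostic string using the BackfillErrorCode values defined next; the helper name is illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// describeBackfillError maps a BackfillError's code to a short message.
func describeBackfillError(e types.BackfillError) string {
	switch e.Code {
	case types.BackfillErrorCodeEncryptedPartitionError:
		return "partitions are encrypted"
	case types.BackfillErrorCodeInvalidPartitionTypeDataError:
		return "partition value does not match the column's data type"
	case types.BackfillErrorCodeMissingPartitionValueError:
		return "partition value is missing"
	case types.BackfillErrorCodeUnsupportedPartitionCharacterError:
		return "partition value contains unsupported characters"
	default:
		return "internal or unknown error"
	}
}

func main() {
	err := types.BackfillError{Code: types.BackfillErrorCodeInternalError}
	fmt.Println(describeBackfillError(err))
}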

type BackfillErrorCode added in v0.31.0

type BackfillErrorCode string
const (
	BackfillErrorCodeEncryptedPartitionError            BackfillErrorCode = "ENCRYPTED_PARTITION_ERROR"
	BackfillErrorCodeInternalError                      BackfillErrorCode = "INTERNAL_ERROR"
	BackfillErrorCodeInvalidPartitionTypeDataError      BackfillErrorCode = "INVALID_PARTITION_TYPE_DATA_ERROR"
	BackfillErrorCodeMissingPartitionValueError         BackfillErrorCode = "MISSING_PARTITION_VALUE_ERROR"
	BackfillErrorCodeUnsupportedPartitionCharacterError BackfillErrorCode = "UNSUPPORTED_PARTITION_CHARACTER_ERROR"
)

Enum values for BackfillErrorCode

func (BackfillErrorCode) Values added in v0.31.0

func (BackfillErrorCode) Values() []BackfillErrorCode

Values returns all known values for BackfillErrorCode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type BasicCatalogTarget added in v1.25.0

type BasicCatalogTarget struct {

	// The database that contains the table you want to use as the target. This
	// database must already exist in the Data Catalog.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of your data target.
	//
	// This member is required.
	Name *string

	// The table that defines the schema of your output data. This table must already
	// exist in the Data Catalog.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a target that uses a Glue Data Catalog table.
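
A sketch of a BasicCatalogTarget; the database and table names are placeholders and must already exist in the Data Catalog:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Write the output of an upstream node to an existing catalog table.
	target := types.BasicCatalogTarget{
		Name:     aws.String("WriteToCatalog"),
		Inputs:   []string{"transform-node-id"},
		Database: aws.String("sales_db"), // must already exist
		Table:    aws.String("orders"),   // must already exist
	}
	fmt.Println(aws.ToString(target.Table))
}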

type BatchGetTableOptimizerEntry added in v1.68.0

type BatchGetTableOptimizerEntry struct {

	// The Catalog ID of the table.
	CatalogId *string

	// The name of the database in the catalog in which the table resides.
	DatabaseName *string

	// The name of the table.
	TableName *string

	// The type of table optimizer.
	Type TableOptimizerType
	// contains filtered or unexported fields
}

Represents a table optimizer to retrieve in the BatchGetTableOptimizer operation.

type BatchGetTableOptimizerError added in v1.68.0

type BatchGetTableOptimizerError struct {

	// The Catalog ID of the table.
	CatalogId *string

	// The name of the database in the catalog in which the table resides.
	DatabaseName *string

	// An ErrorDetail object containing code and message details about the error.
	Error *ErrorDetail

	// The name of the table.
	TableName *string

	// The type of table optimizer.
	Type TableOptimizerType
	// contains filtered or unexported fields
}

Contains details on one of the errors in the error list returned by the BatchGetTableOptimizer operation.

type BatchStopJobRunError

type BatchStopJobRunError struct {

	// Specifies details about the error that was encountered.
	ErrorDetail *ErrorDetail

	// The name of the job definition that is used in the job run in question.
	JobName *string

	// The JobRunId of the job run in question.
	JobRunId *string
	// contains filtered or unexported fields
}

Records an error that occurred when attempting to stop a specified job run.

type BatchStopJobRunSuccessfulSubmission

type BatchStopJobRunSuccessfulSubmission struct {

	// The name of the job definition used in the job run that was stopped.
	JobName *string

	// The JobRunId of the job run that was stopped.
	JobRunId *string
	// contains filtered or unexported fields
}

Records a successful request to stop a specified JobRun .

type BatchTableOptimizer added in v1.68.0

type BatchTableOptimizer struct {

	// The Catalog ID of the table.
	CatalogId *string

	// The name of the database in the catalog in which the table resides.
	DatabaseName *string

	// The name of the table.
	TableName *string

	// A TableOptimizer object that contains details on the configuration and last run
	// of a table optimizer.
	TableOptimizer *TableOptimizer
	// contains filtered or unexported fields
}

Contains details for one of the table optimizers returned by the BatchGetTableOptimizer operation.

type BatchUpdatePartitionFailureEntry added in v0.29.0

type BatchUpdatePartitionFailureEntry struct {

	// The details about the batch update partition error.
	ErrorDetail *ErrorDetail

	// A list of values defining the partitions.
	PartitionValueList []string
	// contains filtered or unexported fields
}

Contains information about a batch update partition error.

type BatchUpdatePartitionRequestEntry added in v0.29.0

type BatchUpdatePartitionRequestEntry struct {

	// The structure used to update a partition.
	//
	// This member is required.
	PartitionInput *PartitionInput

	// A list of values defining the partitions.
	//
	// This member is required.
	PartitionValueList []string
	// contains filtered or unexported fields
}

A structure that contains the values and structure used to update a partition.

type BinaryColumnStatisticsData

type BinaryColumnStatisticsData struct {

	// The average bit sequence length in the column.
	//
	// This member is required.
	AverageLength float64

	// The size of the longest bit sequence in the column.
	//
	// This member is required.
	MaximumLength int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64
	// contains filtered or unexported fields
}

Defines column statistics supported for bit sequence data values.

type Blueprint added in v1.11.0

type Blueprint struct {

	// Specifies the path in Amazon S3 where the blueprint is published.
	BlueprintLocation *string

	// Specifies a path in Amazon S3 where the blueprint is copied when you call
	// CreateBlueprint/UpdateBlueprint to register the blueprint in Glue.
	BlueprintServiceLocation *string

	// The date and time the blueprint was registered.
	CreatedOn *time.Time

	// The description of the blueprint.
	Description *string

	// An error message.
	ErrorMessage *string

	// When there are multiple versions of a blueprint and the latest version has some
	// errors, this attribute indicates the last successful blueprint definition that
	// is available with the service.
	LastActiveDefinition *LastActiveDefinition

	// The date and time the blueprint was last modified.
	LastModifiedOn *time.Time

	// The name of the blueprint.
	Name *string

	// A JSON string that indicates the list of parameter specifications for the
	// blueprint.
	ParameterSpec *string

	// The status of the blueprint registration.
	//   - Creating — The blueprint registration is in progress.
	//   - Active — The blueprint has been successfully registered.
	//   - Updating — An update to the blueprint registration is in progress.
	//   - Failed — The blueprint registration failed.
	Status BlueprintStatus
	// contains filtered or unexported fields
}

The details of a blueprint.

type BlueprintDetails added in v1.11.0

type BlueprintDetails struct {

	// The name of the blueprint.
	BlueprintName *string

	// The run ID for this blueprint.
	RunId *string
	// contains filtered or unexported fields
}

The details of a blueprint.

type BlueprintRun added in v1.11.0

type BlueprintRun struct {

	// The name of the blueprint.
	BlueprintName *string

	// The date and time that the blueprint run completed.
	CompletedOn *time.Time

	// Indicates any errors that are seen while running the blueprint.
	ErrorMessage *string

	// The blueprint parameters as a string. You will have to provide a value for each
	// key that is required from the parameter spec that is defined in the
	// Blueprint$ParameterSpec .
	Parameters *string

	// The role ARN. This role will be assumed by the Glue service and will be used to
	// create the workflow and other entities of a workflow.
	RoleArn *string

	// If there are any errors while creating the entities of a workflow, we try to
	// roll back the created entities until that point and delete them. This attribute
	// indicates the errors seen while trying to delete the entities that are created.
	RollbackErrorMessage *string

	// The run ID for this blueprint run.
	RunId *string

	// The date and time that the blueprint run started.
	StartedOn *time.Time

	// The state of the blueprint run. Possible values are:
	//   - Running — The blueprint run is in progress.
	//   - Succeeded — The blueprint run completed successfully.
	//   - Failed — The blueprint run failed and rollback is complete.
	//   - Rolling Back — The blueprint run failed and rollback is in progress.
	State BlueprintRunState

	// The name of a workflow that is created as a result of a successful blueprint
	// run. If a blueprint run has an error, there will not be a workflow created.
	WorkflowName *string
	// contains filtered or unexported fields
}

The details of a blueprint run.

type BlueprintRunState added in v1.11.0

type BlueprintRunState string
const (
	BlueprintRunStateRunning     BlueprintRunState = "RUNNING"
	BlueprintRunStateSucceeded   BlueprintRunState = "SUCCEEDED"
	BlueprintRunStateFailed      BlueprintRunState = "FAILED"
	BlueprintRunStateRollingBack BlueprintRunState = "ROLLING_BACK"
)

Enum values for BlueprintRunState

func (BlueprintRunState) Values added in v1.11.0

func (BlueprintRunState) Values() []BlueprintRunState

Values returns all known values for BlueprintRunState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type BlueprintStatus added in v1.11.0

type BlueprintStatus string
const (
	BlueprintStatusCreating BlueprintStatus = "CREATING"
	BlueprintStatusActive   BlueprintStatus = "ACTIVE"
	BlueprintStatusUpdating BlueprintStatus = "UPDATING"
	BlueprintStatusFailed   BlueprintStatus = "FAILED"
)

Enum values for BlueprintStatus

func (BlueprintStatus) Values added in v1.11.0

func (BlueprintStatus) Values() []BlueprintStatus

Values returns all known values for BlueprintStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type BooleanColumnStatisticsData

type BooleanColumnStatisticsData struct {

	// The number of false values in the column.
	//
	// This member is required.
	NumberOfFalses int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64

	// The number of true values in the column.
	//
	// This member is required.
	NumberOfTrues int64
	// contains filtered or unexported fields
}

Defines column statistics supported for Boolean data columns.

type CatalogDeltaSource added in v1.43.0

type CatalogDeltaSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the Delta Lake data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options.
	AdditionalDeltaOptions map[string]string

	// Specifies the data schema for the Delta Lake source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Delta Lake data source that is registered in the Glue Data Catalog.

type CatalogEncryptionMode

type CatalogEncryptionMode string
const (
	CatalogEncryptionModeDisabled              CatalogEncryptionMode = "DISABLED"
	CatalogEncryptionModeSsekms                CatalogEncryptionMode = "SSE-KMS"
	CatalogEncryptionModeSsekmswithservicerole CatalogEncryptionMode = "SSE-KMS-WITH-SERVICE-ROLE"
)

Enum values for CatalogEncryptionMode

func (CatalogEncryptionMode) Values added in v0.29.0

func (CatalogEncryptionMode) Values() []CatalogEncryptionMode

Values returns all known values for CatalogEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CatalogEntry

type CatalogEntry struct {

	// The database in which the table metadata resides.
	//
	// This member is required.
	DatabaseName *string

	// The name of the table in question.
	//
	// This member is required.
	TableName *string
	// contains filtered or unexported fields
}

Specifies a table definition in the Glue Data Catalog.

type CatalogHudiSource added in v1.40.0

type CatalogHudiSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the Hudi data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options.
	AdditionalHudiOptions map[string]string

	// Specifies the data schema for the Hudi source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Hudi data source that is registered in the Glue Data Catalog.

type CatalogImportStatus

type CatalogImportStatus struct {

	// True if the migration has completed, or False otherwise.
	ImportCompleted bool

	// The time that the migration was started.
	ImportTime *time.Time

	// The name of the person who initiated the migration.
	ImportedBy *string
	// contains filtered or unexported fields
}

A structure containing migration status information.

type CatalogKafkaSource added in v1.25.0

type CatalogKafkaSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string

	// Specifies options related to data preview for viewing a sample of your data.
	DataPreviewOptions *StreamingDataPreviewOptions

	// Whether to automatically determine the schema from the incoming data.
	DetectSchema *bool

	// Specifies the streaming options.
	StreamingOptions *KafkaStreamingSourceOptions

	// The amount of time to spend processing each micro batch.
	WindowSize *int32
	// contains filtered or unexported fields
}

Specifies an Apache Kafka data store in the Data Catalog.

type CatalogKinesisSource added in v1.25.0

type CatalogKinesisSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string

	// Additional options for data preview.
	DataPreviewOptions *StreamingDataPreviewOptions

	// Whether to automatically determine the schema from the incoming data.
	DetectSchema *bool

	// Additional options for the Kinesis streaming data source.
	StreamingOptions *KinesisStreamingSourceOptions

	// The amount of time to spend processing each micro batch.
	WindowSize *int32
	// contains filtered or unexported fields
}

Specifies a Kinesis data source in the Glue Data Catalog.

type CatalogSchemaChangePolicy added in v1.25.0

type CatalogSchemaChangePolicy struct {

	// Whether to use the specified update behavior when the crawler finds a changed
	// schema.
	EnableUpdateCatalog *bool

	// The update behavior when the crawler finds a changed schema.
	UpdateBehavior UpdateCatalogBehavior
	// contains filtered or unexported fields
}

A policy that specifies update behavior for the crawler.

type CatalogSource added in v1.25.0

type CatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a data store in the Glue Data Catalog.

type CatalogTarget

type CatalogTarget struct {

	// The name of the database to be synchronized.
	//
	// This member is required.
	DatabaseName *string

	// A list of the tables to be synchronized.
	//
	// This member is required.
	Tables []string

	// The name of the connection for an Amazon S3-backed Data Catalog table to be a
	// target of the crawl when using a Catalog connection type paired with a NETWORK
	// Connection type.
	ConnectionName *string

	// A valid Amazon dead-letter SQS ARN. For example,
	// arn:aws:sqs:region:account:deadLetterQueue .
	DlqEventQueueArn *string

	// A valid Amazon SQS ARN. For example, arn:aws:sqs:region:account:sqs .
	EventQueueArn *string
	// contains filtered or unexported fields
}

Specifies a Glue Data Catalog target.

type Classifier

type Classifier struct {

	// A classifier for comma-separated values (CSV).
	CsvClassifier *CsvClassifier

	// A classifier that uses grok .
	GrokClassifier *GrokClassifier

	// A classifier for JSON content.
	JsonClassifier *JsonClassifier

	// A classifier for XML content.
	XMLClassifier *XMLClassifier
	// contains filtered or unexported fields
}

Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle. If it is, the classifier creates a schema in the form of a StructType object that matches that data format. You can use the standard classifiers that Glue provides, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a grok classifier, an XML classifier, a JSON classifier, or a custom CSV classifier, as specified in one of the fields in the Classifier object.
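
A sketch of inspecting which of the mutually exclusive classifier fields is populated; the helper name is illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// classifierKind reports which classifier field is set on c.
func classifierKind(c types.Classifier) string {
	switch {
	case c.CsvClassifier != nil:
		return "csv"
	case c.GrokClassifier != nil:
		return "grok"
	case c.JsonClassifier != nil:
		return "json"
	case c.XMLClassifier != nil:
		return "xml"
	}
	return "unknown"
}

func main() {
	fmt.Println(classifierKind(types.Classifier{JsonClassifier: &types.JsonClassifier{}}))
}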

type CloudWatchEncryption

type CloudWatchEncryption struct {

	// The encryption mode to use for CloudWatch data.
	CloudWatchEncryptionMode CloudWatchEncryptionMode

	// The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
	KmsKeyArn *string
	// contains filtered or unexported fields
}

Specifies how Amazon CloudWatch data should be encrypted.

type CloudWatchEncryptionMode

type CloudWatchEncryptionMode string
const (
	CloudWatchEncryptionModeDisabled CloudWatchEncryptionMode = "DISABLED"
	CloudWatchEncryptionModeSsekms   CloudWatchEncryptionMode = "SSE-KMS"
)

Enum values for CloudWatchEncryptionMode

func (CloudWatchEncryptionMode) Values added in v0.29.0

func (CloudWatchEncryptionMode) Values() []CloudWatchEncryptionMode

Values returns all known values for CloudWatchEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CodeGenConfigurationNode added in v1.25.0

type CodeGenConfigurationNode struct {

	// Specifies a transform that groups rows by chosen fields and computes the
	// aggregated value by specified function.
	Aggregate *Aggregate

	// Specifies a source that reads from a data source in Amazon Redshift.
	AmazonRedshiftSource *AmazonRedshiftSource

	// Specifies a target that writes to a data target in Amazon Redshift.
	AmazonRedshiftTarget *AmazonRedshiftTarget

	// Specifies a transform that maps data property keys in the data source to data
	// property keys in the data target. You can rename keys, modify the data types for
	// keys, and choose which keys to drop from the dataset.
	ApplyMapping *ApplyMapping

	// Specifies a connector to an Amazon Athena data source.
	AthenaConnectorSource *AthenaConnectorSource

	// Specifies a Delta Lake data source that is registered in the Glue Data Catalog.
	CatalogDeltaSource *CatalogDeltaSource

	// Specifies a Hudi data source that is registered in the Glue Data Catalog.
	CatalogHudiSource *CatalogHudiSource

	// Specifies an Apache Kafka data store in the Data Catalog.
	CatalogKafkaSource *CatalogKafkaSource

	// Specifies a Kinesis data source in the Glue Data Catalog.
	CatalogKinesisSource *CatalogKinesisSource

	// Specifies a data store in the Glue Data Catalog.
	CatalogSource *CatalogSource

	// Specifies a target that uses a Glue Data Catalog table.
	CatalogTarget *BasicCatalogTarget

	// Specifies a source generated with standard connection options.
	ConnectorDataSource *ConnectorDataSource

	// Specifies a target generated with standard connection options.
	ConnectorDataTarget *ConnectorDataTarget

	// Specifies a transform that uses custom code you provide to perform the data
	// transformation. The output is a collection of DynamicFrames.
	CustomCode *CustomCode

	// Specifies the direct JDBC source connection.
	DirectJDBCSource *DirectJDBCSource

	// Specifies an Apache Kafka data store.
	DirectKafkaSource *DirectKafkaSource

	// Specifies a direct Amazon Kinesis data source.
	DirectKinesisSource *DirectKinesisSource

	// Specifies a transform that removes rows of repeating data from a data set.
	DropDuplicates *DropDuplicates

	// Specifies a transform that chooses the data property keys that you want to drop.
	DropFields *DropFields

	// Specifies a transform that removes columns from the dataset if all values in
	// the column are 'null'. By default, Glue Studio will recognize null objects, but
	// some values such as empty strings, strings that are "null", -1 integers or other
	// placeholders such as zeros, are not automatically recognized as nulls.
	DropNullFields *DropNullFields

	// Specifies a custom visual transform created by a user.
	DynamicTransform *DynamicTransform

	// Specifies a DynamoDB Catalog data store in the Glue Data Catalog.
	DynamoDBCatalogSource *DynamoDBCatalogSource

	// Specifies your data quality evaluation criteria.
	EvaluateDataQuality *EvaluateDataQuality

	// Specifies your data quality evaluation criteria. Allows multiple input data and
	// returns a collection of Dynamic Frames.
	EvaluateDataQualityMultiFrame *EvaluateDataQualityMultiFrame

	// Specifies a transform that locates records in the dataset that have missing
	// values and adds a new field with a value determined by imputation. The input
	// data set is used to train the machine learning model that determines what the
	// missing value should be.
	FillMissingValues *FillMissingValues

	// Specifies a transform that splits a dataset into two, based on a filter
	// condition.
	Filter *Filter

	// Specifies a data source in a governed Data Catalog.
	GovernedCatalogSource *GovernedCatalogSource

	// Specifies a data target that writes to a governed catalog.
	GovernedCatalogTarget *GovernedCatalogTarget

	// Specifies a connector to a JDBC data source.
	JDBCConnectorSource *JDBCConnectorSource

	// Specifies a data target that writes to a JDBC data store using a JDBC
	// connector.
	JDBCConnectorTarget *JDBCConnectorTarget

	// Specifies a transform that joins two datasets into one dataset using a
	// comparison phrase on the specified data property keys. You can use inner, outer,
	// left, right, left semi, and left anti joins.
	Join *Join

	// Specifies a transform that merges a DynamicFrame with a staging DynamicFrame
	// based on the specified primary keys to identify records. Duplicate records
	// (records with the same primary keys) are not de-duplicated.
	Merge *Merge

	// Specifies a Microsoft SQL Server data source in the Glue Data Catalog.
	MicrosoftSQLServerCatalogSource *MicrosoftSQLServerCatalogSource

	// Specifies a target that uses Microsoft SQL.
	MicrosoftSQLServerCatalogTarget *MicrosoftSQLServerCatalogTarget

	// Specifies a MySQL data source in the Glue Data Catalog.
	MySQLCatalogSource *MySQLCatalogSource

	// Specifies a target that uses MySQL.
	MySQLCatalogTarget *MySQLCatalogTarget

	// Specifies an Oracle data source in the Glue Data Catalog.
	OracleSQLCatalogSource *OracleSQLCatalogSource

	// Specifies a target that uses Oracle SQL.
	OracleSQLCatalogTarget *OracleSQLCatalogTarget

	// Specifies a transform that identifies, removes or masks PII data.
	PIIDetection *PIIDetection

	// Specifies a PostgreSQL data source in the Glue Data Catalog.
	PostgreSQLCatalogSource *PostgreSQLCatalogSource

	// Specifies a target that uses PostgreSQL.
	PostgreSQLCatalogTarget *PostgreSQLCatalogTarget

	// Specifies a Glue DataBrew recipe node.
	Recipe *Recipe

	// Specifies an Amazon Redshift data store.
	RedshiftSource *RedshiftSource

	// Specifies a target that uses Amazon Redshift.
	RedshiftTarget *RedshiftTarget

	// Specifies a relational catalog data store in the Glue Data Catalog.
	RelationalCatalogSource *RelationalCatalogSource

	// Specifies a transform that renames a single data property key.
	RenameField *RenameField

	// Specifies a Delta Lake data source that is registered in the Glue Data Catalog.
	// The data source must be stored in Amazon S3.
	S3CatalogDeltaSource *S3CatalogDeltaSource

	// Specifies a Hudi data source that is registered in the Glue Data Catalog. The
	// data source must be stored in Amazon S3.
	S3CatalogHudiSource *S3CatalogHudiSource

	// Specifies an Amazon S3 data store in the Glue Data Catalog.
	S3CatalogSource *S3CatalogSource

	// Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.
	S3CatalogTarget *S3CatalogTarget

	// Specifies a comma-separated value (CSV) data store stored in Amazon S3.
	S3CsvSource *S3CsvSource

	// Specifies a target that writes to a Delta Lake data source in the Glue Data
	// Catalog.
	S3DeltaCatalogTarget *S3DeltaCatalogTarget

	// Specifies a target that writes to a Delta Lake data source in Amazon S3.
	S3DeltaDirectTarget *S3DeltaDirectTarget

	// Specifies a Delta Lake data source stored in Amazon S3.
	S3DeltaSource *S3DeltaSource

	// Specifies a data target that writes to Amazon S3.
	S3DirectTarget *S3DirectTarget

	// Specifies a data target that writes to Amazon S3 in Apache Parquet columnar
	// storage.
	S3GlueParquetTarget *S3GlueParquetTarget

	// Specifies a target that writes to a Hudi data source in the Glue Data Catalog.
	S3HudiCatalogTarget *S3HudiCatalogTarget

	// Specifies a target that writes to a Hudi data source in Amazon S3.
	S3HudiDirectTarget *S3HudiDirectTarget

	// Specifies a Hudi data source stored in Amazon S3.
	S3HudiSource *S3HudiSource

	// Specifies a JSON data store stored in Amazon S3.
	S3JsonSource *S3JsonSource

	// Specifies an Apache Parquet data store stored in Amazon S3.
	S3ParquetSource *S3ParquetSource

	// Specifies a transform that chooses the data property keys that you want to keep.
	SelectFields *SelectFields

	// Specifies a transform that chooses one DynamicFrame from a collection of
	// DynamicFrames . The output is the selected DynamicFrame .
	SelectFromCollection *SelectFromCollection

	// Specifies a Snowflake data source.
	SnowflakeSource *SnowflakeSource

	// Specifies a target that writes to a Snowflake data source.
	SnowflakeTarget *SnowflakeTarget

	// Specifies a connector to an Apache Spark data source.
	SparkConnectorSource *SparkConnectorSource

	// Specifies a target that uses an Apache Spark connector.
	SparkConnectorTarget *SparkConnectorTarget

	// Specifies a transform where you enter a SQL query using Spark SQL syntax to
	// transform the data. The output is a single DynamicFrame .
	SparkSQL *SparkSQL

	// Specifies a transform that writes samples of the data to an Amazon S3 bucket.
	Spigot *Spigot

	// Specifies a transform that splits data property keys into two DynamicFrames .
	// The output is a collection of DynamicFrames : one with selected data property
	// keys, and one with the remaining data property keys.
	SplitFields *SplitFields

	// Specifies a transform that combines the rows from two or more datasets into a
	// single result.
	Union *Union
	// contains filtered or unexported fields
}

CodeGenConfigurationNode enumerates all valid Node types. One and only one of its member variables can be populated.
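
A sketch of populating exactly one member, here a CatalogSource; the names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Exactly one member of the node may be set.
	node := types.CodeGenConfigurationNode{
		CatalogSource: &types.CatalogSource{
			Name:     aws.String("ReadOrders"),
			Database: aws.String("sales_db"),
			Table:    aws.String("orders"),
		},
	}
	fmt.Println(node.CatalogSource != nil)
}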

type CodeGenEdge

type CodeGenEdge struct {

	// The ID of the node at which the edge starts.
	//
	// This member is required.
	Source *string

	// The ID of the node at which the edge ends.
	//
	// This member is required.
	Target *string

	// The target of the edge.
	TargetParameter *string
	// contains filtered or unexported fields
}

Represents a directional edge in a directed acyclic graph (DAG).

type CodeGenNode

type CodeGenNode struct {

	// Properties of the node, in the form of name-value pairs.
	//
	// This member is required.
	Args []CodeGenNodeArg

	// A node identifier that is unique within the node's graph.
	//
	// This member is required.
	Id *string

	// The type of node that this is.
	//
	// This member is required.
	NodeType *string

	// The line number of the node.
	LineNumber int32
	// contains filtered or unexported fields
}

Represents a node in a directed acyclic graph (DAG).

type CodeGenNodeArg

type CodeGenNodeArg struct {

	// The name of the argument or property.
	//
	// This member is required.
	Name *string

	// The value of the argument or property.
	//
	// This member is required.
	Value *string

	// True if the value is used as a parameter.
	Param bool
	// contains filtered or unexported fields
}

An argument or property of a node.
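
A sketch of a two-node DAG built from CodeGenNode and CodeGenEdge values; node types and arguments are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// A two-node DAG: n1 -> n2.
	nodes := []types.CodeGenNode{
		{
			Id:       aws.String("n1"),
			NodeType: aws.String("DataSource"),
			Args: []types.CodeGenNodeArg{
				{Name: aws.String("database"), Value: aws.String("sales_db")},
			},
		},
		{
			Id:       aws.String("n2"),
			NodeType: aws.String("DataSink"),
			Args:     []types.CodeGenNodeArg{},
		},
	}
	edges := []types.CodeGenEdge{
		{Source: aws.String("n1"), Target: aws.String("n2")},
	}
	fmt.Println(len(nodes), len(edges))
}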

type Column

type Column struct {

	// The name of the Column .
	//
	// This member is required.
	Name *string

	// A free-form text comment.
	Comment *string

	// These key-value pairs define properties associated with the column.
	Parameters map[string]string

	// The data type of the Column .
	Type *string
	// contains filtered or unexported fields
}

A column in a Table .

type ColumnError

type ColumnError struct {

	// The name of the column that failed.
	ColumnName *string

	// An error message with the reason for the failure of an operation.
	Error *ErrorDetail
	// contains filtered or unexported fields
}

Encapsulates a column name that failed and the reason for failure.

type ColumnImportance added in v0.31.0

type ColumnImportance struct {

	// The name of a column.
	ColumnName *string

	// The column importance score for the column, as a decimal.
	Importance *float64
	// contains filtered or unexported fields
}

A structure containing the column name and column importance score for a column. Column importance helps you understand how columns contribute to your model, by identifying which columns in your records are more important than others.

type ColumnRowFilter added in v1.18.0

type ColumnRowFilter struct {

	// A string containing the name of the column.
	ColumnName *string

	// A string containing the row-level filter expression.
	RowFilterExpression *string
	// contains filtered or unexported fields
}

A filter that uses both column-level and row-level filtering.

type ColumnStatistics

type ColumnStatistics struct {

	// The timestamp of when column statistics were generated.
	//
	// This member is required.
	AnalyzedTime *time.Time

	// Name of column which statistics belong to.
	//
	// This member is required.
	ColumnName *string

	// The data type of the column.
	//
	// This member is required.
	ColumnType *string

	// A ColumnStatisticData object that contains the statistics data values.
	//
	// This member is required.
	StatisticsData *ColumnStatisticsData
	// contains filtered or unexported fields
}

Represents the generated column-level statistics for a table or partition.

type ColumnStatisticsData

type ColumnStatisticsData struct {

	// The type of column statistics data.
	//
	// This member is required.
	Type ColumnStatisticsType

	// Binary column statistics data.
	BinaryColumnStatisticsData *BinaryColumnStatisticsData

	// Boolean column statistics data.
	BooleanColumnStatisticsData *BooleanColumnStatisticsData

	// Date column statistics data.
	DateColumnStatisticsData *DateColumnStatisticsData

	// Decimal column statistics data. UnscaledValues within are Base64-encoded binary
	// objects storing big-endian, two's complement representations of the decimal's
	// unscaled value.
	DecimalColumnStatisticsData *DecimalColumnStatisticsData

	// Double column statistics data.
	DoubleColumnStatisticsData *DoubleColumnStatisticsData

	// Long column statistics data.
	LongColumnStatisticsData *LongColumnStatisticsData

	// String column statistics data.
	StringColumnStatisticsData *StringColumnStatisticsData
	// contains filtered or unexported fields
}

Contains the individual types of column statistics data. Only one data object should be set and indicated by the Type attribute.
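
A sketch of Boolean column statistics where Type indicates the single populated data object; the column name and counts are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Type names the one data object that is set below.
	stats := types.ColumnStatistics{
		ColumnName:   aws.String("is_active"),
		ColumnType:   aws.String("boolean"),
		AnalyzedTime: aws.Time(time.Now()),
		StatisticsData: &types.ColumnStatisticsData{
			Type: types.ColumnStatisticsTypeBoolean,
			BooleanColumnStatisticsData: &types.BooleanColumnStatisticsData{
				NumberOfTrues:  120,
				NumberOfFalses: 30,
				NumberOfNulls:  5,
			},
		},
	}
	fmt.Println(stats.StatisticsData.Type)
}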

type ColumnStatisticsError

type ColumnStatisticsError struct {

	// The ColumnStatistics of the column.
	ColumnStatistics *ColumnStatistics

	// An error message with the reason for the failure of an operation.
	Error *ErrorDetail
	// contains filtered or unexported fields
}

Encapsulates a ColumnStatistics object that failed and the reason for failure.

type ColumnStatisticsState added in v1.69.0

type ColumnStatisticsState string
const (
	ColumnStatisticsStateStarting  ColumnStatisticsState = "STARTING"
	ColumnStatisticsStateRunning   ColumnStatisticsState = "RUNNING"
	ColumnStatisticsStateSucceeded ColumnStatisticsState = "SUCCEEDED"
	ColumnStatisticsStateFailed    ColumnStatisticsState = "FAILED"
	ColumnStatisticsStateStopped   ColumnStatisticsState = "STOPPED"
)

Enum values for ColumnStatisticsState

func (ColumnStatisticsState) Values added in v1.69.0

func (ColumnStatisticsState) Values() []ColumnStatisticsState

Values returns all known values for ColumnStatisticsState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ColumnStatisticsTaskNotRunningException added in v1.69.0

type ColumnStatisticsTaskNotRunningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

An exception thrown when you try to stop a task run when there is no task running.

func (*ColumnStatisticsTaskNotRunningException) Error added in v1.69.0

func (e *ColumnStatisticsTaskNotRunningException) Error() string

func (*ColumnStatisticsTaskNotRunningException) ErrorCode added in v1.69.0

func (e *ColumnStatisticsTaskNotRunningException) ErrorCode() string

func (*ColumnStatisticsTaskNotRunningException) ErrorFault added in v1.69.0

func (e *ColumnStatisticsTaskNotRunningException) ErrorFault() smithy.ErrorFault

func (*ColumnStatisticsTaskNotRunningException) ErrorMessage added in v1.69.0

func (e *ColumnStatisticsTaskNotRunningException) ErrorMessage() string

type ColumnStatisticsTaskRun added in v1.69.0

type ColumnStatisticsTaskRun struct {

	// The ID of the Data Catalog where the table resides. If none is supplied, the
	// Amazon Web Services account ID is used by default.
	CatalogID *string

	// A list of the column names. If none is supplied, all column names for the table
	// will be used by default.
	ColumnNameList []string

	// The identifier for the particular column statistics task run.
	ColumnStatisticsTaskRunId *string

	// The time that this task was created.
	CreationTime *time.Time

	// The Amazon Web Services account ID.
	CustomerId *string

	// The calculated DPU usage in seconds for all autoscaled workers.
	DPUSeconds float64

	// The database where the table resides.
	DatabaseName *string

	// The end time of the task.
	EndTime *time.Time

	// The error message for the job.
	ErrorMessage *string

	// The last point in time when this task was modified.
	LastUpdated *time.Time

	// The number of workers used to generate column statistics. The job is
	// preconfigured to autoscale up to 25 instances.
	NumberOfWorkers int32

	// The IAM role that the service assumes to generate statistics.
	Role *string

	// The percentage of rows used to generate statistics. If none is supplied, the
	// entire table will be used to generate stats.
	SampleSize float64

	// Name of the security configuration that is used to encrypt CloudWatch logs for
	// the column stats task run.
	SecurityConfiguration *string

	// The start time of the task.
	StartTime *time.Time

	// The status of the task run.
	Status ColumnStatisticsState

	// The name of the table for which column statistics is generated.
	TableName *string

	// The type of workers being used for generating stats. The default is g.1x .
	WorkerType *string
	// contains filtered or unexported fields
}

The object that shows the details of the column stats run.

type ColumnStatisticsTaskRunningException added in v1.69.0

type ColumnStatisticsTaskRunningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

An exception thrown when you try to start another job while running a column stats generation job.

func (*ColumnStatisticsTaskRunningException) Error added in v1.69.0

func (e *ColumnStatisticsTaskRunningException) Error() string

func (*ColumnStatisticsTaskRunningException) ErrorCode added in v1.69.0

func (e *ColumnStatisticsTaskRunningException) ErrorCode() string

func (*ColumnStatisticsTaskRunningException) ErrorFault added in v1.69.0

func (e *ColumnStatisticsTaskRunningException) ErrorFault() smithy.ErrorFault

func (*ColumnStatisticsTaskRunningException) ErrorMessage added in v1.69.0

func (e *ColumnStatisticsTaskRunningException) ErrorMessage() string

type ColumnStatisticsTaskStoppingException added in v1.69.0

type ColumnStatisticsTaskStoppingException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

An exception thrown when you try to stop a task run.

func (*ColumnStatisticsTaskStoppingException) Error added in v1.69.0

func (e *ColumnStatisticsTaskStoppingException) Error() string

func (*ColumnStatisticsTaskStoppingException) ErrorCode added in v1.69.0

func (e *ColumnStatisticsTaskStoppingException) ErrorCode() string

func (*ColumnStatisticsTaskStoppingException) ErrorFault added in v1.69.0

func (e *ColumnStatisticsTaskStoppingException) ErrorFault() smithy.ErrorFault

func (*ColumnStatisticsTaskStoppingException) ErrorMessage added in v1.69.0

func (e *ColumnStatisticsTaskStoppingException) ErrorMessage() string

type ColumnStatisticsType

type ColumnStatisticsType string
const (
	ColumnStatisticsTypeBoolean ColumnStatisticsType = "BOOLEAN"
	ColumnStatisticsTypeDate    ColumnStatisticsType = "DATE"
	ColumnStatisticsTypeDecimal ColumnStatisticsType = "DECIMAL"
	ColumnStatisticsTypeDouble  ColumnStatisticsType = "DOUBLE"
	ColumnStatisticsTypeLong    ColumnStatisticsType = "LONG"
	ColumnStatisticsTypeString  ColumnStatisticsType = "STRING"
	ColumnStatisticsTypeBinary  ColumnStatisticsType = "BINARY"
)

Enum values for ColumnStatisticsType

func (ColumnStatisticsType) Values added in v0.29.0

func (ColumnStatisticsType) Values() []ColumnStatisticsType

Values returns all known values for ColumnStatisticsType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Comparator

type Comparator string
const (
	ComparatorEquals            Comparator = "EQUALS"
	ComparatorGreaterThan       Comparator = "GREATER_THAN"
	ComparatorLessThan          Comparator = "LESS_THAN"
	ComparatorGreaterThanEquals Comparator = "GREATER_THAN_EQUALS"
	ComparatorLessThanEquals    Comparator = "LESS_THAN_EQUALS"
)

Enum values for Comparator

func (Comparator) Values added in v0.29.0

func (Comparator) Values() []Comparator

Values returns all known values for Comparator. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Compatibility added in v0.30.0

type Compatibility string
const (
	CompatibilityNone        Compatibility = "NONE"
	CompatibilityDisabled    Compatibility = "DISABLED"
	CompatibilityBackward    Compatibility = "BACKWARD"
	CompatibilityBackwardAll Compatibility = "BACKWARD_ALL"
	CompatibilityForward     Compatibility = "FORWARD"
	CompatibilityForwardAll  Compatibility = "FORWARD_ALL"
	CompatibilityFull        Compatibility = "FULL"
	CompatibilityFullAll     Compatibility = "FULL_ALL"
)

Enum values for Compatibility

func (Compatibility) Values added in v0.30.0

func (Compatibility) Values() []Compatibility

Values returns all known values for Compatibility. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CompressionType added in v1.25.0

type CompressionType string
const (
	CompressionTypeGzip  CompressionType = "gzip"
	CompressionTypeBzip2 CompressionType = "bzip2"
)

Enum values for CompressionType

func (CompressionType) Values added in v1.25.0

func (CompressionType) Values() []CompressionType

Values returns all known values for CompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ConcurrentModificationException

type ConcurrentModificationException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

Two processes are trying to modify a resource simultaneously.

func (*ConcurrentModificationException) Error

func (e *ConcurrentModificationException) Error() string

func (*ConcurrentModificationException) ErrorCode

func (e *ConcurrentModificationException) ErrorCode() string

func (*ConcurrentModificationException) ErrorFault

func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault

func (*ConcurrentModificationException) ErrorMessage

func (e *ConcurrentModificationException) ErrorMessage() string

type ConcurrentRunsExceededException

type ConcurrentRunsExceededException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

Too many jobs are being run concurrently.

func (*ConcurrentRunsExceededException) Error

func (e *ConcurrentRunsExceededException) Error() string

func (*ConcurrentRunsExceededException) ErrorCode

func (e *ConcurrentRunsExceededException) ErrorCode() string

func (*ConcurrentRunsExceededException) ErrorFault

func (e *ConcurrentRunsExceededException) ErrorFault() smithy.ErrorFault

func (*ConcurrentRunsExceededException) ErrorMessage

func (e *ConcurrentRunsExceededException) ErrorMessage() string

type Condition

type Condition struct {

	// The state of the crawler to which this condition applies.
	CrawlState CrawlState

	// The name of the crawler to which this condition applies.
	CrawlerName *string

	// The name of the job whose JobRuns this condition applies to, and on which this
	// trigger waits.
	JobName *string

	// A logical operator.
	LogicalOperator LogicalOperator

	// The condition state. Currently, the only job states that a trigger can listen
	// for are SUCCEEDED , STOPPED , FAILED , and TIMEOUT . The only crawler states
	// that a trigger can listen for are SUCCEEDED , FAILED , and CANCELLED .
	State JobRunState
	// contains filtered or unexported fields
}

Defines a condition under which a trigger fires.
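
A sketch of a Condition that fires when an upstream job succeeds; the LogicalOperatorEquals and JobRunStateSucceeded constants are assumed from this package, and the job name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Fire when the watched job's run reaches SUCCEEDED.
	cond := types.Condition{
		JobName:         aws.String("upstream-job"),
		LogicalOperator: types.LogicalOperatorEquals,
		State:           types.JobRunStateSucceeded,
	}
	fmt.Println(aws.ToString(cond.JobName))
}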

type ConditionCheckFailureException

type ConditionCheckFailureException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

A specified condition was not satisfied.

func (*ConditionCheckFailureException) Error

func (e *ConditionCheckFailureException) Error() string

func (*ConditionCheckFailureException) ErrorCode

func (e *ConditionCheckFailureException) ErrorCode() string

func (*ConditionCheckFailureException) ErrorFault

func (e *ConditionCheckFailureException) ErrorFault() smithy.ErrorFault

func (*ConditionCheckFailureException) ErrorMessage

func (e *ConditionCheckFailureException) ErrorMessage() string

type ConflictException added in v0.29.0

type ConflictException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The CreatePartitions API was called on a table that has indexes enabled.

func (*ConflictException) Error added in v0.29.0

func (e *ConflictException) Error() string

func (*ConflictException) ErrorCode added in v0.29.0

func (e *ConflictException) ErrorCode() string

func (*ConflictException) ErrorFault added in v0.29.0

func (e *ConflictException) ErrorFault() smithy.ErrorFault

func (*ConflictException) ErrorMessage added in v0.29.0

func (e *ConflictException) ErrorMessage() string

type ConfusionMatrix

type ConfusionMatrix struct {

	// The number of matches in the data that the transform didn't find, in the
	// confusion matrix for your transform.
	NumFalseNegatives *int64

	// The number of nonmatches in the data that the transform incorrectly classified
	// as a match, in the confusion matrix for your transform.
	NumFalsePositives *int64

	// The number of nonmatches in the data that the transform correctly rejected, in
	// the confusion matrix for your transform.
	NumTrueNegatives *int64

	// The number of matches in the data that the transform correctly found, in the
	// confusion matrix for your transform.
	NumTruePositives *int64
	// contains filtered or unexported fields
}

The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making. For more information, see Confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix) in Wikipedia.
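
A sketch of deriving precision and recall from a ConfusionMatrix (precision = TP/(TP+FP), recall = TP/(TP+FN)); the counts are made up:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	cm := types.ConfusionMatrix{
		NumTruePositives:  aws.Int64(90),
		NumFalsePositives: aws.Int64(10),
		NumFalseNegatives: aws.Int64(5),
		NumTrueNegatives:  aws.Int64(895),
	}
	tp := float64(aws.ToInt64(cm.NumTruePositives))
	precision := tp / (tp + float64(aws.ToInt64(cm.NumFalsePositives))) // 90/100 = 0.900
	recall := tp / (tp + float64(aws.ToInt64(cm.NumFalseNegatives)))    // 90/95 = 0.947
	fmt.Printf("precision=%.3f recall=%.3f\n", precision, recall)
}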

type Connection

type Connection struct {

	// These key-value pairs define parameters for the connection:
	//   - HOST - The host URI: either the fully qualified domain name (FQDN) or the
	//   IPv4 address of the database host.
	//   - PORT - The port number, between 1024 and 65535, of the port on which the
	//   database host is listening for database connections.
	//   - USER_NAME - The name under which to log in to the database. The value string
	//   for USER_NAME is "USERNAME".
	//   - PASSWORD - A password, if one is used, for the user name.
	//   - ENCRYPTED_PASSWORD - When you enable connection password protection by
	//   setting ConnectionPasswordEncryption in the Data Catalog encryption settings,
	//   this field stores the encrypted password.
	//   - JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of
	//   the JAR file that contains the JDBC driver to use.
	//   - JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.
	//   - JDBC_ENGINE - The name of the JDBC engine to use.
	//   - JDBC_ENGINE_VERSION - The version of the JDBC engine to use.
	//   - CONFIG_FILES - (Reserved for future use.)
	//   - INSTANCE_ID - The instance ID to use.
	//   - JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.
	//   - JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure
	//   Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection
	//   on the client. The default is false.
	//   - CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root
	//   certificate. Glue uses this root certificate to validate the customer’s
	//   certificate when connecting to the customer database. Glue only handles X.509
	//   certificates. The certificate provided must be DER-encoded and supplied in
	//   Base64 encoding PEM format.
	//   - SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false . Glue
	//   validates the Signature algorithm and Subject Public Key Algorithm for the
	//   customer certificate. The only permitted algorithms for the Signature algorithm
	//   are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key
	//   Algorithm, the key length must be at least 2048. You can set the value of this
	//   property to true to skip Glue’s validation of the customer certificate.
	//   - CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for
	//   domain match or distinguished name match to prevent a man-in-the-middle attack.
	//   In Oracle database, this is used as the SSL_SERVER_CERT_DN ; in Microsoft SQL
	//   Server, this is used as the hostNameInCertificate .
	//   - CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.
	//   - SECRET_ID - The secret ID used for the secret manager of credentials.
	//   - CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.
	//   - CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.
	//   - CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM
	//   connection.
	//   - KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that
	//   are the addresses of the Apache Kafka brokers in a Kafka cluster to which a
	//   Kafka client will connect and bootstrap itself.
	//   - KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka
	//   connection. Default value is "true".
	//   - KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem
	//   format). The default is an empty string.
	//   - KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA
	//   cert file or not. Glue validates for three algorithms: SHA256withRSA,
	//   SHA384withRSA and SHA512withRSA. Default value is "false".
	//   - KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file
	//   for Kafka client side authentication (Optional).
	//   - KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided
	//   keystore (Optional).
	//   - KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this
	//   is the password to access the client key to be used with the Kafka server side
	//   key (Optional).
	//   - ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the
	//   Kafka client keystore password (if the user has the Glue encrypt passwords
	//   setting selected).
	//   - ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka
	//   client key password (if the user has the Glue encrypt passwords setting
	//   selected).
	//   - KAFKA_SASL_MECHANISM - "SCRAM-SHA-512" , "GSSAPI" , "AWS_MSK_IAM" , or
	//   "PLAIN" . These are the supported SASL Mechanisms (https://www.iana.org/assignments/sasl-mechanisms/sasl-mechanisms.xhtml)
	//   .
	//   - KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with
	//   the "PLAIN" mechanism.
	//   - KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with
	//   the "PLAIN" mechanism.
	//   - ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka
	//   SASL PLAIN password (if the user has the Glue encrypt passwords setting
	//   selected).
	//   - KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with
	//   the "SCRAM-SHA-512" mechanism.
	//   - KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with
	//   the "SCRAM-SHA-512" mechanism.
	//   - ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka
	//   SASL SCRAM password (if the user has the Glue encrypt passwords setting
	//   selected).
	//   - KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in
	//   Amazon Web Services Secrets Manager.
	//   - KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A
	//   keytab stores long-term keys for one or more principals. For more information,
	//   see MIT Kerberos Documentation: Keytab (https://web.mit.edu/kerberos/krb5-latest/doc/basic/keytab_def.html)
	//   .
	//   - KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file.
	//   A krb5.conf stores Kerberos configuration information, such as the location of
	//   the KDC server. For more information, see MIT Kerberos Documentation:
	//   krb5.conf (https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html)
	//   .
	//   - KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with
	//   sasl.kerberos.service.name in your Kafka Configuration (https://kafka.apache.org/documentation/#brokerconfigs_sasl.kerberos.service.name)
	//   .
	//   - KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by
	//   Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers (https://kafka.apache.org/documentation/#security_sasl_kerberos_clientconfig)
	//   .
	ConnectionProperties map[string]string

	// The type of the connection. Currently, SFTP is not supported.
	ConnectionType ConnectionType

	// The time that this connection definition was created.
	CreationTime *time.Time

	// The description of the connection.
	Description *string

	// The user, group, or role that last updated this connection definition.
	LastUpdatedBy *string

	// The last time that this connection definition was updated.
	LastUpdatedTime *time.Time

	// A list of criteria that can be used in selecting this connection.
	MatchCriteria []string

	// The name of the connection definition.
	Name *string

	// A map of physical connection requirements, such as virtual private cloud (VPC)
	// and SecurityGroup , that are needed to make this connection successfully.
	PhysicalConnectionRequirements *PhysicalConnectionRequirements
	// contains filtered or unexported fields
}

Defines a connection to a data source.

type ConnectionInput

type ConnectionInput struct {

	// These key-value pairs define parameters for the connection.
	//
	// This member is required.
	ConnectionProperties map[string]string

	// The type of the connection. Currently, these types are supported:
	//   - JDBC - Designates a connection to a database through Java Database
	//   Connectivity (JDBC). JDBC Connections use the following ConnectionParameters.
	//   - Required: All of ( HOST , PORT , JDBC_ENGINE ) or JDBC_CONNECTION_URL .
	//   - Required: All of ( USERNAME , PASSWORD ) or SECRET_ID .
	//   - Optional: JDBC_ENFORCE_SSL , CUSTOM_JDBC_CERT , CUSTOM_JDBC_CERT_STRING ,
	//   SKIP_CUSTOM_JDBC_CERT_VALIDATION . These parameters are used to configure SSL
	//   with JDBC.
	//   - KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA
	//   Connections use the following ConnectionParameters.
	//   - Required: KAFKA_BOOTSTRAP_SERVERS .
	//   - Optional: KAFKA_SSL_ENABLED , KAFKA_CUSTOM_CERT ,
	//   KAFKA_SKIP_CUSTOM_CERT_VALIDATION . These parameters are used to configure SSL
	//   with KAFKA .
	//   - Optional: KAFKA_CLIENT_KEYSTORE , KAFKA_CLIENT_KEYSTORE_PASSWORD ,
	//   KAFKA_CLIENT_KEY_PASSWORD , ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD ,
	//   ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD . These parameters are used to configure
	//   TLS client configuration with SSL in KAFKA .
	//   - Optional: KAFKA_SASL_MECHANISM . Can be specified as SCRAM-SHA-512 , GSSAPI
	//   , or AWS_MSK_IAM .
	//   - Optional: KAFKA_SASL_SCRAM_USERNAME , KAFKA_SASL_SCRAM_PASSWORD ,
	//   ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD . These parameters are used to configure
	//   SASL/SCRAM-SHA-512 authentication with KAFKA .
	//   - Optional: KAFKA_SASL_GSSAPI_KEYTAB , KAFKA_SASL_GSSAPI_KRB5_CONF ,
	//   KAFKA_SASL_GSSAPI_SERVICE , KAFKA_SASL_GSSAPI_PRINCIPAL . These parameters are
	//   used to configure SASL/GSSAPI authentication with KAFKA .
	//   - MONGODB - Designates a connection to a MongoDB document database. MONGODB
	//   Connections use the following ConnectionParameters.
	//   - Required: CONNECTION_URL .
	//   - Required: All of ( USERNAME , PASSWORD ) or SECRET_ID .
	//   - NETWORK - Designates a network connection to a data source within an Amazon
	//   Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not
	//   require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.
	//
	//   - MARKETPLACE - Uses configuration settings contained in a connector purchased
	//   from Amazon Web Services Marketplace to read from and write to data stores that
	//   are not natively supported by Glue. MARKETPLACE Connections use the following
	//   ConnectionParameters.
	//   - Required: CONNECTOR_TYPE , CONNECTOR_URL , CONNECTOR_CLASS_NAME ,
	//   CONNECTION_URL .
	//   - Required for JDBC CONNECTOR_TYPE connections: All of ( USERNAME , PASSWORD )
	//   or SECRET_ID .
	//   - CUSTOM - Uses configuration settings contained in a custom connector to read
	//   from and write to data stores that are not natively supported by Glue.
	// SFTP is not supported. For more information about how optional
	// ConnectionProperties are used to configure features in Glue, consult Glue
	// connection properties (https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html)
	// . For more information about how optional ConnectionProperties are used to
	// configure features in Glue Studio, consult Using connectors and connections (https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html)
	// .
	//
	// This member is required.
	ConnectionType ConnectionType

	// The name of the connection. A connection will not function as expected
	// without a name.
	//
	// This member is required.
	Name *string

	// The description of the connection.
	Description *string

	// A list of criteria that can be used in selecting this connection.
	MatchCriteria []string

	// A map of physical connection requirements, such as virtual private cloud (VPC)
	// and SecurityGroup , that are needed to successfully make this connection.
	PhysicalConnectionRequirements *PhysicalConnectionRequirements
	// contains filtered or unexported fields
}

A structure that is used to specify a connection to create or update.
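
For example, a minimal JDBC connection input might be assembled as in the sketch below; the connection name, URL, and secret ID are illustrative, and the map keys reuse the ConnectionPropertyKey constants defined later in this package:

input := types.ConnectionInput{
	Name:           aws.String("my-jdbc-connection"), // hypothetical name
	ConnectionType: types.ConnectionTypeJdbc,
	ConnectionProperties: map[string]string{
		string(types.ConnectionPropertyKeyJdbcConnectionUrl): "jdbc:postgresql://dbhost:5432/mydb",
		string(types.ConnectionPropertyKeySecretId):          "my-db-secret",
		string(types.ConnectionPropertyKeyJdbcEnforceSsl):    "true",
	},
}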

type ConnectionPasswordEncryption

type ConnectionPasswordEncryption struct {

	// When the ReturnConnectionPasswordEncrypted flag is set to "true", passwords
	// remain encrypted in the responses of GetConnection and GetConnections . This
	// encryption takes effect independently from catalog encryption.
	//
	// This member is required.
	ReturnConnectionPasswordEncrypted bool

	// A KMS key that is used to encrypt the connection password. If connection
	// password protection is enabled, the caller of CreateConnection and
	// UpdateConnection needs at least kms:Encrypt permission on the specified KMS
	// key, to encrypt passwords before storing them in the Data Catalog. You can set
	// the decrypt permission to enable or restrict access on the password key
	// according to your security requirements.
	AwsKmsKeyId *string
	// contains filtered or unexported fields
}

The data structure used by the Data Catalog to encrypt the password as part of CreateConnection or UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection properties. You can enable catalog encryption or only password encryption. When a CreateConnection request arrives containing a password, the Data Catalog first encrypts the password using your KMS key. It then encrypts the whole connection object again if catalog encryption is also enabled. This encryption requires that you set KMS key permissions to enable or restrict access on the password key according to your security requirements. For example, you might want only administrators to have decrypt permission on the password key.

type ConnectionPropertyKey

type ConnectionPropertyKey string
const (
	ConnectionPropertyKeyHost                                 ConnectionPropertyKey = "HOST"
	ConnectionPropertyKeyPort                                 ConnectionPropertyKey = "PORT"
	ConnectionPropertyKeyUserName                             ConnectionPropertyKey = "USERNAME"
	ConnectionPropertyKeyPassword                             ConnectionPropertyKey = "PASSWORD"
	ConnectionPropertyKeyEncryptedPassword                    ConnectionPropertyKey = "ENCRYPTED_PASSWORD"
	ConnectionPropertyKeyJdbcDriverJarUri                     ConnectionPropertyKey = "JDBC_DRIVER_JAR_URI"
	ConnectionPropertyKeyJdbcDriverClassName                  ConnectionPropertyKey = "JDBC_DRIVER_CLASS_NAME"
	ConnectionPropertyKeyJdbcEngine                           ConnectionPropertyKey = "JDBC_ENGINE"
	ConnectionPropertyKeyJdbcEngineVersion                    ConnectionPropertyKey = "JDBC_ENGINE_VERSION"
	ConnectionPropertyKeyConfigFiles                          ConnectionPropertyKey = "CONFIG_FILES"
	ConnectionPropertyKeyInstanceId                           ConnectionPropertyKey = "INSTANCE_ID"
	ConnectionPropertyKeyJdbcConnectionUrl                    ConnectionPropertyKey = "JDBC_CONNECTION_URL"
	ConnectionPropertyKeyJdbcEnforceSsl                       ConnectionPropertyKey = "JDBC_ENFORCE_SSL"
	ConnectionPropertyKeyCustomJdbcCert                       ConnectionPropertyKey = "CUSTOM_JDBC_CERT"
	ConnectionPropertyKeySkipCustomJdbcCertValidation         ConnectionPropertyKey = "SKIP_CUSTOM_JDBC_CERT_VALIDATION"
	ConnectionPropertyKeyCustomJdbcCertString                 ConnectionPropertyKey = "CUSTOM_JDBC_CERT_STRING"
	ConnectionPropertyKeyConnectionUrl                        ConnectionPropertyKey = "CONNECTION_URL"
	ConnectionPropertyKeyKafkaBootstrapServers                ConnectionPropertyKey = "KAFKA_BOOTSTRAP_SERVERS"
	ConnectionPropertyKeyKafkaSslEnabled                      ConnectionPropertyKey = "KAFKA_SSL_ENABLED"
	ConnectionPropertyKeyKafkaCustomCert                      ConnectionPropertyKey = "KAFKA_CUSTOM_CERT"
	ConnectionPropertyKeyKafkaSkipCustomCertValidation        ConnectionPropertyKey = "KAFKA_SKIP_CUSTOM_CERT_VALIDATION"
	ConnectionPropertyKeyKafkaClientKeystore                  ConnectionPropertyKey = "KAFKA_CLIENT_KEYSTORE"
	ConnectionPropertyKeyKafkaClientKeystorePassword          ConnectionPropertyKey = "KAFKA_CLIENT_KEYSTORE_PASSWORD"
	ConnectionPropertyKeyKafkaClientKeyPassword               ConnectionPropertyKey = "KAFKA_CLIENT_KEY_PASSWORD"
	ConnectionPropertyKeyEncryptedKafkaClientKeystorePassword ConnectionPropertyKey = "ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD"
	ConnectionPropertyKeyEncryptedKafkaClientKeyPassword      ConnectionPropertyKey = "ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD"
	ConnectionPropertyKeySecretId                             ConnectionPropertyKey = "SECRET_ID"
	ConnectionPropertyKeyConnectorUrl                         ConnectionPropertyKey = "CONNECTOR_URL"
	ConnectionPropertyKeyConnectorType                        ConnectionPropertyKey = "CONNECTOR_TYPE"
	ConnectionPropertyKeyConnectorClassName                   ConnectionPropertyKey = "CONNECTOR_CLASS_NAME"
	ConnectionPropertyKeyKafkaSaslMechanism                   ConnectionPropertyKey = "KAFKA_SASL_MECHANISM"
	ConnectionPropertyKeyKafkaSaslPlainUsername               ConnectionPropertyKey = "KAFKA_SASL_PLAIN_USERNAME"
	ConnectionPropertyKeyKafkaSaslPlainPassword               ConnectionPropertyKey = "KAFKA_SASL_PLAIN_PASSWORD"
	ConnectionPropertyKeyEncryptedKafkaSaslPlainPassword      ConnectionPropertyKey = "ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD"
	ConnectionPropertyKeyKafkaSaslScramUsername               ConnectionPropertyKey = "KAFKA_SASL_SCRAM_USERNAME"
	ConnectionPropertyKeyKafkaSaslScramPassword               ConnectionPropertyKey = "KAFKA_SASL_SCRAM_PASSWORD"
	ConnectionPropertyKeyKafkaSaslScramSecretsArn             ConnectionPropertyKey = "KAFKA_SASL_SCRAM_SECRETS_ARN"
	ConnectionPropertyKeyEncryptedKafkaSaslScramPassword      ConnectionPropertyKey = "ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD"
	ConnectionPropertyKeyKafkaSaslGssapiKeytab                ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_KEYTAB"
	ConnectionPropertyKeyKafkaSaslGssapiKrb5Conf              ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_KRB5_CONF"
	ConnectionPropertyKeyKafkaSaslGssapiService               ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_SERVICE"
	ConnectionPropertyKeyKafkaSaslGssapiPrincipal             ConnectionPropertyKey = "KAFKA_SASL_GSSAPI_PRINCIPAL"
)

Enum values for ConnectionPropertyKey

func (ConnectionPropertyKey) Values added in v0.29.0

Values returns all known values for ConnectionPropertyKey. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ConnectionType

type ConnectionType string
const (
	ConnectionTypeJdbc        ConnectionType = "JDBC"
	ConnectionTypeSftp        ConnectionType = "SFTP"
	ConnectionTypeMongodb     ConnectionType = "MONGODB"
	ConnectionTypeKafka       ConnectionType = "KAFKA"
	ConnectionTypeNetwork     ConnectionType = "NETWORK"
	ConnectionTypeMarketplace ConnectionType = "MARKETPLACE"
	ConnectionTypeCustom      ConnectionType = "CUSTOM"
)

Enum values for ConnectionType

func (ConnectionType) Values added in v0.29.0

func (ConnectionType) Values() []ConnectionType

Values returns all known values for ConnectionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
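
Because the value set can grow over time, code that needs the full list should call Values on a zero value rather than hard-coding it; a quick sketch (assuming fmt is imported):

for _, ct := range types.ConnectionType("").Values() {
	fmt.Println(ct)
}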

type ConnectionsList

type ConnectionsList struct {

	// A list of connections used by the job.
	Connections []string
	// contains filtered or unexported fields
}

Specifies the connections used by a job.

type ConnectorDataSource added in v1.67.0

type ConnectorDataSource struct {

	// The connectionType , as provided to the underlying Glue library. This node type
	// supports the following connection types:
	//   - opensearch
	//   - azuresql
	//   - azurecosmos
	//   - bigquery
	//   - saphana
	//   - teradata
	//   - vertica
	//
	// This member is required.
	ConnectionType *string

	// A map specifying connection options for the node. You can find standard
	// connection options for the corresponding connection type in the Connection
	// parameters (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-connect.html)
	// section of the Glue documentation.
	//
	// This member is required.
	Data map[string]string

	// The name of this source node.
	//
	// This member is required.
	Name *string

	// Specifies the data schema for this source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a source generated with standard connection options.
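
A sketch of such a node; the node name and the option keys in Data are illustrative assumptions rather than a fixed schema, so consult the linked connection parameters documentation for the options your connection type actually supports:

src := types.ConnectorDataSource{
	Name:           aws.String("bq-source"), // hypothetical node name
	ConnectionType: aws.String("bigquery"),
	Data: map[string]string{
		// Illustrative option keys only.
		"connectionName": "my-bigquery-connection",
		"table":          "my_dataset.my_table",
	},
}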

type ConnectorDataTarget added in v1.67.0

type ConnectorDataTarget struct {

	// The connectionType , as provided to the underlying Glue library. This node type
	// supports the following connection types:
	//   - opensearch
	//   - azuresql
	//   - azurecosmos
	//   - bigquery
	//   - saphana
	//   - teradata
	//   - vertica
	//
	// This member is required.
	ConnectionType *string

	// A map specifying connection options for the node. You can find standard
	// connection options for the corresponding connection type in the Connection
	// parameters (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-connect.html)
	// section of the Glue documentation.
	//
	// This member is required.
	Data map[string]string

	// The name of this target node.
	//
	// This member is required.
	Name *string

	// The nodes that are inputs to the data target.
	Inputs []string
	// contains filtered or unexported fields
}

Specifies a target generated with standard connection options.

type Crawl

type Crawl struct {

	// The date and time on which the crawl completed.
	CompletedOn *time.Time

	// The error message associated with the crawl.
	ErrorMessage *string

	// The log group associated with the crawl.
	LogGroup *string

	// The log stream associated with the crawl.
	LogStream *string

	// The date and time on which the crawl started.
	StartedOn *time.Time

	// The state of the crawler.
	State CrawlState
	// contains filtered or unexported fields
}

The details of a crawl in the workflow.

type CrawlState

type CrawlState string
const (
	CrawlStateRunning    CrawlState = "RUNNING"
	CrawlStateCancelling CrawlState = "CANCELLING"
	CrawlStateCancelled  CrawlState = "CANCELLED"
	CrawlStateSucceeded  CrawlState = "SUCCEEDED"
	CrawlStateFailed     CrawlState = "FAILED"
	CrawlStateError      CrawlState = "ERROR"
)

Enum values for CrawlState

func (CrawlState) Values added in v0.29.0

func (CrawlState) Values() []CrawlState

Values returns all known values for CrawlState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Crawler

type Crawler struct {

	// A list of UTF-8 strings that specify the custom classifiers that are associated
	// with the crawler.
	Classifiers []string

	// Crawler configuration information. This versioned JSON string allows users to
	// specify aspects of a crawler's behavior. For more information, see Setting
	// crawler configuration options (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html)
	// .
	Configuration *string

	// If the crawler is running, contains the total time elapsed since the last crawl
	// began.
	CrawlElapsedTime int64

	// The name of the SecurityConfiguration structure to be used by this crawler.
	CrawlerSecurityConfiguration *string

	// The time that the crawler was created.
	CreationTime *time.Time

	// The name of the database in which the crawler's output is stored.
	DatabaseName *string

	// A description of the crawler.
	Description *string

	// Specifies whether the crawler should use Lake Formation credentials instead
	// of the IAM role credentials.
	LakeFormationConfiguration *LakeFormationConfiguration

	// The status of the last crawl, and potentially error information if an error
	// occurred.
	LastCrawl *LastCrawlInfo

	// The time that the crawler was last updated.
	LastUpdated *time.Time

	// A configuration that specifies whether data lineage is enabled for the crawler.
	LineageConfiguration *LineageConfiguration

	// The name of the crawler.
	Name *string

	// A policy that specifies whether to crawl the entire dataset again, or to crawl
	// only folders that were added since the last crawler run.
	RecrawlPolicy *RecrawlPolicy

	// The Amazon Resource Name (ARN) of an IAM role that's used to access customer
	// resources, such as Amazon Simple Storage Service (Amazon S3) data.
	Role *string

	// For scheduled crawlers, the schedule when the crawler runs.
	Schedule *Schedule

	// The policy that specifies update and delete behaviors for the crawler.
	SchemaChangePolicy *SchemaChangePolicy

	// Indicates whether the crawler is running, or whether a run is pending.
	State CrawlerState

	// The prefix added to the names of tables that are created.
	TablePrefix *string

	// A collection of targets to crawl.
	Targets *CrawlerTargets

	// The version of the crawler.
	Version int64
	// contains filtered or unexported fields
}

Specifies a crawler program that examines a data source and uses classifiers to try to determine its schema. If successful, the crawler records metadata concerning the data source in the Glue Data Catalog.

type CrawlerHistory added in v1.26.0

type CrawlerHistory struct {

	// A UUID identifier for each crawl.
	CrawlId *string

	// The number of data processing unit (DPU) hours used for the crawl.
	DPUHour float64

	// The date and time on which the crawl ended.
	EndTime *time.Time

	// If an error occurred, the error message associated with the crawl.
	ErrorMessage *string

	// The log group associated with the crawl.
	LogGroup *string

	// The log stream associated with the crawl.
	LogStream *string

	// The prefix for a CloudWatch message about this crawl.
	MessagePrefix *string

	// The date and time on which the crawl started.
	StartTime *time.Time

	// The state of the crawl.
	State CrawlerHistoryState

	// A run summary for the specific crawl in JSON. Contains the catalog tables and
	// partitions that were added, updated, or deleted.
	Summary *string
	// contains filtered or unexported fields
}

Contains the information for a run of a crawler.

type CrawlerHistoryState added in v1.26.0

type CrawlerHistoryState string
const (
	CrawlerHistoryStateRunning   CrawlerHistoryState = "RUNNING"
	CrawlerHistoryStateCompleted CrawlerHistoryState = "COMPLETED"
	CrawlerHistoryStateFailed    CrawlerHistoryState = "FAILED"
	CrawlerHistoryStateStopped   CrawlerHistoryState = "STOPPED"
)

Enum values for CrawlerHistoryState

func (CrawlerHistoryState) Values added in v1.26.0

Values returns all known values for CrawlerHistoryState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CrawlerLineageSettings added in v0.31.0

type CrawlerLineageSettings string
const (
	CrawlerLineageSettingsEnable  CrawlerLineageSettings = "ENABLE"
	CrawlerLineageSettingsDisable CrawlerLineageSettings = "DISABLE"
)

Enum values for CrawlerLineageSettings

func (CrawlerLineageSettings) Values added in v0.31.0

Values returns all known values for CrawlerLineageSettings. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CrawlerMetrics

type CrawlerMetrics struct {

	// The name of the crawler.
	CrawlerName *string

	// The duration of the crawler's most recent run, in seconds.
	LastRuntimeSeconds float64

	// The median duration of this crawler's runs, in seconds.
	MedianRuntimeSeconds float64

	// True if the crawler is still estimating how long it will take to complete this
	// run.
	StillEstimating bool

	// The number of tables created by this crawler.
	TablesCreated int32

	// The number of tables deleted by this crawler.
	TablesDeleted int32

	// The number of tables updated by this crawler.
	TablesUpdated int32

	// The estimated time left to complete a running crawl.
	TimeLeftSeconds float64
	// contains filtered or unexported fields
}

Metrics for a specified crawler.

type CrawlerNodeDetails

type CrawlerNodeDetails struct {

	// A list of crawls represented by the crawl node.
	Crawls []Crawl
	// contains filtered or unexported fields
}

The details of a Crawler node present in the workflow.

type CrawlerNotRunningException

type CrawlerNotRunningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The specified crawler is not running.

func (*CrawlerNotRunningException) Error

func (e *CrawlerNotRunningException) Error() string

func (*CrawlerNotRunningException) ErrorCode

func (e *CrawlerNotRunningException) ErrorCode() string

func (*CrawlerNotRunningException) ErrorFault

func (e *CrawlerNotRunningException) ErrorFault() smithy.ErrorFault

func (*CrawlerNotRunningException) ErrorMessage

func (e *CrawlerNotRunningException) ErrorMessage() string

type CrawlerRunningException

type CrawlerRunningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The operation cannot be performed because the crawler is already running.

func (*CrawlerRunningException) Error

func (e *CrawlerRunningException) Error() string

func (*CrawlerRunningException) ErrorCode

func (e *CrawlerRunningException) ErrorCode() string

func (*CrawlerRunningException) ErrorFault

func (e *CrawlerRunningException) ErrorFault() smithy.ErrorFault

func (*CrawlerRunningException) ErrorMessage

func (e *CrawlerRunningException) ErrorMessage() string

type CrawlerState

type CrawlerState string
const (
	CrawlerStateReady    CrawlerState = "READY"
	CrawlerStateRunning  CrawlerState = "RUNNING"
	CrawlerStateStopping CrawlerState = "STOPPING"
)

Enum values for CrawlerState

func (CrawlerState) Values added in v0.29.0

func (CrawlerState) Values() []CrawlerState

Values returns all known values for CrawlerState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CrawlerStoppingException

type CrawlerStoppingException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The specified crawler is stopping.

func (*CrawlerStoppingException) Error

func (e *CrawlerStoppingException) Error() string

func (*CrawlerStoppingException) ErrorCode

func (e *CrawlerStoppingException) ErrorCode() string

func (*CrawlerStoppingException) ErrorFault

func (e *CrawlerStoppingException) ErrorFault() smithy.ErrorFault

func (*CrawlerStoppingException) ErrorMessage

func (e *CrawlerStoppingException) ErrorMessage() string

type CrawlerTargets

type CrawlerTargets struct {

	// Specifies Glue Data Catalog targets.
	CatalogTargets []CatalogTarget

	// Specifies Delta data store targets.
	DeltaTargets []DeltaTarget

	// Specifies Amazon DynamoDB targets.
	DynamoDBTargets []DynamoDBTarget

	// Specifies Apache Hudi data store targets.
	HudiTargets []HudiTarget

	// Specifies Apache Iceberg data store targets.
	IcebergTargets []IcebergTarget

	// Specifies JDBC targets.
	JdbcTargets []JdbcTarget

	// Specifies Amazon DocumentDB or MongoDB targets.
	MongoDBTargets []MongoDBTarget

	// Specifies Amazon Simple Storage Service (Amazon S3) targets.
	S3Targets []S3Target
	// contains filtered or unexported fields
}

Specifies data stores to crawl.
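
Only the target lists you need have to be populated. A minimal sketch that crawls a single Amazon S3 prefix (the path is hypothetical):

targets := types.CrawlerTargets{
	S3Targets: []types.S3Target{
		{Path: aws.String("s3://my-bucket/raw/")}, // hypothetical path
	},
}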

type CrawlsFilter added in v1.26.0

type CrawlsFilter struct {

	// A key used to filter the crawler runs for a specified crawler. Valid values for
	// each of the field names are:
	//   - CRAWL_ID : A string representing the UUID identifier for a crawl.
	//   - STATE : A string representing the state of the crawl.
	//   - START_TIME and END_TIME : The epoch timestamp in milliseconds.
	//   - DPU_HOUR : The number of data processing unit (DPU) hours used for the
	//   crawl.
	FieldName FieldName

	// The value provided for comparison on the crawl field.
	FieldValue *string

	// A defined comparator that operates on the value. The available operators are:
	//   - GT : Greater than.
	//   - GE : Greater than or equal to.
	//   - LT : Less than.
	//   - LE : Less than or equal to.
	//   - EQ : Equal to.
	//   - NE : Not equal to.
	FilterOperator FilterOperator
	// contains filtered or unexported fields
}

A list of fields, comparators, and values that you can use to filter the crawler runs for a specified crawler.
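
For example, a filter selecting only failed crawls might pair the STATE field with the EQ operator, as in this sketch:

filter := types.CrawlsFilter{
	FieldName:      types.FieldNameState,
	FilterOperator: types.FilterOperatorEq,
	FieldValue:     aws.String("FAILED"),
}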

type CreateCsvClassifierRequest

type CreateCsvClassifierRequest struct {

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// Enables the processing of files that contain only one column.
	AllowSingleColumn *bool

	// Indicates whether the CSV file contains a header.
	ContainsHeader CsvHeaderOption

	// Enables the configuration of custom datatypes.
	CustomDatatypeConfigured *bool

	// Creates a list of supported custom datatypes.
	CustomDatatypes []string

	// A custom symbol to denote what separates each column entry in the row.
	Delimiter *string

	// Specifies not to trim values before identifying the type of column values. The
	// default value is true.
	DisableValueTrimming *bool

	// A list of strings representing column names.
	Header []string

	// A custom symbol to denote what combines content into a single column value.
	// Must be different from the column delimiter.
	QuoteSymbol *string

	// Sets the SerDe for processing CSV in the classifier, which will be applied in
	// the Data Catalog. Valid values are OpenCSVSerDe , LazySimpleSerDe , and None .
	// You can specify the None value when you want the crawler to do the detection.
	Serde CsvSerdeOption
	// contains filtered or unexported fields
}

Specifies a custom CSV classifier for CreateClassifier to create.
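
As a sketch, a classifier for pipe-delimited files with a known header (names and columns are illustrative) might look like the following, ready to pass to CreateClassifier:

csv := types.CreateCsvClassifierRequest{
	Name:           aws.String("pipe-delimited"), // hypothetical classifier name
	Delimiter:      aws.String("|"),
	ContainsHeader: types.CsvHeaderOptionPresent,
	Header:         []string{"id", "name", "created_at"},
	Serde:          types.CsvSerdeOptionOpenCSVSerDe,
}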

type CreateGrokClassifierRequest

type CreateGrokClassifierRequest struct {

	// An identifier of the data format that the classifier matches, such as Twitter,
	// JSON, Omniture logs, Amazon CloudWatch Logs, and so on.
	//
	// This member is required.
	Classification *string

	// The grok pattern used by this classifier.
	//
	// This member is required.
	GrokPattern *string

	// The name of the new classifier.
	//
	// This member is required.
	Name *string

	// Optional custom grok patterns used by this classifier.
	CustomPatterns *string
	// contains filtered or unexported fields
}

Specifies a grok classifier for CreateClassifier to create.
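
A sketch that classifies Apache access logs using the standard COMMONAPACHELOG grok pattern (the classifier name is hypothetical):

grok := types.CreateGrokClassifierRequest{
	Name:           aws.String("apache-access-logs"), // hypothetical name
	Classification: aws.String("apache-access"),
	GrokPattern:    aws.String("%{COMMONAPACHELOG}"),
}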

type CreateJsonClassifierRequest

type CreateJsonClassifierRequest struct {

	// A JsonPath string defining the JSON data for the classifier to classify. Glue
	// supports a subset of JsonPath, as described in Writing JsonPath Custom
	// Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json)
	// .
	//
	// This member is required.
	JsonPath *string

	// The name of the classifier.
	//
	// This member is required.
	Name *string
	// contains filtered or unexported fields
}

Specifies a JSON classifier for CreateClassifier to create.

type CreateXMLClassifierRequest

type CreateXMLClassifierRequest struct {

	// An identifier of the data format that the classifier matches.
	//
	// This member is required.
	Classification *string

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// The XML tag designating the element that contains each record in an XML
	// document being parsed. This can't identify a self-closing element (closed by />
	// ). An empty row element that contains only attributes can be parsed as long as
	//   it ends with a closing tag (for example, <row item_a="A" item_b="B"></row>
	//   is okay, but <row item_a="A" item_b="B" /> is not).
	RowTag *string
	// contains filtered or unexported fields
}

Specifies an XML classifier for CreateClassifier to create.

type CsvClassifier

type CsvClassifier struct {

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// Enables the processing of files that contain only one column.
	AllowSingleColumn *bool

	// Indicates whether the CSV file contains a header.
	ContainsHeader CsvHeaderOption

	// The time that this classifier was registered.
	CreationTime *time.Time

	// Enables the custom datatype to be configured.
	CustomDatatypeConfigured *bool

	// A list of custom datatypes including "BINARY", "BOOLEAN", "DATE", "DECIMAL",
	// "DOUBLE", "FLOAT", "INT", "LONG", "SHORT", "STRING", "TIMESTAMP".
	CustomDatatypes []string

	// A custom symbol to denote what separates each column entry in the row.
	Delimiter *string

	// Specifies not to trim values before identifying the type of column values. The
	// default value is true .
	DisableValueTrimming *bool

	// A list of strings representing column names.
	Header []string

	// The time that this classifier was last updated.
	LastUpdated *time.Time

	// A custom symbol to denote what combines content into a single column value. It
	// must be different from the column delimiter.
	QuoteSymbol *string

	// Sets the SerDe for processing CSV in the classifier, which will be applied in
	// the Data Catalog. Valid values are OpenCSVSerDe , LazySimpleSerDe , and None .
	// You can specify the None value when you want the crawler to do the detection.
	Serde CsvSerdeOption

	// The version of this classifier.
	Version int64
	// contains filtered or unexported fields
}

A classifier for custom CSV content.

type CsvHeaderOption

type CsvHeaderOption string
const (
	CsvHeaderOptionUnknown CsvHeaderOption = "UNKNOWN"
	CsvHeaderOptionPresent CsvHeaderOption = "PRESENT"
	CsvHeaderOptionAbsent  CsvHeaderOption = "ABSENT"
)

Enum values for CsvHeaderOption

func (CsvHeaderOption) Values added in v0.29.0

func (CsvHeaderOption) Values() []CsvHeaderOption

Values returns all known values for CsvHeaderOption. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CsvSerdeOption added in v1.61.0

type CsvSerdeOption string
const (
	CsvSerdeOptionOpenCSVSerDe    CsvSerdeOption = "OpenCSVSerDe"
	CsvSerdeOptionLazySimpleSerDe CsvSerdeOption = "LazySimpleSerDe"
	CsvSerdeOptionNone            CsvSerdeOption = "None"
)

Enum values for CsvSerdeOption

func (CsvSerdeOption) Values added in v1.61.0

func (CsvSerdeOption) Values() []CsvSerdeOption

Values returns all known values for CsvSerdeOption. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type CustomCode added in v1.25.0

type CustomCode struct {

	// The name defined for the custom code node class.
	//
	// This member is required.
	ClassName *string

	// The custom code that is used to perform the data transformation.
	//
	// This member is required.
	Code *string

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// Specifies the data schema for the custom code transform.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a transform that uses custom code you provide to perform the data transformation. The output is a collection of DynamicFrames.

type CustomEntityType added in v1.24.0

type CustomEntityType struct {

	// A name for the custom pattern that allows it to be retrieved or deleted later.
	// This name must be unique per Amazon Web Services account.
	//
	// This member is required.
	Name *string

	// A regular expression string that is used for detecting sensitive data in a
	// custom pattern.
	//
	// This member is required.
	RegexString *string

	// A list of context words. If none of these context words are found within the
	// vicinity of the regular expression, the data will not be detected as sensitive
	// data. If no context words are passed, only the regular expression is checked.
	ContextWords []string
	// contains filtered or unexported fields
}

An object representing a custom pattern for detecting sensitive data across the columns and rows of your structured data.
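
For example, a pattern for a fictitious employee-ID format, with context words to reduce false positives, might be defined like this:

entity := types.CustomEntityType{
	Name:        aws.String("EmployeeId"), // hypothetical pattern name
	RegexString: aws.String(`EMP-[0-9]{6}`),
	// Without one of these words near a match, the value is not
	// reported as sensitive data.
	ContextWords: []string{"employee", "badge", "staff"},
}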

type DQResultsPublishingOptions added in v1.37.0

type DQResultsPublishingOptions struct {

	// Enable metrics for your data quality results.
	CloudWatchMetricsEnabled *bool

	// The context of the evaluation.
	EvaluationContext *string

	// Enable publishing for your data quality results.
	ResultsPublishingEnabled *bool

	// The Amazon S3 prefix prepended to the results.
	ResultsS3Prefix *string
	// contains filtered or unexported fields
}

Options to configure how your data quality evaluation results are published.

type DQStopJobOnFailureOptions added in v1.37.0

type DQStopJobOnFailureOptions struct {

	// When to stop the job if your data quality evaluation fails. Options are
	// Immediate or AfterDataLoad.
	StopJobOnFailureTiming DQStopJobOnFailureTiming
	// contains filtered or unexported fields
}

Options to configure how your job will stop if your data quality evaluation fails.
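
A one-line sketch that stops the job only after the data has been loaded:

stop := types.DQStopJobOnFailureOptions{
	StopJobOnFailureTiming: types.DQStopJobOnFailureTimingAfterDataLoad,
}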

type DQStopJobOnFailureTiming added in v1.37.0

type DQStopJobOnFailureTiming string
const (
	DQStopJobOnFailureTimingImmediate     DQStopJobOnFailureTiming = "Immediate"
	DQStopJobOnFailureTimingAfterDataLoad DQStopJobOnFailureTiming = "AfterDataLoad"
)

Enum values for DQStopJobOnFailureTiming

func (DQStopJobOnFailureTiming) Values added in v1.37.0

Values returns all known values for DQStopJobOnFailureTiming. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type DQTransformOutput added in v1.37.0

type DQTransformOutput string
const (
	DQTransformOutputPrimaryInput      DQTransformOutput = "PrimaryInput"
	DQTransformOutputEvaluationResults DQTransformOutput = "EvaluationResults"
)

Enum values for DQTransformOutput

func (DQTransformOutput) Values added in v1.37.0

Values returns all known values for DQTransformOutput. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type DataCatalogEncryptionSettings

type DataCatalogEncryptionSettings struct {

	// When connection password protection is enabled, the Data Catalog uses a
	// customer-provided key to encrypt the password as part of CreateConnection or
	// UpdateConnection and store it in the ENCRYPTED_PASSWORD field in the connection
	// properties. You can enable catalog encryption or only password encryption.
	ConnectionPasswordEncryption *ConnectionPasswordEncryption

	// Specifies the encryption-at-rest configuration for the Data Catalog.
	EncryptionAtRest *EncryptionAtRest
	// contains filtered or unexported fields
}

Contains configuration information for maintaining Data Catalog security.
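
A sketch that enables connection password encryption with a customer KMS key, applied through the service client's PutDataCatalogEncryptionSettings operation (the key ARN is a placeholder):

_, err := client.PutDataCatalogEncryptionSettings(ctx, &glue.PutDataCatalogEncryptionSettingsInput{
	DataCatalogEncryptionSettings: &types.DataCatalogEncryptionSettings{
		ConnectionPasswordEncryption: &types.ConnectionPasswordEncryption{
			ReturnConnectionPasswordEncrypted: true,
			// Placeholder key ARN.
			AwsKmsKeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"),
		},
	},
})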

type DataFormat added in v0.30.0

type DataFormat string
const (
	DataFormatAvro     DataFormat = "AVRO"
	DataFormatJson     DataFormat = "JSON"
	DataFormatProtobuf DataFormat = "PROTOBUF"
)

Enum values for DataFormat

func (DataFormat) Values added in v0.30.0

func (DataFormat) Values() []DataFormat

Values returns all known values for DataFormat. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type DataLakePrincipal

type DataLakePrincipal struct {

	// An identifier for the Lake Formation principal.
	DataLakePrincipalIdentifier *string
	// contains filtered or unexported fields
}

The Lake Formation principal.

type DataQualityAnalyzerResult added in v1.72.0

type DataQualityAnalyzerResult struct {

	// A description of the data quality analyzer.
	Description *string

	// A map of metrics associated with the evaluation of the analyzer.
	EvaluatedMetrics map[string]float64

	// An evaluation message.
	EvaluationMessage *string

	// The name of the data quality analyzer.
	Name *string
	// contains filtered or unexported fields
}

Describes the result of the evaluation of a data quality analyzer.

type DataQualityEvaluationRunAdditionalRunOptions added in v1.37.0

type DataQualityEvaluationRunAdditionalRunOptions struct {

	// Whether or not to enable CloudWatch metrics.
	CloudWatchMetricsEnabled *bool

	// Prefix for Amazon S3 to store results.
	ResultsS3Prefix *string
	// contains filtered or unexported fields
}

Additional run options you can specify for an evaluation run.

type DataQualityMetricValues added in v1.72.0

type DataQualityMetricValues struct {

	// The actual value of the data quality metric.
	ActualValue *float64

	// The expected value of the data quality metric according to the analysis of
	// historical data.
	ExpectedValue *float64

	// The lower limit of the data quality metric value according to the analysis of
	// historical data.
	LowerLimit *float64

	// The upper limit of the data quality metric value according to the analysis of
	// historical data.
	UpperLimit *float64
	// contains filtered or unexported fields
}

Describes the data quality metric value according to the analysis of historical data.

type DataQualityObservation added in v1.72.0

type DataQualityObservation struct {

	// A description of the data quality observation.
	Description *string

	// An object of type MetricBasedObservation representing the observation that is
	// based on evaluated data quality metrics.
	MetricBasedObservation *MetricBasedObservation
	// contains filtered or unexported fields
}

Describes the observation generated after evaluating the rules and analyzers.

type DataQualityResult added in v1.37.0

type DataQualityResult struct {

	// A list of DataQualityAnalyzerResult objects representing the results for each
	// analyzer.
	AnalyzerResults []DataQualityAnalyzerResult

	// The date and time when this data quality run completed.
	CompletedOn *time.Time

	// The table associated with the data quality result, if any.
	DataSource *DataSource

	// In the context of a job in Glue Studio, each node in the canvas is typically
	// assigned some sort of name, and data quality nodes will have names. In the
	// case of multiple nodes, the evaluationContext can differentiate the nodes.
	EvaluationContext *string

	// The job name associated with the data quality result, if any.
	JobName *string

	// The job run ID associated with the data quality result, if any.
	JobRunId *string

	// A list of DataQualityObservation objects representing the observations
	// generated after evaluating the rules and analyzers.
	Observations []DataQualityObservation

	// A unique result ID for the data quality result.
	ResultId *string

	// A list of DataQualityRuleResult objects representing the results for each rule.
	RuleResults []DataQualityRuleResult

	// The unique run ID for the ruleset evaluation for this data quality result.
	RulesetEvaluationRunId *string

	// The name of the ruleset associated with the data quality result.
	RulesetName *string

	// An aggregate data quality score. Represents the ratio of rules that passed to
	// the total number of rules.
	Score *float64

	// The date and time when this data quality run started.
	StartedOn *time.Time
	// contains filtered or unexported fields
}

Describes a data quality result.
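
A short sketch that prints the aggregate score and any failing rules, assuming result is a retrieved DataQualityResult and fmt is imported:

fmt.Printf("score: %.2f\n", aws.ToFloat64(result.Score))
for _, rr := range result.RuleResults {
	if rr.Result == types.DataQualityRuleResultStatusFail {
		fmt.Printf("rule %s failed: %s\n",
			aws.ToString(rr.Name), aws.ToString(rr.EvaluationMessage))
	}
}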

type DataQualityResultDescription added in v1.37.0

type DataQualityResultDescription struct {

	// The table name associated with the data quality result.
	DataSource *DataSource

	// The job name associated with the data quality result.
	JobName *string

	// The job run ID associated with the data quality result.
	JobRunId *string

	// The unique result ID for this data quality result.
	ResultId *string

	// The time that the run started for this data quality result.
	StartedOn *time.Time
	// contains filtered or unexported fields
}

Describes a data quality result.

type DataQualityResultFilterCriteria added in v1.37.0

type DataQualityResultFilterCriteria struct {

	// Filter results by the specified data source. For example, retrieving all
	// results for a Glue table.
	DataSource *DataSource

	// Filter results by the specified job name.
	JobName *string

	// Filter results by the specified job run ID.
	JobRunId *string

	// Filter results by runs that started after this time.
	StartedAfter *time.Time

	// Filter results by runs that started before this time.
	StartedBefore *time.Time
	// contains filtered or unexported fields
}

Criteria used to return data quality results.

type DataQualityRuleRecommendationRunDescription added in v1.37.0

type DataQualityRuleRecommendationRunDescription struct {

	// The data source (Glue table) associated with the recommendation run.
	DataSource *DataSource

	// The unique run identifier associated with this run.
	RunId *string

	// The date and time when this run started.
	StartedOn *time.Time

	// The status for this run.
	Status TaskStatusType
	// contains filtered or unexported fields
}

Describes the result of a data quality rule recommendation run.

type DataQualityRuleRecommendationRunFilter added in v1.37.0

type DataQualityRuleRecommendationRunFilter struct {

	// Filter based on a specified data source (Glue table).
	//
	// This member is required.
	DataSource *DataSource

	// Filter based on time for results started after the provided time.
	StartedAfter *time.Time

	// Filter based on time for results started before the provided time.
	StartedBefore *time.Time
	// contains filtered or unexported fields
}

A filter for listing data quality recommendation runs.

type DataQualityRuleResult added in v1.37.0

type DataQualityRuleResult struct {

	// A description of the data quality rule.
	Description *string

	// A map of metrics associated with the evaluation of the rule.
	EvaluatedMetrics map[string]float64

	// An evaluation message.
	EvaluationMessage *string

	// The name of the data quality rule.
	Name *string

	// A pass or fail status for the rule.
	Result DataQualityRuleResultStatus
	// contains filtered or unexported fields
}

Describes the result of the evaluation of a data quality rule.

type DataQualityRuleResultStatus added in v1.37.0

type DataQualityRuleResultStatus string
const (
	DataQualityRuleResultStatusPass  DataQualityRuleResultStatus = "PASS"
	DataQualityRuleResultStatusFail  DataQualityRuleResultStatus = "FAIL"
	DataQualityRuleResultStatusError DataQualityRuleResultStatus = "ERROR"
)

Enum values for DataQualityRuleResultStatus

func (DataQualityRuleResultStatus) Values added in v1.37.0

Values returns all known values for DataQualityRuleResultStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type DataQualityRulesetEvaluationRunDescription added in v1.37.0

type DataQualityRulesetEvaluationRunDescription struct {

	// The data source (a Glue table) associated with the run.
	DataSource *DataSource

	// The unique run identifier associated with this run.
	RunId *string

	// The date and time when the run started.
	StartedOn *time.Time

	// The status for this run.
	Status TaskStatusType
	// contains filtered or unexported fields
}

Describes the result of a data quality ruleset evaluation run.

type DataQualityRulesetEvaluationRunFilter added in v1.37.0

type DataQualityRulesetEvaluationRunFilter struct {

	// Filter based on a data source (a Glue table) associated with the run.
	//
	// This member is required.
	DataSource *DataSource

	// Filter results by runs that started after this time.
	StartedAfter *time.Time

	// Filter results by runs that started before this time.
	StartedBefore *time.Time
	// contains filtered or unexported fields
}

The filter criteria.

type DataQualityRulesetFilterCriteria added in v1.37.0

type DataQualityRulesetFilterCriteria struct {

	// Filter on rulesets created after this date.
	CreatedAfter *time.Time

	// Filter on rulesets created before this date.
	CreatedBefore *time.Time

	// The description of the ruleset filter criteria.
	Description *string

	// Filter on rulesets last modified after this date.
	LastModifiedAfter *time.Time

	// Filter on rulesets last modified before this date.
	LastModifiedBefore *time.Time

	// The name of the ruleset filter criteria.
	Name *string

	// The name and database name of the target table.
	TargetTable *DataQualityTargetTable
	// contains filtered or unexported fields
}

The criteria used to filter data quality rulesets.

type DataQualityRulesetListDetails added in v1.37.0

type DataQualityRulesetListDetails struct {

	// The date and time the data quality ruleset was created.
	CreatedOn *time.Time

	// A description of the data quality ruleset.
	Description *string

	// The date and time the data quality ruleset was last modified.
	LastModifiedOn *time.Time

	// The name of the data quality ruleset.
	Name *string

	// When a ruleset was created from a recommendation run, this run ID is generated
	// to link the two together.
	RecommendationRunId *string

	// The number of rules in the ruleset.
	RuleCount *int32

	// An object representing a Glue table.
	TargetTable *DataQualityTargetTable
	// contains filtered or unexported fields
}

Describes a data quality ruleset returned by GetDataQualityRuleset .

type DataQualityTargetTable added in v1.37.0

type DataQualityTargetTable struct {

	// The name of the database where the Glue table exists.
	//
	// This member is required.
	DatabaseName *string

	// The name of the Glue table.
	//
	// This member is required.
	TableName *string

	// The catalog id where the Glue table exists.
	CatalogId *string
	// contains filtered or unexported fields
}

An object representing a Glue table.

type DataSource added in v1.37.0

type DataSource struct {

	// A Glue table.
	//
	// This member is required.
	GlueTable *GlueTable
	// contains filtered or unexported fields
}

A data source (a Glue table) for which you want data quality results.
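
A sketch of a DataSource that points at a catalog table; the database and table names are hypothetical, and GlueTable is the catalog table reference type defined elsewhere in this package:

ds := types.DataSource{
	GlueTable: &types.GlueTable{
		DatabaseName: aws.String("sales"),  // hypothetical database
		TableName:    aws.String("orders"), // hypothetical table
	},
}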

type Database

type Database struct {

	// The name of the database. For Hive compatibility, this is folded to lowercase
	// when it is stored.
	//
	// This member is required.
	Name *string

	// The ID of the Data Catalog in which the database resides.
	CatalogId *string

	// Creates a set of default permissions on the table for principals. Used by Lake
	// Formation. Not used in the normal course of Glue operations.
	CreateTableDefaultPermissions []PrincipalPermissions

	// The time at which the metadata database was created in the catalog.
	CreateTime *time.Time

	// A description of the database.
	Description *string

	// A FederatedDatabase structure that references an entity outside the Glue Data
	// Catalog.
	FederatedDatabase *FederatedDatabase

	// The location of the database (for example, an HDFS path).
	LocationUri *string

	// These key-value pairs define parameters and properties of the database.
	Parameters map[string]string

	// A DatabaseIdentifier structure that describes a target database for resource
	// linking.
	TargetDatabase *DatabaseIdentifier
	// contains filtered or unexported fields
}

The Database object represents a logical grouping of tables that might reside in a Hive metastore or an RDBMS.

type DatabaseIdentifier

type DatabaseIdentifier struct {

	// The ID of the Data Catalog in which the database resides.
	CatalogId *string

	// The name of the catalog database.
	DatabaseName *string

	// Region of the target database.
	Region *string
	// contains filtered or unexported fields
}

A structure that describes a target database for resource linking.

type DatabaseInput

type DatabaseInput struct {

	// The name of the database. For Hive compatibility, this is folded to lowercase
	// when it is stored.
	//
	// This member is required.
	Name *string

	// Creates a set of default permissions on the table for principals. Used by Lake
	// Formation. Not used in the normal course of Glue operations.
	CreateTableDefaultPermissions []PrincipalPermissions

	// A description of the database.
	Description *string

	// A FederatedDatabase structure that references an entity outside the Glue Data
	// Catalog.
	FederatedDatabase *FederatedDatabase

	// The location of the database (for example, an HDFS path).
	LocationUri *string

	// These key-value pairs define parameters and properties of the database.
	Parameters map[string]string

	// A DatabaseIdentifier structure that describes a target database for resource
	// linking.
	TargetDatabase *DatabaseIdentifier
	// contains filtered or unexported fields
}

The structure used to create or update a database.
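
For example, a DatabaseInput for a new database backed by an Amazon S3 location, passed to the service client's CreateDatabase operation (the names and path are placeholders):

_, err := client.CreateDatabase(ctx, &glue.CreateDatabaseInput{
	DatabaseInput: &types.DatabaseInput{
		Name:        aws.String("analytics"),
		Description: aws.String("Curated analytics tables"),
		LocationUri: aws.String("s3://my-bucket/analytics/"),
	},
})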

type Datatype added in v1.25.0

type Datatype struct {

	// The datatype of the value.
	//
	// This member is required.
	Id *string

	// A label assigned to the datatype.
	//
	// This member is required.
	Label *string
	// contains filtered or unexported fields
}

A structure representing the datatype of the value.

type DateColumnStatisticsData

type DateColumnStatisticsData struct {

	// The number of distinct values in a column.
	//
	// This member is required.
	NumberOfDistinctValues int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64

	// The highest value in the column.
	MaximumValue *time.Time

	// The lowest value in the column.
	MinimumValue *time.Time
	// contains filtered or unexported fields
}

Defines column statistics supported for timestamp data columns.

type DecimalColumnStatisticsData

type DecimalColumnStatisticsData struct {

	// The number of distinct values in a column.
	//
	// This member is required.
	NumberOfDistinctValues int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64

	// The highest value in the column.
	MaximumValue *DecimalNumber

	// The lowest value in the column.
	MinimumValue *DecimalNumber
	// contains filtered or unexported fields
}

Defines column statistics supported for fixed-point number data columns.

type DecimalNumber

type DecimalNumber struct {

	// The scale that determines where the decimal point falls in the unscaled value.
	//
	// This member is required.
	Scale int32

	// The unscaled numeric value.
	//
	// This member is required.
	UnscaledValue []byte
	// contains filtered or unexported fields
}

Contains a numeric value in decimal format.
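
As a worked example, the decimal 123.45 has Scale 2 and unscaled value 12345. The sketch below assumes UnscaledValue holds big-endian bytes in the layout produced by Java's BigInteger.toByteArray for positive values; verify against your data before relying on it:

package main

import (
	"fmt"
	"math/big"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// 123.45 == 12345 * 10^-2, so Scale is 2 and the unscaled value is 12345.
	// Assumption: UnscaledValue is big-endian, matching Java's
	// BigInteger.toByteArray for positive values.
	unscaled := big.NewInt(12345)
	dn := types.DecimalNumber{
		Scale:         2,
		UnscaledValue: unscaled.Bytes(), // [0x30, 0x39]
	}
	fmt.Printf("scale=%d bytes=%x\n", dn.Scale, dn.UnscaledValue)
}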

type DeleteBehavior

type DeleteBehavior string
const (
	DeleteBehaviorLog                 DeleteBehavior = "LOG"
	DeleteBehaviorDeleteFromDatabase  DeleteBehavior = "DELETE_FROM_DATABASE"
	DeleteBehaviorDeprecateInDatabase DeleteBehavior = "DEPRECATE_IN_DATABASE"
)

Enum values for DeleteBehavior

func (DeleteBehavior) Values added in v0.29.0

func (DeleteBehavior) Values() []DeleteBehavior

Values returns all known values for DeleteBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
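
A short sketch showing one way to use Values for client-side validation; because the service can add enum values over time, an unknown string is not necessarily invalid server-side:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// isKnownDeleteBehavior reports whether s matches a DeleteBehavior value
// known to this client version; newer values may exist server-side.
func isKnownDeleteBehavior(s string) bool {
	for _, v := range types.DeleteBehavior("").Values() {
		if string(v) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownDeleteBehavior("LOG"))   // true
	fmt.Println(isKnownDeleteBehavior("PURGE")) // false for this client version
}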

type DeltaTarget added in v1.18.0

type DeltaTarget struct {

	// The name of the connection to use to connect to the Delta table target.
	ConnectionName *string

	// Specifies whether the crawler will create native tables, to allow integration
	// with query engines that support querying of the Delta transaction log directly.
	CreateNativeDeltaTable *bool

	// A list of the Amazon S3 paths to the Delta tables.
	DeltaTables []string

	// Specifies whether to write the manifest files to the Delta table path.
	WriteManifest *bool
	// contains filtered or unexported fields
}

Specifies a Delta data store to crawl one or more Delta tables.
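
A minimal sketch of a DeltaTarget for a crawler definition; the S3 path is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Placeholder path; one DeltaTarget can list several Delta tables.
	target := types.DeltaTarget{
		DeltaTables:            []string{"s3://example-bucket/delta/events/"},
		CreateNativeDeltaTable: aws.Bool(true), // for engines that read the transaction log directly
		WriteManifest:          aws.Bool(false),
	}
	fmt.Println(target.DeltaTables)
}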

type DeltaTargetCompressionType added in v1.43.0

type DeltaTargetCompressionType string
const (
	DeltaTargetCompressionTypeUncompressed DeltaTargetCompressionType = "uncompressed"
	DeltaTargetCompressionTypeSnappy       DeltaTargetCompressionType = "snappy"
)

Enum values for DeltaTargetCompressionType

func (DeltaTargetCompressionType) Values added in v1.43.0

Values returns all known values for DeltaTargetCompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type DevEndpoint

type DevEndpoint struct {

	// A map of arguments used to configure the DevEndpoint . Valid arguments are:
	//   - "--enable-glue-datacatalog": ""
	// You can specify a version of Python support for development endpoints by using
	// the Arguments parameter in the CreateDevEndpoint or UpdateDevEndpoint APIs. If
	// no arguments are provided, the version defaults to Python 2.
	Arguments map[string]string

	// The Amazon Web Services Availability Zone where this DevEndpoint is located.
	AvailabilityZone *string

	// The point in time at which this DevEndpoint was created.
	CreatedTimestamp *time.Time

	// The name of the DevEndpoint .
	EndpointName *string

	// The path to one or more Java .jar files in an S3 bucket that should be loaded
	// in your DevEndpoint . You can only use pure Java/Scala libraries with a
	// DevEndpoint .
	ExtraJarsS3Path *string

	// The paths to one or more Python libraries in an Amazon S3 bucket that should be
	// loaded in your DevEndpoint . Multiple values must be complete paths separated by
	// a comma. You can only use pure Python libraries with a DevEndpoint . Libraries
	// that rely on C extensions, such as the pandas (http://pandas.pydata.org/)
	// Python data analysis library, are not currently supported.
	ExtraPythonLibsS3Path *string

	// The reason for a current failure in this DevEndpoint .
	FailureReason *string

	// Glue version determines the versions of Apache Spark and Python that Glue
	// supports. The Python version indicates the version supported for running your
	// ETL scripts on development endpoints. For more information about the available
	// Glue versions and corresponding Spark and Python versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html)
	// in the developer guide. Development endpoints that are created without
	// specifying a Glue version default to Glue 0.9. You can specify a version of
	// Python support for development endpoints by using the Arguments parameter in
	// the CreateDevEndpoint or UpdateDevEndpoint APIs. If no arguments are provided,
	// the version defaults to Python 2.
	GlueVersion *string

	// The point in time at which this DevEndpoint was last modified.
	LastModifiedTimestamp *time.Time

	// The status of the last update.
	LastUpdateStatus *string

	// The number of Glue Data Processing Units (DPUs) allocated to this DevEndpoint .
	NumberOfNodes int32

	// The number of workers of a defined workerType that are allocated to the
	// development endpoint. The maximum number of workers you can define is 299 for
	// G.1X , and 149 for G.2X .
	NumberOfWorkers *int32

	// A private IP address to access the DevEndpoint within a VPC if the DevEndpoint
	// is created within one. The PrivateAddress field is present only when you create
	// the DevEndpoint within your VPC.
	PrivateAddress *string

	// The public IP address used by this DevEndpoint . The PublicAddress field is
	// present only when you create a non-virtual private cloud (VPC) DevEndpoint .
	PublicAddress *string

	// The public key to be used by this DevEndpoint for authentication. This
	// attribute is provided for backward compatibility; the recommended attribute to
	// use is PublicKeys .
	PublicKey *string

	// A list of public keys to be used by the DevEndpoints for authentication. Using
	// this attribute is preferred over a single public key because the public keys
	// allow you to have a different private key per client. If you previously created
	// an endpoint with a public key, you must remove that key to be able to set a list
	// of public keys. Call the UpdateDevEndpoint API operation with the public key
	// content in the deletePublicKeys attribute, and the list of new keys in the
	// addPublicKeys attribute.
	PublicKeys []string

	// The Amazon Resource Name (ARN) of the IAM role used in this DevEndpoint .
	RoleArn *string

	// The name of the SecurityConfiguration structure to be used with this DevEndpoint
	// .
	SecurityConfiguration *string

	// A list of security group identifiers used in this DevEndpoint .
	SecurityGroupIds []string

	// The current status of this DevEndpoint .
	Status *string

	// The subnet ID for this DevEndpoint .
	SubnetId *string

	// The ID of the virtual private cloud (VPC) used by this DevEndpoint .
	VpcId *string

	// The type of predefined worker that is allocated to the development endpoint.
	// Accepts a value of Standard, G.1X, or G.2X.
	//   - For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory
	//   and a 50GB disk, and 2 executors per worker.
	//   - For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of
	//   memory, 64 GB disk), and provides 1 executor per worker. We recommend this
	//   worker type for memory-intensive jobs.
	//   - For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of
	//   memory, 128 GB disk), and provides 1 executor per worker. We recommend this
	//   worker type for memory-intensive jobs.
	// Known issue: when a development endpoint is created with the G.2X WorkerType
	// configuration, the Spark drivers for the development endpoint will run on 4
	// vCPU, 16 GB of memory, and a 64 GB disk.
	WorkerType WorkerType

	// The YARN endpoint address used by this DevEndpoint .
	YarnEndpointAddress *string

	// The Apache Zeppelin port for the remote Apache Spark interpreter.
	ZeppelinRemoteSparkInterpreterPort int32
	// contains filtered or unexported fields
}

A development endpoint where a developer can remotely debug extract, transform, and load (ETL) scripts.

type DevEndpointCustomLibraries

type DevEndpointCustomLibraries struct {

	// The path to one or more Java .jar files in an S3 bucket that should be loaded
	// in your DevEndpoint . You can only use pure Java/Scala libraries with a
	// DevEndpoint .
	ExtraJarsS3Path *string

	// The paths to one or more Python libraries in an Amazon Simple Storage Service
	// (Amazon S3) bucket that should be loaded in your DevEndpoint . Multiple values
	// must be complete paths separated by a comma. You can only use pure Python
	// libraries with a DevEndpoint . Libraries that rely on C extensions, such as the
	// pandas (http://pandas.pydata.org/) Python data analysis library, are not
	// currently supported.
	ExtraPythonLibsS3Path *string
	// contains filtered or unexported fields
}

Custom libraries to be loaded into a development endpoint.

type DirectJDBCSource added in v1.42.0

type DirectJDBCSource struct {

	// The connection name of the JDBC source.
	//
	// This member is required.
	ConnectionName *string

	// The connection type of the JDBC source.
	//
	// This member is required.
	ConnectionType JDBCConnectionType

	// The database of the JDBC source connection.
	//
	// This member is required.
	Database *string

	// The name of the JDBC source connection.
	//
	// This member is required.
	Name *string

	// The table of the JDBC source connection.
	//
	// This member is required.
	Table *string

	// The temp directory of the JDBC Redshift source.
	RedshiftTmpDir *string
	// contains filtered or unexported fields
}

Specifies the direct JDBC source connection.

type DirectKafkaSource added in v1.25.0

type DirectKafkaSource struct {

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// Specifies options related to data preview for viewing a sample of your data.
	DataPreviewOptions *StreamingDataPreviewOptions

	// Whether to automatically determine the schema from the incoming data.
	DetectSchema *bool

	// Specifies the streaming options.
	StreamingOptions *KafkaStreamingSourceOptions

	// The amount of time to spend processing each micro batch.
	WindowSize *int32
	// contains filtered or unexported fields
}

Specifies an Apache Kafka data store.

type DirectKinesisSource added in v1.25.0

type DirectKinesisSource struct {

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// Additional options for data preview.
	DataPreviewOptions *StreamingDataPreviewOptions

	// Whether to automatically determine the schema from the incoming data.
	DetectSchema *bool

	// Additional options for the Kinesis streaming data source.
	StreamingOptions *KinesisStreamingSourceOptions

	// The amount of time to spend processing each micro batch.
	WindowSize *int32
	// contains filtered or unexported fields
}

Specifies a direct Amazon Kinesis data source.

type DirectSchemaChangePolicy added in v1.25.0

type DirectSchemaChangePolicy struct {

	// Specifies the database that the schema change policy applies to.
	Database *string

	// Whether to use the specified update behavior when the crawler finds a changed
	// schema.
	EnableUpdateCatalog *bool

	// Specifies the table in the database that the schema change policy applies to.
	Table *string

	// The update behavior when the crawler finds a changed schema.
	UpdateBehavior UpdateCatalogBehavior
	// contains filtered or unexported fields
}

A policy that specifies update behavior for the crawler.

type DoubleColumnStatisticsData

type DoubleColumnStatisticsData struct {

	// The number of distinct values in a column.
	//
	// This member is required.
	NumberOfDistinctValues int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64

	// The highest value in the column.
	MaximumValue float64

	// The lowest value in the column.
	MinimumValue float64
	// contains filtered or unexported fields
}

Defines column statistics supported for floating-point number data columns.

type DropDuplicates added in v1.25.0

type DropDuplicates struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// The names of the columns to be merged or removed if repeating.
	Columns [][]string
	// contains filtered or unexported fields
}

Specifies a transform that removes rows of repeating data from a data set.

type DropFields added in v1.25.0

type DropFields struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A JSON path to a variable in the data structure.
	//
	// This member is required.
	Paths [][]string
	// contains filtered or unexported fields
}

Specifies a transform that chooses the data property keys that you want to drop.

type DropNullFields added in v1.25.0

type DropNullFields struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A structure that represents whether certain values are recognized as null
	// values for removal.
	NullCheckBoxList *NullCheckBoxList

	// A structure that specifies a list of NullValueField structures that represent a
	// custom null value, such as zero or another value used as a null placeholder
	// unique to the dataset. The DropNullFields transform removes custom null values
	// only if both the value of the null placeholder and the datatype match the data.
	NullTextList []NullValueField
	// contains filtered or unexported fields
}

Specifies a transform that removes columns from the dataset if all values in the column are 'null'. By default, Glue Studio recognizes null objects, but some values, such as empty strings, strings that are "null", -1 integers, or other placeholders such as zeros, are not automatically recognized as nulls.

type DynamicTransform added in v1.36.0

type DynamicTransform struct {

	// Specifies the name of the function of the dynamic transform.
	//
	// This member is required.
	FunctionName *string

	// Specifies the inputs for the dynamic transform that are required.
	//
	// This member is required.
	Inputs []string

	// Specifies the name of the dynamic transform.
	//
	// This member is required.
	Name *string

	// Specifies the path of the dynamic transform source and config files.
	//
	// This member is required.
	Path *string

	// Specifies the name of the dynamic transform as it appears in the Glue Studio
	// visual editor.
	//
	// This member is required.
	TransformName *string

	// Specifies the data schema for the dynamic transform.
	OutputSchemas []GlueSchema

	// Specifies the parameters of the dynamic transform.
	Parameters []TransformConfigParameter

	// This field is not used and will be deprecated in a future release.
	Version *string
	// contains filtered or unexported fields
}

Specifies the set of parameters needed to perform the dynamic transform.

type DynamoDBCatalogSource added in v1.25.0

type DynamoDBCatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a DynamoDB data source in the Glue Data Catalog.

type DynamoDBTarget

type DynamoDBTarget struct {

	// The name of the DynamoDB table to crawl.
	Path *string

	// Indicates whether to scan all the records, or to sample rows from the table.
	// Scanning all the records can take a long time when the table is not a high
	// throughput table. A value of true means to scan all records, while a value of
	// false means to sample the records. If no value is specified, the value defaults
	// to true .
	ScanAll *bool

	// The percentage of the configured read capacity units to use by the Glue
	// crawler. Read capacity units is a term defined by DynamoDB, and is a numeric
	// value that acts as a rate limiter for the number of reads that can be performed
	// on that table per second. The valid values are null or a value between 0.1 and
	// 1.5. A null value is used when the user does not provide a value, and defaults
	// to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25
	// of the max configured Read Capacity Unit (for tables using on-demand mode).
	ScanRate *float64
	// contains filtered or unexported fields
}

Specifies an Amazon DynamoDB table to crawl.
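
A minimal sketch that samples a quarter of the configured read capacity instead of scanning every record; the table name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	target := types.DynamoDBTarget{
		Path:     aws.String("orders"),  // placeholder DynamoDB table name
		ScanAll:  aws.Bool(false),       // sample rows rather than scan all records
		ScanRate: aws.Float64(0.25),     // fraction of configured read capacity units
	}
	fmt.Println(*target.Path)
}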

type Edge

type Edge struct {

	// The unique ID of the node within the workflow where the edge ends.
	DestinationId *string

	// The unique ID of the node within the workflow where the edge starts.
	SourceId *string
	// contains filtered or unexported fields
}

An edge represents a directed connection between two Glue components that are part of the workflow the edge belongs to.

type EnableHybridValues

type EnableHybridValues string
const (
	EnableHybridValuesTrue  EnableHybridValues = "TRUE"
	EnableHybridValuesFalse EnableHybridValues = "FALSE"
)

Enum values for EnableHybridValues

func (EnableHybridValues) Values added in v0.29.0

Values returns all known values for EnableHybridValues. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type EncryptionAtRest

type EncryptionAtRest struct {

	// The encryption-at-rest mode for encrypting Data Catalog data.
	//
	// This member is required.
	CatalogEncryptionMode CatalogEncryptionMode

	// The role that Glue assumes to encrypt and decrypt the Data Catalog objects on
	// the caller's behalf.
	CatalogEncryptionServiceRole *string

	// The ID of the KMS key to use for encryption at rest.
	SseAwsKmsKeyId *string
	// contains filtered or unexported fields
}

Specifies the encryption-at-rest configuration for the Data Catalog.

type EncryptionConfiguration

type EncryptionConfiguration struct {

	// The encryption configuration for Amazon CloudWatch.
	CloudWatchEncryption *CloudWatchEncryption

	// The encryption configuration for job bookmarks.
	JobBookmarksEncryption *JobBookmarksEncryption

	// The encryption configuration for Amazon Simple Storage Service (Amazon S3) data.
	S3Encryption []S3Encryption
	// contains filtered or unexported fields
}

Specifies an encryption configuration.

type EntityNotFoundException

type EntityNotFoundException struct {
	Message *string

	ErrorCodeOverride *string

	FromFederationSource *bool
	// contains filtered or unexported fields
}

A specified entity does not exist.

func (*EntityNotFoundException) Error

func (e *EntityNotFoundException) Error() string

func (*EntityNotFoundException) ErrorCode

func (e *EntityNotFoundException) ErrorCode() string

func (*EntityNotFoundException) ErrorFault

func (e *EntityNotFoundException) ErrorFault() smithy.ErrorFault

func (*EntityNotFoundException) ErrorMessage

func (e *EntityNotFoundException) ErrorMessage() string

type ErrorDetail

type ErrorDetail struct {

	// The code associated with this error.
	ErrorCode *string

	// A message describing the error.
	ErrorMessage *string
	// contains filtered or unexported fields
}

Contains details about an error.

type ErrorDetails added in v0.30.0

type ErrorDetails struct {

	// The error code for an error.
	ErrorCode *string

	// The error message for an error.
	ErrorMessage *string
	// contains filtered or unexported fields
}

An object containing error details.

type EvaluateDataQuality added in v1.37.0

type EvaluateDataQuality struct {

	// The inputs of your data quality evaluation.
	//
	// This member is required.
	Inputs []string

	// The name of the data quality evaluation.
	//
	// This member is required.
	Name *string

	// The ruleset for your data quality evaluation.
	//
	// This member is required.
	Ruleset *string

	// The output of your data quality evaluation.
	Output DQTransformOutput

	// Options to configure how your results are published.
	PublishingOptions *DQResultsPublishingOptions

	// Options to configure how your job will stop if your data quality evaluation
	// fails.
	StopJobOnFailureOptions *DQStopJobOnFailureOptions
	// contains filtered or unexported fields
}

Specifies your data quality evaluation criteria.
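
A hedged sketch of an EvaluateDataQuality node; the DQDL ruleset and the upstream node name are illustrative, and the Output constant (DQTransformOutputPrimaryInput) is assumed from this package's DQTransformOutput enum:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Illustrative DQDL ruleset; "source" is a placeholder upstream node name.
	eval := types.EvaluateDataQuality{
		Name:    aws.String("CheckRowCount"),
		Inputs:  []string{"source"},
		Ruleset: aws.String("Rules = [ RowCount > 0 ]"),
		Output:  types.DQTransformOutputPrimaryInput, // assumed enum constant
	}
	fmt.Println(*eval.Name)
}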

type EvaluateDataQualityMultiFrame added in v1.49.0

type EvaluateDataQualityMultiFrame struct {

	// The inputs of your data quality evaluation. The first input in this list is the
	// primary data source.
	//
	// This member is required.
	Inputs []string

	// The name of the data quality evaluation.
	//
	// This member is required.
	Name *string

	// The ruleset for your data quality evaluation.
	//
	// This member is required.
	Ruleset *string

	// The aliases of all data sources except primary.
	AdditionalDataSources map[string]string

	// Options to configure runtime behavior of the transform.
	AdditionalOptions map[string]string

	// Options to configure how your results are published.
	PublishingOptions *DQResultsPublishingOptions

	// Options to configure how your job will stop if your data quality evaluation
	// fails.
	StopJobOnFailureOptions *DQStopJobOnFailureOptions
	// contains filtered or unexported fields
}

Specifies your data quality evaluation criteria.

type EvaluationMetrics

type EvaluationMetrics struct {

	// The type of machine learning transform.
	//
	// This member is required.
	TransformType TransformType

	// The evaluation metrics for the find matches algorithm.
	FindMatchesMetrics *FindMatchesMetrics
	// contains filtered or unexported fields
}

Evaluation metrics provide an estimate of the quality of your machine learning transform.

type EventBatchingCondition added in v1.9.0

type EventBatchingCondition struct {

	// The number of events that must be received from Amazon EventBridge before the
	// EventBridge event trigger fires.
	//
	// This member is required.
	BatchSize *int32

	// The window of time in seconds after which the EventBridge event trigger
	// fires. The window starts when the first event is received.
	BatchWindow *int32
	// contains filtered or unexported fields
}

A batch condition that must be met (a specified number of events received or the batch time window expired) before an EventBridge event trigger fires.
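
A minimal sketch of a batching condition that fires after 10 events, or 900 seconds after the first event arrives, whichever comes first:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	cond := types.EventBatchingCondition{
		BatchSize:   aws.Int32(10),  // events required before the trigger fires
		BatchWindow: aws.Int32(900), // seconds after the first event
	}
	fmt.Println(*cond.BatchSize, *cond.BatchWindow)
}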

type ExecutionClass added in v1.29.0

type ExecutionClass string
const (
	ExecutionClassFlex     ExecutionClass = "FLEX"
	ExecutionClassStandard ExecutionClass = "STANDARD"
)

Enum values for ExecutionClass

func (ExecutionClass) Values added in v1.29.0

func (ExecutionClass) Values() []ExecutionClass

Values returns all known values for ExecutionClass. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ExecutionProperty

type ExecutionProperty struct {

	// The maximum number of concurrent runs allowed for the job. The default is 1. An
	// error is returned when this threshold is reached. The maximum value you can
	// specify is controlled by a service limit.
	MaxConcurrentRuns int32
	// contains filtered or unexported fields
}

An execution property of a job.

type ExistCondition

type ExistCondition string
const (
	ExistConditionMustExist ExistCondition = "MUST_EXIST"
	ExistConditionNotExist  ExistCondition = "NOT_EXIST"
	ExistConditionNone      ExistCondition = "NONE"
)

Enum values for ExistCondition

func (ExistCondition) Values added in v0.29.0

func (ExistCondition) Values() []ExistCondition

Values returns all known values for ExistCondition. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ExportLabelsTaskRunProperties

type ExportLabelsTaskRunProperties struct {

	// The Amazon Simple Storage Service (Amazon S3) path where you will export the
	// labels.
	OutputS3Path *string
	// contains filtered or unexported fields
}

Specifies configuration properties for an exporting labels task run.

type FederatedDatabase added in v1.45.0

type FederatedDatabase struct {

	// The name of the connection to the external metastore.
	ConnectionName *string

	// A unique identifier for the federated database.
	Identifier *string
	// contains filtered or unexported fields
}

A database that points to an entity outside the Glue Data Catalog.

type FederatedResourceAlreadyExistsException added in v1.45.0

type FederatedResourceAlreadyExistsException struct {
	Message *string

	ErrorCodeOverride *string

	AssociatedGlueResource *string
	// contains filtered or unexported fields
}

A federated resource already exists.

func (*FederatedResourceAlreadyExistsException) Error added in v1.45.0

func (*FederatedResourceAlreadyExistsException) ErrorCode added in v1.45.0

func (*FederatedResourceAlreadyExistsException) ErrorFault added in v1.45.0

func (*FederatedResourceAlreadyExistsException) ErrorMessage added in v1.45.0

type FederatedTable added in v1.45.0

type FederatedTable struct {

	// The name of the connection to the external metastore.
	ConnectionName *string

	// A unique identifier for the federated database.
	DatabaseIdentifier *string

	// A unique identifier for the federated table.
	Identifier *string
	// contains filtered or unexported fields
}

A table that points to an entity outside the Glue Data Catalog.

type FederationSourceErrorCode added in v1.45.0

type FederationSourceErrorCode string
const (
	FederationSourceErrorCodeInvalidResponseException       FederationSourceErrorCode = "InvalidResponseException"
	FederationSourceErrorCodeOperationTimeoutException      FederationSourceErrorCode = "OperationTimeoutException"
	FederationSourceErrorCodeOperationNotSupportedException FederationSourceErrorCode = "OperationNotSupportedException"
	FederationSourceErrorCodeInternalServiceException       FederationSourceErrorCode = "InternalServiceException"
	FederationSourceErrorCodeThrottlingException            FederationSourceErrorCode = "ThrottlingException"
)

Enum values for FederationSourceErrorCode

func (FederationSourceErrorCode) Values added in v1.45.0

Values returns all known values for FederationSourceErrorCode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type FederationSourceException added in v1.45.0

type FederationSourceException struct {
	Message *string

	ErrorCodeOverride *string

	FederationSourceErrorCode FederationSourceErrorCode
	// contains filtered or unexported fields
}

A federation source failed.

func (*FederationSourceException) Error added in v1.45.0

func (e *FederationSourceException) Error() string

func (*FederationSourceException) ErrorCode added in v1.45.0

func (e *FederationSourceException) ErrorCode() string

func (*FederationSourceException) ErrorFault added in v1.45.0

func (*FederationSourceException) ErrorMessage added in v1.45.0

func (e *FederationSourceException) ErrorMessage() string

type FederationSourceRetryableException added in v1.45.0

type FederationSourceRetryableException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

A federation source failed, but the operation may be retried.

func (*FederationSourceRetryableException) Error added in v1.45.0

func (*FederationSourceRetryableException) ErrorCode added in v1.45.0

func (*FederationSourceRetryableException) ErrorFault added in v1.45.0

func (*FederationSourceRetryableException) ErrorMessage added in v1.45.0

func (e *FederationSourceRetryableException) ErrorMessage() string

type FieldName added in v1.26.0

type FieldName string
const (
	FieldNameCrawlId   FieldName = "CRAWL_ID"
	FieldNameState     FieldName = "STATE"
	FieldNameStartTime FieldName = "START_TIME"
	FieldNameEndTime   FieldName = "END_TIME"
	FieldNameDpuHour   FieldName = "DPU_HOUR"
)

Enum values for FieldName

func (FieldName) Values added in v1.26.0

func (FieldName) Values() []FieldName

Values returns all known values for FieldName. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type FillMissingValues added in v1.25.0

type FillMissingValues struct {

	// A JSON path to a variable in the data structure for the dataset that is imputed.
	//
	// This member is required.
	ImputedPath *string

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A JSON path to a variable in the data structure for the dataset that is filled.
	FilledPath *string
	// contains filtered or unexported fields
}

Specifies a transform that locates records in the dataset that have missing values and adds a new field with a value determined by imputation. The input data set is used to train the machine learning model that determines what the missing value should be.

type Filter added in v1.25.0

type Filter struct {

	// Specifies a filter expression.
	//
	// This member is required.
	Filters []FilterExpression

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The operator used to filter rows by comparing the key value to a specified
	// value.
	//
	// This member is required.
	LogicalOperator FilterLogicalOperator

	// The name of the transform node.
	//
	// This member is required.
	Name *string
	// contains filtered or unexported fields
}

Specifies a transform that splits a dataset into two, based on a filter condition.

type FilterExpression added in v1.25.0

type FilterExpression struct {

	// The type of operation to perform in the expression.
	//
	// This member is required.
	Operation FilterOperation

	// A list of filter values.
	//
	// This member is required.
	Values []FilterValue

	// Whether the expression is to be negated.
	Negated *bool
	// contains filtered or unexported fields
}

Specifies a filter expression.
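
Putting Filter, FilterExpression, and the FilterValue types shown below together, a sketch of a node that keeps rows where a placeholder column equals a constant:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Keep rows where the extracted column "country" equals the constant "US".
	// Node and column names are placeholders.
	filter := types.Filter{
		Name:            aws.String("FilterUS"),
		Inputs:          []string{"source"},
		LogicalOperator: types.FilterLogicalOperatorAnd,
		Filters: []types.FilterExpression{{
			Operation: types.FilterOperationEq,
			Values: []types.FilterValue{
				{Type: types.FilterValueTypeColumnextracted, Value: []string{"country"}},
				{Type: types.FilterValueTypeConstant, Value: []string{"US"}},
			},
		}},
	}
	fmt.Println(*filter.Name)
}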

type FilterLogicalOperator added in v1.25.0

type FilterLogicalOperator string
const (
	FilterLogicalOperatorAnd FilterLogicalOperator = "AND"
	FilterLogicalOperatorOr  FilterLogicalOperator = "OR"
)

Enum values for FilterLogicalOperator

func (FilterLogicalOperator) Values added in v1.25.0

Values returns all known values for FilterLogicalOperator. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type FilterOperation added in v1.25.0

type FilterOperation string
const (
	FilterOperationEq     FilterOperation = "EQ"
	FilterOperationLt     FilterOperation = "LT"
	FilterOperationGt     FilterOperation = "GT"
	FilterOperationLte    FilterOperation = "LTE"
	FilterOperationGte    FilterOperation = "GTE"
	FilterOperationRegex  FilterOperation = "REGEX"
	FilterOperationIsnull FilterOperation = "ISNULL"
)

Enum values for FilterOperation

func (FilterOperation) Values added in v1.25.0

func (FilterOperation) Values() []FilterOperation

Values returns all known values for FilterOperation. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type FilterOperator added in v1.26.0

type FilterOperator string
const (
	FilterOperatorGt FilterOperator = "GT"
	FilterOperatorGe FilterOperator = "GE"
	FilterOperatorLt FilterOperator = "LT"
	FilterOperatorLe FilterOperator = "LE"
	FilterOperatorEq FilterOperator = "EQ"
	FilterOperatorNe FilterOperator = "NE"
)

Enum values for FilterOperator

func (FilterOperator) Values added in v1.26.0

func (FilterOperator) Values() []FilterOperator

Values returns all known values for FilterOperator. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type FilterValue added in v1.25.0

type FilterValue struct {

	// The type of filter value.
	//
	// This member is required.
	Type FilterValueType

	// The value to be associated.
	//
	// This member is required.
	Value []string
	// contains filtered or unexported fields
}

Represents a single entry in the list of values for a FilterExpression .

type FilterValueType added in v1.25.0

type FilterValueType string
const (
	FilterValueTypeColumnextracted FilterValueType = "COLUMNEXTRACTED"
	FilterValueTypeConstant        FilterValueType = "CONSTANT"
)

Enum values for FilterValueType

func (FilterValueType) Values added in v1.25.0

func (FilterValueType) Values() []FilterValueType

Values returns all known values for FilterValueType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type FindMatchesMetrics

type FindMatchesMetrics struct {

	// The area under the precision/recall curve (AUPRC) is a single number measuring
	// the overall quality of the transform, independent of the choice made for
	// precision vs. recall. Higher values indicate that you have a more attractive
	// precision vs. recall tradeoff. For more information, see Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall)
	// in Wikipedia.
	AreaUnderPRCurve *float64

	// A list of ColumnImportance structures containing column importance metrics,
	// sorted in order of descending importance.
	ColumnImportances []ColumnImportance

	// The confusion matrix shows you what your transform is predicting accurately and
	// what types of errors it is making. For more information, see Confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix)
	// in Wikipedia.
	ConfusionMatrix *ConfusionMatrix

	// The maximum F1 metric indicates the transform's accuracy between 0 and 1, where
	// 1 is the best accuracy. For more information, see F1 score (https://en.wikipedia.org/wiki/F1_score)
	// in Wikipedia.
	F1 *float64

	// The precision metric indicates how often your transform is correct when it
	// predicts a match. Specifically, it measures how well the transform finds true
	// positives from the total true positives possible. For more information, see
	// Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall) in
	// Wikipedia.
	Precision *float64

	// The recall metric indicates, for an actual match, how often your transform
	// predicts the match. Specifically, it measures how well the transform finds true
	// positives from the total records in the source data. For more information, see
	// Precision and recall (https://en.wikipedia.org/wiki/Precision_and_recall) in
	// Wikipedia.
	Recall *float64
	// contains filtered or unexported fields
}

The evaluation metrics for the find matches algorithm. The quality of your machine learning transform is measured by getting your transform to predict some matches and comparing the results to known matches from the same dataset. The quality metrics are based on a subset of your data, so they are not precise.

type FindMatchesParameters

type FindMatchesParameters struct {

	// The value that is selected when tuning your transform for a balance between
	// accuracy and cost. A value of 0.5 means that the system balances accuracy and
	// cost concerns. A value of 1.0 means a bias purely for accuracy, which typically
	// results in a higher cost, sometimes substantially higher. A value of 0.0 means a
	// bias purely for cost, which results in a less accurate FindMatches transform,
	// sometimes with unacceptable accuracy. Accuracy measures how well the transform
	// finds true positives and true negatives. Increasing accuracy requires more
	// machine resources and cost. But it also results in increased recall. Cost
	// measures how many compute resources, and thus money, are consumed to run the
	// transform.
	AccuracyCostTradeoff *float64

	// The value to switch on or off to force the output to match the provided labels
	// from users. If the value is True , the find matches transform forces the output
	// to match the provided labels. The results override the normal conflation
	// results. If the value is False , the find matches transform does not ensure all
	// the labels provided are respected, and the results rely on the trained model.
	// Note that setting this value to true may increase the conflation execution time.
	EnforceProvidedLabels *bool

	// The value selected when tuning your transform for a balance between precision
	// and recall. A value of 0.5 means no preference; a value of 1.0 means a bias
	// purely for precision, and a value of 0.0 means a bias for recall. Because this
	// is a tradeoff, choosing values close to 1.0 means very low recall, and choosing
	// values close to 0.0 results in very low precision. The precision metric
	// indicates how often your model is correct when it predicts a match. The recall
	// metric indicates, for an actual match, how often your model predicts the
	// match.
	PrecisionRecallTradeoff *float64

	// The name of a column that uniquely identifies rows in the source table. Used to
	// help identify matching records.
	PrimaryKeyColumnName *string
	// contains filtered or unexported fields
}

The parameters to configure the find matches transform.
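
A minimal sketch of tuning parameters that bias slightly toward precision; all values are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Column name is a placeholder.
	params := types.FindMatchesParameters{
		PrimaryKeyColumnName:    aws.String("record_id"),
		PrecisionRecallTradeoff: aws.Float64(0.75), // > 0.5 favors precision over recall
		AccuracyCostTradeoff:    aws.Float64(0.5),  // balance accuracy against cost
		EnforceProvidedLabels:   aws.Bool(false),
	}
	fmt.Println(*params.PrimaryKeyColumnName)
}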

type FindMatchesTaskRunProperties

type FindMatchesTaskRunProperties struct {

	// The job ID for the Find Matches task run.
	JobId *string

	// The name assigned to the job for the Find Matches task run.
	JobName *string

	// The job run ID for the Find Matches task run.
	JobRunId *string
	// contains filtered or unexported fields
}

Specifies configuration properties for a Find Matches task run.

type GetConnectionsFilter

type GetConnectionsFilter struct {

	// The type of connections to return. Currently, SFTP is not supported.
	ConnectionType ConnectionType

	// A criteria string that must match the criteria recorded in the connection
	// definition for that connection definition to be returned.
	MatchCriteria []string
	// contains filtered or unexported fields
}

Filters the connection definitions that are returned by the GetConnections API operation.
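
A sketch of a GetConnectionsInput that returns only JDBC connections; the match criteria string is a placeholder, and ConnectionTypeJdbc is assumed from this package's ConnectionType enum:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	input := glue.GetConnectionsInput{
		Filter: &types.GetConnectionsFilter{
			ConnectionType: types.ConnectionTypeJdbc, // assumed enum constant
			MatchCriteria:  []string{"sales"},        // placeholder criteria
		},
	}
	fmt.Println(input.Filter.ConnectionType)
}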

type GlueEncryptionException

type GlueEncryptionException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

An encryption operation failed.

func (*GlueEncryptionException) Error

func (e *GlueEncryptionException) Error() string

func (*GlueEncryptionException) ErrorCode

func (e *GlueEncryptionException) ErrorCode() string

func (*GlueEncryptionException) ErrorFault

func (e *GlueEncryptionException) ErrorFault() smithy.ErrorFault

func (*GlueEncryptionException) ErrorMessage

func (e *GlueEncryptionException) ErrorMessage() string

type GluePolicy

type GluePolicy struct {

	// The date and time at which the policy was created.
	CreateTime *time.Time

	// Contains the hash value associated with this policy.
	PolicyHash *string

	// Contains the requested policy document, in JSON format.
	PolicyInJson *string

	// The date and time at which the policy was last updated.
	UpdateTime *time.Time
	// contains filtered or unexported fields
}

A structure for returning a resource policy.

type GlueRecordType added in v1.25.0

type GlueRecordType string
const (
	GlueRecordTypeDate       GlueRecordType = "DATE"
	GlueRecordTypeString     GlueRecordType = "STRING"
	GlueRecordTypeTimestamp  GlueRecordType = "TIMESTAMP"
	GlueRecordTypeInt        GlueRecordType = "INT"
	GlueRecordTypeFloat      GlueRecordType = "FLOAT"
	GlueRecordTypeLong       GlueRecordType = "LONG"
	GlueRecordTypeBigdecimal GlueRecordType = "BIGDECIMAL"
	GlueRecordTypeByte       GlueRecordType = "BYTE"
	GlueRecordTypeShort      GlueRecordType = "SHORT"
	GlueRecordTypeDouble     GlueRecordType = "DOUBLE"
)

Enum values for GlueRecordType

func (GlueRecordType) Values added in v1.25.0

func (GlueRecordType) Values() []GlueRecordType

Values returns all known values for GlueRecordType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type GlueSchema added in v1.25.0

type GlueSchema struct {

	// Specifies the column definitions that make up a Glue schema.
	Columns []GlueStudioSchemaColumn
	// contains filtered or unexported fields
}

Specifies a user-defined schema when a schema cannot be determined by Glue.

type GlueStudioSchemaColumn added in v1.25.0

type GlueStudioSchemaColumn struct {

	// The name of the column in the Glue Studio schema.
	//
	// This member is required.
	Name *string

	// The hive type for this column in the Glue Studio schema.
	Type *string
	// contains filtered or unexported fields
}

Specifies a single column in a Glue schema definition.

type GlueTable

type GlueTable struct {

	// A database name in the Glue Data Catalog.
	//
	// This member is required.
	DatabaseName *string

	// A table name in the Glue Data Catalog.
	//
	// This member is required.
	TableName *string

	// Additional options for the table. Currently there are two keys supported:
	//   - pushDownPredicate : to filter on partitions without having to list and read
	//   all the files in your dataset.
	//   - catalogPartitionPredicate : to use server-side partition pruning using
	//   partition indexes in the Glue Data Catalog.
	AdditionalOptions map[string]string

	// A unique identifier for the Glue Data Catalog.
	CatalogId *string

	// The name of the connection to the Glue Data Catalog.
	ConnectionName *string
	// contains filtered or unexported fields
}

The database and table in the Glue Data Catalog that are used for input or output data.
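
A minimal sketch of a GlueTable with a pushDownPredicate; the names and the predicate are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// The predicate prunes partitions before files are listed and read.
	table := types.GlueTable{
		DatabaseName: aws.String("sales_db"),
		TableName:    aws.String("orders"),
		AdditionalOptions: map[string]string{
			"pushDownPredicate": "year == '2024'",
		},
	}
	fmt.Println(*table.DatabaseName, *table.TableName)
}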

type GovernedCatalogSource added in v1.25.0

type GovernedCatalogSource struct {

	// The database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// The database table to read from.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options.
	AdditionalOptions *S3SourceAdditionalOptions

	// Partitions satisfying this predicate are deleted. Files within the retention
	// period in these partitions are not deleted. Set to "" (empty) by default.
	PartitionPredicate *string
	// contains filtered or unexported fields
}

Specifies the data store in the governed Glue Data Catalog.

type GovernedCatalogTarget added in v1.25.0

type GovernedCatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the governed catalog.
	SchemaChangePolicy *CatalogSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

type GrokClassifier

type GrokClassifier struct {

	// An identifier of the data format that the classifier matches, such as Twitter,
	// JSON, Omniture logs, and so on.
	//
	// This member is required.
	Classification *string

	// The grok pattern applied to a data store by this classifier. For more
	// information, see built-in patterns in Writing Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html)
	// .
	//
	// This member is required.
	GrokPattern *string

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// The time that this classifier was registered.
	CreationTime *time.Time

	// Optional custom grok patterns defined by this classifier. For more information,
	// see custom patterns in Writing Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html)
	// .
	CustomPatterns *string

	// The time that this classifier was last updated.
	LastUpdated *time.Time

	// The version of this classifier.
	Version int64
	// contains filtered or unexported fields
}

A classifier that uses grok patterns.

type HudiTarget added in v1.56.0

type HudiTarget struct {

	// The name of the connection to use to connect to the Hudi target. If your Hudi
	// files are stored in buckets that require VPC authorization, you can set their
	// connection properties here.
	ConnectionName *string

	// A list of glob patterns used to exclude from the crawl. For more information,
	// see Catalog Tables with a Crawler (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html)
	// .
	Exclusions []string

	// The maximum depth of Amazon S3 paths that the crawler can traverse to discover
	// the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run
	// time.
	MaximumTraversalDepth *int32

	// An array of Amazon S3 location strings for Hudi, each indicating the root
	// folder in which the metadata files for a Hudi table reside. The Hudi folder
	// may be located in a child folder of the root folder. The crawler will scan all
	// folders underneath a path for a Hudi folder.
	Paths []string
	// contains filtered or unexported fields
}

Specifies an Apache Hudi data source.

type HudiTargetCompressionType added in v1.40.0

type HudiTargetCompressionType string
const (
	HudiTargetCompressionTypeGzip         HudiTargetCompressionType = "gzip"
	HudiTargetCompressionTypeLzo          HudiTargetCompressionType = "lzo"
	HudiTargetCompressionTypeUncompressed HudiTargetCompressionType = "uncompressed"
	HudiTargetCompressionTypeSnappy       HudiTargetCompressionType = "snappy"
)

Enum values for HudiTargetCompressionType

func (HudiTargetCompressionType) Values added in v1.40.0

Values returns all known values for HudiTargetCompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type IcebergInput added in v1.54.0

type IcebergInput struct {

	// A required metadata operation. Can only be set to CREATE .
	//
	// This member is required.
	MetadataOperation MetadataOperation

	// The table version for the Iceberg table. Defaults to 2.
	Version *string
	// contains filtered or unexported fields
}

A structure that defines an Apache Iceberg metadata table to create in the catalog.
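
A hedged sketch of an IcebergInput; MetadataOperationCreate is assumed to be the CREATE constant of this package's MetadataOperation enum:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// CREATE is currently the only supported metadata operation.
	input := types.IcebergInput{
		MetadataOperation: types.MetadataOperationCreate, // assumed enum constant
		Version:           aws.String("2"),               // Iceberg table format version
	}
	fmt.Println(input.MetadataOperation, *input.Version)
}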

type IcebergTarget added in v1.53.0

type IcebergTarget struct {

	// The name of the connection to use to connect to the Iceberg target.
	ConnectionName *string

	// A list of glob patterns used to exclude from the crawl. For more information,
	// see Catalog Tables with a Crawler (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html)
	// .
	Exclusions []string

	// The maximum depth of Amazon S3 paths that the crawler can traverse to discover
	// the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler
	// run time.
	MaximumTraversalDepth *int32

	// One or more Amazon S3 paths that contain Iceberg metadata folders, such as
	// s3://bucket/prefix .
	Paths []string
	// contains filtered or unexported fields
}

Specifies an Apache Iceberg data source where Iceberg tables are stored in Amazon S3.

type IdempotentParameterMismatchException

type IdempotentParameterMismatchException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The same unique identifier was associated with two different records.

func (*IdempotentParameterMismatchException) Error

func (*IdempotentParameterMismatchException) ErrorCode

func (*IdempotentParameterMismatchException) ErrorFault

func (*IdempotentParameterMismatchException) ErrorMessage

func (e *IdempotentParameterMismatchException) ErrorMessage() string

type IllegalBlueprintStateException added in v1.11.0

type IllegalBlueprintStateException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The blueprint is in an invalid state to perform a requested operation.

func (*IllegalBlueprintStateException) Error added in v1.11.0

func (*IllegalBlueprintStateException) ErrorCode added in v1.11.0

func (e *IllegalBlueprintStateException) ErrorCode() string

func (*IllegalBlueprintStateException) ErrorFault added in v1.11.0

func (*IllegalBlueprintStateException) ErrorMessage added in v1.11.0

func (e *IllegalBlueprintStateException) ErrorMessage() string

type IllegalSessionStateException added in v1.22.0

type IllegalSessionStateException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The session is in an invalid state to perform a requested operation.

func (*IllegalSessionStateException) Error added in v1.22.0

func (*IllegalSessionStateException) ErrorCode added in v1.22.0

func (e *IllegalSessionStateException) ErrorCode() string

func (*IllegalSessionStateException) ErrorFault added in v1.22.0

func (*IllegalSessionStateException) ErrorMessage added in v1.22.0

func (e *IllegalSessionStateException) ErrorMessage() string

type IllegalWorkflowStateException

type IllegalWorkflowStateException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The workflow is in an invalid state to perform a requested operation.

func (*IllegalWorkflowStateException) Error

func (*IllegalWorkflowStateException) ErrorCode

func (e *IllegalWorkflowStateException) ErrorCode() string

func (*IllegalWorkflowStateException) ErrorFault

func (*IllegalWorkflowStateException) ErrorMessage

func (e *IllegalWorkflowStateException) ErrorMessage() string

type ImportLabelsTaskRunProperties

type ImportLabelsTaskRunProperties struct {

	// The Amazon Simple Storage Service (Amazon S3) path from where you will import
	// the labels.
	InputS3Path *string

	// Indicates whether to overwrite your existing labels.
	Replace bool
	// contains filtered or unexported fields
}

Specifies configuration properties for an importing labels task run.

type InternalServiceException

type InternalServiceException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

An internal service error occurred.

func (*InternalServiceException) Error

func (e *InternalServiceException) Error() string

func (*InternalServiceException) ErrorCode

func (e *InternalServiceException) ErrorCode() string

func (*InternalServiceException) ErrorFault

func (e *InternalServiceException) ErrorFault() smithy.ErrorFault

func (*InternalServiceException) ErrorMessage

func (e *InternalServiceException) ErrorMessage() string

type InvalidInputException

type InvalidInputException struct {
	Message *string

	ErrorCodeOverride *string

	FromFederationSource *bool
	// contains filtered or unexported fields
}

The input provided was not valid.

func (*InvalidInputException) Error

func (e *InvalidInputException) Error() string

func (*InvalidInputException) ErrorCode

func (e *InvalidInputException) ErrorCode() string

func (*InvalidInputException) ErrorFault

func (e *InvalidInputException) ErrorFault() smithy.ErrorFault

func (*InvalidInputException) ErrorMessage

func (e *InvalidInputException) ErrorMessage() string

type InvalidStateException added in v1.16.0

type InvalidStateException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

An error that indicates your data is in an invalid state.

func (*InvalidStateException) Error added in v1.16.0

func (e *InvalidStateException) Error() string

func (*InvalidStateException) ErrorCode added in v1.16.0

func (e *InvalidStateException) ErrorCode() string

func (*InvalidStateException) ErrorFault added in v1.16.0

func (e *InvalidStateException) ErrorFault() smithy.ErrorFault

func (*InvalidStateException) ErrorMessage added in v1.16.0

func (e *InvalidStateException) ErrorMessage() string

type JDBCConnectionType added in v1.42.0

type JDBCConnectionType string
const (
	JDBCConnectionTypeSqlserver  JDBCConnectionType = "sqlserver"
	JDBCConnectionTypeMysql      JDBCConnectionType = "mysql"
	JDBCConnectionTypeOracle     JDBCConnectionType = "oracle"
	JDBCConnectionTypePostgresql JDBCConnectionType = "postgresql"
	JDBCConnectionTypeRedshift   JDBCConnectionType = "redshift"
)

Enum values for JDBCConnectionType

func (JDBCConnectionType) Values added in v1.42.0

Values returns all known values for JDBCConnectionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type JDBCConnectorOptions added in v1.25.0

type JDBCConnectorOptions struct {

	// Custom data type mapping that builds a mapping from a JDBC data type to an Glue
	// data type. For example, the option "dataTypeMapping":{"FLOAT":"STRING"} maps
	// data fields of JDBC type FLOAT into the Java String type by calling the
	// ResultSet.getString() method of the driver, and uses it to build the Glue
	// record. The ResultSet object is implemented by each driver, so the behavior is
	// specific to the driver you use. Refer to the documentation for your JDBC driver
	// to understand how the driver performs the conversions.
	DataTypeMapping map[string]GlueRecordType

	// Extra condition clause to filter data from source. For example:
	// BillingCity='Mountain View' . When using a query instead of a table name, you
	// should validate that the query works with the specified filterPredicate .
	FilterPredicate *string

	// The name of the job bookmark keys on which to sort.
	JobBookmarkKeys []string

	// Specifies an ascending or descending sort order.
	JobBookmarkKeysSortOrder *string

	// The minimum value of partitionColumn that is used to decide partition stride.
	LowerBound *int64

	// The number of partitions. This value, along with lowerBound (inclusive) and
	// upperBound (exclusive), forms partition strides for generated WHERE clause
	// expressions that are used to split the partitionColumn .
	NumPartitions *int64

	// The name of an integer column that is used for partitioning. This option works
	// only when it's included with lowerBound , upperBound , and numPartitions . This
	// option works the same way as in the Spark SQL JDBC reader.
	PartitionColumn *string

	// The maximum value of partitionColumn that is used to decide partition stride.
	UpperBound *int64
	// contains filtered or unexported fields
}

Additional connection options for the connector.
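
As a worked example, the options below split reads over a placeholder id column into four strides of 250,000 covering [0, 1000000), mirroring the Spark SQL JDBC reader; the column name, bounds, and predicate are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Four partitions over id in [0, 1000000): stride 250000 per generated
	// WHERE clause, as in the Spark SQL JDBC reader.
	opts := types.JDBCConnectorOptions{
		PartitionColumn: aws.String("id"),
		LowerBound:      aws.Int64(0),
		UpperBound:      aws.Int64(1_000_000),
		NumPartitions:   aws.Int64(4),
		FilterPredicate: aws.String("status = 'ACTIVE'"), // extra condition clause
	}
	fmt.Println(*opts.PartitionColumn, *opts.NumPartitions)
}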

type JDBCConnectorSource added in v1.25.0

type JDBCConnectorSource struct {

	// The name of the connection that is associated with the connector.
	//
	// This member is required.
	ConnectionName *string

	// The type of connection, such as marketplace.jdbc or custom.jdbc, designating a
	// connection to a JDBC data store.
	//
	// This member is required.
	ConnectionType *string

	// The name of a connector that assists with accessing the data store in Glue
	// Studio.
	//
	// This member is required.
	ConnectorName *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// Additional connection options for the connector.
	AdditionalOptions *JDBCConnectorOptions

	// The name of the table in the data source.
	ConnectionTable *string

	// Specifies the data schema for the custom JDBC source.
	OutputSchemas []GlueSchema

	// The table or SQL query to get the data from. You can specify either
	// ConnectionTable or query , but not both.
	Query *string
	// contains filtered or unexported fields
}

Specifies a connector to a JDBC data source.

type JDBCConnectorTarget added in v1.25.0

type JDBCConnectorTarget struct {

	// The name of the connection that is associated with the connector.
	//
	// This member is required.
	ConnectionName *string

	// The name of the table in the data target.
	//
	// This member is required.
	ConnectionTable *string

	// The type of connection, such as marketplace.jdbc or custom.jdbc, designating a
	// connection to a JDBC data target.
	//
	// This member is required.
	ConnectionType *string

	// The name of a connector that will be used.
	//
	// This member is required.
	ConnectorName *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// Additional connection options for the connector.
	AdditionalOptions map[string]string

	// Specifies the data schema for the JDBC target.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a connector to a JDBC data target.

type JDBCDataType added in v1.25.0

type JDBCDataType string
const (
	JDBCDataTypeArray                 JDBCDataType = "ARRAY"
	JDBCDataTypeBigint                JDBCDataType = "BIGINT"
	JDBCDataTypeBinary                JDBCDataType = "BINARY"
	JDBCDataTypeBit                   JDBCDataType = "BIT"
	JDBCDataTypeBlob                  JDBCDataType = "BLOB"
	JDBCDataTypeBoolean               JDBCDataType = "BOOLEAN"
	JDBCDataTypeChar                  JDBCDataType = "CHAR"
	JDBCDataTypeClob                  JDBCDataType = "CLOB"
	JDBCDataTypeDatalink              JDBCDataType = "DATALINK"
	JDBCDataTypeDate                  JDBCDataType = "DATE"
	JDBCDataTypeDecimal               JDBCDataType = "DECIMAL"
	JDBCDataTypeDistinct              JDBCDataType = "DISTINCT"
	JDBCDataTypeDouble                JDBCDataType = "DOUBLE"
	JDBCDataTypeFloat                 JDBCDataType = "FLOAT"
	JDBCDataTypeInteger               JDBCDataType = "INTEGER"
	JDBCDataTypeJavaObject            JDBCDataType = "JAVA_OBJECT"
	JDBCDataTypeLongnvarchar          JDBCDataType = "LONGNVARCHAR"
	JDBCDataTypeLongvarbinary         JDBCDataType = "LONGVARBINARY"
	JDBCDataTypeLongvarchar           JDBCDataType = "LONGVARCHAR"
	JDBCDataTypeNchar                 JDBCDataType = "NCHAR"
	JDBCDataTypeNclob                 JDBCDataType = "NCLOB"
	JDBCDataTypeNull                  JDBCDataType = "NULL"
	JDBCDataTypeNumeric               JDBCDataType = "NUMERIC"
	JDBCDataTypeNvarchar              JDBCDataType = "NVARCHAR"
	JDBCDataTypeOther                 JDBCDataType = "OTHER"
	JDBCDataTypeReal                  JDBCDataType = "REAL"
	JDBCDataTypeRef                   JDBCDataType = "REF"
	JDBCDataTypeRefCursor             JDBCDataType = "REF_CURSOR"
	JDBCDataTypeRowid                 JDBCDataType = "ROWID"
	JDBCDataTypeSmallint              JDBCDataType = "SMALLINT"
	JDBCDataTypeSqlxml                JDBCDataType = "SQLXML"
	JDBCDataTypeStruct                JDBCDataType = "STRUCT"
	JDBCDataTypeTime                  JDBCDataType = "TIME"
	JDBCDataTypeTimeWithTimezone      JDBCDataType = "TIME_WITH_TIMEZONE"
	JDBCDataTypeTimestamp             JDBCDataType = "TIMESTAMP"
	JDBCDataTypeTimestampWithTimezone JDBCDataType = "TIMESTAMP_WITH_TIMEZONE"
	JDBCDataTypeTinyint               JDBCDataType = "TINYINT"
	JDBCDataTypeVarbinary             JDBCDataType = "VARBINARY"
	JDBCDataTypeVarchar               JDBCDataType = "VARCHAR"
)

Enum values for JDBCDataType

func (JDBCDataType) Values added in v1.25.0

func (JDBCDataType) Values() []JDBCDataType

Values returns all known values for JDBCDataType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type JdbcMetadataEntry added in v1.35.0

type JdbcMetadataEntry string
const (
	JdbcMetadataEntryComments JdbcMetadataEntry = "COMMENTS"
	JdbcMetadataEntryRawtypes JdbcMetadataEntry = "RAWTYPES"
)

Enum values for JdbcMetadataEntry

func (JdbcMetadataEntry) Values added in v1.35.0

func (JdbcMetadataEntry) Values() []JdbcMetadataEntry

Values returns all known values for JdbcMetadataEntry. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type JdbcTarget

type JdbcTarget struct {

	// The name of the connection to use to connect to the JDBC target.
	ConnectionName *string

	// Specify a value of RAWTYPES or COMMENTS to enable additional metadata in table
	// responses. RAWTYPES provides the native-level datatype. COMMENTS provides
	// comments associated with a column or table in the database. If you do not need
	// additional metadata, keep the field empty.
	EnableAdditionalMetadata []JdbcMetadataEntry

	// A list of glob patterns used to exclude objects from the crawl. For more
	// information, see Catalog Tables with a Crawler (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html)
	// .
	Exclusions []string

	// The path of the JDBC target.
	Path *string
	// contains filtered or unexported fields
}

Specifies a JDBC data store to crawl.
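
A hedged sketch of a crawler target built from this type; the connection name, path, and exclusion pattern are hypothetical placeholders:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// crawlerJdbcTarget crawls a hypothetical database, skips tables matching a
// glob, and requests both kinds of additional metadata in table responses.
func crawlerJdbcTarget() types.JdbcTarget {
	return types.JdbcTarget{
		ConnectionName: aws.String("my-jdbc-connection"), // hypothetical
		Path:           aws.String("MyDatabase/%"),       // hypothetical path
		Exclusions:     []string{"MyDatabase/tmp_*"},     // hypothetical glob
		EnableAdditionalMetadata: []types.JdbcMetadataEntry{
			types.JdbcMetadataEntryComments,
			types.JdbcMetadataEntryRawtypes,
		},
	}
}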

type Job

type Job struct {

	// This field is deprecated. Use MaxCapacity instead. The number of Glue data
	// processing units (DPUs) allocated to runs of this job. You can allocate a
	// minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing
	// power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
	// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
	//
	// Deprecated: This property is deprecated, use MaxCapacity instead.
	AllocatedCapacity int32

	// The representation of a directed acyclic graph on which both the Glue Studio
	// visual component and Glue Studio code generation are based.
	CodeGenConfigurationNodes map[string]CodeGenConfigurationNode

	// The JobCommand that runs this job.
	Command *JobCommand

	// The connections used for this job.
	Connections *ConnectionsList

	// The time and date that this job definition was created.
	CreatedOn *time.Time

	// The default arguments for every run of this job, specified as name-value pairs.
	// You can specify arguments here that your own job-execution script consumes, as
	// well as arguments that Glue itself consumes. Job arguments may be logged. Do not
	// pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
	// Secrets Manager or other secret management mechanism if you intend to keep them
	// within the Job. For information about how to specify and consume your own Job
	// arguments, see the Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
	// topic in the developer guide. For information about the arguments you can
	// provide to this field when configuring Spark jobs, see the Special Parameters
	// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
	// topic in the developer guide. For information about the arguments you can
	// provide to this field when configuring Ray jobs, see Using job parameters in
	// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
	// in the developer guide.
	DefaultArguments map[string]string

	// A description of the job.
	Description *string

	// Indicates whether the job is run with a standard or flexible execution class.
	// The standard execution class is ideal for time-sensitive workloads that require
	// fast job startup and dedicated resources. The flexible execution class is
	// appropriate for time-insensitive jobs whose start and completion times may vary.
	// Only jobs with Glue version 3.0 and above and command type glueetl will be
	// allowed to set ExecutionClass to FLEX . The flexible execution class is
	// available for Spark jobs.
	ExecutionClass ExecutionClass

	// An ExecutionProperty specifying the maximum number of concurrent runs allowed
	// for this job.
	ExecutionProperty *ExecutionProperty

	// In Spark jobs, GlueVersion determines the versions of Apache Spark and Python
	// that Glue makes available in a job. The Python version indicates the version supported
	// for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater.
	// However, the versions of Ray, Python and additional libraries available in your
	// Ray job are determined by the Runtime parameter of the Job command. For more
	// information about the available Glue versions and corresponding Spark and Python
	// versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html)
	// in the developer guide. Jobs that are created without specifying a Glue version
	// default to Glue 0.9.
	GlueVersion *string

	// The last point in time when this job definition was modified.
	LastModifiedOn *time.Time

	// This field is reserved for future use.
	LogUri *string

	// For Glue version 1.0 or earlier jobs, using the standard worker type, the
	// number of Glue data processing units (DPUs) that can be allocated when this job
	// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
	// of compute capacity and 16 GB of memory. For more information, see the Glue
	// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0 or
	// later jobs, you cannot specify a Maximum capacity . Instead, you should specify
	// a Worker type and the Number of workers . Do not set MaxCapacity if using
	// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
	// depends on whether you are running a Python shell job, an Apache Spark ETL job,
	// or an Apache Spark streaming ETL job:
	//   - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
	//   can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
	//   - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
	//   Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
	//   allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
	//   fractional DPU allocation.
	MaxCapacity *float64

	// The maximum number of times to retry this job after a JobRun fails.
	MaxRetries int32

	// The name you assign to this job definition.
	Name *string

	// Arguments for this job that are not overridden when providing job arguments in
	// a job run, specified as name-value pairs.
	NonOverridableArguments map[string]string

	// Specifies configuration properties of a job notification.
	NotificationProperty *NotificationProperty

	// The number of workers of a defined workerType that are allocated when a job
	// runs.
	NumberOfWorkers *int32

	// The name or Amazon Resource Name (ARN) of the IAM role associated with this job.
	Role *string

	// The name of the SecurityConfiguration structure to be used with this job.
	SecurityConfiguration *string

	// The details for a source control configuration for a job, allowing
	// synchronization of job artifacts to or from a remote repository.
	SourceControlDetails *SourceControlDetails

	// The job timeout in minutes. This is the maximum time that a job run can consume
	// resources before it is terminated and enters TIMEOUT status. The default is
	// 2,880 minutes (48 hours).
	Timeout *int32

	// The type of predefined worker that is allocated when a job runs. Accepts a
	// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
	// for Ray jobs.
	//   - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
	//   memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
	//   worker. We recommend this worker type for workloads such as data transforms,
	//   joins, and queries, as it offers a scalable and cost-effective way to run most
	//   jobs.
	//   - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
	//   memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
	//   worker. We recommend this worker type for workloads such as data transforms,
	//   joins, and queries, as it offers a scalable and cost-effective way to run most
	//   jobs.
	//   - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
	//   memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
	//   worker. We recommend this worker type for jobs whose workloads contain your most
	//   demanding transforms, aggregations, joins, and queries. This worker type is
	//   available only for Glue version 3.0 or later Spark ETL jobs in the following
	//   Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
	//   (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
	//   Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
	//   - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
	//   memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
	//   worker. We recommend this worker type for jobs whose workloads contain your most
	//   demanding transforms, aggregations, joins, and queries. This worker type is
	//   available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
	//   Web Services Regions as supported for the G.4X worker type.
	//   - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
	//   memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
	//   worker. We recommend this worker type for low volume streaming jobs. This worker
	//   type is only available for Glue version 3.0 streaming jobs.
	//   - For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of
	//   memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
	//   workers based on the autoscaler.
	WorkerType WorkerType
	// contains filtered or unexported fields
}

Specifies a job definition.

type JobBookmarkEntry

type JobBookmarkEntry struct {

	// The attempt ID number.
	Attempt int32

	// The bookmark itself.
	JobBookmark *string

	// The name of the job in question.
	JobName *string

	// The unique run identifier associated with the previous job run.
	PreviousRunId *string

	// The run ID number.
	Run int32

	// The run ID number.
	RunId *string

	// The version of the job.
	Version int32
	// contains filtered or unexported fields
}

Defines a point that a job can resume processing.

type JobBookmarksEncryption

type JobBookmarksEncryption struct {

	// The encryption mode to use for job bookmarks data.
	JobBookmarksEncryptionMode JobBookmarksEncryptionMode

	// The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
	KmsKeyArn *string
	// contains filtered or unexported fields
}

Specifies how job bookmark data should be encrypted.

type JobBookmarksEncryptionMode

type JobBookmarksEncryptionMode string
const (
	JobBookmarksEncryptionModeDisabled JobBookmarksEncryptionMode = "DISABLED"
	JobBookmarksEncryptionModeCsekms   JobBookmarksEncryptionMode = "CSE-KMS"
)

Enum values for JobBookmarksEncryptionMode

func (JobBookmarksEncryptionMode) Values added in v0.29.0

func (JobBookmarksEncryptionMode) Values() []JobBookmarksEncryptionMode

Values returns all known values for JobBookmarksEncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type JobCommand

type JobCommand struct {

	// The name of the job command. For an Apache Spark ETL job, this must be glueetl .
	// For a Python shell job, it must be pythonshell . For an Apache Spark streaming
	// ETL job, this must be gluestreaming . For a Ray job, this must be glueray .
	Name *string

	// The Python version being used to run a Python shell job. Allowed values are 2
	// or 3.
	PythonVersion *string

	// In Ray jobs, Runtime is used to specify the versions of Ray, Python and
	// additional libraries available in your environment. This field is not used in
	// other job types. For supported runtime environment values, see Supported Ray
	// runtime environments (https://docs.aws.amazon.com/glue/latest/dg/ray-jobs-section.html)
	// in the Glue Developer Guide.
	Runtime *string

	// Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that
	// runs a job.
	ScriptLocation *string
	// contains filtered or unexported fields
}

Specifies code that runs when a job is run.
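
A minimal sketch of a Spark ETL command; the script path is a hypothetical placeholder:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// sparkEtlCommand names the glueetl command, per the field documentation
// above, with a Python 3 script stored in Amazon S3.
func sparkEtlCommand() *types.JobCommand {
	return &types.JobCommand{
		Name:           aws.String("glueetl"),
		PythonVersion:  aws.String("3"),
		ScriptLocation: aws.String("s3://my-bucket/scripts/etl.py"), // hypothetical
	}
}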

type JobNodeDetails

type JobNodeDetails struct {

	// The information for the job runs represented by the job node.
	JobRuns []JobRun
	// contains filtered or unexported fields
}

The details of a Job node present in the workflow.

type JobRun

type JobRun struct {

	// This field is deprecated. Use MaxCapacity instead. The number of Glue data
	// processing units (DPUs) allocated to this JobRun. From 2 to 100 DPUs can be
	// allocated; the default is 10. A DPU is a relative measure of processing power
	// that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
	// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
	//
	// Deprecated: This property is deprecated, use MaxCapacity instead.
	AllocatedCapacity int32

	// The job arguments associated with this run. For this job run, they replace the
	// default arguments set in the job definition itself. You can specify arguments
	// here that your own job-execution script consumes, as well as arguments that Glue
	// itself consumes. Job arguments may be logged. Do not pass plaintext secrets as
	// arguments. Retrieve secrets from a Glue Connection, Secrets Manager or other
	// secret management mechanism if you intend to keep them within the Job. For
	// information about how to specify and consume your own Job arguments, see the
	// Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
	// topic in the developer guide. For information about the arguments you can
	// provide to this field when configuring Spark jobs, see the Special Parameters
	// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
	// topic in the developer guide. For information about the arguments you can
	// provide to this field when configuring Ray jobs, see Using job parameters in
	// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
	// in the developer guide.
	Arguments map[string]string

	// The number of the attempt to run this job.
	Attempt int32

	// The date and time that this job run completed.
	CompletedOn *time.Time

	// This field populates only for Auto Scaling job runs, and represents the total
	// time each executor ran during the lifecycle of a job run in seconds, multiplied
	// by a DPU factor (1 for G.1X , 2 for G.2X , or 0.25 for G.025X workers). This
	// value may be different from the executionEngineRuntime * MaxCapacity as in the
	// case of Auto Scaling jobs, as the number of executors running at a given time
	// may be less than the MaxCapacity . Therefore, it is possible that the value of
	// DPUSeconds is less than executionEngineRuntime * MaxCapacity .
	DPUSeconds *float64

	// An error message associated with this job run.
	ErrorMessage *string

	// Indicates whether the job is run with a standard or flexible execution class.
	// The standard execution class is ideal for time-sensitive workloads that require
	// fast job startup and dedicated resources. The flexible execution class is
	// appropriate for time-insensitive jobs whose start and completion times may vary.
	// Only jobs with Glue version 3.0 and above and command type glueetl will be
	// allowed to set ExecutionClass to FLEX . The flexible execution class is
	// available for Spark jobs.
	ExecutionClass ExecutionClass

	// The amount of time (in seconds) that the job run consumed resources.
	ExecutionTime int32

	// In Spark jobs, GlueVersion determines the versions of Apache Spark and Python
	// that Glue makes available in a job. The Python version indicates the version supported
	// for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater.
	// However, the versions of Ray, Python and additional libraries available in your
	// Ray job are determined by the Runtime parameter of the Job command. For more
	// information about the available Glue versions and corresponding Spark and Python
	// versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html)
	// in the developer guide. Jobs that are created without specifying a Glue version
	// default to Glue 0.9.
	GlueVersion *string

	// The ID of this job run.
	Id *string

	// The name of the job definition being used in this run.
	JobName *string

	// The current state of the job run. For more information about the statuses of
	// jobs that have terminated abnormally, see Glue Job Run Statuses (https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html)
	// .
	JobRunState JobRunState

	// The last time that this job run was modified.
	LastModifiedOn *time.Time

	// The name of the log group for secure logging that can be server-side encrypted
	// in Amazon CloudWatch using KMS. This name can be /aws-glue/jobs/ , in which case
	// the default encryption is NONE . If you add a role name and
	// SecurityConfiguration name (in other words,
	// /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/ ), then that security
	// configuration is used to encrypt the log group.
	LogGroupName *string

	// For Glue version 1.0 or earlier jobs, using the standard worker type, the
	// number of Glue data processing units (DPUs) that can be allocated when this job
	// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
	// of compute capacity and 16 GB of memory. For more information, see the Glue
	// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0+
	// jobs, you cannot specify a Maximum capacity . Instead, you should specify a
	// Worker type and the Number of workers . Do not set MaxCapacity if using
	// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
	// depends on whether you are running a Python shell job, an Apache Spark ETL job,
	// or an Apache Spark streaming ETL job:
	//   - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
	//   can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
	//   - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
	//   Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
	//   allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
	//   fractional DPU allocation.
	MaxCapacity *float64

	// Specifies configuration properties of a job run notification.
	NotificationProperty *NotificationProperty

	// The number of workers of a defined workerType that are allocated when a job
	// runs.
	NumberOfWorkers *int32

	// A list of predecessors to this job run.
	PredecessorRuns []Predecessor

	// The ID of the previous run of this job. For example, the JobRunId specified in
	// the StartJobRun action.
	PreviousRunId *string

	// The name of the SecurityConfiguration structure to be used with this job run.
	SecurityConfiguration *string

	// The date and time at which this job run was started.
	StartedOn *time.Time

	// The JobRun timeout in minutes. This is the maximum time that a job run can
	// consume resources before it is terminated and enters TIMEOUT status. This value
	// overrides the timeout value set in the parent job. Streaming jobs do not have a
	// timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
	Timeout *int32

	// The name of the trigger that started this job run.
	TriggerName *string

	// The type of predefined worker that is allocated when a job runs. Accepts a
	// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
	// for Ray jobs.
	//   - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
	//   memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
	//   worker. We recommend this worker type for workloads such as data transforms,
	//   joins, and queries, as it offers a scalable and cost-effective way to run most
	//   jobs.
	//   - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
	//   memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
	//   worker. We recommend this worker type for workloads such as data transforms,
	//   joins, and queries, as it offers a scalable and cost-effective way to run most
	//   jobs.
	//   - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
	//   memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
	//   worker. We recommend this worker type for jobs whose workloads contain your most
	//   demanding transforms, aggregations, joins, and queries. This worker type is
	//   available only for Glue version 3.0 or later Spark ETL jobs in the following
	//   Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
	//   (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
	//   Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
	//   - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
	//   memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
	//   worker. We recommend this worker type for jobs whose workloads contain your most
	//   demanding transforms, aggregations, joins, and queries. This worker type is
	//   available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
	//   Web Services Regions as supported for the G.4X worker type.
	//   - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
	//   memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
	//   worker. We recommend this worker type for low volume streaming jobs. This worker
	//   type is only available for Glue version 3.0 streaming jobs.
	//   - For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of
	//   memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
	//   workers based on the autoscaler.
	WorkerType WorkerType
	// contains filtered or unexported fields
}

Contains information about a job run.

type JobRunState

type JobRunState string
const (
	JobRunStateStarting  JobRunState = "STARTING"
	JobRunStateRunning   JobRunState = "RUNNING"
	JobRunStateStopping  JobRunState = "STOPPING"
	JobRunStateStopped   JobRunState = "STOPPED"
	JobRunStateSucceeded JobRunState = "SUCCEEDED"
	JobRunStateFailed    JobRunState = "FAILED"
	JobRunStateTimeout   JobRunState = "TIMEOUT"
	JobRunStateError     JobRunState = "ERROR"
	JobRunStateWaiting   JobRunState = "WAITING"
)

Enum values for JobRunState

func (JobRunState) Values added in v0.29.0

func (JobRunState) Values() []JobRunState

Values returns all known values for JobRunState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
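
A small sketch of a helper for polling a run: it treats the states above that cannot progress further as terminal, leaving STARTING, RUNNING, STOPPING, and WAITING as in progress. This is a reading of the state names, not a service guarantee:

package example

import "github.com/aws/aws-sdk-go-v2/service/glue/types"

// isTerminal reports whether a job run state is final.
func isTerminal(s types.JobRunState) bool {
	switch s {
	case types.JobRunStateSucceeded,
		types.JobRunStateStopped,
		types.JobRunStateFailed,
		types.JobRunStateTimeout,
		types.JobRunStateError:
		return true
	}
	return false
}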

type JobUpdate

type JobUpdate struct {

	// This field is deprecated. Use MaxCapacity instead. The number of Glue data
	// processing units (DPUs) to allocate to this job. You can allocate a minimum of 2
	// DPUs; the default is 10. A DPU is a relative measure of processing power that
	// consists of 4 vCPUs of compute capacity and 16 GB of memory. For more
	// information, see the Glue pricing page (https://aws.amazon.com/glue/pricing/) .
	//
	// Deprecated: This property is deprecated, use MaxCapacity instead.
	AllocatedCapacity int32

	// The representation of a directed acyclic graph on which both the Glue Studio
	// visual component and Glue Studio code generation are based.
	CodeGenConfigurationNodes map[string]CodeGenConfigurationNode

	// The JobCommand that runs this job (required).
	Command *JobCommand

	// The connections used for this job.
	Connections *ConnectionsList

	// The default arguments for every run of this job, specified as name-value pairs.
	// You can specify arguments here that your own job-execution script consumes, as
	// well as arguments that Glue itself consumes. Job arguments may be logged. Do not
	// pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection,
	// Secrets Manager or other secret management mechanism if you intend to keep them
	// within the Job. For information about how to specify and consume your own Job
	// arguments, see the Calling Glue APIs in Python (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html)
	// topic in the developer guide. For information about the arguments you can
	// provide to this field when configuring Spark jobs, see the Special Parameters
	// Used by Glue (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html)
	// topic in the developer guide. For information about the arguments you can
	// provide to this field when configuring Ray jobs, see Using job parameters in
	// Ray jobs (https://docs.aws.amazon.com/glue/latest/dg/author-job-ray-job-parameters.html)
	// in the developer guide.
	DefaultArguments map[string]string

	// Description of the job being defined.
	Description *string

	// Indicates whether the job is run with a standard or flexible execution class.
	// The standard execution class is ideal for time-sensitive workloads that require
	// fast job startup and dedicated resources. The flexible execution class is
	// appropriate for time-insensitive jobs whose start and completion times may vary.
	// Only jobs with Glue version 3.0 and above and command type glueetl will be
	// allowed to set ExecutionClass to FLEX . The flexible execution class is
	// available for Spark jobs.
	ExecutionClass ExecutionClass

	// An ExecutionProperty specifying the maximum number of concurrent runs allowed
	// for this job.
	ExecutionProperty *ExecutionProperty

	// In Spark jobs, GlueVersion determines the versions of Apache Spark and Python
	// that Glue makes available in a job. The Python version indicates the version supported
	// for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater.
	// However, the versions of Ray, Python and additional libraries available in your
	// Ray job are determined by the Runtime parameter of the Job command. For more
	// information about the available Glue versions and corresponding Spark and Python
	// versions, see Glue version (https://docs.aws.amazon.com/glue/latest/dg/add-job.html)
	// in the developer guide. Jobs that are created without specifying a Glue version
	// default to Glue 0.9.
	GlueVersion *string

	// This field is reserved for future use.
	LogUri *string

	// For Glue version 1.0 or earlier jobs, using the standard worker type, the
	// number of Glue data processing units (DPUs) that can be allocated when this job
	// runs. A DPU is a relative measure of processing power that consists of 4 vCPUs
	// of compute capacity and 16 GB of memory. For more information, see the Glue
	// pricing page (https://aws.amazon.com/glue/pricing/) . For Glue version 2.0+
	// jobs, you cannot specify a Maximum capacity . Instead, you should specify a
	// Worker type and the Number of workers . Do not set MaxCapacity if using
	// WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity
	// depends on whether you are running a Python shell job, an Apache Spark ETL job,
	// or an Apache Spark streaming ETL job:
	//   - When you specify a Python shell job ( JobCommand.Name ="pythonshell"), you
	//   can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
	//   - When you specify an Apache Spark ETL job ( JobCommand.Name ="glueetl") or
	//   Apache Spark streaming ETL job ( JobCommand.Name ="gluestreaming"), you can
	//   allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a
	//   fractional DPU allocation.
	MaxCapacity *float64

	// The maximum number of times to retry this job if it fails.
	MaxRetries int32

	// Arguments for this job that are not overridden when providing job arguments in
	// a job run, specified as name-value pairs.
	NonOverridableArguments map[string]string

	// Specifies the configuration properties of a job notification.
	NotificationProperty *NotificationProperty

	// The number of workers of a defined workerType that are allocated when a job
	// runs.
	NumberOfWorkers *int32

	// The name or Amazon Resource Name (ARN) of the IAM role associated with this job
	// (required).
	Role *string

	// The name of the SecurityConfiguration structure to be used with this job.
	SecurityConfiguration *string

	// The details for a source control configuration for a job, allowing
	// synchronization of job artifacts to or from a remote repository.
	SourceControlDetails *SourceControlDetails

	// The job timeout in minutes. This is the maximum time that a job run can consume
	// resources before it is terminated and enters TIMEOUT status. The default is
	// 2,880 minutes (48 hours).
	Timeout *int32

	// The type of predefined worker that is allocated when a job runs. Accepts a
	// value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X
	// for Ray jobs.
	//   - For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of
	//   memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
	//   worker. We recommend this worker type for workloads such as data transforms,
	//   joins, and queries, as it offers a scalable and cost-effective way to run most
	//   jobs.
	//   - For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of
	//   memory) with 128GB disk (approximately 77GB free), and provides 1 executor per
	//   worker. We recommend this worker type for workloads such as data transforms,
	//   joins, and queries, as it offers a scalable and cost-effective way to run most
	//   jobs.
	//   - For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of
	//   memory) with 256GB disk (approximately 235GB free), and provides 1 executor per
	//   worker. We recommend this worker type for jobs whose workloads contain your most
	//   demanding transforms, aggregations, joins, and queries. This worker type is
	//   available only for Glue version 3.0 or later Spark ETL jobs in the following
	//   Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West
	//   (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
	//   Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
	//   - For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of
	//   memory) with 512GB disk (approximately 487GB free), and provides 1 executor per
	//   worker. We recommend this worker type for jobs whose workloads contain your most
	//   demanding transforms, aggregations, joins, and queries. This worker type is
	//   available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon
	//   Web Services Regions as supported for the G.4X worker type.
	//   - For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of
	//   memory) with 84GB disk (approximately 34GB free), and provides 1 executor per
	//   worker. We recommend this worker type for low volume streaming jobs. This worker
	//   type is only available for Glue version 3.0 streaming jobs.
	//   - For the Z.2X worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of
	//   memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray
	//   workers based on the autoscaler.
	WorkerType WorkerType
	// contains filtered or unexported fields
}

Specifies information used to update an existing job definition. The previous job definition is completely overwritten by this information.
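
Because unset fields do not survive the update, a sketch like the following carries forward the fields worth keeping before changing one of them; the timeout value is an arbitrary example:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// bumpTimeout copies the fields of an existing job definition that should
// survive the overwrite, then applies a single change: a 2-hour timeout.
func bumpTimeout(existing types.Job) types.JobUpdate {
	return types.JobUpdate{
		Command:          existing.Command, // required
		Role:             existing.Role,    // required
		GlueVersion:      existing.GlueVersion,
		DefaultArguments: existing.DefaultArguments,
		// ... copy any other fields that must be preserved ...
		Timeout: aws.Int32(120),
	}
}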

type Join added in v1.25.0

type Join struct {

	// A list of the two columns to be joined.
	//
	// This member is required.
	Columns []JoinColumn

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// Specifies the type of join to be performed on the datasets.
	//
	// This member is required.
	JoinType JoinType

	// The name of the transform node.
	//
	// This member is required.
	Name *string
	// contains filtered or unexported fields
}

Specifies a transform that joins two datasets into one dataset using a comparison phrase on the specified data property keys. You can use inner, outer, left, right, left semi, and left anti joins.

type JoinColumn added in v1.25.0

type JoinColumn struct {

	// The column to be joined.
	//
	// This member is required.
	From *string

	// The key of the column to be joined.
	//
	// This member is required.
	Keys [][]string
	// contains filtered or unexported fields
}

Specifies a column to be joined.
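
A minimal sketch of an equijoin node combining the two types above; the node and column names are hypothetical, and each entry in Keys is assumed to be a path to a (possibly nested) field:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// equijoinOrders joins two hypothetical input nodes on customer ID.
func equijoinOrders() *types.Join {
	return &types.Join{
		Name:     aws.String("JoinOrdersToCustomers"),
		JoinType: types.JoinTypeEquijoin,
		Inputs:   []string{"Orders", "Customers"}, // hypothetical node names
		Columns: []types.JoinColumn{
			{From: aws.String("Orders"), Keys: [][]string{{"customer_id"}}},
			{From: aws.String("Customers"), Keys: [][]string{{"id"}}},
		},
	}
}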

type JoinType added in v1.25.0

type JoinType string
const (
	JoinTypeEquijoin JoinType = "equijoin"
	JoinTypeLeft     JoinType = "left"
	JoinTypeRight    JoinType = "right"
	JoinTypeOuter    JoinType = "outer"
	JoinTypeLeftSemi JoinType = "leftsemi"
	JoinTypeLeftAnti JoinType = "leftanti"
)

Enum values for JoinType

func (JoinType) Values added in v1.25.0

func (JoinType) Values() []JoinType

Values returns all known values for JoinType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type JsonClassifier

type JsonClassifier struct {

	// A JsonPath string defining the JSON data for the classifier to classify. Glue
	// supports a subset of JsonPath, as described in Writing JsonPath Custom
	// Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json)
	// .
	//
	// This member is required.
	JsonPath *string

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// The time that this classifier was registered.
	CreationTime *time.Time

	// The time that this classifier was last updated.
	LastUpdated *time.Time

	// The version of this classifier.
	Version int64
	// contains filtered or unexported fields
}

A classifier for JSON content.

type KafkaStreamingSourceOptions added in v1.25.0

type KafkaStreamingSourceOptions struct {

	// When this option is set to 'true', the data output will contain an additional
	// column named "__src_timestamp" that indicates the time when the corresponding
	// record was received by the topic. The default value is 'false'. This option is
	// supported in Glue version 4.0 or later.
	AddRecordTimestamp *string

	// The specific TopicPartitions to consume. You must specify at least one of
	// "topicName" , "assign" or "subscribePattern" .
	Assign *string

	// A list of bootstrap server URLs, for example, as
	// b-1.vpc-test-2.o4q88o.c6.kafka.us-east-1.amazonaws.com:9094 . This option must
	// be specified in the API call or defined in the table metadata in the Data
	// Catalog.
	BootstrapServers *string

	// An optional classification.
	Classification *string

	// The name of the connection.
	ConnectionName *string

	// Specifies the delimiter character.
	Delimiter *string

	// When this option is set to 'true', for each batch, it emits to CloudWatch the
	// metrics for the duration between the oldest record received by the topic and
	// the time it arrives in Glue. The metric's name is
	// "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This
	// option is supported in Glue version 4.0 or later.
	EmitConsumerLagMetrics *string

	// The end point when a batch query is ended. Possible values are either "latest"
	// or a JSON string that specifies an ending offset for each TopicPartition .
	EndingOffsets *string

	// Whether to include the Kafka headers. When the option is set to "true", the
	// data output will contain an additional column named
	// "glue_streaming_kafka_headers" with type Array[Struct(key: String, value:
	// String)] . The default value is "false". This option is available in Glue
	// version 3.0 or later only.
	IncludeHeaders *bool

	// The rate limit on the maximum number of offsets that are processed per trigger
	// interval. The specified total number of offsets is proportionally split across
	// topicPartitions of different volumes. The default value is null, which means
	// that the consumer reads all offsets until the known latest offset.
	MaxOffsetsPerTrigger *int64

	// The desired minimum number of partitions to read from Kafka. The default value
	// is null, which means that the number of spark partitions is equal to the number
	// of Kafka partitions.
	MinPartitions *int32

	// The number of times to retry before failing to fetch Kafka offsets. The default
	// value is 3 .
	NumRetries *int32

	// The timeout in milliseconds to poll data from Kafka in Spark job executors. The
	// default value is 512 .
	PollTimeoutMs *int64

	// The time in milliseconds to wait before retrying to fetch Kafka offsets. The
	// default value is 10 .
	RetryIntervalMs *int64

	// The protocol used to communicate with brokers. The possible values are "SSL" or
	// "PLAINTEXT" .
	SecurityProtocol *string

	// The starting position in the Kafka topic to read data from. The possible values
	// are "earliest" or "latest" . The default value is "latest" .
	StartingOffsets *string

	// The timestamp of the record in the Kafka topic to start reading data from. The
	// possible values are a timestamp string in UTC format of the pattern
	// yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For
	// example: "2023-04-04T08:00:00+08:00"). Only one of StartingTimestamp or
	// StartingOffsets must be set.
	StartingTimestamp *time.Time

	// A Java regex string that identifies the topic list to subscribe to. You must
	// specify at least one of "topicName" , "assign" or "subscribePattern" .
	SubscribePattern *string

	// The topic name as specified in Apache Kafka. You must specify at least one of
	// "topicName" , "assign" or "subscribePattern" .
	TopicName *string
	// contains filtered or unexported fields
}

Additional options for streaming.
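
A hedged sketch of a Kafka source configuration; the connection and topic names are hypothetical. Per the field documentation, at least one of TopicName, Assign, or SubscribePattern must be set:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// kafkaSourceOptions reads a hypothetical topic from its earliest offsets
// over SSL, with a bounded number of offsets per trigger interval.
func kafkaSourceOptions() *types.KafkaStreamingSourceOptions {
	return &types.KafkaStreamingSourceOptions{
		ConnectionName:       aws.String("my-kafka-connection"), // hypothetical
		TopicName:            aws.String("events"),              // hypothetical
		StartingOffsets:      aws.String("earliest"),
		SecurityProtocol:     aws.String("SSL"),
		NumRetries:           aws.Int32(5),
		MaxOffsetsPerTrigger: aws.Int64(100000),
	}
}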

type KeySchemaElement added in v0.29.0

type KeySchemaElement struct {

	// The name of a partition key.
	//
	// This member is required.
	Name *string

	// The type of a partition key.
	//
	// This member is required.
	Type *string
	// contains filtered or unexported fields
}

A partition key pair consisting of a name and a type.

type KinesisStreamingSourceOptions added in v1.25.0

type KinesisStreamingSourceOptions struct {

	// Adds a time delay between two consecutive getRecords operations. The default
	// value is "False" . This option is only configurable for Glue version 2.0 and
	// above.
	AddIdleTimeBetweenReads *bool

	// When this option is set to 'true', the data output will contain an additional
	// column named "__src_timestamp" that indicates the time when the corresponding
	// record was received by the stream. The default value is 'false'. This option is
	// supported in Glue version 4.0 or later.
	AddRecordTimestamp *string

	// Avoids creating an empty microbatch job by checking for unread data in the
	// Kinesis data stream before the batch is started. The default value is "False" .
	AvoidEmptyBatches *bool

	// An optional classification.
	Classification *string

	// Specifies the delimiter character.
	Delimiter *string

	// The minimum time interval between two ListShards API calls for your script to
	// consider resharding. The default value is 1s .
	DescribeShardInterval *int64

	// When this option is set to 'true', for each batch, it emits to CloudWatch the
	// metrics for the duration between the oldest record received by the stream and
	// the time it arrives in Glue. The metric's name is
	// "glue.driver.streaming.maxConsumerLagInMs". The default value is 'false'. This
	// option is supported in Glue version 4.0 or later.
	EmitConsumerLagMetrics *string

	// The URL of the Kinesis endpoint.
	EndpointUrl *string

	// The minimum time delay between two consecutive getRecords operations, specified
	// in ms. The default value is 1000 . This option is only configurable for Glue
	// version 2.0 and above.
	IdleTimeBetweenReadsInMs *int64

	// The maximum number of records to fetch per shard in the Kinesis data stream per
	// microbatch. Note: The client can exceed this limit if the streaming job has
	// already read extra records from Kinesis (in the same get-records call). If
	// MaxFetchRecordsPerShard needs to be strict then it needs to be a multiple of
	// MaxRecordPerRead . The default value is 100000 .
	MaxFetchRecordsPerShard *int64

	// The maximum time spent for the job executor to read records for the current
	// batch from the Kinesis data stream, specified in milliseconds (ms). Multiple
	// GetRecords API calls may be made within this time. The default value is 1000 .
	MaxFetchTimeInMs *int64

	// The maximum number of records to fetch from the Kinesis data stream in each
	// getRecords operation. The default value is 10000 .
	MaxRecordPerRead *int64

	// The maximum cool-off time period (specified in ms) between two retries of a
	// Kinesis Data Streams API call. The default value is 10000 .
	MaxRetryIntervalMs *int64

	// The maximum number of retries for Kinesis Data Streams API requests. The
	// default value is 3 .
	NumRetries *int32

	// The cool-off time period (specified in ms) before retrying the Kinesis Data
	// Streams API call. The default value is 1000 .
	RetryIntervalMs *int64

	// The Amazon Resource Name (ARN) of the role to assume using AWS Security Token
	// Service (AWS STS). This role must have permissions for describe or read record
	// operations for the Kinesis data stream. You must use this parameter when
	// accessing a data stream in a different account. Used in conjunction with
	// "awsSTSSessionName" .
	RoleArn *string

	// An identifier for the session assuming the role using AWS STS. You must use
	// this parameter when accessing a data stream in a different account. Used in
	// conjunction with "awsSTSRoleARN" .
	RoleSessionName *string

	// The starting position in the Kinesis data stream to read data from. The
	// possible values are "latest" , "trim_horizon" , "earliest" , or a timestamp
	// string in UTC format in the pattern yyyy-mm-ddTHH:MM:SSZ (where Z represents a
	// UTC timezone offset with a +/-. For example: "2023-04-04T08:00:00-04:00"). The
	// default value is "latest" . Note: Using a value that is a timestamp string in
	// UTC format for "startingPosition" is supported only for Glue version 4.0 or
	// later.
	StartingPosition StartingPosition

	// The timestamp of the record in the Kinesis data stream to start reading data
	// from. The possible values are a timestamp string in UTC format of the pattern
	// yyyy-mm-ddTHH:MM:SSZ (where Z represents a UTC timezone offset with a +/-. For
	// example: "2023-04-04T08:00:00+08:00").
	StartingTimestamp *time.Time

	// The Amazon Resource Name (ARN) of the Kinesis data stream.
	StreamArn *string

	// The name of the Kinesis data stream.
	StreamName *string
	// contains filtered or unexported fields
}

Additional options for the Amazon Kinesis streaming data source.
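
A hedged sketch of a cross-account Kinesis source configuration; the ARNs and session name are hypothetical placeholders, and the StartingPosition constant name assumes the SDK's usual enum naming:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// crossAccountKinesisOptions reads a stream in another account from the
// oldest available record. RoleArn and RoleSessionName are used together.
func crossAccountKinesisOptions() *types.KinesisStreamingSourceOptions {
	return &types.KinesisStreamingSourceOptions{
		StreamArn:        aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/my-stream"), // hypothetical
		StartingPosition: types.StartingPositionTrimHorizon,
		RoleArn:          aws.String("arn:aws:iam::111122223333:role/glue-kinesis-role"), // hypothetical
		RoleSessionName:  aws.String("glue-streaming"),
	}
}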

type LabelingSetGenerationTaskRunProperties

type LabelingSetGenerationTaskRunProperties struct {

	// The Amazon Simple Storage Service (Amazon S3) path where you will generate the
	// labeling set.
	OutputS3Path *string
	// contains filtered or unexported fields
}

Specifies configuration properties for a labeling set generation task run.

type LakeFormationConfiguration added in v1.18.0

type LakeFormationConfiguration struct {

	// Required for cross-account crawls. For crawls in the same account as the
	// target data, this can be left as null.
	AccountId *string

	// Specifies whether to use Lake Formation credentials for the crawler instead of
	// the IAM role credentials.
	UseLakeFormationCredentials *bool
	// contains filtered or unexported fields
}

Specifies Lake Formation configuration settings for the crawler.

type Language

type Language string
const (
	LanguagePython Language = "PYTHON"
	LanguageScala  Language = "SCALA"
)

Enum values for Language

func (Language) Values added in v0.29.0

func (Language) Values() []Language

Values returns all known values for Language. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type LastActiveDefinition added in v1.11.0

type LastActiveDefinition struct {

	// Specifies a path in Amazon S3 where the blueprint is published by the Glue
	// developer.
	BlueprintLocation *string

	// Specifies a path in Amazon S3 where the blueprint is copied when you create or
	// update the blueprint.
	BlueprintServiceLocation *string

	// The description of the blueprint.
	Description *string

	// The date and time the blueprint was last modified.
	LastModifiedOn *time.Time

	// A JSON string specifying the parameters for the blueprint.
	ParameterSpec *string
	// contains filtered or unexported fields
}

When there are multiple versions of a blueprint and the latest version has some errors, this attribute indicates the last successful blueprint definition that is available with the service.

type LastCrawlInfo

type LastCrawlInfo struct {

	// If an error occurred, the error information about the last crawl.
	ErrorMessage *string

	// The log group for the last crawl.
	LogGroup *string

	// The log stream for the last crawl.
	LogStream *string

	// The prefix for a message about this crawl.
	MessagePrefix *string

	// The time at which the crawl started.
	StartTime *time.Time

	// Status of the last crawl.
	Status LastCrawlStatus
	// contains filtered or unexported fields
}

Status and error information about the most recent crawl.

type LastCrawlStatus

type LastCrawlStatus string
const (
	LastCrawlStatusSucceeded LastCrawlStatus = "SUCCEEDED"
	LastCrawlStatusCancelled LastCrawlStatus = "CANCELLED"
	LastCrawlStatusFailed    LastCrawlStatus = "FAILED"
)

Enum values for LastCrawlStatus

func (LastCrawlStatus) Values added in v0.29.0

func (LastCrawlStatus) Values() []LastCrawlStatus

Values returns all known values for LastCrawlStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type LineageConfiguration added in v0.31.0

type LineageConfiguration struct {

	// Specifies whether data lineage is enabled for the crawler. Valid values are:
	//   - ENABLE: enables data lineage for the crawler
	//   - DISABLE: disables data lineage for the crawler
	CrawlerLineageSettings CrawlerLineageSettings
	// contains filtered or unexported fields
}

Specifies data lineage configuration settings for the crawler.

type Location

type Location struct {

	// An Amazon DynamoDB table location.
	DynamoDB []CodeGenNodeArg

	// A JDBC location.
	Jdbc []CodeGenNodeArg

	// An Amazon Simple Storage Service (Amazon S3) location.
	S3 []CodeGenNodeArg
	// contains filtered or unexported fields
}

The location of resources.

type Logical

type Logical string
const (
	LogicalAnd Logical = "AND"
	LogicalAny Logical = "ANY"
)

Enum values for Logical

func (Logical) Values added in v0.29.0

func (Logical) Values() []Logical

Values returns all known values for Logical. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type LogicalOperator

type LogicalOperator string
const (
	LogicalOperatorEquals LogicalOperator = "EQUALS"
)

Enum values for LogicalOperator

func (LogicalOperator) Values added in v0.29.0

func (LogicalOperator) Values() []LogicalOperator

Values returns all known values for LogicalOperator. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type LongColumnStatisticsData

type LongColumnStatisticsData struct {

	// The number of distinct values in a column.
	//
	// This member is required.
	NumberOfDistinctValues int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64

	// The highest value in the column.
	MaximumValue int64

	// The lowest value in the column.
	MinimumValue int64
	// contains filtered or unexported fields
}

Defines column statistics supported for integer data columns.

type MLTransform

type MLTransform struct {

	// A timestamp. The time and date that this machine learning transform was created.
	CreatedOn *time.Time

	// A user-defined, long-form description text for the machine learning transform.
	// Descriptions are not guaranteed to be unique and can be changed at any time.
	Description *string

	// An EvaluationMetrics object. Evaluation metrics provide an estimate of the
	// quality of your machine learning transform.
	EvaluationMetrics *EvaluationMetrics

	// This value determines which version of Glue this machine learning transform is
	// compatible with. Glue 1.0 is recommended for most customers. If the value is not
	// set, the Glue compatibility defaults to Glue 0.9. For more information, see
	// Glue Versions (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions)
	// in the developer guide.
	GlueVersion *string

	// A list of Glue table definitions used by the transform.
	InputRecordTables []GlueTable

	// A count identifier for the labeling files generated by Glue for this transform.
	// As you create a better transform, you can iteratively download, label, and
	// upload the labeling file.
	LabelCount int32

	// A timestamp. The last point in time when this machine learning transform was
	// modified.
	LastModifiedOn *time.Time

	// The number of Glue data processing units (DPUs) that are allocated to task runs
	// for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A
	// DPU is a relative measure of processing power that consists of 4 vCPUs of
	// compute capacity and 16 GB of memory. For more information, see the Glue
	// pricing page (http://aws.amazon.com/glue/pricing/) . MaxCapacity is a mutually
	// exclusive option with NumberOfWorkers and WorkerType .
	//   - If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be
	//   set.
	//   - If MaxCapacity is set, then neither NumberOfWorkers nor WorkerType can be set.
	//   - If WorkerType is set, then NumberOfWorkers is required (and vice versa).
	//   - MaxCapacity and NumberOfWorkers must both be at least 1.
	// When the WorkerType field is set to a value other than Standard , the
	// MaxCapacity field is set automatically and becomes read-only.
	MaxCapacity *float64

	// The maximum number of times to retry after an MLTaskRun of the machine learning
	// transform fails.
	MaxRetries *int32

	// A user-defined name for the machine learning transform. Names are not
	// guaranteed unique and can be changed at any time.
	Name *string

	// The number of workers of a defined workerType that are allocated when a task of
	// the transform runs. If WorkerType is set, then NumberOfWorkers is required (and
	// vice versa).
	NumberOfWorkers *int32

	// A TransformParameters object. You can use parameters to tune (customize) the
	// behavior of the machine learning transform by specifying what data it learns
	// from and your preference on various tradeoffs (such as precision vs. recall, or
	// accuracy vs. cost).
	Parameters *TransformParameters

	// The name or Amazon Resource Name (ARN) of the IAM role with the required
	// permissions. The required permissions include both Glue service role permissions
	// to Glue resources, and Amazon S3 permissions required by the transform.
	//   - This role needs Glue service role permissions to allow access to resources
	//   in Glue. See Attach a Policy to IAM Users That Access Glue (https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html)
	//   .
	//   - This role needs permission to your Amazon Simple Storage Service (Amazon
	//   S3) sources, targets, temporary directory, scripts, and any libraries used by
	//   the task run for this transform.
	Role *string

	// A map of key-value pairs representing the columns and data types that this
	// transform can run against. Has an upper bound of 100 columns.
	Schema []SchemaColumn

	// The current status of the machine learning transform.
	Status TransformStatusType

	// The timeout in minutes of the machine learning transform.
	Timeout *int32

	// The encryption-at-rest settings of the transform that apply to accessing user
	// data. Machine learning transforms can access user data encrypted in Amazon S3
	// using KMS.
	TransformEncryption *TransformEncryption

	// The unique transform ID that is generated for the machine learning transform.
	// The ID is guaranteed to be unique and does not change.
	TransformId *string

	// The type of predefined worker that is allocated when a task of this transform
	// runs. Accepts a value of Standard, G.1X, or G.2X.
	//   - For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory
	//   and a 50GB disk, and 2 executors per worker.
	//   - For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a
	//   64GB disk, and 1 executor per worker.
	//   - For the G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a
	//   128GB disk, and 1 executor per worker.
	// MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType .
	//   - If either NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be
	//   set.
	//   - If MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set.
	//   - If WorkerType is set, then NumberOfWorkers is required (and vice versa).
	//   - MaxCapacity and NumberOfWorkers must both be at least 1.
	WorkerType WorkerType
	// contains filtered or unexported fields
}

A structure for a machine learning transform.
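
The capacity fields above are mutually constrained. As a minimal sketch of the documented rules (validateCapacity is a hypothetical helper, not part of this package; it assumes this package is imported as types alongside the standard errors package):

// validateCapacity encodes the documented exclusivity rules between
// MaxCapacity and WorkerType/NumberOfWorkers. Hypothetical helper.
func validateCapacity(t types.MLTransform) error {
	workerFieldsSet := t.WorkerType != "" || t.NumberOfWorkers != nil
	if t.MaxCapacity != nil && workerFieldsSet {
		return errors.New("MaxCapacity cannot be set together with WorkerType or NumberOfWorkers")
	}
	if (t.WorkerType != "") != (t.NumberOfWorkers != nil) {
		return errors.New("WorkerType and NumberOfWorkers must be set together")
	}
	return nil
}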

type MLTransformNotReadyException

type MLTransformNotReadyException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The machine learning transform is not ready to run.

func (*MLTransformNotReadyException) Error

func (e *MLTransformNotReadyException) Error() string

func (*MLTransformNotReadyException) ErrorCode

func (e *MLTransformNotReadyException) ErrorCode() string

func (*MLTransformNotReadyException) ErrorFault

func (e *MLTransformNotReadyException) ErrorFault() smithy.ErrorFault

func (*MLTransformNotReadyException) ErrorMessage

func (e *MLTransformNotReadyException) ErrorMessage() string

type MLUserDataEncryption added in v0.29.0

type MLUserDataEncryption struct {

	// The encryption mode applied to user data. Valid values are:
	//   - DISABLED: encryption is disabled
	//   - SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS)
	//   for user data stored in Amazon S3.
	//
	// This member is required.
	MlUserDataEncryptionMode MLUserDataEncryptionModeString

	// The ID for the customer-provided KMS key.
	KmsKeyId *string
	// contains filtered or unexported fields
}

The encryption-at-rest settings of the transform that apply to accessing user data.
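
For example, enabling SSE-KMS for user data might look like the following sketch (the key ID is a placeholder; aws.String is the pointer helper from github.com/aws/aws-sdk-go-v2/aws):

enc := types.MLUserDataEncryption{
	MlUserDataEncryptionMode: types.MLUserDataEncryptionModeStringSsekms,
	KmsKeyId:                 aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
}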

type MLUserDataEncryptionModeString added in v0.29.0

type MLUserDataEncryptionModeString string
const (
	MLUserDataEncryptionModeStringDisabled MLUserDataEncryptionModeString = "DISABLED"
	MLUserDataEncryptionModeStringSsekms   MLUserDataEncryptionModeString = "SSE-KMS"
)

Enum values for MLUserDataEncryptionModeString

func (MLUserDataEncryptionModeString) Values added in v0.29.0

func (MLUserDataEncryptionModeString) Values() []MLUserDataEncryptionModeString

Values returns all known values for MLUserDataEncryptionModeString. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Mapping added in v1.25.0

type Mapping struct {

	// Only applicable to nested data structures. If you want to change the parent
	// structure, but also one of its children, you can fill out this data structure.
	// It is also Mapping , but its FromPath will be the parent's FromPath plus the
	// FromPath from this structure. For the children part, suppose you have the
	// structure:
	//   { "FromPath": "OuterStructure", "ToKey": "OuterStructure", "ToType":
	//   "Struct", "Dropped": false, "Children": [{ "FromPath": "inner", "ToKey":
	//   "inner", "ToType": "Double", "Dropped": false }] }
	// You can specify a Mapping that looks like:
	//   { "FromPath": "OuterStructure", "ToKey": "OuterStructure", "ToType":
	//   "Struct", "Dropped": false, "Children": [{ "FromPath": "inner", "ToKey":
	//   "inner", "ToType": "Double", "Dropped": false }] }
	Children []Mapping

	// If true, then the column is removed.
	Dropped *bool

	// The table or column to be modified.
	FromPath []string

	// The type of the data to be modified.
	FromType *string

	// After the apply mapping, what the name of the column should be. Can be the same
	// as FromPath .
	ToKey *string

	// The data type that the data is to be modified to.
	ToType *string
	// contains filtered or unexported fields
}

Specifies the mapping of data property keys.
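
A minimal sketch of the nested example from the Children field above, expressed as Go values (aws.String and aws.Bool are pointer helpers from github.com/aws/aws-sdk-go-v2/aws):

m := types.Mapping{
	FromPath: []string{"OuterStructure"},
	ToKey:    aws.String("OuterStructure"),
	ToType:   aws.String("Struct"),
	Dropped:  aws.Bool(false),
	// The child's effective FromPath is the parent's FromPath plus its own.
	Children: []types.Mapping{{
		FromPath: []string{"inner"},
		ToKey:    aws.String("inner"),
		ToType:   aws.String("Double"),
		Dropped:  aws.Bool(false),
	}},
}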

type MappingEntry

type MappingEntry struct {

	// The source path.
	SourcePath *string

	// The name of the source table.
	SourceTable *string

	// The source type.
	SourceType *string

	// The target path.
	TargetPath *string

	// The target table.
	TargetTable *string

	// The target type.
	TargetType *string
	// contains filtered or unexported fields
}

Defines a mapping.

type Merge added in v1.25.0

type Merge struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// The list of primary key fields to match records from the source and staging
	// dynamic frames.
	//
	// This member is required.
	PrimaryKeys [][]string

	// The source DynamicFrame that will be merged with a staging DynamicFrame .
	//
	// This member is required.
	Source *string
	// contains filtered or unexported fields
}

Specifies a transform that merges a DynamicFrame with a staging DynamicFrame based on the specified primary keys to identify records. Duplicate records (records with the same primary keys) are not de-duplicated.
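
A minimal sketch of a Merge node; the node names and key path are placeholders:

merge := types.Merge{
	Name:   aws.String("MergeWithStaging"),
	Inputs: []string{"source_node", "staging_node"}, // placeholder node names
	Source: aws.String("source_node"),
	// Each inner slice is one primary-key field path.
	PrimaryKeys: [][]string{{"id"}},
}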

type MetadataInfo added in v0.30.0

type MetadataInfo struct {

	// The time at which the entry was created.
	CreatedTime *string

	// The metadata key’s corresponding value.
	MetadataValue *string

	// Other metadata belonging to the same metadata key.
	OtherMetadataValueList []OtherMetadataValueListItem
	// contains filtered or unexported fields
}

A structure containing metadata information for a schema version.

type MetadataKeyValuePair added in v0.30.0

type MetadataKeyValuePair struct {

	// A metadata key.
	MetadataKey *string

	// A metadata key’s corresponding value.
	MetadataValue *string
	// contains filtered or unexported fields
}

A structure containing a key value pair for metadata.

type MetadataOperation added in v1.54.0

type MetadataOperation string
const (
	MetadataOperationCreate MetadataOperation = "CREATE"
)

Enum values for MetadataOperation

func (MetadataOperation) Values added in v1.54.0

func (MetadataOperation) Values() []MetadataOperation

Values returns all known values for MetadataOperation. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type MetricBasedObservation added in v1.72.0

type MetricBasedObservation struct {

	// The name of the data quality metric used for generating the observation.
	MetricName *string

	// An object of type DataQualityMetricValues representing the analysis of the data
	// quality metric value.
	MetricValues *DataQualityMetricValues

	// A list of new data quality rules generated as part of the observation based on
	// the data quality metric value.
	NewRules []string
	// contains filtered or unexported fields
}

Describes the metric-based observation generated from evaluated data quality metrics.

type MicrosoftSQLServerCatalogSource added in v1.25.0

type MicrosoftSQLServerCatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a Microsoft SQL server data source in the Glue Data Catalog.

type MicrosoftSQLServerCatalogTarget added in v1.25.0

type MicrosoftSQLServerCatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a target that uses Microsoft SQL Server.

type MongoDBTarget added in v0.29.0

type MongoDBTarget struct {

	// The name of the connection to use to connect to the Amazon DocumentDB or
	// MongoDB target.
	ConnectionName *string

	// The path of the Amazon DocumentDB or MongoDB target (database/collection).
	Path *string

	// Indicates whether to scan all the records, or to sample rows from the table.
	// Scanning all the records can take a long time when the table is not a high
	// throughput table. A value of true means to scan all records, while a value of
	// false means to sample the records. If no value is specified, the value defaults
	// to true .
	ScanAll *bool
	// contains filtered or unexported fields
}

Specifies an Amazon DocumentDB or MongoDB data store to crawl.

type MySQLCatalogSource added in v1.25.0

type MySQLCatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a MySQL data source in the Glue Data Catalog.

type MySQLCatalogTarget added in v1.25.0

type MySQLCatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a target that uses MySQL.

type NoScheduleException

type NoScheduleException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

There is no applicable schedule.

func (*NoScheduleException) Error

func (e *NoScheduleException) Error() string

func (*NoScheduleException) ErrorCode

func (e *NoScheduleException) ErrorCode() string

func (*NoScheduleException) ErrorFault

func (e *NoScheduleException) ErrorFault() smithy.ErrorFault

func (*NoScheduleException) ErrorMessage

func (e *NoScheduleException) ErrorMessage() string

type Node

type Node struct {

	// Details of the crawler when the node represents a crawler.
	CrawlerDetails *CrawlerNodeDetails

	// Details of the Job when the node represents a Job.
	JobDetails *JobNodeDetails

	// The name of the Glue component represented by the node.
	Name *string

	// Details of the Trigger when the node represents a Trigger.
	TriggerDetails *TriggerNodeDetails

	// The type of Glue component represented by the node.
	Type NodeType

	// The unique Id assigned to the node within the workflow.
	UniqueId *string
	// contains filtered or unexported fields
}

A node represents a Glue component (trigger, crawler, or job) on a workflow graph.

type NodeType

type NodeType string
const (
	NodeTypeCrawler NodeType = "CRAWLER"
	NodeTypeJob     NodeType = "JOB"
	NodeTypeTrigger NodeType = "TRIGGER"
)

Enum values for NodeType

func (NodeType) Values added in v0.29.0

func (NodeType) Values() []NodeType

Values returns all known values for NodeType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type NotificationProperty

type NotificationProperty struct {

	// After a job run starts, the number of minutes to wait before sending a job run
	// delay notification.
	NotifyDelayAfter *int32
	// contains filtered or unexported fields
}

Specifies configuration properties of a notification.

type NullCheckBoxList added in v1.25.0

type NullCheckBoxList struct {

	// Specifies that an empty string is considered as a null value.
	IsEmpty *bool

	// Specifies that an integer value of -1 is considered as a null value.
	IsNegOne *bool

	// Specifies that a value spelling out the word 'null' is considered as a null
	// value.
	IsNullString *bool
	// contains filtered or unexported fields
}

Represents whether certain values are recognized as null values for removal.

type NullValueField added in v1.25.0

type NullValueField struct {

	// The datatype of the value.
	//
	// This member is required.
	Datatype *Datatype

	// The value of the null placeholder.
	//
	// This member is required.
	Value *string
	// contains filtered or unexported fields
}

Represents a custom null value, such as zeros or another value being used as a null placeholder unique to the dataset.
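
For example, treating empty strings and the literal "N/A" as nulls might look like this sketch (it assumes the Datatype structure in this package, which pairs an Id with a Label; the values shown are placeholders):

checks := types.NullCheckBoxList{
	IsEmpty:      aws.Bool(true),
	IsNullString: aws.Bool(true),
}
custom := types.NullValueField{
	Value: aws.String("N/A"),
	Datatype: &types.Datatype{
		Id:    aws.String("string"),
		Label: aws.String("string"),
	},
}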

type OpenTableFormatInput added in v1.54.0

type OpenTableFormatInput struct {

	// Specifies an IcebergInput structure that defines an Apache Iceberg metadata
	// table.
	IcebergInput *IcebergInput
	// contains filtered or unexported fields
}

A structure representing an open format table.

type OperationTimeoutException

type OperationTimeoutException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The operation timed out.

func (*OperationTimeoutException) Error

func (e *OperationTimeoutException) Error() string

func (*OperationTimeoutException) ErrorCode

func (e *OperationTimeoutException) ErrorCode() string

func (*OperationTimeoutException) ErrorFault

func (e *OperationTimeoutException) ErrorFault() smithy.ErrorFault

func (*OperationTimeoutException) ErrorMessage

func (e *OperationTimeoutException) ErrorMessage() string

type Option added in v1.47.0

type Option struct {

	// Specifies the description of the option.
	Description *string

	// Specifies the label of the option.
	Label *string

	// Specifies the value of the option.
	Value *string
	// contains filtered or unexported fields
}

Specifies an option value.

type OracleSQLCatalogSource added in v1.25.0

type OracleSQLCatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies an Oracle data source in the Glue Data Catalog.

type OracleSQLCatalogTarget added in v1.25.0

type OracleSQLCatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a target that uses Oracle SQL.

type Order

type Order struct {

	// The name of the column.
	//
	// This member is required.
	Column *string

	// Indicates that the column is sorted in ascending order ( == 1 ), or in
	// descending order ( == 0 ).
	//
	// This member is required.
	SortOrder int32
	// contains filtered or unexported fields
}

Specifies the sort order of a sorted column.
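
For example, an ascending sort on a column (the column name is a placeholder):

sortKey := types.Order{
	Column:    aws.String("event_time"), // placeholder column name
	SortOrder: 1,                        // 1 = ascending, 0 = descending
}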

type OtherMetadataValueListItem added in v1.3.0

type OtherMetadataValueListItem struct {

	// The time at which the entry was created.
	CreatedTime *string

	// The metadata key’s corresponding value for the other metadata belonging to the
	// same metadata key.
	MetadataValue *string
	// contains filtered or unexported fields
}

A structure containing other metadata for a schema version belonging to the same metadata key.

type PIIDetection added in v1.25.0

type PIIDetection struct {

	// Indicates the types of entities the PIIDetection transform will identify as
	// PII data. PII type entities include: PERSON_NAME, DATE, USA_SNN, EMAIL,
	// USA_ITIN, USA_PASSPORT_NUMBER, PHONE_NUMBER, BANK_ACCOUNT, IP_ADDRESS,
	// MAC_ADDRESS, USA_CPT_CODE, USA_HCPCS_CODE, USA_NATIONAL_DRUG_CODE,
	// USA_MEDICARE_BENEFICIARY_IDENTIFIER, USA_HEALTH_INSURANCE_CLAIM_NUMBER,
	// CREDIT_CARD, USA_NATIONAL_PROVIDER_IDENTIFIER, USA_DEA_NUMBER,
	// USA_DRIVING_LICENSE
	//
	// This member is required.
	EntityTypesToDetect []string

	// The node ID inputs to the transform.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// Indicates the type of PIIDetection transform.
	//
	// This member is required.
	PiiType PiiType

	// Indicates the value that will replace the detected entity.
	MaskValue *string

	// Indicates the output column name that will contain any entity type detected in
	// that row.
	OutputColumnName *string

	// Indicates the fraction of the data to sample when scanning for PII entities.
	SampleFraction *float64

	// Indicates the fraction of the data that must be met in order for a column to be
	// identified as PII data.
	ThresholdFraction *float64
	// contains filtered or unexported fields
}

Specifies a transform that identifies, removes or masks PII data.
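
A minimal sketch of a column-masking PIIDetection node; the node name, input ID, and tuning values are placeholders, and PiiTypeColumnMasking is one of the PiiType values below:

pii := types.PIIDetection{
	Name:                aws.String("MaskContactInfo"),
	Inputs:              []string{"input_node"}, // placeholder node ID
	PiiType:             types.PiiTypeColumnMasking,
	EntityTypesToDetect: []string{"EMAIL", "PHONE_NUMBER"},
	MaskValue:           aws.String("****"),
	SampleFraction:      aws.Float64(0.1), // scan 10% of rows
	ThresholdFraction:   aws.Float64(0.5), // flag a column when half its samples match
}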

type ParamType added in v1.36.0

type ParamType string
const (
	ParamTypeStr     ParamType = "str"
	ParamTypeInt     ParamType = "int"
	ParamTypeFloat   ParamType = "float"
	ParamTypeComplex ParamType = "complex"
	ParamTypeBool    ParamType = "bool"
	ParamTypeList    ParamType = "list"
	ParamTypeNull    ParamType = "null"
)

Enum values for ParamType

func (ParamType) Values added in v1.36.0

func (ParamType) Values() []ParamType

Values returns all known values for ParamType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ParquetCompressionType added in v1.25.0

type ParquetCompressionType string
const (
	ParquetCompressionTypeSnappy       ParquetCompressionType = "snappy"
	ParquetCompressionTypeLzo          ParquetCompressionType = "lzo"
	ParquetCompressionTypeGzip         ParquetCompressionType = "gzip"
	ParquetCompressionTypeUncompressed ParquetCompressionType = "uncompressed"
	ParquetCompressionTypeNone         ParquetCompressionType = "none"
)

Enum values for ParquetCompressionType

func (ParquetCompressionType) Values added in v1.25.0

func (ParquetCompressionType) Values() []ParquetCompressionType

Values returns all known values for ParquetCompressionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Partition

type Partition struct {

	// The ID of the Data Catalog in which the partition resides.
	CatalogId *string

	// The time at which the partition was created.
	CreationTime *time.Time

	// The name of the catalog database in which to create the partition.
	DatabaseName *string

	// The last time at which the partition was accessed.
	LastAccessTime *time.Time

	// The last time at which column statistics were computed for this partition.
	LastAnalyzedTime *time.Time

	// These key-value pairs define partition parameters.
	Parameters map[string]string

	// Provides information about the physical location where the partition is stored.
	StorageDescriptor *StorageDescriptor

	// The name of the database table in which to create the partition.
	TableName *string

	// The values of the partition.
	Values []string
	// contains filtered or unexported fields
}

Represents a slice of table data.

type PartitionError

type PartitionError struct {

	// The details about the partition error.
	ErrorDetail *ErrorDetail

	// The values that define the partition.
	PartitionValues []string
	// contains filtered or unexported fields
}

Contains information about a partition error.

type PartitionIndex added in v0.29.0

type PartitionIndex struct {

	// The name of the partition index.
	//
	// This member is required.
	IndexName *string

	// The keys for the partition index.
	//
	// This member is required.
	Keys []string
	// contains filtered or unexported fields
}

A structure for a partition index.

type PartitionIndexDescriptor added in v0.29.0

type PartitionIndexDescriptor struct {

	// The name of the partition index.
	//
	// This member is required.
	IndexName *string

	// The status of the partition index. The possible statuses are:
	//   - CREATING: The index is being created. When an index is in a CREATING state,
	//   the index or its table cannot be deleted.
	//   - ACTIVE: The index creation succeeds.
	//   - FAILED: The index creation fails.
	//   - DELETING: The index is deleted from the list of indexes.
	//
	// This member is required.
	IndexStatus PartitionIndexStatus

	// A list of one or more keys, as KeySchemaElement structures, for the partition
	// index.
	//
	// This member is required.
	Keys []KeySchemaElement

	// A list of errors that can occur when registering partition indexes for an
	// existing table.
	BackfillErrors []BackfillError
	// contains filtered or unexported fields
}

A descriptor for a partition index in a table.

type PartitionIndexStatus added in v0.29.0

type PartitionIndexStatus string
const (
	PartitionIndexStatusCreating PartitionIndexStatus = "CREATING"
	PartitionIndexStatusActive   PartitionIndexStatus = "ACTIVE"
	PartitionIndexStatusDeleting PartitionIndexStatus = "DELETING"
	PartitionIndexStatusFailed   PartitionIndexStatus = "FAILED"
)

Enum values for PartitionIndexStatus

func (PartitionIndexStatus) Values added in v0.29.0

func (PartitionIndexStatus) Values() []PartitionIndexStatus

Values returns all known values for PartitionIndexStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type PartitionInput

type PartitionInput struct {

	// The last time at which the partition was accessed.
	LastAccessTime *time.Time

	// The last time at which column statistics were computed for this partition.
	LastAnalyzedTime *time.Time

	// These key-value pairs define partition parameters.
	Parameters map[string]string

	// Provides information about the physical location where the partition is stored.
	StorageDescriptor *StorageDescriptor

	// The values of the partition. Although this parameter is not required by the
	// SDK, you must specify this parameter for a valid input. The values for the keys
	// for the new partition must be passed as an array of String objects that must be
	// ordered in the same order as the partition keys appearing in the Amazon S3
	// prefix. Otherwise Glue will add the values to the wrong keys.
	Values []string
	// contains filtered or unexported fields
}

The structure used to create and update a partition.
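
For example, for a table partitioned by year and then month, Values must be given in that same order; this sketch uses placeholder values:

p := types.PartitionInput{
	// Order must match the table's partition keys: year, then month.
	Values:     []string{"2024", "04"},
	Parameters: map[string]string{"source": "nightly-load"}, // placeholder parameter
}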

type PartitionValueList

type PartitionValueList struct {

	// The list of values.
	//
	// This member is required.
	Values []string
	// contains filtered or unexported fields
}

Contains a list of values defining partitions.

type Permission

type Permission string
const (
	PermissionAll                Permission = "ALL"
	PermissionSelect             Permission = "SELECT"
	PermissionAlter              Permission = "ALTER"
	PermissionDrop               Permission = "DROP"
	PermissionDelete             Permission = "DELETE"
	PermissionInsert             Permission = "INSERT"
	PermissionCreateDatabase     Permission = "CREATE_DATABASE"
	PermissionCreateTable        Permission = "CREATE_TABLE"
	PermissionDataLocationAccess Permission = "DATA_LOCATION_ACCESS"
)

Enum values for Permission

func (Permission) Values added in v0.29.0

func (Permission) Values() []Permission

Values returns all known values for Permission. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type PermissionType added in v1.18.0

type PermissionType string
const (
	PermissionTypeColumnPermission     PermissionType = "COLUMN_PERMISSION"
	PermissionTypeCellFilterPermission PermissionType = "CELL_FILTER_PERMISSION"
	PermissionTypeNestedPermission     PermissionType = "NESTED_PERMISSION"
	PermissionTypeNestedCellPermission PermissionType = "NESTED_CELL_PERMISSION"
)

Enum values for PermissionType

func (PermissionType) Values added in v1.18.0

func (PermissionType) Values() []PermissionType

Values returns all known values for PermissionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type PermissionTypeMismatchException added in v1.18.0

type PermissionTypeMismatchException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

There is a mismatch between the permission type specified in the request and the permissions defined on the target resource.

func (*PermissionTypeMismatchException) Error added in v1.18.0

func (e *PermissionTypeMismatchException) Error() string

func (*PermissionTypeMismatchException) ErrorCode added in v1.18.0

func (e *PermissionTypeMismatchException) ErrorCode() string

func (*PermissionTypeMismatchException) ErrorFault added in v1.18.0

func (e *PermissionTypeMismatchException) ErrorFault() smithy.ErrorFault

func (*PermissionTypeMismatchException) ErrorMessage added in v1.18.0

func (e *PermissionTypeMismatchException) ErrorMessage() string

type PhysicalConnectionRequirements

type PhysicalConnectionRequirements struct {

	// The connection's Availability Zone. This field is redundant because the
	// specified subnet implies the Availability Zone to be used. Currently the field
	// must be populated, but it will be deprecated in the future.
	AvailabilityZone *string

	// The security group ID list used by the connection.
	SecurityGroupIdList []string

	// The subnet ID used by the connection.
	SubnetId *string
	// contains filtered or unexported fields
}

Specifies the physical requirements for a connection.

type PiiType added in v1.25.0

type PiiType string
const (
	PiiTypeRowAudit      PiiType = "RowAudit"
	PiiTypeRowMasking    PiiType = "RowMasking"
	PiiTypeColumnAudit   PiiType = "ColumnAudit"
	PiiTypeColumnMasking PiiType = "ColumnMasking"
)

Enum values for PiiType

func (PiiType) Values added in v1.25.0

func (PiiType) Values() []PiiType

Values returns all known values for PiiType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type PostgreSQLCatalogSource added in v1.25.0

type PostgreSQLCatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a PostgreSQL data source in the Glue Data Catalog.

type PostgreSQLCatalogTarget added in v1.25.0

type PostgreSQLCatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a target that uses PostgreSQL.

type Predecessor

type Predecessor struct {

	// The name of the job definition used by the predecessor job run.
	JobName *string

	// The job-run ID of the predecessor job run.
	RunId *string
	// contains filtered or unexported fields
}

A job run that was used in the predicate of a conditional trigger that triggered this job run.

type Predicate

type Predicate struct {

	// A list of the conditions that determine when the trigger will fire.
	Conditions []Condition

	// An optional field if only one condition is listed. If multiple conditions are
	// listed, then this field is required.
	Logical Logical
	// contains filtered or unexported fields
}

Defines the predicate of the trigger, which determines when it fires.
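
A minimal sketch of a two-condition predicate; it assumes the Condition structure and the Logical, LogicalOperator, and JobRunState enum values defined elsewhere in this package, and the job names are placeholders:

pred := types.Predicate{
	// Logical is required here because more than one condition is listed.
	Logical: types.LogicalAnd,
	Conditions: []types.Condition{
		{
			JobName:         aws.String("stage-job"),
			LogicalOperator: types.LogicalOperatorEquals,
			State:           types.JobRunStateSucceeded,
		},
		{
			JobName:         aws.String("load-job"),
			LogicalOperator: types.LogicalOperatorEquals,
			State:           types.JobRunStateSucceeded,
		},
	},
}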

type PrincipalPermissions

type PrincipalPermissions struct {

	// The permissions that are granted to the principal.
	Permissions []Permission

	// The principal who is granted permissions.
	Principal *DataLakePrincipal
	// contains filtered or unexported fields
}

Permissions granted to a principal.

type PrincipalType

type PrincipalType string
const (
	PrincipalTypeUser  PrincipalType = "USER"
	PrincipalTypeRole  PrincipalType = "ROLE"
	PrincipalTypeGroup PrincipalType = "GROUP"
)

Enum values for PrincipalType

func (PrincipalType) Values added in v0.29.0

func (PrincipalType) Values() []PrincipalType

Values returns all known values for PrincipalType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type PropertyPredicate

type PropertyPredicate struct {

	// The comparator used to compare this property to others.
	Comparator Comparator

	// The key of the property.
	Key *string

	// The value of the property.
	Value *string
	// contains filtered or unexported fields
}

Defines a property predicate.

type QuerySessionContext added in v1.73.0

type QuerySessionContext struct {

	// An opaque string-string map passed by the query engine.
	AdditionalContext map[string]string

	// An identifier string for the consumer cluster.
	ClusterId *string

	// A cryptographically generated query identifier generated by Glue or Lake
	// Formation.
	QueryAuthorizationId *string

	// A unique identifier generated by the query engine for the query.
	QueryId *string

	// A timestamp provided by the query engine for when the query started.
	QueryStartTime *time.Time
	// contains filtered or unexported fields
}

A structure used as a protocol between query engines and Lake Formation or Glue. Contains both a Lake Formation generated authorization identifier and information from the request's authorization context.

type QuoteChar added in v1.25.0

type QuoteChar string
const (
	QuoteCharQuote       QuoteChar = "quote"
	QuoteCharQuillemet   QuoteChar = "quillemet"
	QuoteCharSingleQuote QuoteChar = "single_quote"
	QuoteCharDisabled    QuoteChar = "disabled"
)

Enum values for QuoteChar

func (QuoteChar) Values added in v1.25.0

func (QuoteChar) Values() []QuoteChar

Values returns all known values for QuoteChar. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Recipe added in v1.57.0

type Recipe struct {

	// The nodes that are inputs to the recipe node, identified by id.
	//
	// This member is required.
	Inputs []string

	// The name of the Glue Studio node.
	//
	// This member is required.
	Name *string

	// A reference to the DataBrew recipe used by the node.
	//
	// This member is required.
	RecipeReference *RecipeReference
	// contains filtered or unexported fields
}

A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.

type RecipeReference added in v1.57.0

type RecipeReference struct {

	// The ARN of the DataBrew recipe.
	//
	// This member is required.
	RecipeArn *string

	// The RecipeVersion of the DataBrew recipe.
	//
	// This member is required.
	RecipeVersion *string
	// contains filtered or unexported fields
}

A reference to a Glue DataBrew recipe.

type RecrawlBehavior added in v0.29.0

type RecrawlBehavior string
const (
	RecrawlBehaviorCrawlEverything     RecrawlBehavior = "CRAWL_EVERYTHING"
	RecrawlBehaviorCrawlNewFoldersOnly RecrawlBehavior = "CRAWL_NEW_FOLDERS_ONLY"
	RecrawlBehaviorCrawlEventMode      RecrawlBehavior = "CRAWL_EVENT_MODE"
)

Enum values for RecrawlBehavior

func (RecrawlBehavior) Values added in v0.29.0

func (RecrawlBehavior) Values() []RecrawlBehavior

Values returns all known values for RecrawlBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type RecrawlPolicy added in v0.29.0

type RecrawlPolicy struct {

	// Specifies whether to crawl the entire dataset again or to crawl only folders
	// that were added since the last crawler run. A value of CRAWL_EVERYTHING
	// specifies crawling the entire dataset again. A value of CRAWL_NEW_FOLDERS_ONLY
	// specifies crawling only folders that were added since the last crawler run. A
	// value of CRAWL_EVENT_MODE specifies crawling only the changes identified by
	// Amazon S3 events.
	RecrawlBehavior RecrawlBehavior
	// contains filtered or unexported fields
}

When crawling an Amazon S3 data source after the first crawl is complete, specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. For more information, see Incremental Crawls in Glue (https://docs.aws.amazon.com/glue/latest/dg/incremental-crawls.html) in the developer guide.
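
For example, to crawl only folders added since the last crawler run:

policy := types.RecrawlPolicy{
	RecrawlBehavior: types.RecrawlBehaviorCrawlNewFoldersOnly,
}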

type RedshiftSource added in v1.25.0

type RedshiftSource struct {

	// The database to read from.
	//
	// This member is required.
	Database *string

	// The name of the Amazon Redshift data store.
	//
	// This member is required.
	Name *string

	// The database table to read from.
	//
	// This member is required.
	Table *string

	// The Amazon S3 path where temporary data can be staged when copying out of the
	// database.
	RedshiftTmpDir *string

	// The IAM role with permissions.
	TmpDirIAMRole *string
	// contains filtered or unexported fields
}

Specifies an Amazon Redshift data store.

type RedshiftTarget added in v1.25.0

type RedshiftTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string

	// The Amazon S3 path where temporary data can be staged when copying out of the
	// database.
	RedshiftTmpDir *string

	// The IAM role with permissions.
	TmpDirIAMRole *string

	// The set of options to configure an upsert operation when writing to a Redshift
	// target.
	UpsertRedshiftOptions *UpsertRedshiftTargetOptions
	// contains filtered or unexported fields
}

Specifies a target that uses Amazon Redshift.

type RegistryId added in v0.30.0

type RegistryId struct {

	// ARN of the registry to be updated. One of RegistryArn or RegistryName has to be
	// provided.
	RegistryArn *string

	// Name of the registry. Used only for lookup. One of RegistryArn or RegistryName
	// has to be provided.
	RegistryName *string
	// contains filtered or unexported fields
}

A wrapper structure that may contain the registry name and Amazon Resource Name (ARN).
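
For example, identifying a registry by name for lookup (the name is a placeholder; RegistryArn could be supplied instead):

reg := types.RegistryId{
	RegistryName: aws.String("my-schema-registry"),
}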

type RegistryListItem added in v0.30.0

type RegistryListItem struct {

	// The date the registry was created.
	CreatedTime *string

	// A description of the registry.
	Description *string

	// The Amazon Resource Name (ARN) of the registry.
	RegistryArn *string

	// The name of the registry.
	RegistryName *string

	// The status of the registry.
	Status RegistryStatus

	// The date the registry was updated.
	UpdatedTime *string
	// contains filtered or unexported fields
}

A structure containing the details for a registry.

type RegistryStatus added in v0.30.0

type RegistryStatus string
const (
	RegistryStatusAvailable RegistryStatus = "AVAILABLE"
	RegistryStatusDeleting  RegistryStatus = "DELETING"
)

Enum values for RegistryStatus

func (RegistryStatus) Values added in v0.30.0

func (RegistryStatus) Values() []RegistryStatus

Values returns all known values for RegistryStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type RelationalCatalogSource added in v1.25.0

type RelationalCatalogSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string
	// contains filtered or unexported fields
}

Specifies a Relational database data source in the Glue Data Catalog.

type RenameField added in v1.25.0

type RenameField struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A JSON path to a variable in the data structure for the source data.
	//
	// This member is required.
	SourcePath []string

	// A JSON path to a variable in the data structure for the target data.
	//
	// This member is required.
	TargetPath []string
	// contains filtered or unexported fields
}

Specifies a transform that renames a single data property key.

type ResourceNotReadyException added in v1.16.0

type ResourceNotReadyException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

A resource was not ready for a transaction.

func (*ResourceNotReadyException) Error added in v1.16.0

func (e *ResourceNotReadyException) Error() string

func (*ResourceNotReadyException) ErrorCode added in v1.16.0

func (e *ResourceNotReadyException) ErrorCode() string

func (*ResourceNotReadyException) ErrorFault added in v1.16.0

func (e *ResourceNotReadyException) ErrorFault() smithy.ErrorFault

func (*ResourceNotReadyException) ErrorMessage added in v1.16.0

func (e *ResourceNotReadyException) ErrorMessage() string

type ResourceNumberLimitExceededException

type ResourceNumberLimitExceededException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

A resource numerical limit was exceeded.

func (*ResourceNumberLimitExceededException) Error

func (e *ResourceNumberLimitExceededException) Error() string

func (*ResourceNumberLimitExceededException) ErrorCode

func (e *ResourceNumberLimitExceededException) ErrorCode() string

func (*ResourceNumberLimitExceededException) ErrorFault

func (e *ResourceNumberLimitExceededException) ErrorFault() smithy.ErrorFault

func (*ResourceNumberLimitExceededException) ErrorMessage

func (e *ResourceNumberLimitExceededException) ErrorMessage() string

type ResourceShareType

type ResourceShareType string
const (
	ResourceShareTypeForeign   ResourceShareType = "FOREIGN"
	ResourceShareTypeAll       ResourceShareType = "ALL"
	ResourceShareTypeFederated ResourceShareType = "FEDERATED"
)

Enum values for ResourceShareType

func (ResourceShareType) Values added in v0.29.0

func (ResourceShareType) Values() []ResourceShareType

Values returns all known values for ResourceShareType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ResourceType

type ResourceType string
const (
	ResourceTypeJar     ResourceType = "JAR"
	ResourceTypeFile    ResourceType = "FILE"
	ResourceTypeArchive ResourceType = "ARCHIVE"
)

Enum values for ResourceType

func (ResourceType) Values added in v0.29.0

func (ResourceType) Values() []ResourceType

Values returns all known values for ResourceType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ResourceUri

type ResourceUri struct {

	// The type of the resource.
	ResourceType ResourceType

	// The URI for accessing the resource.
	Uri *string
	// contains filtered or unexported fields
}

The URIs for function resources.

type RunMetrics added in v1.68.0

type RunMetrics struct {

	// The duration of the job in hours.
	JobDurationInHour *string

	// The number of bytes removed by the compaction job run.
	NumberOfBytesCompacted *string

	// The number of DPU hours consumed by the job.
	NumberOfDpus *string

	// The number of files removed by the compaction job run.
	NumberOfFilesCompacted *string
	// contains filtered or unexported fields
}

Metrics for the optimizer run.

type S3CatalogDeltaSource added in v1.43.0

type S3CatalogDeltaSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the Delta Lake data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options.
	AdditionalDeltaOptions map[string]string

	// Specifies the data schema for the Delta Lake source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Delta Lake data source that is registered in the Glue Data Catalog. The data source must be stored in Amazon S3.

type S3CatalogHudiSource added in v1.40.0

type S3CatalogHudiSource struct {

	// The name of the database to read from.
	//
	// This member is required.
	Database *string

	// The name of the Hudi data source.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to read from.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options.
	AdditionalHudiOptions map[string]string

	// Specifies the data schema for the Hudi source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Hudi data source that is registered in the Glue Data Catalog. The Hudi data source must be stored in Amazon S3.

type S3CatalogSource added in v1.25.0

type S3CatalogSource struct {

	// The database to read from.
	//
	// This member is required.
	Database *string

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// The database table to read from.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options.
	AdditionalOptions *S3SourceAdditionalOptions

	// Partitions satisfying this predicate are deleted. Files within the retention
	// period in these partitions are not deleted. Set to "" – empty by default.
	PartitionPredicate *string
	// contains filtered or unexported fields
}

Specifies an Amazon S3 data store in the Glue Data Catalog.

type S3CatalogTarget added in v1.25.0

type S3CatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *CatalogSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a data target that writes to Amazon S3 using the Glue Data Catalog.

type S3CsvSource added in v1.25.0

type S3CsvSource struct {

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// A list of the Amazon S3 paths to read from.
	//
	// This member is required.
	Paths []string

	// Specifies the character to use for quoting. The default is a double quote: '"' .
	// Set this to -1 to turn off quoting entirely.
	//
	// This member is required.
	QuoteChar QuoteChar

	// Specifies the delimiter character. The default is a comma: ",", but any other
	// character can be specified.
	//
	// This member is required.
	Separator Separator

	// Specifies additional connection options.
	AdditionalOptions *S3DirectSourceAdditionalOptions

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	CompressionType CompressionType

	// Specifies a character to use for escaping. This option is used only when
	// reading CSV files. The default value is none . If enabled, the character which
	// immediately follows is used as-is, except for a small set of well-known escapes
	// ( \n , \r , \t , and \0 ).
	Escaper *string

	// A string containing a JSON list of Unix-style glob patterns to exclude. For
	// example, "[\"**.pdf\"]" excludes all PDF files.
	Exclusions []string

	// Grouping files is turned on by default when the input contains more than 50,000
	// files. To turn on grouping with fewer than 50,000 files, set this parameter to
	// "inPartition". To disable grouping when there are more than 50,000 files, set
	// this parameter to "none" .
	GroupFiles *string

	// The target group size in bytes. The default is computed based on the input data
	// size and the size of your cluster. When there are fewer than 50,000 input files,
	// "groupFiles" must be set to "inPartition" for this to take effect.
	GroupSize *string

	// This option controls the duration in milliseconds after which the s3 listing is
	// likely to be consistent. Files with modification timestamps falling within the
	// last maxBand milliseconds are tracked specially when using JobBookmarks to
	// account for Amazon S3 eventual consistency. Most users don't need to set this
	// option. The default is 900000 milliseconds, or 15 minutes.
	MaxBand *int32

	// This option specifies the maximum number of files to save from the last maxBand
	// seconds. If this number is exceeded, extra files are skipped and only processed
	// in the next job run.
	MaxFilesInBand *int32

	// A Boolean value that specifies whether a single record can span multiple lines.
	// This can occur when a field contains a quoted new-line character. You must set
	// this option to True if any record spans multiple lines. The default value is
	// False , which allows for more aggressive file-splitting during parsing.
	Multiline *bool

	// A Boolean value that specifies whether to use the advanced SIMD CSV reader
	// along with Apache Arrow based columnar memory formats. Only available in Glue
	// version 3.0.
	OptimizePerformance bool

	// Specifies the data schema for the S3 CSV source.
	OutputSchemas []GlueSchema

	// If set to true, recursively reads files in all subdirectories under the
	// specified paths.
	Recurse *bool

	// A Boolean value that specifies whether to skip the first data line. The default
	// value is False .
	SkipFirst *bool

	// A Boolean value that specifies whether to treat the first line as a header. The
	// default value is False .
	WithHeader *bool

	// A Boolean value that specifies whether to write the header to output. The
	// default value is True .
	WriteHeader *bool
	// contains filtered or unexported fields
}

Specifies a comma-separated value (CSV) data store stored in Amazon S3.
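
A minimal sketch of a CSV source with a header row; the bucket path is a placeholder, and SeparatorComma is assumed from the Separator enum defined elsewhere in this package:

csv := types.S3CsvSource{
	Name:       aws.String("RawEvents"),
	Paths:      []string{"s3://amzn-s3-demo-bucket/events/"}, // placeholder path
	Separator:  types.SeparatorComma,
	QuoteChar:  types.QuoteCharQuote,
	WithHeader: aws.Bool(true),
	Recurse:    aws.Bool(true),
}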

type S3DeltaCatalogTarget added in v1.43.0

type S3DeltaCatalogTarget struct {

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string

	// Specifies additional connection options for the connector.
	AdditionalOptions map[string]string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *CatalogSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a target that writes to a Delta Lake data source in the Glue Data Catalog.

type S3DeltaDirectTarget added in v1.43.0

type S3DeltaDirectTarget struct {

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	//
	// This member is required.
	Compression DeltaTargetCompressionType

	// Specifies the data output format for the target.
	//
	// This member is required.
	Format TargetFormat

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The Amazon S3 path of your Delta Lake data source to write to.
	//
	// This member is required.
	Path *string

	// Specifies additional connection options for the connector.
	AdditionalOptions map[string]string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *DirectSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a target that writes to a Delta Lake data source in Amazon S3.

type S3DeltaSource added in v1.43.0

type S3DeltaSource struct {

	// The name of the Delta Lake source.
	//
	// This member is required.
	Name *string

	// A list of the Amazon S3 paths to read from.
	//
	// This member is required.
	Paths []string

	// Specifies additional connection options.
	AdditionalDeltaOptions map[string]string

	// Specifies additional options for the connector.
	AdditionalOptions *S3DirectSourceAdditionalOptions

	// Specifies the data schema for the Delta Lake source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Delta Lake data source stored in Amazon S3.

type S3DirectSourceAdditionalOptions added in v1.25.0

type S3DirectSourceAdditionalOptions struct {

	// Sets the upper limit for the target number of files that will be processed.
	BoundedFiles *int64

	// Sets the upper limit for the target size of the dataset in bytes that will be
	// processed.
	BoundedSize *int64

	// Sets option to enable a sample path.
	EnableSamplePath *bool

	// If enabled, specifies the sample path.
	SamplePath *string
	// contains filtered or unexported fields
}

Specifies additional connection options for the Amazon S3 data store.
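
For example, bounding a run by file count and total size (the limits are placeholders):

opts := types.S3DirectSourceAdditionalOptions{
	BoundedFiles: aws.Int64(1000),                      // process at most ~1,000 files
	BoundedSize:  aws.Int64(5 * 1024 * 1024 * 1024),    // ~5 GB upper bound
}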

type S3DirectTarget added in v1.25.0

type S3DirectTarget struct {

	// Specifies the data output format for the target.
	//
	// This member is required.
	Format TargetFormat

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// A single Amazon S3 path to write to.
	//
	// This member is required.
	Path *string

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	Compression *string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *DirectSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a data target that writes to Amazon S3.

type S3Encryption

type S3Encryption struct {

	// The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.
	KmsKeyArn *string

	// The encryption mode to use for Amazon S3 data.
	S3EncryptionMode S3EncryptionMode
	// contains filtered or unexported fields
}

Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted.
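
For example, SSE-KMS with a specific key (the ARN is a placeholder):

s3enc := types.S3Encryption{
	S3EncryptionMode: types.S3EncryptionModeSsekms,
	KmsKeyArn:        aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
}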

type S3EncryptionMode

type S3EncryptionMode string
const (
	S3EncryptionModeDisabled S3EncryptionMode = "DISABLED"
	S3EncryptionModeSsekms   S3EncryptionMode = "SSE-KMS"
	S3EncryptionModeSses3    S3EncryptionMode = "SSE-S3"
)

Enum values for S3EncryptionMode

func (S3EncryptionMode) Values added in v0.29.0

func (S3EncryptionMode) Values() []S3EncryptionMode

Values returns all known values for S3EncryptionMode. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type S3GlueParquetTarget added in v1.25.0

type S3GlueParquetTarget struct {

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// A single Amazon S3 path to write to.
	//
	// This member is required.
	Path *string

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	Compression ParquetCompressionType

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *DirectSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a data target that writes to Amazon S3 in Apache Parquet columnar storage.

type S3HudiCatalogTarget added in v1.40.0

type S3HudiCatalogTarget struct {

	// Specifies additional connection options for the connector.
	//
	// This member is required.
	AdditionalOptions map[string]string

	// The name of the database to write to.
	//
	// This member is required.
	Database *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The name of the table in the database to write to.
	//
	// This member is required.
	Table *string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *CatalogSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a target that writes to a Hudi data source in the Glue Data Catalog.

type S3HudiDirectTarget added in v1.40.0

type S3HudiDirectTarget struct {

	// Specifies additional connection options for the connector.
	//
	// This member is required.
	AdditionalOptions map[string]string

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	//
	// This member is required.
	Compression HudiTargetCompressionType

	// Specifies the data output format for the target.
	//
	// This member is required.
	Format TargetFormat

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// The Amazon S3 path of your Hudi data source to write to.
	//
	// This member is required.
	Path *string

	// Specifies native partitioning using a sequence of keys.
	PartitionKeys [][]string

	// A policy that specifies update behavior for the crawler.
	SchemaChangePolicy *DirectSchemaChangePolicy
	// contains filtered or unexported fields
}

Specifies a target that writes to a Hudi data source in Amazon S3.

type S3HudiSource added in v1.40.0

type S3HudiSource struct {

	// The name of the Hudi source.
	//
	// This member is required.
	Name *string

	// A list of the Amazon S3 paths to read from.
	//
	// This member is required.
	Paths []string

	// Specifies additional connection options.
	AdditionalHudiOptions map[string]string

	// Specifies additional options for the connector.
	AdditionalOptions *S3DirectSourceAdditionalOptions

	// Specifies the data schema for the Hudi source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Hudi data source stored in Amazon S3.

type S3JsonSource added in v1.25.0

type S3JsonSource struct {

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// A list of the Amazon S3 paths to read from.
	//
	// This member is required.
	Paths []string

	// Specifies additional connection options.
	AdditionalOptions *S3DirectSourceAdditionalOptions

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	CompressionType CompressionType

	// A string containing a JSON list of Unix-style glob patterns to exclude. For
	// example, "[\"**.pdf\"]" excludes all PDF files.
	Exclusions []string

	// Grouping files is turned on by default when the input contains more than 50,000
	// files. To turn on grouping with fewer than 50,000 files, set this parameter to
	// "inPartition". To disable grouping when there are more than 50,000 files, set
	// this parameter to "none" .
	GroupFiles *string

	// The target group size in bytes. The default is computed based on the input data
	// size and the size of your cluster. When there are fewer than 50,000 input files,
	// "groupFiles" must be set to "inPartition" for this to take effect.
	GroupSize *string

	// A JsonPath string defining the JSON data.
	JsonPath *string

	// This option controls the duration in milliseconds after which the Amazon S3
	// listing is likely to be consistent. Files with modification timestamps falling
	// within the last maxBand milliseconds are tracked specially when using
	// JobBookmarks to account for Amazon S3 eventual consistency. Most users don't
	// need to set this option. The default is 900000 milliseconds, or 15 minutes.
	MaxBand *int32

	// This option specifies the maximum number of files to save from the last maxBand
	// seconds. If this number is exceeded, extra files are skipped and only processed
	// in the next job run.
	MaxFilesInBand *int32

	// A Boolean value that specifies whether a single record can span multiple lines.
	// This can occur when a field contains a quoted new-line character. You must set
	// this option to True if any record spans multiple lines. The default value is
	// False, which allows for more aggressive file-splitting during parsing.
	Multiline *bool

	// Specifies the data schema for the S3 JSON source.
	OutputSchemas []GlueSchema

	// If set to true, recursively reads files in all subdirectories under the
	// specified paths.
	Recurse *bool
	// contains filtered or unexported fields
}

Specifies a JSON data store stored in Amazon S3.
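
A hedged sketch of this source, assuming the same types and aws imports as above; the JsonPath, prefix, and exclusion pattern are illustrative:

// Read JSON recursively from one prefix, keep multi-line records
// together, and exclude PDFs (each Exclusions entry is a JSON-encoded
// list of glob patterns, per the field doc).
source := types.S3JsonSource{
	Name:       aws.String("JsonSource"),
	Paths:      []string{"s3://example-bucket/input/"},
	JsonPath:   aws.String("$.records[*]"),
	Multiline:  aws.Bool(true),
	Recurse:    aws.Bool(true),
	Exclusions: []string{`["**.pdf"]`},
}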

type S3ParquetSource added in v1.25.0

type S3ParquetSource struct {

	// The name of the data store.
	//
	// This member is required.
	Name *string

	// A list of the Amazon S3 paths to read from.
	//
	// This member is required.
	Paths []string

	// Specifies additional connection options.
	AdditionalOptions *S3DirectSourceAdditionalOptions

	// Specifies how the data is compressed. This is generally not necessary if the
	// data has a standard file extension. Possible values are "gzip" and "bzip".
	CompressionType ParquetCompressionType

	// A string containing a JSON list of Unix-style glob patterns to exclude. For
	// example, "[\"**.pdf\"]" excludes all PDF files.
	Exclusions []string

	// Grouping files is turned on by default when the input contains more than 50,000
	// files. To turn on grouping with fewer than 50,000 files, set this parameter to
	// "inPartition". To disable grouping when there are more than 50,000 files, set
	// this parameter to "none" .
	GroupFiles *string

	// The target group size in bytes. The default is computed based on the input data
	// size and the size of your cluster. When there are fewer than 50,000 input files,
	// "groupFiles" must be set to "inPartition" for this to take effect.
	GroupSize *string

	// This option controls the duration in milliseconds after which the Amazon S3
	// listing is likely to be consistent. Files with modification timestamps falling
	// within the last maxBand milliseconds are tracked specially when using
	// JobBookmarks to account for Amazon S3 eventual consistency. Most users don't
	// need to set this option. The default is 900000 milliseconds, or 15 minutes.
	MaxBand *int32

	// This option specifies the maximum number of files to save from the last maxBand
	// seconds. If this number is exceeded, extra files are skipped and only processed
	// in the next job run.
	MaxFilesInBand *int32

	// Specifies the data schema for the S3 Parquet source.
	OutputSchemas []GlueSchema

	// If set to true, recursively reads files in all subdirectories under the
	// specified paths.
	Recurse *bool
	// contains filtered or unexported fields
}

Specifies an Apache Parquet data store stored in Amazon S3.

type S3SourceAdditionalOptions added in v1.25.0

type S3SourceAdditionalOptions struct {

	// Sets the upper limit for the target number of files that will be processed.
	BoundedFiles *int64

	// Sets the upper limit for the target size of the dataset in bytes that will be
	// processed.
	BoundedSize *int64
	// contains filtered or unexported fields
}

Specifies additional connection options for the Amazon S3 data store.
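
For example (a non-normative sketch), bounded execution can cap a run at a file count or a byte size:

// Process at most 1,000 files or 1 GiB per run, whichever limit is set.
opts := types.S3SourceAdditionalOptions{
	BoundedFiles: aws.Int64(1000),
	BoundedSize:  aws.Int64(1 << 30), // bytes
}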

type S3Target

type S3Target struct {

	// The name of a connection which allows a job or crawler to access data in Amazon
	// S3 within an Amazon Virtual Private Cloud environment (Amazon VPC).
	ConnectionName *string

	// A valid Amazon SQS dead-letter queue ARN. For example,
	// arn:aws:sqs:region:account:deadLetterQueue.
	DlqEventQueueArn *string

	// A valid Amazon SQS ARN. For example, arn:aws:sqs:region:account:sqs.
	EventQueueArn *string

	// A list of glob patterns used to exclude certain files from the crawl. For more
	// information, see Catalog Tables with a Crawler
	// (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html).
	Exclusions []string

	// The path to the Amazon S3 target.
	Path *string

	// Sets the number of files in each leaf folder to be crawled when crawling sample
	// files in a dataset. If not set, all the files are crawled. A valid value is an
	// integer between 1 and 249.
	SampleSize *int32
	// contains filtered or unexported fields
}

Specifies a data store in Amazon Simple Storage Service (Amazon S3).
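
A minimal sketch of a crawler target over one prefix, using placeholder ARNs in the same form as the field docs above:

// Crawl s3://example-bucket/data/, skip temporary folders, and sample
// 10 files per leaf folder; the event queues enable event-driven crawls.
target := types.S3Target{
	Path:             aws.String("s3://example-bucket/data/"),
	Exclusions:       []string{"**/_temporary/**"},
	EventQueueArn:    aws.String("arn:aws:sqs:region:account:sqs"),
	DlqEventQueueArn: aws.String("arn:aws:sqs:region:account:deadLetterQueue"),
	SampleSize:       aws.Int32(10),
}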

type Schedule

type Schedule struct {

	// A cron expression used to specify the schedule (see Time-Based Schedules for
	// Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)).
	// For example, to run something every day at 12:15 UTC, you would specify:
	// cron(15 12 * * ? *).
	ScheduleExpression *string

	// The state of the schedule.
	State ScheduleState
	// contains filtered or unexported fields
}

A scheduling object using a cron statement to schedule an event.
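
As a sketch of the field shapes (Schedule values are normally returned by the service rather than constructed by callers):

// A schedule that fires daily at 12:15 UTC, per the cron example above.
schedule := types.Schedule{
	ScheduleExpression: aws.String("cron(15 12 * * ? *)"),
	State:              types.ScheduleStateScheduled,
}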

type ScheduleState

type ScheduleState string
const (
	ScheduleStateScheduled     ScheduleState = "SCHEDULED"
	ScheduleStateNotScheduled  ScheduleState = "NOT_SCHEDULED"
	ScheduleStateTransitioning ScheduleState = "TRANSITIONING"
)

Enum values for ScheduleState

func (ScheduleState) Values added in v0.29.0

func (ScheduleState) Values() []ScheduleState

Values returns all known values for ScheduleState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SchedulerNotRunningException

type SchedulerNotRunningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The specified scheduler is not running.

func (*SchedulerNotRunningException) Error

func (e *SchedulerNotRunningException) Error() string

func (*SchedulerNotRunningException) ErrorCode

func (e *SchedulerNotRunningException) ErrorCode() string

func (*SchedulerNotRunningException) ErrorFault

func (e *SchedulerNotRunningException) ErrorFault() smithy.ErrorFault

func (*SchedulerNotRunningException) ErrorMessage

func (e *SchedulerNotRunningException) ErrorMessage() string

type SchedulerRunningException

type SchedulerRunningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The specified scheduler is already running.

func (*SchedulerRunningException) Error

func (e *SchedulerRunningException) Error() string

func (*SchedulerRunningException) ErrorCode

func (e *SchedulerRunningException) ErrorCode() string

func (*SchedulerRunningException) ErrorFault

func (e *SchedulerRunningException) ErrorFault() smithy.ErrorFault

func (*SchedulerRunningException) ErrorMessage

func (e *SchedulerRunningException) ErrorMessage() string

type SchedulerTransitioningException

type SchedulerTransitioningException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

The specified scheduler is transitioning.

func (*SchedulerTransitioningException) Error

func (e *SchedulerTransitioningException) Error() string

func (*SchedulerTransitioningException) ErrorCode

func (e *SchedulerTransitioningException) ErrorCode() string

func (*SchedulerTransitioningException) ErrorFault

func (e *SchedulerTransitioningException) ErrorFault() smithy.ErrorFault

func (*SchedulerTransitioningException) ErrorMessage

func (e *SchedulerTransitioningException) ErrorMessage() string

type SchemaChangePolicy

type SchemaChangePolicy struct {

	// The deletion behavior when the crawler finds a deleted object.
	DeleteBehavior DeleteBehavior

	// The update behavior when the crawler finds a changed schema.
	UpdateBehavior UpdateBehavior
	// contains filtered or unexported fields
}

A policy that specifies update and deletion behaviors for the crawler.

type SchemaColumn

type SchemaColumn struct {

	// The type of data in the column.
	DataType *string

	// The name of the column.
	Name *string
	// contains filtered or unexported fields
}

A key-value pair representing a column and data type that this transform can run against. The Schema parameter of the MLTransform may contain up to 100 of these structures.

type SchemaDiffType added in v0.30.0

type SchemaDiffType string
const (
	SchemaDiffTypeSyntaxDiff SchemaDiffType = "SYNTAX_DIFF"
)

Enum values for SchemaDiffType

func (SchemaDiffType) Values added in v0.30.0

func (SchemaDiffType) Values() []SchemaDiffType

Values returns all known values for SchemaDiffType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SchemaId added in v0.30.0

type SchemaId struct {

	// The name of the schema registry that contains the schema.
	RegistryName *string

	// The Amazon Resource Name (ARN) of the schema. One of SchemaArn or SchemaName
	// has to be provided.
	SchemaArn *string

	// The name of the schema. One of SchemaArn or SchemaName has to be provided.
	SchemaName *string
	// contains filtered or unexported fields
}

The unique ID of the schema in the Glue schema registry.

type SchemaListItem added in v0.30.0

type SchemaListItem struct {

	// The date and time that a schema was created.
	CreatedTime *string

	// A description for the schema.
	Description *string

	// The name of the registry where the schema resides.
	RegistryName *string

	// The Amazon Resource Name (ARN) for the schema.
	SchemaArn *string

	// The name of the schema.
	SchemaName *string

	// The status of the schema.
	SchemaStatus SchemaStatus

	// The date and time that a schema was updated.
	UpdatedTime *string
	// contains filtered or unexported fields
}

An object that contains minimal details for a schema.

type SchemaReference added in v0.30.0

type SchemaReference struct {

	// A structure that contains schema identity fields. Either this or the
	// SchemaVersionId has to be provided.
	SchemaId *SchemaId

	// The unique ID assigned to a version of the schema. Either this or the SchemaId
	// has to be provided.
	SchemaVersionId *string

	// The version number of the schema.
	SchemaVersionNumber *int64
	// contains filtered or unexported fields
}

An object that references a schema stored in the Glue Schema Registry.
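
A hedged sketch combining SchemaId and SchemaReference; the registry and schema names are placeholders, and only one of SchemaArn or SchemaName is set, as the field docs require:

// Reference version 3 of a schema identified by registry and name.
ref := types.SchemaReference{
	SchemaId: &types.SchemaId{
		RegistryName: aws.String("example-registry"),
		SchemaName:   aws.String("example-schema"),
	},
	SchemaVersionNumber: aws.Int64(3),
}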

type SchemaStatus added in v0.30.0

type SchemaStatus string
const (
	SchemaStatusAvailable SchemaStatus = "AVAILABLE"
	SchemaStatusPending   SchemaStatus = "PENDING"
	SchemaStatusDeleting  SchemaStatus = "DELETING"
)

Enum values for SchemaStatus

func (SchemaStatus) Values added in v0.30.0

func (SchemaStatus) Values() []SchemaStatus

Values returns all known values for SchemaStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SchemaVersionErrorItem added in v0.30.0

type SchemaVersionErrorItem struct {

	// The details of the error for the schema version.
	ErrorDetails *ErrorDetails

	// The version number of the schema.
	VersionNumber *int64
	// contains filtered or unexported fields
}

An object that contains the error details for an operation on a schema version.

type SchemaVersionListItem added in v0.30.0

type SchemaVersionListItem struct {

	// The date and time the schema version was created.
	CreatedTime *string

	// The Amazon Resource Name (ARN) of the schema.
	SchemaArn *string

	// The unique identifier of the schema version.
	SchemaVersionId *string

	// The status of the schema version.
	Status SchemaVersionStatus

	// The version number of the schema.
	VersionNumber *int64
	// contains filtered or unexported fields
}

An object containing the details about a schema version.

type SchemaVersionNumber added in v0.30.0

type SchemaVersionNumber struct {

	// The latest version available for the schema.
	LatestVersion bool

	// The version number of the schema.
	VersionNumber *int64
	// contains filtered or unexported fields
}

A structure containing the schema version information.

type SchemaVersionStatus added in v0.30.0

type SchemaVersionStatus string
const (
	SchemaVersionStatusAvailable SchemaVersionStatus = "AVAILABLE"
	SchemaVersionStatusPending   SchemaVersionStatus = "PENDING"
	SchemaVersionStatusFailure   SchemaVersionStatus = "FAILURE"
	SchemaVersionStatusDeleting  SchemaVersionStatus = "DELETING"
)

Enum values for SchemaVersionStatus

func (SchemaVersionStatus) Values added in v0.30.0

func (SchemaVersionStatus) Values() []SchemaVersionStatus

Values returns all known values for SchemaVersionStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SecurityConfiguration

type SecurityConfiguration struct {

	// The time at which this security configuration was created.
	CreatedTimeStamp *time.Time

	// The encryption configuration associated with this security configuration.
	EncryptionConfiguration *EncryptionConfiguration

	// The name of the security configuration.
	Name *string
	// contains filtered or unexported fields
}

Specifies a security configuration.

type Segment

type Segment struct {

	// The zero-based index number of the segment. For example, if the total number of
	// segments is 4, SegmentNumber values range from 0 through 3.
	//
	// This member is required.
	SegmentNumber int32

	// The total number of segments.
	//
	// This member is required.
	TotalSegments *int32
	// contains filtered or unexported fields
}

Defines a non-overlapping region of a table's partitions, allowing multiple requests to be run in parallel.
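
For example (an illustrative sketch), a caller might fan a partition listing out across four segments, one request per segment:

// Build the four non-overlapping segments 0..3 of 4; each can back its
// own parallel request (for example, one GetPartitions call apiece).
total := int32(4)
segments := make([]types.Segment, 0, total)
for i := int32(0); i < total; i++ {
	segments = append(segments, types.Segment{
		SegmentNumber: i,
		TotalSegments: aws.Int32(total),
	})
}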

type SelectFields added in v1.25.0

type SelectFields struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A JSON path to a variable in the data structure.
	//
	// This member is required.
	Paths [][]string
	// contains filtered or unexported fields
}

Specifies a transform that chooses the data property keys that you want to keep.

type SelectFromCollection added in v1.25.0

type SelectFromCollection struct {

	// The index for the DynamicFrame to be selected.
	//
	// This member is required.
	Index int32

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string
	// contains filtered or unexported fields
}

Specifies a transform that chooses one DynamicFrame from a collection of DynamicFrames. The output is the selected DynamicFrame.

type Separator added in v1.25.0

type Separator string
const (
	SeparatorComma     Separator = "comma"
	SeparatorCtrla     Separator = "ctrla"
	SeparatorPipe      Separator = "pipe"
	SeparatorSemicolon Separator = "semicolon"
	SeparatorTab       Separator = "tab"
)

Enum values for Separator

func (Separator) Values added in v1.25.0

func (Separator) Values() []Separator

Values returns all known values for Separator. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SerDeInfo

type SerDeInfo struct {

	// Name of the SerDe.
	Name *string

	// These key-value pairs define initialization parameters for the SerDe.
	Parameters map[string]string

	// Usually the class that implements the SerDe. An example is
	// org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.
	SerializationLibrary *string
	// contains filtered or unexported fields
}

Information about a serialization/deserialization program (SerDe) that serves as an extractor and loader.
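
A short, non-normative sketch; the class name follows the example pattern above, and the delimiter parameter is illustrative:

// A SerDe entry for Hive's LazySimpleSerDe with a comma delimiter.
serde := types.SerDeInfo{
	Name:                 aws.String("example-serde"),
	SerializationLibrary: aws.String("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
	Parameters:           map[string]string{"field.delim": ","},
}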

type Session added in v1.22.0

type Session struct {

	// The command object. See SessionCommand.
	Command *SessionCommand

	// The date and time that this session is completed.
	CompletedOn *time.Time

	// The number of connections used for the session.
	Connections *ConnectionsList

	// The time and date when the session was created.
	CreatedOn *time.Time

	// The DPUs consumed by the session (formula: ExecutionTime * MaxCapacity).
	DPUSeconds *float64

	// A map of key-value pairs, with a maximum of 75 pairs.
	DefaultArguments map[string]string

	// The description of the session.
	Description *string

	// The error message displayed during the session.
	ErrorMessage *string

	// The total time the session ran for.
	ExecutionTime *float64

	// The Glue version determines the versions of Apache Spark and Python that Glue
	// supports. The GlueVersion must be greater than 2.0.
	GlueVersion *string

	// The ID of the session.
	Id *string

	// The number of minutes that the session can be idle before it times out.
	IdleTimeout *int32

	// The number of Glue data processing units (DPUs) that can be allocated when the
	// job runs. A DPU is a relative measure of processing power that consists of 4
	// vCPUs of compute capacity and 16 GB memory.
	MaxCapacity *float64

	// The number of workers of a defined WorkerType to use for the session.
	NumberOfWorkers *int32

	// The code execution progress of the session.
	Progress float64

	// The name or Amazon Resource Name (ARN) of the IAM role associated with the
	// Session.
	Role *string

	// The name of the SecurityConfiguration structure to be used with the session.
	SecurityConfiguration *string

	// The session status.
	Status SessionStatus

	// The type of predefined worker that is allocated when a session runs. Accepts a
	// value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value Z.2X
	// for Ray sessions.
	WorkerType WorkerType
	// contains filtered or unexported fields
}

The period in which a remote Spark runtime environment is running.

type SessionCommand added in v1.22.0

type SessionCommand struct {

	// Specifies the name of the SessionCommand. Can be 'glueetl' or 'gluestreaming'.
	Name *string

	// Specifies the Python version. The Python version indicates the version
	// supported for jobs of type Spark.
	PythonVersion *string
	// contains filtered or unexported fields
}

The SessionCommand that runs the job.
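
A minimal sketch using the values named in the field docs:

// An ETL session command on Python 3.
cmd := types.SessionCommand{
	Name:          aws.String("glueetl"),
	PythonVersion: aws.String("3"),
}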

type SessionStatus added in v1.22.0

type SessionStatus string
const (
	SessionStatusProvisioning SessionStatus = "PROVISIONING"
	SessionStatusReady        SessionStatus = "READY"
	SessionStatusFailed       SessionStatus = "FAILED"
	SessionStatusTimeout      SessionStatus = "TIMEOUT"
	SessionStatusStopping     SessionStatus = "STOPPING"
	SessionStatusStopped      SessionStatus = "STOPPED"
)

Enum values for SessionStatus

func (SessionStatus) Values added in v1.22.0

func (SessionStatus) Values() []SessionStatus

Values returns all known values for SessionStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SkewedInfo

type SkewedInfo struct {

	// A list of names of columns that contain skewed values.
	SkewedColumnNames []string

	// A mapping of skewed values to the columns that contain them.
	SkewedColumnValueLocationMaps map[string]string

	// A list of values that appear so frequently as to be considered skewed.
	SkewedColumnValues []string
	// contains filtered or unexported fields
}

Specifies skewed values in a table. Skewed values are those that occur with very high frequency.

type SnowflakeNodeData added in v1.58.0

type SnowflakeNodeData struct {

	// Specifies what action to take when writing to a table with preexisting data.
	// Valid values: append, merge, truncate, drop.
	Action *string

	// Specifies additional options passed to the Snowflake connector. If options are
	// specified elsewhere in this node, this will take precedence.
	AdditionalOptions map[string]string

	// Specifies whether automatic query pushdown is enabled. If pushdown is enabled,
	// then when a query is run on Spark, if part of the query can be "pushed down" to
	// the Snowflake server, it is pushed down. This improves performance of some
	// queries.
	AutoPushdown bool

	// Specifies a Glue Data Catalog Connection to a Snowflake endpoint.
	Connection *Option

	// Specifies a Snowflake database for your node to use.
	Database *string

	// Not currently used.
	IamRole *Option

	// Specifies a merge action. Valid values: simple, custom. If simple, merge
	// behavior is defined by MergeWhenMatched and MergeWhenNotMatched. If custom, it
	// is defined by MergeClause.
	MergeAction *string

	// A SQL statement that specifies a custom merge behavior.
	MergeClause *string

	// Specifies how to resolve records that match preexisting data when merging.
	// Valid values: update, delete.
	MergeWhenMatched *string

	// Specifies how to process records that do not match preexisting data when
	// merging. Valid values: insert, none.
	MergeWhenNotMatched *string

	// A SQL string run after the Snowflake connector performs its standard actions.
	PostAction *string

	// A SQL string run before the Snowflake connector performs its standard actions.
	PreAction *string

	// A SQL string used to retrieve data when the source type is "query".
	SampleQuery *string

	// Specifies a Snowflake database schema for your node to use.
	Schema *string

	// Specifies the columns combined to identify a record when detecting matches for
	// merges and upserts. A list of structures with value, label, and description
	// keys. Each structure describes a column.
	SelectedColumns []Option

	// Specifies how the data to be retrieved is specified. Valid values: "table", "query".
	SourceType *string

	// The name of a staging table used when performing merge or upsert append
	// actions. Data is written to this table, then moved to the target table by a
	// generated postaction.
	StagingTable *string

	// Specifies a Snowflake table for your node to use.
	Table *string

	// Manually defines the target schema for the node. A list of structures with
	// value, label, and description keys. Each structure defines a column.
	TableSchema []Option

	// Not currently used.
	TempDir *string

	// Used when Action is append. Specifies the resolution behavior when a row
	// already exists. If true, preexisting rows will be updated. If false, those rows
	// will be inserted.
	Upsert bool
	// contains filtered or unexported fields
}

Specifies configuration for Snowflake nodes in Glue Studio.
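
A hedged sketch of a merge-style target node, assuming the Option struct in this package carries its value in the Value field; the connection, database, schema, table, and column names are placeholders:

// Merge into MY_TABLE on the id column: update matches, insert the rest.
data := types.SnowflakeNodeData{
	Connection:          &types.Option{Value: aws.String("example-snowflake-connection")},
	Database:            aws.String("MY_DB"),
	Schema:              aws.String("PUBLIC"),
	Table:               aws.String("MY_TABLE"),
	Action:              aws.String("merge"),
	MergeAction:         aws.String("simple"),
	MergeWhenMatched:    aws.String("update"),
	MergeWhenNotMatched: aws.String("insert"),
	SelectedColumns:     []types.Option{{Value: aws.String("id")}},
}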

type SnowflakeSource added in v1.58.0

type SnowflakeSource struct {

	// Configuration for the Snowflake data source.
	//
	// This member is required.
	Data *SnowflakeNodeData

	// The name of the Snowflake data source.
	//
	// This member is required.
	Name *string

	// Specifies user-defined schemas for your output data.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a Snowflake data source.

type SnowflakeTarget added in v1.58.0

type SnowflakeTarget struct {

	// Specifies the data of the Snowflake target node.
	//
	// This member is required.
	Data *SnowflakeNodeData

	// The name of the Snowflake target.
	//
	// This member is required.
	Name *string

	// The nodes that are inputs to the data target.
	Inputs []string
	// contains filtered or unexported fields
}

Specifies a Snowflake target.

type Sort

type Sort string
const (
	SortAscending  Sort = "ASC"
	SortDescending Sort = "DESC"
)

Enum values for Sort

func (Sort) Values added in v0.29.0

func (Sort) Values() []Sort

Values returns all known values for Sort. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SortCriterion

type SortCriterion struct {

	// The name of the field on which to sort.
	FieldName *string

	// An ascending or descending sort.
	Sort Sort
	// contains filtered or unexported fields
}

Specifies a field to sort by and a sort order.

type SortDirectionType

type SortDirectionType string
const (
	SortDirectionTypeDescending SortDirectionType = "DESCENDING"
	SortDirectionTypeAscending  SortDirectionType = "ASCENDING"
)

Enum values for SortDirectionType

func (SortDirectionType) Values added in v0.29.0

func (SortDirectionType) Values() []SortDirectionType

Values returns all known values for SortDirectionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SourceControlAuthStrategy added in v1.33.0

type SourceControlAuthStrategy string
const (
	SourceControlAuthStrategyPersonalAccessToken SourceControlAuthStrategy = "PERSONAL_ACCESS_TOKEN"
	SourceControlAuthStrategyAwsSecretsManager   SourceControlAuthStrategy = "AWS_SECRETS_MANAGER"
)

Enum values for SourceControlAuthStrategy

func (SourceControlAuthStrategy) Values added in v1.33.0

func (SourceControlAuthStrategy) Values() []SourceControlAuthStrategy

Values returns all known values for SourceControlAuthStrategy. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SourceControlDetails added in v1.33.0

type SourceControlDetails struct {

	// The type of authentication, which can be an authentication token stored in
	// Amazon Web Services Secrets Manager, or a personal access token.
	AuthStrategy SourceControlAuthStrategy

	// The value of an authorization token.
	AuthToken *string

	// An optional branch in the remote repository.
	Branch *string

	// An optional folder in the remote repository.
	Folder *string

	// The last commit ID for a commit in the remote repository.
	LastCommitId *string

	// The owner of the remote repository that contains the job artifacts.
	Owner *string

	// The provider for the remote repository.
	Provider SourceControlProvider

	// The name of the remote repository that contains the job artifacts.
	Repository *string
	// contains filtered or unexported fields
}

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

type SourceControlProvider added in v1.33.0

type SourceControlProvider string
const (
	SourceControlProviderGithub        SourceControlProvider = "GITHUB"
	SourceControlProviderGitlab        SourceControlProvider = "GITLAB"
	SourceControlProviderBitbucket     SourceControlProvider = "BITBUCKET"
	SourceControlProviderAwsCodeCommit SourceControlProvider = "AWS_CODE_COMMIT"
)

Enum values for SourceControlProvider

func (SourceControlProvider) Values added in v1.33.0

func (SourceControlProvider) Values() []SourceControlProvider

Values returns all known values for SourceControlProvider. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type SparkConnectorSource added in v1.25.0

type SparkConnectorSource struct {

	// The name of the connection that is associated with the connector.
	//
	// This member is required.
	ConnectionName *string

	// The type of connection, such as marketplace.spark or custom.spark, designating
	// a connection to an Apache Spark data store.
	//
	// This member is required.
	ConnectionType *string

	// The name of a connector that assists with accessing the data store in Glue
	// Studio.
	//
	// This member is required.
	ConnectorName *string

	// The name of the data source.
	//
	// This member is required.
	Name *string

	// Additional connection options for the connector.
	AdditionalOptions map[string]string

	// Specifies data schema for the custom spark source.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a connector to an Apache Spark data source.

type SparkConnectorTarget added in v1.25.0

type SparkConnectorTarget struct {

	// The name of a connection for an Apache Spark connector.
	//
	// This member is required.
	ConnectionName *string

	// The type of connection, such as marketplace.spark or custom.spark, designating
	// a connection to an Apache Spark data store.
	//
	// This member is required.
	ConnectionType *string

	// The name of an Apache Spark connector.
	//
	// This member is required.
	ConnectorName *string

	// The nodes that are inputs to the data target.
	//
	// This member is required.
	Inputs []string

	// The name of the data target.
	//
	// This member is required.
	Name *string

	// Additional connection options for the connector.
	AdditionalOptions map[string]string

	// Specifies the data schema for the custom spark target.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a target that uses an Apache Spark connector.

type SparkSQL added in v1.25.0

type SparkSQL struct {

	// The data inputs identified by their node names. You can associate a table name
	// with each input node to use in the SQL query. The name you choose must meet the
	// Spark SQL naming restrictions.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A list of aliases. An alias allows you to specify what name to use in the SQL
	// for a given input. For example, suppose you have a data source named
	// "MyDataSource". If you specify From as MyDataSource and Alias as SqlName, then
	// in your SQL you can run select * from SqlName, which gets its data from
	// MyDataSource.
	//
	// This member is required.
	SqlAliases []SqlAlias

	// A SQL query that must use Spark SQL syntax and return a single data set.
	//
	// This member is required.
	SqlQuery *string

	// Specifies the data schema for the SparkSQL transform.
	OutputSchemas []GlueSchema
	// contains filtered or unexported fields
}

Specifies a transform where you enter a SQL query using Spark SQL syntax to transform the data. The output is a single DynamicFrame.
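
A sketch of the alias mechanics described above (all names are illustrative):

// Alias the input node MyDataSource as myDataSource for use in SQL.
sql := types.SparkSQL{
	Name:   aws.String("SQLTransform"),
	Inputs: []string{"MyDataSource"},
	SqlAliases: []types.SqlAlias{
		{From: aws.String("MyDataSource"), Alias: aws.String("myDataSource")},
	},
	SqlQuery: aws.String("select * from myDataSource"),
}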

type Spigot added in v1.25.0

type Spigot struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A path in Amazon S3 where the transform will write a subset of records from the
	// dataset to a JSON file in an Amazon S3 bucket.
	//
	// This member is required.
	Path *string

	// The probability (a decimal value with a maximum value of 1) of picking any
	// given record. A value of 1 indicates that each row read from the dataset should
	// be included in the sample output.
	Prob *float64

	// Specifies a number of records to write starting from the beginning of the
	// dataset.
	Topk *int32
	// contains filtered or unexported fields
}

Specifies a transform that writes samples of the data to an Amazon S3 bucket.
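
A minimal sketch; note that Prob samples records probabilistically, while Topk instead writes records from the start of the dataset:

// Sample roughly 1% of records into s3://example-bucket/samples/.
spigot := types.Spigot{
	Name:   aws.String("Sample"),
	Inputs: []string{"UpstreamNode"},
	Path:   aws.String("s3://example-bucket/samples/"),
	Prob:   aws.Float64(0.01),
}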

type SplitFields added in v1.25.0

type SplitFields struct {

	// The data inputs identified by their node names.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// A JSON path to a variable in the data structure.
	//
	// This member is required.
	Paths [][]string
	// contains filtered or unexported fields
}

Specifies a transform that splits data property keys into two DynamicFrames. The output is a collection of DynamicFrames: one with selected data property keys, and one with the remaining data property keys.

type SqlAlias added in v1.25.0

type SqlAlias struct {

	// A temporary name given to a table, or a column in a table.
	//
	// This member is required.
	Alias *string

	// A table, or a column in a table.
	//
	// This member is required.
	From *string
	// contains filtered or unexported fields
}

Represents a single entry in the list of values for SqlAliases.

type StartingEventBatchCondition added in v1.9.0

type StartingEventBatchCondition struct {

	// Number of events in the batch.
	BatchSize *int32

	// Duration of the batch window in seconds.
	BatchWindow *int32
	// contains filtered or unexported fields
}

The batch condition that started the workflow run. Either the specified number of events arrived, in which case the BatchSize member is non-zero, or the batch window expired, in which case the BatchWindow member is non-zero.

type StartingPosition added in v1.25.0

type StartingPosition string
const (
	StartingPositionLatest      StartingPosition = "latest"
	StartingPositionTrimHorizon StartingPosition = "trim_horizon"
	StartingPositionEarliest    StartingPosition = "earliest"
	StartingPositionTimestamp   StartingPosition = "timestamp"
)

Enum values for StartingPosition

func (StartingPosition) Values added in v1.25.0

func (StartingPosition) Values() []StartingPosition

Values returns all known values for StartingPosition. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Statement added in v1.22.0

type Statement struct {

	// The execution code of the statement.
	Code *string

	// The Unix time and date that the statement was completed.
	CompletedOn int64

	// The ID of the statement.
	Id int32

	// The output in JSON.
	Output *StatementOutput

	// The code execution progress.
	Progress float64

	// The Unix time and date that the statement was started.
	StartedOn int64

	// The state while the request is actioned.
	State StatementState
	// contains filtered or unexported fields
}

The statement or request for a particular action to occur in a session.

type StatementOutput added in v1.22.0

type StatementOutput struct {

	// The code execution output.
	Data *StatementOutputData

	// The name of the error in the output.
	ErrorName *string

	// The error value of the output.
	ErrorValue *string

	// The execution count of the output.
	ExecutionCount int32

	// The status of the code execution output.
	Status StatementState

	// The traceback of the output.
	Traceback []string
	// contains filtered or unexported fields
}

The code execution output in JSON format.

type StatementOutputData added in v1.22.0

type StatementOutputData struct {

	// The code execution output in text format.
	TextPlain *string
	// contains filtered or unexported fields
}

The code execution output in JSON format.

type StatementState added in v1.22.0

type StatementState string
const (
	StatementStateWaiting    StatementState = "WAITING"
	StatementStateRunning    StatementState = "RUNNING"
	StatementStateAvailable  StatementState = "AVAILABLE"
	StatementStateCancelling StatementState = "CANCELLING"
	StatementStateCancelled  StatementState = "CANCELLED"
	StatementStateError      StatementState = "ERROR"
)

Enum values for StatementState

func (StatementState) Values added in v1.22.0

func (StatementState) Values() []StatementState

Values returns all known values for StatementState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type StorageDescriptor

type StorageDescriptor struct {

	// A list of locations that point to the path where a Delta table is located.
	AdditionalLocations []string

	// A list of reducer grouping columns, clustering columns, and bucketing columns
	// in the table.
	BucketColumns []string

	// A list of the Columns in the table.
	Columns []Column

	// True if the data in the table is compressed, or False if not.
	Compressed bool

	// The input format: SequenceFileInputFormat (binary), TextInputFormat, or a
	// custom format.
	InputFormat *string

	// The physical location of the table. By default, this takes the form of the
	// warehouse location, followed by the database location in the warehouse, followed
	// by the table name.
	Location *string

	// Must be specified if the table contains any dimension columns.
	NumberOfBuckets int32

	// The output format: SequenceFileOutputFormat (binary),
	// IgnoreKeyTextOutputFormat, or a custom format.
	OutputFormat *string

	// The user-supplied properties in key-value form.
	Parameters map[string]string

	// An object that references a schema stored in the Glue Schema Registry. When
	// creating a table, you can pass an empty list of columns for the schema, and
	// instead use a schema reference.
	SchemaReference *SchemaReference

	// The serialization/deserialization (SerDe) information.
	SerdeInfo *SerDeInfo

	// The information about values that appear frequently in a column (skewed values).
	SkewedInfo *SkewedInfo

	// A list specifying the sort order of each bucket in the table.
	SortColumns []Order

	// True if the table data is stored in subdirectories, or False if not.
	StoredAsSubDirectories bool
	// contains filtered or unexported fields
}

Describes the physical storage of table data.
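
A non-normative sketch for CSV data; the Hive class names are standard, but the location and columns are placeholders:

// Physical storage for a two-column CSV table in S3.
sd := types.StorageDescriptor{
	Columns: []types.Column{
		{Name: aws.String("id"), Type: aws.String("int")},
		{Name: aws.String("name"), Type: aws.String("string")},
	},
	Location:     aws.String("s3://example-bucket/tables/example_table/"),
	InputFormat:  aws.String("org.apache.hadoop.mapred.TextInputFormat"),
	OutputFormat: aws.String("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"),
	SerdeInfo: &types.SerDeInfo{
		SerializationLibrary: aws.String("org.apache.hadoop.hive.serde2.OpenCSVSerde"),
	},
}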

type StreamingDataPreviewOptions added in v1.25.0

type StreamingDataPreviewOptions struct {

	// The polling time in milliseconds.
	PollingTime *int64

	// The limit to the number of records polled.
	RecordPollingLimit *int64
	// contains filtered or unexported fields
}

Specifies options related to data preview for viewing a sample of your data.

type StringColumnStatisticsData

type StringColumnStatisticsData struct {

	// The average string length in the column.
	//
	// This member is required.
	AverageLength float64

	// The size of the longest string in the column.
	//
	// This member is required.
	MaximumLength int64

	// The number of distinct values in a column.
	//
	// This member is required.
	NumberOfDistinctValues int64

	// The number of null values in the column.
	//
	// This member is required.
	NumberOfNulls int64
	// contains filtered or unexported fields
}

Defines column statistics supported for character sequence data values.

type SupportedDialect added in v1.73.0

type SupportedDialect struct {

	// The dialect of the query engine.
	Dialect ViewDialect

	// The version of the dialect of the query engine. For example, 3.0.0.
	DialectVersion *string
	// contains filtered or unexported fields
}

A structure specifying the dialect and dialect version used by the query engine.

type Table

type Table struct {

	// The table name. For Hive compatibility, this must be entirely lowercase.
	//
	// This member is required.
	Name *string

	// The ID of the Data Catalog in which the table resides.
	CatalogId *string

	// The time when the table definition was created in the Data Catalog.
	CreateTime *time.Time

	// The person or entity who created the table.
	CreatedBy *string

	// The name of the database where the table metadata resides. For Hive
	// compatibility, this must be all lowercase.
	DatabaseName *string

	// A description of the table.
	Description *string

	// A FederatedTable structure that references an entity outside the Glue Data
	// Catalog.
	FederatedTable *FederatedTable

	// Specifies whether the view supports the SQL dialects of one or more different
	// query engines and can therefore be read by those engines.
	IsMultiDialectView *bool

	// Indicates whether the table has been registered with Lake Formation.
	IsRegisteredWithLakeFormation bool

	// The last time that the table was accessed. This is usually taken from HDFS, and
	// might not be reliable.
	LastAccessTime *time.Time

	// The last time that column statistics were computed for this table.
	LastAnalyzedTime *time.Time

	// The owner of the table.
	Owner *string

	// These key-value pairs define properties associated with the table.
	Parameters map[string]string

	// A list of columns by which the table is partitioned. Only primitive types are
	// supported as partition keys. When you create a table used by Amazon Athena, and
	// you do not specify any partitionKeys, you must at least set the value of
	// partitionKeys to an empty list. For example: "PartitionKeys": []
	PartitionKeys []Column

	// The retention time for this table.
	Retention int32

	// A storage descriptor containing information about the physical storage of this
	// table.
	StorageDescriptor *StorageDescriptor

	// The type of this table. Glue will create tables with the EXTERNAL_TABLE type.
	// Other services, such as Athena, may create tables with additional table types.
	// Glue-related table types: EXTERNAL_TABLE (a Hive-compatible attribute that
	// indicates a non-Hive managed table) and GOVERNED (used by Lake Formation; the
	// Glue Data Catalog understands GOVERNED).
	TableType *string

	// A TableIdentifier structure that describes a target table for resource linking.
	TargetTable *TableIdentifier

	// The last time that the table was updated.
	UpdateTime *time.Time

	// The ID of the table version.
	VersionId *string

	// A structure that contains all the information that defines the view, including
	// the dialect or dialects for the view, and the query.
	ViewDefinition *ViewDefinition

	// Included for Apache Hive compatibility. Not used in the normal course of Glue
	// operations.
	ViewExpandedText *string

	// Included for Apache Hive compatibility. Not used in the normal course of Glue
	// operations. If the table is a VIRTUAL_VIEW, certain Athena configuration is
	// encoded in base64.
	ViewOriginalText *string
	// contains filtered or unexported fields
}

Represents a collection of related data organized in columns and rows.

type TableError

type TableError struct {

	// The details about the error.
	ErrorDetail *ErrorDetail

	// The name of the table. For Hive compatibility, this must be entirely lowercase.
	TableName *string
	// contains filtered or unexported fields
}

An error record for table operations.

type TableIdentifier

type TableIdentifier struct {

	// The ID of the Data Catalog in which the table resides.
	CatalogId *string

	// The name of the catalog database that contains the target table.
	DatabaseName *string

	// The name of the target table.
	Name *string

	// Region of the target table.
	Region *string
	// contains filtered or unexported fields
}

A structure that describes a target table for resource linking.

type TableInput

type TableInput struct {

	// The table name. For Hive compatibility, this is folded to lowercase when it is
	// stored.
	//
	// This member is required.
	Name *string

	// A description of the table.
	Description *string

	// The last time that the table was accessed.
	LastAccessTime *time.Time

	// The last time that column statistics were computed for this table.
	LastAnalyzedTime *time.Time

	// The table owner. Included for Apache Hive compatibility. Not used in the normal
	// course of Glue operations.
	Owner *string

	// These key-value pairs define properties associated with the table.
	Parameters map[string]string

	// A list of columns by which the table is partitioned. Only primitive types are
	// supported as partition keys. When you create a table used by Amazon Athena, and
	// you do not specify any partitionKeys, you must at least set the value of
	// partitionKeys to an empty list. For example: "PartitionKeys": []
	PartitionKeys []Column

	// The retention time for this table.
	Retention int32

	// A storage descriptor containing information about the physical storage of this
	// table.
	StorageDescriptor *StorageDescriptor

	// The type of this table. Glue will create tables with the EXTERNAL_TABLE type.
	// Other services, such as Athena, may create tables with additional table types.
	// Glue-related table types: EXTERNAL_TABLE (a Hive-compatible attribute that
	// indicates a non-Hive managed table) and GOVERNED (used by Lake Formation; the
	// Glue Data Catalog understands GOVERNED).
	TableType *string

	// A TableIdentifier structure that describes a target table for resource linking.
	TargetTable *TableIdentifier

	// Included for Apache Hive compatibility. Not used in the normal course of Glue
	// operations.
	ViewExpandedText *string

	// Included for Apache Hive compatibility. Not used in the normal course of Glue
	// operations. If the table is a VIRTUAL_VIEW, certain Athena configuration is
	// encoded in base64.
	ViewOriginalText *string
	// contains filtered or unexported fields
}

A structure used to define a table.
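
A sketch tying together the notes above: an Athena-compatible table with an explicitly empty partition-key list (names and the location are placeholders):

// Define an external table with no partition keys.
input := types.TableInput{
	Name:          aws.String("example_table"),
	TableType:     aws.String("EXTERNAL_TABLE"),
	PartitionKeys: []types.Column{},
	StorageDescriptor: &types.StorageDescriptor{
		Location: aws.String("s3://example-bucket/tables/example_table/"),
	},
}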

type TableOptimizer added in v1.68.0

type TableOptimizer struct {

	// A TableOptimizerConfiguration object that was specified when creating or
	// updating a table optimizer.
	Configuration *TableOptimizerConfiguration

	// A TableOptimizerRun object representing the last run of the table optimizer.
	LastRun *TableOptimizerRun

	// The type of table optimizer. Currently, the only valid value is compaction.
	Type TableOptimizerType
	// contains filtered or unexported fields
}

Contains details about an optimizer associated with a table.

type TableOptimizerConfiguration added in v1.68.0

type TableOptimizerConfiguration struct {

	// Whether table optimization is enabled.
	Enabled *bool

	// A role passed by the caller which gives the service permission to update the
	// resources associated with the optimizer on the caller's behalf.
	RoleArn *string
	// contains filtered or unexported fields
}

Contains details on the configuration of a table optimizer. You pass this configuration when creating or updating a table optimizer.
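
A minimal sketch (the role ARN is a placeholder):

// Enable the optimizer under a caller-supplied service role.
cfg := types.TableOptimizerConfiguration{
	Enabled: aws.Bool(true),
	RoleArn: aws.String("arn:aws:iam::123456789012:role/ExampleGlueOptimizerRole"),
}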

type TableOptimizerEventType added in v1.68.0

type TableOptimizerEventType string
const (
	TableOptimizerEventTypeStarting   TableOptimizerEventType = "starting"
	TableOptimizerEventTypeCompleted  TableOptimizerEventType = "completed"
	TableOptimizerEventTypeFailed     TableOptimizerEventType = "failed"
	TableOptimizerEventTypeInProgress TableOptimizerEventType = "in_progress"
)

Enum values for TableOptimizerEventType

func (TableOptimizerEventType) Values added in v1.68.0

func (TableOptimizerEventType) Values() []TableOptimizerEventType

Values returns all known values for TableOptimizerEventType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TableOptimizerRun added in v1.68.0

type TableOptimizerRun struct {

	// Represents the epoch timestamp at which the compaction job ended.
	EndTimestamp *time.Time

	// An error that occurred during the optimizer run.
	Error *string

	// An event type representing the status of the table optimizer run.
	EventType TableOptimizerEventType

	// A RunMetrics object containing metrics for the optimizer run.
	Metrics *RunMetrics

	// Represents the epoch timestamp at which the compaction job was started within
	// Lake Formation.
	StartTimestamp *time.Time
	// contains filtered or unexported fields
}

Contains details for a table optimizer run.

type TableOptimizerType added in v1.68.0

type TableOptimizerType string
const (
	TableOptimizerTypeCompaction TableOptimizerType = "compaction"
)

Enum values for TableOptimizerType

func (TableOptimizerType) Values added in v1.68.0

func (TableOptimizerType) Values() []TableOptimizerType

Values returns all known values for TableOptimizerType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TableVersion

type TableVersion struct {

	// The table in question.
	Table *Table

	// The ID value that identifies this table version. A VersionId is a string
	// representation of an integer. Each version is incremented by 1.
	VersionId *string
	// contains filtered or unexported fields
}

Specifies a version of a table.

type TableVersionError

type TableVersionError struct {

	// The details about the error.
	ErrorDetail *ErrorDetail

	// The name of the table in question.
	TableName *string

	// The ID value of the version in question. A VersionId is a string
	// representation of an integer. Each version is incremented by 1.
	VersionId *string
	// contains filtered or unexported fields
}

An error record for table-version operations.

type TargetFormat added in v1.25.0

type TargetFormat string
const (
	TargetFormatJson    TargetFormat = "json"
	TargetFormatCsv     TargetFormat = "csv"
	TargetFormatAvro    TargetFormat = "avro"
	TargetFormatOrc     TargetFormat = "orc"
	TargetFormatParquet TargetFormat = "parquet"
	TargetFormatHudi    TargetFormat = "hudi"
	TargetFormatDelta   TargetFormat = "delta"
)

Enum values for TargetFormat

func (TargetFormat) Values added in v1.25.0

func (TargetFormat) Values() []TargetFormat

Values returns all known values for TargetFormat. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TaskRun

type TaskRun struct {

	// The last point in time that the requested task run was completed.
	CompletedOn *time.Time

	// The list of error strings associated with this task run.
	ErrorString *string

	// The amount of time (in seconds) that the task run consumed resources.
	ExecutionTime int32

	// The last point in time that the requested task run was updated.
	LastModifiedOn *time.Time

	// The name of the log group for secure logging, associated with this task run.
	LogGroupName *string

	// Specifies configuration properties associated with this task run.
	Properties *TaskRunProperties

	// The date and time that this task run started.
	StartedOn *time.Time

	// The current status of the requested task run.
	Status TaskStatusType

	// The unique identifier for this task run.
	TaskRunId *string

	// The unique identifier for the transform.
	TransformId *string
	// contains filtered or unexported fields
}

The sampling parameters that are associated with the machine learning transform.

type TaskRunFilterCriteria

type TaskRunFilterCriteria struct {

	// Filter on task runs started after this date.
	StartedAfter *time.Time

	// Filter on task runs started before this date.
	StartedBefore *time.Time

	// The current status of the task run.
	Status TaskStatusType

	// The type of task run.
	TaskRunType TaskType
	// contains filtered or unexported fields
}

The criteria that are used to filter the task runs for the machine learning transform.

type TaskRunProperties

type TaskRunProperties struct {

	// The configuration properties for an exporting labels task run.
	ExportLabelsTaskRunProperties *ExportLabelsTaskRunProperties

	// The configuration properties for a find matches task run.
	FindMatchesTaskRunProperties *FindMatchesTaskRunProperties

	// The configuration properties for an importing labels task run.
	ImportLabelsTaskRunProperties *ImportLabelsTaskRunProperties

	// The configuration properties for a labeling set generation task run.
	LabelingSetGenerationTaskRunProperties *LabelingSetGenerationTaskRunProperties

	// The type of task run.
	TaskType TaskType
	// contains filtered or unexported fields
}

The configuration properties for the task run.

type TaskRunSortColumnType

type TaskRunSortColumnType string
const (
	TaskRunSortColumnTypeTaskRunType TaskRunSortColumnType = "TASK_RUN_TYPE"
	TaskRunSortColumnTypeStatus      TaskRunSortColumnType = "STATUS"
	TaskRunSortColumnTypeStarted     TaskRunSortColumnType = "STARTED"
)

Enum values for TaskRunSortColumnType

func (TaskRunSortColumnType) Values added in v0.29.0

func (TaskRunSortColumnType) Values() []TaskRunSortColumnType

Values returns all known values for TaskRunSortColumnType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TaskRunSortCriteria

type TaskRunSortCriteria struct {

	// The column to be used to sort the list of task runs for the machine learning
	// transform.
	//
	// This member is required.
	Column TaskRunSortColumnType

	// The sort direction to be used to sort the list of task runs for the machine
	// learning transform.
	//
	// This member is required.
	SortDirection SortDirectionType
	// contains filtered or unexported fields
}

The sorting criteria that are used to sort the list of task runs for the machine learning transform.
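
An illustrative pairing of the filter and sort structures above:

// Select failed evaluation runs, newest first.
filter := types.TaskRunFilterCriteria{
	Status:      types.TaskStatusTypeFailed,
	TaskRunType: types.TaskTypeEvaluation,
}
sortCriteria := types.TaskRunSortCriteria{
	Column:        types.TaskRunSortColumnTypeStarted,
	SortDirection: types.SortDirectionTypeDescending,
}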

type TaskStatusType

type TaskStatusType string
const (
	TaskStatusTypeStarting  TaskStatusType = "STARTING"
	TaskStatusTypeRunning   TaskStatusType = "RUNNING"
	TaskStatusTypeStopping  TaskStatusType = "STOPPING"
	TaskStatusTypeStopped   TaskStatusType = "STOPPED"
	TaskStatusTypeSucceeded TaskStatusType = "SUCCEEDED"
	TaskStatusTypeFailed    TaskStatusType = "FAILED"
	TaskStatusTypeTimeout   TaskStatusType = "TIMEOUT"
)

Enum values for TaskStatusType

func (TaskStatusType) Values added in v0.29.0

func (TaskStatusType) Values() []TaskStatusType

Values returns all known values for TaskStatusType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TaskType

type TaskType string
const (
	TaskTypeEvaluation            TaskType = "EVALUATION"
	TaskTypeLabelingSetGeneration TaskType = "LABELING_SET_GENERATION"
	TaskTypeImportLabels          TaskType = "IMPORT_LABELS"
	TaskTypeExportLabels          TaskType = "EXPORT_LABELS"
	TaskTypeFindMatches           TaskType = "FIND_MATCHES"
)

Enum values for TaskType

func (TaskType) Values added in v0.29.0

func (TaskType) Values() []TaskType

Values returns all known values for TaskType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TransformConfigParameter added in v1.36.0

type TransformConfigParameter struct {

	// Specifies the name of the parameter in the config file of the dynamic transform.
	//
	// This member is required.
	Name *string

	// Specifies the parameter type in the config file of the dynamic transform.
	//
	// This member is required.
	Type ParamType

	// Specifies whether the parameter is optional or not in the config file of the
	// dynamic transform.
	IsOptional *bool

	// Specifies the list type of the parameter in the config file of the dynamic
	// transform.
	ListType ParamType

	// Specifies the validation message in the config file of the dynamic transform.
	ValidationMessage *string

	// Specifies the validation rule in the config file of the dynamic transform.
	ValidationRule *string

	// Specifies the value of the parameter in the config file of the dynamic
	// transform.
	Value []string
	// contains filtered or unexported fields
}

Specifies the parameters in the config file of the dynamic transform.

type TransformEncryption added in v0.29.0

type TransformEncryption struct {

	// An MLUserDataEncryption object containing the encryption mode and
	// customer-provided KMS key ID.
	MlUserDataEncryption *MLUserDataEncryption

	// The name of the security configuration.
	TaskRunSecurityConfigurationName *string
	// contains filtered or unexported fields
}

The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS. Additionally, imported labels and trained transforms can now be encrypted using a customer provided KMS key.

type TransformFilterCriteria

type TransformFilterCriteria struct {

	// The time and date after which the transforms were created.
	CreatedAfter *time.Time

	// The time and date before which the transforms were created.
	CreatedBefore *time.Time

	// This value determines which version of Glue this machine learning transform is
	// compatible with. Glue 1.0 is recommended for most customers. If the value is not
	// set, the Glue compatibility defaults to Glue 0.9. For more information, see
	// Glue Versions (https://docs.aws.amazon.com/glue/latest/dg/release-notes.html#release-notes-versions)
	// in the developer guide.
	GlueVersion *string

	// Filter on transforms last modified after this date.
	LastModifiedAfter *time.Time

	// Filter on transforms last modified before this date.
	LastModifiedBefore *time.Time

	// A unique transform name that is used to filter the machine learning transforms.
	Name *string

	// Filters on datasets with a specific schema. The Map object is an array of
	// key-value pairs representing the schema this transform accepts, where Column is
	// the name of a column, and Type is the type of the data such as an integer or
	// string. Has an upper bound of 100 columns.
	Schema []SchemaColumn

	// Filters the list of machine learning transforms by the last known status of the
	// transforms (to indicate whether a transform can be used or not). One of
	// "NOT_READY", "READY", or "DELETING".
	Status TransformStatusType

	// The type of machine learning transform that is used to filter the machine
	// learning transforms.
	TransformType TransformType
	// contains filtered or unexported fields
}

The criteria used to filter the machine learning transforms.

type TransformParameters

type TransformParameters struct {

	// The type of machine learning transform. For information about the types of
	// machine learning transforms, see Creating Machine Learning Transforms (https://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html)
	// .
	//
	// This member is required.
	TransformType TransformType

	// The parameters for the find matches algorithm.
	FindMatchesParameters *FindMatchesParameters
	// contains filtered or unexported fields
}

The algorithm-specific parameters that are associated with the machine learning transform.

type TransformSortColumnType

type TransformSortColumnType string
const (
	TransformSortColumnTypeName          TransformSortColumnType = "NAME"
	TransformSortColumnTypeTransformType TransformSortColumnType = "TRANSFORM_TYPE"
	TransformSortColumnTypeStatus        TransformSortColumnType = "STATUS"
	TransformSortColumnTypeCreated       TransformSortColumnType = "CREATED"
	TransformSortColumnTypeLastModified  TransformSortColumnType = "LAST_MODIFIED"
)

Enum values for TransformSortColumnType

func (TransformSortColumnType) Values added in v0.29.0

Values returns all known values for TransformSortColumnType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TransformSortCriteria

type TransformSortCriteria struct {

	// The column to be used in the sorting criteria that are associated with the
	// machine learning transform.
	//
	// This member is required.
	Column TransformSortColumnType

	// The sort direction to be used in the sorting criteria that are associated with
	// the machine learning transform.
	//
	// This member is required.
	SortDirection SortDirectionType
	// contains filtered or unexported fields
}

The sorting criteria that are associated with the machine learning transform.

type TransformStatusType

type TransformStatusType string
const (
	TransformStatusTypeNotReady TransformStatusType = "NOT_READY"
	TransformStatusTypeReady    TransformStatusType = "READY"
	TransformStatusTypeDeleting TransformStatusType = "DELETING"
)

Enum values for TransformStatusType

func (TransformStatusType) Values added in v0.29.0

Values returns all known values for TransformStatusType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TransformType

type TransformType string
const (
	TransformTypeFindMatches TransformType = "FIND_MATCHES"
)

Enum values for TransformType

func (TransformType) Values added in v0.29.0

func (TransformType) Values() []TransformType

Values returns all known values for TransformType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type Trigger

type Trigger struct {

	// The actions initiated by this trigger.
	Actions []Action

	// A description of this trigger.
	Description *string

	// Batch condition that must be met (specified number of events received or
	// batch time window expired) before the EventBridge event trigger fires.
	EventBatchingCondition *EventBatchingCondition

	// Reserved for future use.
	Id *string

	// The name of the trigger.
	Name *string

	// The predicate of this trigger, which defines when it will fire.
	Predicate *Predicate

	// A cron expression used to specify the schedule (see Time-Based Schedules for
	// Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)).
	// For example, to run something every day at 12:15 UTC, you would specify:
	// cron(15 12 * * ? *).
	Schedule *string

	// The current state of the trigger.
	State TriggerState

	// The type of trigger that this is.
	Type TriggerType

	// The name of the workflow associated with the trigger.
	WorkflowName *string
	// contains filtered or unexported fields
}

Information about a specific trigger.
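
Trigger is returned by the service; the literal below is only a sketch of how the fields relate for a scheduled trigger. The trigger and job names are hypothetical, and the six-field cron syntax follows the Schedule note above.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// A scheduled trigger that would run a hypothetical job every day at 12:15 UTC.
	_ = types.Trigger{
		Name:     aws.String("nightly-trigger"),
		Type:     types.TriggerTypeScheduled,
		Schedule: aws.String("cron(15 12 * * ? *)"),
		Actions: []types.Action{{
			JobName: aws.String("nightly-etl-job"),
			Timeout: aws.Int32(120), // minutes; overrides the job's own timeout
		}},
	}
}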

type TriggerNodeDetails

type TriggerNodeDetails struct {

	// The information of the trigger represented by the trigger node.
	Trigger *Trigger
	// contains filtered or unexported fields
}

The details of a Trigger node present in the workflow.

type TriggerState

type TriggerState string
const (
	TriggerStateCreating     TriggerState = "CREATING"
	TriggerStateCreated      TriggerState = "CREATED"
	TriggerStateActivating   TriggerState = "ACTIVATING"
	TriggerStateActivated    TriggerState = "ACTIVATED"
	TriggerStateDeactivating TriggerState = "DEACTIVATING"
	TriggerStateDeactivated  TriggerState = "DEACTIVATED"
	TriggerStateDeleting     TriggerState = "DELETING"
	TriggerStateUpdating     TriggerState = "UPDATING"
)

Enum values for TriggerState

func (TriggerState) Values added in v0.29.0

func (TriggerState) Values() []TriggerState

Values returns all known values for TriggerState. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TriggerType

type TriggerType string
const (
	TriggerTypeScheduled   TriggerType = "SCHEDULED"
	TriggerTypeConditional TriggerType = "CONDITIONAL"
	TriggerTypeOnDemand    TriggerType = "ON_DEMAND"
	TriggerTypeEvent       TriggerType = "EVENT"
)

Enum values for TriggerType

func (TriggerType) Values added in v0.29.0

func (TriggerType) Values() []TriggerType

Values returns all known values for TriggerType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type TriggerUpdate

type TriggerUpdate struct {

	// The actions initiated by this trigger.
	Actions []Action

	// A description of this trigger.
	Description *string

	// Batch condition that must be met (specified number of events received or
	// batch time window expired) before the EventBridge event trigger fires.
	EventBatchingCondition *EventBatchingCondition

	// Reserved for future use.
	Name *string

	// The predicate of this trigger, which defines when it will fire.
	Predicate *Predicate

	// A cron expression used to specify the schedule (see Time-Based Schedules for
	// Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html)).
	// For example, to run something every day at 12:15 UTC, you would specify:
	// cron(15 12 * * ? *).
	Schedule *string
	// contains filtered or unexported fields
}

A structure used to provide information used to update a trigger. This object updates the previous trigger definition by overwriting it completely.
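
Because the update overwrites the previous definition completely, a sketch like the following must restate every field that should survive; the names here are hypothetical.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	_ = types.TriggerUpdate{
		Description: aws.String("nightly run"),
		Schedule:    aws.String("cron(15 12 * * ? *)"),
		// Restate the existing actions too: leaving Actions nil would drop them,
		// since this structure replaces the prior definition wholesale.
		Actions: []types.Action{{JobName: aws.String("nightly-etl-job")}},
	}
}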

type UnfilteredPartition added in v1.18.0

type UnfilteredPartition struct {

	// The list of columns the user has permissions to access.
	AuthorizedColumns []string

	// A Boolean value indicating that the partition location is registered with Lake
	// Formation.
	IsRegisteredWithLakeFormation bool

	// The partition object.
	Partition *Partition
	// contains filtered or unexported fields
}

A partition that contains unfiltered metadata.

type Union added in v1.25.0

type Union struct {

	// The node ID inputs to the transform.
	//
	// This member is required.
	Inputs []string

	// The name of the transform node.
	//
	// This member is required.
	Name *string

	// Indicates the type of Union transform. Specify ALL to join all rows from data
	// sources to the resulting DynamicFrame. The resulting union does not remove
	// duplicate rows. Specify DISTINCT to remove duplicate rows in the resulting
	// DynamicFrame.
	//
	// This member is required.
	UnionType UnionType
	// contains filtered or unexported fields
}

Specifies a transform that combines the rows from two or more datasets into a single result.
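
A sketch of a Union node as it might appear in a job's configuration graph; the input node IDs are hypothetical.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	_ = types.Union{
		Name:      aws.String("MergeSources"),
		Inputs:    []string{"node-1", "node-2"}, // hypothetical upstream node IDs
		UnionType: types.UnionTypeDistinct,      // UnionTypeAll would keep duplicate rows
	}
}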

type UnionType added in v1.25.0

type UnionType string
const (
	UnionTypeAll      UnionType = "ALL"
	UnionTypeDistinct UnionType = "DISTINCT"
)

Enum values for UnionType

func (UnionType) Values added in v1.25.0

func (UnionType) Values() []UnionType

Values returns all known values for UnionType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type UpdateBehavior

type UpdateBehavior string
const (
	UpdateBehaviorLog              UpdateBehavior = "LOG"
	UpdateBehaviorUpdateInDatabase UpdateBehavior = "UPDATE_IN_DATABASE"
)

Enum values for UpdateBehavior

func (UpdateBehavior) Values added in v0.29.0

func (UpdateBehavior) Values() []UpdateBehavior

Values returns all known values for UpdateBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type UpdateCatalogBehavior added in v1.25.0

type UpdateCatalogBehavior string
const (
	UpdateCatalogBehaviorUpdateInDatabase UpdateCatalogBehavior = "UPDATE_IN_DATABASE"
	UpdateCatalogBehaviorLog              UpdateCatalogBehavior = "LOG"
)

Enum values for UpdateCatalogBehavior

func (UpdateCatalogBehavior) Values added in v1.25.0

Values returns all known values for UpdateCatalogBehavior. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type UpdateCsvClassifierRequest

type UpdateCsvClassifierRequest struct {

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// Enables the processing of files that contain only one column.
	AllowSingleColumn *bool

	// Indicates whether the CSV file contains a header.
	ContainsHeader CsvHeaderOption

	// Specifies the configuration of custom datatypes.
	CustomDatatypeConfigured *bool

	// Specifies a list of supported custom datatypes.
	CustomDatatypes []string

	// A custom symbol to denote what separates each column entry in the row.
	Delimiter *string

	// Specifies not to trim values before identifying the type of column values. The
	// default value is true.
	DisableValueTrimming *bool

	// A list of strings representing column names.
	Header []string

	// A custom symbol to denote what combines content into a single column value. It
	// must be different from the column delimiter.
	QuoteSymbol *string

	// Sets the SerDe for processing CSV in the classifier, which will be applied in
	// the Data Catalog. Valid values are OpenCSVSerDe, LazySimpleSerDe, and None.
	// You can specify the None value when you want the crawler to do the detection.
	Serde CsvSerdeOption
	// contains filtered or unexported fields
}

Specifies a custom CSV classifier to be updated.
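
A sketch of an update for a pipe-delimited classifier; the classifier name and header are hypothetical, and the CsvHeaderOption and CsvSerdeOption constants are assumed from this package.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	_ = types.UpdateCsvClassifierRequest{
		Name:           aws.String("example-csv-classifier"),
		Delimiter:      aws.String("|"),
		QuoteSymbol:    aws.String(`"`), // must differ from the delimiter
		ContainsHeader: types.CsvHeaderOptionPresent,
		Header:         []string{"id", "name", "created_at"},
		Serde:          types.CsvSerdeOptionOpenCSVSerDe,
	}
}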

type UpdateGrokClassifierRequest

type UpdateGrokClassifierRequest struct {

	// The name of the GrokClassifier .
	//
	// This member is required.
	Name *string

	// An identifier of the data format that the classifier matches, such as Twitter,
	// JSON, Omniture logs, Amazon CloudWatch Logs, and so on.
	Classification *string

	// Optional custom grok patterns used by this classifier.
	CustomPatterns *string

	// The grok pattern used by this classifier.
	GrokPattern *string
	// contains filtered or unexported fields
}

Specifies a grok classifier to update when passed to UpdateClassifier.

type UpdateJsonClassifierRequest

type UpdateJsonClassifierRequest struct {

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// A JsonPath string defining the JSON data for the classifier to classify. Glue
	// supports a subset of JsonPath, as described in Writing JsonPath Custom
	// Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json).
	JsonPath *string
	// contains filtered or unexported fields
}

Specifies a JSON classifier to be updated.

type UpdateXMLClassifierRequest

type UpdateXMLClassifierRequest struct {

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// An identifier of the data format that the classifier matches.
	Classification *string

	// The XML tag designating the element that contains each record in an XML
	// document being parsed. This cannot identify a self-closing element (closed by
	// />). An empty row element that contains only attributes can be parsed as long
	// as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row>
	// is okay, but <row item_a="A" item_b="B" /> is not).
	RowTag *string
	// contains filtered or unexported fields
}

Specifies an XML classifier to be updated.
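
To make RowTag concrete: for a document whose records look like <record id="1">...</record>, the row tag is "record"; a self-closing <record/> would not match, per the note above. A minimal sketch with hypothetical names:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	// Treats each <record ...>...</record> element as one row.
	_ = types.UpdateXMLClassifierRequest{
		Name:           aws.String("example-xml-classifier"),
		Classification: aws.String("example-logs"),
		RowTag:         aws.String("record"),
	}
}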

type UpsertRedshiftTargetOptions added in v1.25.0

type UpsertRedshiftTargetOptions struct {

	// The name of the connection to use to write to Redshift.
	ConnectionName *string

	// The physical location of the Redshift table.
	TableLocation *string

	// The keys used to determine whether to perform an update or insert.
	UpsertKeys []string
	// contains filtered or unexported fields
}

The options to configure an upsert operation when writing to a Redshift target.

type UserDefinedFunction

type UserDefinedFunction struct {

	// The ID of the Data Catalog in which the function resides.
	CatalogId *string

	// The Java class that contains the function code.
	ClassName *string

	// The time at which the function was created.
	CreateTime *time.Time

	// The name of the catalog database that contains the function.
	DatabaseName *string

	// The name of the function.
	FunctionName *string

	// The owner of the function.
	OwnerName *string

	// The owner type.
	OwnerType PrincipalType

	// The resource URIs for the function.
	ResourceUris []ResourceUri
	// contains filtered or unexported fields
}

Represents the equivalent of a Hive user-defined function (UDF) definition.

type UserDefinedFunctionInput

type UserDefinedFunctionInput struct {

	// The Java class that contains the function code.
	ClassName *string

	// The name of the function.
	FunctionName *string

	// The owner of the function.
	OwnerName *string

	// The owner type.
	OwnerType PrincipalType

	// The resource URIs for the function.
	ResourceUris []ResourceUri
	// contains filtered or unexported fields
}

A structure used to create or update a user-defined function.
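
A sketch of a UDF definition; the function and class names are hypothetical, and the ResourceUri and PrincipalType shapes are assumed from this package.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

func main() {
	_ = types.UserDefinedFunctionInput{
		FunctionName: aws.String("to_upper"),                     // hypothetical
		ClassName:    aws.String("com.example.hive.udf.ToUpper"), // hypothetical
		OwnerName:    aws.String("data-eng"),
		OwnerType:    types.PrincipalTypeGroup,
		ResourceUris: []types.ResourceUri{{
			ResourceType: types.ResourceTypeJar,
			Uri:          aws.String("s3://example-bucket/udfs/to-upper.jar"),
		}},
	}
}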

type ValidationException

type ValidationException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

A value could not be validated.

func (*ValidationException) Error

func (e *ValidationException) Error() string

func (*ValidationException) ErrorCode

func (e *ValidationException) ErrorCode() string

func (*ValidationException) ErrorFault

func (e *ValidationException) ErrorFault() smithy.ErrorFault

func (*ValidationException) ErrorMessage

func (e *ValidationException) ErrorMessage() string
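
These exception types satisfy Go's error interfaces, so a caller can unwrap them from any operation error with errors.As; a minimal sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// classify reports whether err wraps a Glue ValidationException.
func classify(err error) bool {
	var ve *types.ValidationException
	if errors.As(err, &ve) {
		// ErrorCode and ErrorMessage come from the smithy error interface.
		fmt.Println(ve.ErrorCode(), ve.ErrorMessage())
		return true
	}
	return false
}

func main() {
	_ = classify(errors.New("not a validation error"))
}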

type VersionMismatchException

type VersionMismatchException struct {
	Message *string

	ErrorCodeOverride *string
	// contains filtered or unexported fields
}

There was a version conflict.

func (*VersionMismatchException) Error

func (e *VersionMismatchException) Error() string

func (*VersionMismatchException) ErrorCode

func (e *VersionMismatchException) ErrorCode() string

func (*VersionMismatchException) ErrorFault

func (e *VersionMismatchException) ErrorFault() smithy.ErrorFault

func (*VersionMismatchException) ErrorMessage

func (e *VersionMismatchException) ErrorMessage() string

type ViewDefinition added in v1.78.0

type ViewDefinition struct {

	// The definer of a view in SQL.
	Definer *string

	// You can set this flag as true to instruct the engine not to push user-provided
	// operations into the logical plan of the view during query planning. However,
	// setting this flag does not guarantee that the engine will comply. Refer to the
	// engine's documentation to understand the guarantees provided, if any.
	IsProtected *bool

	// A list of representations.
	Representations []ViewRepresentation

	// A list of table Amazon Resource Names (ARNs).
	SubObjects []string
	// contains filtered or unexported fields
}

A structure containing details for view representations.

type ViewDialect added in v1.73.0

type ViewDialect string
const (
	ViewDialectRedshift ViewDialect = "REDSHIFT"
	ViewDialectAthena   ViewDialect = "ATHENA"
	ViewDialectSpark    ViewDialect = "SPARK"
)

Enum values for ViewDialect

func (ViewDialect) Values added in v1.73.0

func (ViewDialect) Values() []ViewDialect

Values returns all known values for ViewDialect. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type ViewRepresentation added in v1.78.0

type ViewRepresentation struct {

	// The dialect of the query engine.
	Dialect ViewDialect

	// The version of the dialect of the query engine. For example, 3.0.0.
	DialectVersion *string

	// Dialects marked as stale are no longer valid and must be updated before they
	// can be queried in their respective query engines.
	IsStale *bool

	// The expanded SQL for the view. This SQL is used by engines while processing a
	// query on a view. Engines may perform operations during view creation to
	// transform ViewOriginalText to ViewExpandedText . For example:
	//   - Fully qualify identifiers: SELECT * from table1 → SELECT * from db1.table1
	ViewExpandedText *string

	// The SELECT query provided by the customer during CREATE VIEW DDL. This SQL is
	// not used during a query on a view (ViewExpandedText is used instead).
	// ViewOriginalText is used for cases like SHOW CREATE VIEW where users want to
	// see the original DDL command that created the view.
	ViewOriginalText *string
	// contains filtered or unexported fields
}

A structure that contains the dialect of the view, and the query that defines the view.

type WorkerType

type WorkerType string
const (
	WorkerTypeStandard WorkerType = "Standard"
	WorkerTypeG1x      WorkerType = "G.1X"
	WorkerTypeG2x      WorkerType = "G.2X"
	WorkerTypeG025x    WorkerType = "G.025X"
	WorkerTypeG4x      WorkerType = "G.4X"
	WorkerTypeG8x      WorkerType = "G.8X"
	WorkerTypeZ2x      WorkerType = "Z.2X"
)

Enum values for WorkerType

func (WorkerType) Values added in v0.29.0

func (WorkerType) Values() []WorkerType

Values returns all known values for WorkerType. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.
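
Because Values has a value receiver, it can be called on a zero value, which makes it handy for validating input against the enum values known to this client version; a sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// isKnownWorkerType reports whether s matches a WorkerType known to this client.
func isKnownWorkerType(s string) bool {
	for _, w := range types.WorkerType("").Values() {
		if string(w) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownWorkerType("G.1X"), isKnownWorkerType("G.16X"))
}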

type Workflow

type Workflow struct {

	// This structure indicates the details of the blueprint that this particular
	// workflow is created from.
	BlueprintDetails *BlueprintDetails

	// The date and time when the workflow was created.
	CreatedOn *time.Time

	// A collection of properties to be used as part of each execution of the
	// workflow. The run properties are made available to each job in the workflow. A
	// job can modify the properties for the next jobs in the flow.
	DefaultRunProperties map[string]string

	// A description of the workflow.
	Description *string

	// The graph representing all the Glue components that belong to the workflow as
	// nodes and directed connections between them as edges.
	Graph *WorkflowGraph

	// The date and time when the workflow was last modified.
	LastModifiedOn *time.Time

	// The information about the last execution of the workflow.
	LastRun *WorkflowRun

	// You can use this parameter to prevent unwanted multiple updates to data, to
	// control costs, or in some cases, to prevent exceeding the maximum number of
	// concurrent runs of any of the component jobs. If you leave this parameter blank,
	// there is no limit to the number of concurrent workflow runs.
	MaxConcurrentRuns *int32

	// The name of the workflow.
	Name *string
	// contains filtered or unexported fields
}

A workflow is a collection of multiple dependent Glue jobs and crawlers that are run to complete a complex ETL task. A workflow manages the execution and monitoring of all its jobs and crawlers.
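
DefaultRunProperties seed the run properties that jobs in the workflow can read and rewrite for downstream jobs. A hedged sketch of that handoff, assuming the GetWorkflowRunProperties and PutWorkflowRunProperties operations in the parent glue package; the property key and value are hypothetical:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glue"
)

// passBaton reads the current run properties and sets one for downstream jobs.
func passBaton(ctx context.Context, c *glue.Client, workflow, runID string) error {
	out, err := c.GetWorkflowRunProperties(ctx, &glue.GetWorkflowRunPropertiesInput{
		Name:  aws.String(workflow),
		RunId: aws.String(runID),
	})
	if err != nil {
		return err
	}
	props := out.RunProperties
	if props == nil {
		props = map[string]string{}
	}
	props["last_partition"] = "2024-04-19" // visible to the next jobs in the run
	_, err = c.PutWorkflowRunProperties(ctx, &glue.PutWorkflowRunPropertiesInput{
		Name:          aws.String(workflow),
		RunId:         aws.String(runID),
		RunProperties: props,
	})
	return err
}

func main() {}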

type WorkflowGraph

type WorkflowGraph struct {

	// A list of all the directed connections between the nodes belonging to the
	// workflow.
	Edges []Edge

	// A list of the Glue components belonging to the workflow, represented as nodes.
	Nodes []Node
	// contains filtered or unexported fields
}

A workflow graph represents the complete workflow containing all the Glue components present in the workflow and all the directed connections between them.

type WorkflowRun

type WorkflowRun struct {

	// The date and time when the workflow run completed.
	CompletedOn *time.Time

	// This error message describes any error that may have occurred in starting the
	// workflow run. Currently the only error message is "Concurrent runs exceeded
	// for workflow: foo."
	ErrorMessage *string

	// The graph representing all the Glue components that belong to the workflow as
	// nodes and directed connections between them as edges.
	Graph *WorkflowGraph

	// Name of the workflow that was run.
	Name *string

	// The ID of the previous workflow run.
	PreviousRunId *string

	// The date and time when the workflow run was started.
	StartedOn *time.Time

	// The batch condition that started the workflow run.
	StartingEventBatchCondition *StartingEventBatchCondition

	// The statistics of the run.
	Statistics *WorkflowRunStatistics

	// The status of the workflow run.
	Status WorkflowRunStatus

	// The ID of this workflow run.
	WorkflowRunId *string

	// The workflow run properties which were set during the run.
	WorkflowRunProperties map[string]string
	// contains filtered or unexported fields
}

A workflow run is an execution of a workflow, providing all of the runtime information.

type WorkflowRunStatistics

type WorkflowRunStatistics struct {

	// Indicates the count of job runs in the ERROR state in the workflow run.
	ErroredActions int32

	// Total number of Actions that have failed.
	FailedActions int32

	// Total number of Actions in the running state.
	RunningActions int32

	// Total number of Actions that have stopped.
	StoppedActions int32

	// Total number of Actions that have succeeded.
	SucceededActions int32

	// Total number of Actions that timed out.
	TimeoutActions int32

	// Total number of Actions in the workflow run.
	TotalActions int32

	// Indicates the count of job runs in the WAITING state in the workflow run.
	WaitingActions int32
	// contains filtered or unexported fields
}

Statistics that describe a workflow run.
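
As a sketch, the counters make it easy to derive a coarse completion ratio for a run (a derived metric, not part of the API):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/glue/types"
)

// progress returns the fraction of actions that have reached a terminal state.
func progress(s *types.WorkflowRunStatistics) float64 {
	if s == nil || s.TotalActions == 0 {
		return 0
	}
	done := s.SucceededActions + s.FailedActions + s.StoppedActions +
		s.TimeoutActions + s.ErroredActions
	return float64(done) / float64(s.TotalActions)
}

func main() {
	fmt.Println(progress(&types.WorkflowRunStatistics{TotalActions: 4, SucceededActions: 3}))
}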

type WorkflowRunStatus

type WorkflowRunStatus string
const (
	WorkflowRunStatusRunning   WorkflowRunStatus = "RUNNING"
	WorkflowRunStatusCompleted WorkflowRunStatus = "COMPLETED"
	WorkflowRunStatusStopping  WorkflowRunStatus = "STOPPING"
	WorkflowRunStatusStopped   WorkflowRunStatus = "STOPPED"
	WorkflowRunStatusError     WorkflowRunStatus = "ERROR"
)

Enum values for WorkflowRunStatus

func (WorkflowRunStatus) Values added in v0.29.0

Values returns all known values for WorkflowRunStatus. Note that this can be expanded in the future, and so it is only as up to date as the client. The ordering of this slice is not guaranteed to be stable across updates.

type XMLClassifier

type XMLClassifier struct {

	// An identifier of the data format that the classifier matches.
	//
	// This member is required.
	Classification *string

	// The name of the classifier.
	//
	// This member is required.
	Name *string

	// The time that this classifier was registered.
	CreationTime *time.Time

	// The time that this classifier was last updated.
	LastUpdated *time.Time

	// The XML tag designating the element that contains each record in an XML
	// document being parsed. This can't identify a self-closing element (closed by
	// />). An empty row element that contains only attributes can be parsed as long
	// as it ends with a closing tag (for example, <row item_a="A" item_b="B"></row>
	// is okay, but <row item_a="A" item_b="B" /> is not).
	RowTag *string

	// The version of this classifier.
	Version int64
	// contains filtered or unexported fields
}

A classifier for XML content.
