config

package
v0.0.0-...-9b598c7 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jun 28, 2023 License: Apache-2.0 Imports: 30 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// ManualScaling is the manual DataPlaneClusterScalingType via the configuration file
	ManualScaling string = "manual"
	// AutoScaling is the automatic DataPlaneClusterScalingType depending on cluster capacity as reported by the Agent Operator
	AutoScaling string = "auto"
	// NoScaling disables cluster scaling. This is useful in testing
	NoScaling string = "none"
)
View Source
const (
	InMemoryTLSCertStorageType     = "in-memory"
	SecureTLSCertStorageType       = "secure-storage"
	FileTLSCertStorageType         = "file"
	ManualCertificateManagement    = "manual"
	AutomaticCertificateManagement = "automatic"
)

Variables

This section is empty.

Functions

This section is empty.

Types

type AWSConfig

type AWSConfig struct {
	Route53                     awsRoute53Config
	SecretManager               awsSecretManagerConfig
	ConfigForOSDClusterCreation awsConfigForOSDClusterCreation
}

func NewAWSConfig

func NewAWSConfig() *AWSConfig

func (*AWSConfig) AddFlags

func (c *AWSConfig) AddFlags(fs *pflag.FlagSet)

func (*AWSConfig) ReadFiles

func (c *AWSConfig) ReadFiles() error

type AutomaticCertificateManagementConfig

type AutomaticCertificateManagementConfig struct {
	RenewalWindowRatio           float64 `validate:"gte=0,lte=1"`
	EmailToSendNotificationTo    string  `validate:"required,email"`
	AcmeIssuerAccountKeyFilePath string  `validate:"required"`
	CertificateCacheTTL          time.Duration
	AcmeIssuerAccountKey         string
	MustStaple                   bool
}

type ClusterConfig

type ClusterConfig struct {
	// contains filtered or unexported fields
}

func NewClusterConfig

func NewClusterConfig(clusters ClusterList) *ClusterConfig

func (*ClusterConfig) ExcessClusters

func (conf *ClusterConfig) ExcessClusters(clusterList map[string]api.Cluster) []string

func (*ClusterConfig) GetCapacityForRegion

func (conf *ClusterConfig) GetCapacityForRegion(region string) int

func (*ClusterConfig) GetCapacityForRegionAndInstanceType

func (conf *ClusterConfig) GetCapacityForRegionAndInstanceType(region, instanceType string, isolatedClustersOnly bool) int

GetCapacityForRegionAndInstanceType returns the total capacity for the specified region and instance type. Set isolatedClustersOnly to true if you want to get the capacity of clusters that only support this instance type.

func (*ClusterConfig) GetClusterSupportedInstanceType

func (conf *ClusterConfig) GetClusterSupportedInstanceType(clusterID string) (string, bool)

func (*ClusterConfig) GetManualClusters

func (conf *ClusterConfig) GetManualClusters() []ManualCluster

func (*ClusterConfig) IsClusterSchedulable

func (conf *ClusterConfig) IsClusterSchedulable(clusterID string) bool

func (*ClusterConfig) IsNumberOfStreamingUnitsWithinClusterLimit

func (conf *ClusterConfig) IsNumberOfStreamingUnitsWithinClusterLimit(clusterID string, count int) bool

func (*ClusterConfig) MissingClusters

func (conf *ClusterConfig) MissingClusters(clusterMap map[string]api.Cluster) []ManualCluster

type ClusterList

type ClusterList []ManualCluster

type ComputeMachineConfig

type ComputeMachineConfig struct {
	ComputeMachineType      string                         `yaml:"compute_machine_type" validate:"required"`
	ComputeNodesAutoscaling *ComputeNodesAutoscalingConfig `yaml:"compute_node_autoscaling" validate:"required"`
}

type ComputeMachinesConfig

type ComputeMachinesConfig struct {
	ClusterWideWorkload          *ComputeMachineConfig           `yaml:"cluster_wide_workload" validate:"required"`
	KafkaWorkloadPerInstanceType map[string]ComputeMachineConfig `yaml:"kafka_workload_per_instance_type" validate:"required"`
}

func (*ComputeMachinesConfig) GetKafkaWorkloadConfigForInstanceType

func (c *ComputeMachinesConfig) GetKafkaWorkloadConfigForInstanceType(instanceTypeID string) (ComputeMachineConfig, bool)

type ComputeNodesAutoscalingConfig

type ComputeNodesAutoscalingConfig struct {
	MaxComputeNodes int `yaml:"max_compute_nodes" validate:"gt=0,gtefield=MinComputeNodes"`
	MinComputeNodes int `yaml:"min_compute_nodes" validate:"gt=0"`
}

type DataplaneClusterConfig

type DataplaneClusterConfig struct {
	ImagePullDockerConfigContent string
	ImagePullDockerConfigFile    string
	// Possible values are:
	// 'manual' to use OSD Cluster configuration file,
	// 'auto' to use dynamic scaling
	// 'none' to disable scaling altogether, useful in testing
	DataPlaneClusterScalingType                 string
	DataPlaneClusterConfigFile                  string
	ReadOnlyUserList                            userv1.OptionalNames
	ReadOnlyUserListFile                        string
	KafkaSREUsers                               userv1.OptionalNames
	KafkaSREUsersFile                           string
	ClusterConfig                               *ClusterConfig
	EnableReadyDataPlaneClustersReconcile       bool
	EnableKafkaSreIdentityProviderConfiguration bool
	Kubeconfig                                  string
	RawKubernetesConfig                         *clientcmdapi.Config
	StrimziOperatorOLMConfig                    OperatorInstallationConfig
	KasFleetshardOperatorOLMConfig              OperatorInstallationConfig
	ObservabilityOperatorOLMConfig              OperatorInstallationConfig
	DynamicScalingConfig                        DynamicScalingConfig
	NodePrewarmingConfig                        NodePrewarmingConfig
}

func NewDataplaneClusterConfig

func NewDataplaneClusterConfig() *DataplaneClusterConfig

func (*DataplaneClusterConfig) AddFlags

func (c *DataplaneClusterConfig) AddFlags(fs *pflag.FlagSet)

func (*DataplaneClusterConfig) DefaultComputeMachinesConfig

func (c *DataplaneClusterConfig) DefaultComputeMachinesConfig(cloudProviderID cloudproviders.CloudProviderID) (ComputeMachinesConfig, error)

DefaultComputeMachinesConfig returns the Compute Machine config for the given `cloudProviderID`. If `cloudProviderID` is not a known cloud provider, an error is returned.

func (*DataplaneClusterConfig) FindClusterNameByClusterId

func (c *DataplaneClusterConfig) FindClusterNameByClusterId(clusterId string) string

func (*DataplaneClusterConfig) IsDataPlaneAutoScalingEnabled

func (c *DataplaneClusterConfig) IsDataPlaneAutoScalingEnabled() bool

func (*DataplaneClusterConfig) IsDataPlaneManualScalingEnabled

func (c *DataplaneClusterConfig) IsDataPlaneManualScalingEnabled() bool

func (*DataplaneClusterConfig) IsReadyDataPlaneClustersReconcileEnabled

func (c *DataplaneClusterConfig) IsReadyDataPlaneClustersReconcileEnabled() bool

func (*DataplaneClusterConfig) ReadFiles

func (c *DataplaneClusterConfig) ReadFiles() error

func (*DataplaneClusterConfig) Validate

func (c *DataplaneClusterConfig) Validate(env *environments.Env) error

type DynamicScalingConfig

type DynamicScalingConfig struct {
	ComputeMachinePerCloudProvider                map[cloudproviders.CloudProviderID]ComputeMachinesConfig `yaml:"compute_machine_per_cloud_provider" validate:"required"`
	EnableDynamicScaleUpManagerScaleUpTrigger     bool                                                     `yaml:"enable_dynamic_data_plane_scale_up"`
	EnableDynamicScaleDownManagerScaleDownTrigger bool                                                     `yaml:"enable_dynamic_data_plane_scale_down"`
	NewDataPlaneOpenShiftVersion                  string                                                   `yaml:"new_data_plane_openshift_version"`
	// contains filtered or unexported fields
}

func NewDynamicScalingConfig

func NewDynamicScalingConfig() DynamicScalingConfig

func (*DynamicScalingConfig) IsDataplaneScaleDownTriggerEnabled

func (c *DynamicScalingConfig) IsDataplaneScaleDownTriggerEnabled() bool

func (*DynamicScalingConfig) IsDataplaneScaleUpTriggerEnabled

func (c *DynamicScalingConfig) IsDataplaneScaleUpTriggerEnabled() bool

type GCPConfig

type GCPConfig struct {
	GCPCredentials GCPCredentials
	// contains filtered or unexported fields
}

func NewGCPConfig

func NewGCPConfig() *GCPConfig

func (*GCPConfig) AddFlags

func (c *GCPConfig) AddFlags(fs *pflag.FlagSet)

func (*GCPConfig) ReadFiles

func (c *GCPConfig) ReadFiles() error

func (*GCPConfig) Validate

func (c *GCPConfig) Validate(env *environments.Env) error

type GCPCredentials

type GCPCredentials struct {
	AuthProviderX509CertURL string `json:"auth_provider_x509_cert_url" validate:"required"`
	AuthURI                 string `json:"auth_uri" validate:"required"`
	ClientEmail             string `json:"client_email" validate:"required"`
	ClientID                string `json:"client_id" validate:"required"`
	ClientX509CertURL       string `json:"client_x509_cert_url" validate:"required"`
	PrivateKey              string `json:"private_key" validate:"required"`
	PrivateKeyID            string `json:"private_key_id" validate:"required"`
	ProjectID               string `json:"project_id" validate:"required"`
	TokenURI                string `json:"token_uri" validate:"required"`
	Type                    string `json:"type" validate:"required"`
}

type InstanceType

type InstanceType types.KafkaInstanceType

type InstanceTypeConfig

type InstanceTypeConfig struct {
	Limit *int `yaml:"limit"`
	// Minimum capacity in number of kafka streaming units that should be
	// available (free) at any given moment for a supported instance type in a
	// region. If not provided, its default value is 0 which means that there is
	// no minimum available capacity required.
	// Used for dynamic scaling evaluation.
	MinAvailableCapacitySlackStreamingUnits int `yaml:"min_available_capacity_slack_streaming_units"`
}

type InstanceTypeMap

type InstanceTypeMap map[string]InstanceTypeConfig

func (InstanceTypeMap) AsSlice

func (itl InstanceTypeMap) AsSlice() []string

AsSlice returns a region's supported instance type list as a slice.

type InstanceTypeNodePrewarmingConfig

type InstanceTypeNodePrewarmingConfig struct {
	BaseStreamingUnitSize string `yaml:"base_streaming_unit_size"`
	NumReservedInstances  int    `yaml:"num_reserved_instances"`
}

type KafkaBillingModel

type KafkaBillingModel struct {
	ID               string   `yaml:"id" validate:"required"`
	AMSResource      string   `yaml:"ams_resource" validate:"required,ams_resource_validator"`
	AMSProduct       string   `yaml:"ams_product" validate:"required,ams_product_validator"`
	AMSBillingModels []string `yaml:"ams_billing_models" validate:"min=1,unique,ams_billing_models_validator"`
	GracePeriodDays  int      `yaml:"grace_period_days" validate:"min=0"`
}

func (*KafkaBillingModel) HasSupportForAMSBillingModel

func (kbm *KafkaBillingModel) HasSupportForAMSBillingModel(amsBillingModel string) bool

func (*KafkaBillingModel) HasSupportForMarketplace

func (kbm *KafkaBillingModel) HasSupportForMarketplace() bool

func (*KafkaBillingModel) HasSupportForStandard

func (kbm *KafkaBillingModel) HasSupportForStandard() bool

type KafkaConfig

type KafkaConfig struct {
	EnableKafkaCNAMERegistration bool
	KafkaDomainName              string
	BrowserUrl                   string

	Quota                  *KafkaQuotaConfig
	SupportedInstanceTypes *KafkaSupportedInstanceTypesConfig
	EnableKafkaOwnerConfig bool
	KafkaOwnerList         []string
	KafkaOwnerListFile     string
}

func NewKafkaConfig

func NewKafkaConfig() *KafkaConfig

func (*KafkaConfig) AddFlags

func (c *KafkaConfig) AddFlags(fs *pflag.FlagSet)

func (*KafkaConfig) GetBillingModelByID

func (c *KafkaConfig) GetBillingModelByID(instanceType, billingModelID string) (KafkaBillingModel, error)

func (*KafkaConfig) GetBillingModels

func (c *KafkaConfig) GetBillingModels(instanceType string) ([]KafkaBillingModel, error)

func (*KafkaConfig) GetFirstAvailableSize

func (c *KafkaConfig) GetFirstAvailableSize(instanceType string) (*KafkaInstanceSize, error)

func (*KafkaConfig) GetKafkaInstanceSize

func (c *KafkaConfig) GetKafkaInstanceSize(instanceType, sizeId string) (*KafkaInstanceSize, error)

func (*KafkaConfig) ReadFiles

func (c *KafkaConfig) ReadFiles() error

func (*KafkaConfig) Validate

func (c *KafkaConfig) Validate(env *environments.Env) error

type KafkaInstanceSize

type KafkaInstanceSize struct {
	Id                          string   `yaml:"id"`
	DisplayName                 string   `yaml:"display_name"`
	IngressThroughputPerSec     Quantity `yaml:"ingressThroughputPerSec"`
	EgressThroughputPerSec      Quantity `yaml:"egressThroughputPerSec"`
	TotalMaxConnections         int      `yaml:"totalMaxConnections"`
	MaxDataRetentionSize        Quantity `yaml:"maxDataRetentionSize"`
	MaxPartitions               int      `yaml:"maxPartitions"`
	MaxDataRetentionPeriod      string   `yaml:"maxDataRetentionPeriod"`
	MaxConnectionAttemptsPerSec int      `yaml:"maxConnectionAttemptsPerSec"`
	MaxMessageSize              Quantity `yaml:"maxMessageSize"`
	QuotaConsumed               int      `yaml:"quotaConsumed"`
	// DeprecatedQuotaType is deprecated. To be removed after API
	// deprecation period.
	DeprecatedQuotaType string         `yaml:"quotaType"`
	CapacityConsumed    int            `yaml:"capacityConsumed"`
	SupportedAZModes    []string       `yaml:"supportedAZModes"`
	MinInSyncReplicas   int            `yaml:"minInSyncReplicas"` // also abbreviated as ISR in Kafka terminology
	ReplicationFactor   int            `yaml:"replicationFactor"` // also abbreviated as RF in Kafka terminology
	LifespanSeconds     *int           `yaml:"lifespanSeconds"`
	MaturityStatus      MaturityStatus `yaml:"maturityStatus"`
}

type KafkaInstanceType

type KafkaInstanceType struct {
	Id                     string              `yaml:"id"`
	DisplayName            string              `yaml:"display_name"`
	Sizes                  []KafkaInstanceSize `yaml:"sizes"`
	SupportedBillingModels []KafkaBillingModel `yaml:"supported_billing_models" validate:"min=1,unique=ID,dive"`
}

func (*KafkaInstanceType) GetBiggestCapacityConsumedSize

func (kp *KafkaInstanceType) GetBiggestCapacityConsumedSize() *KafkaInstanceSize

GetBiggestCapacityConsumedSize returns the Kafka instance size that has the biggest capacity consumed defined. If two sizes have the same capacity consumed, the first one defined is returned. If there are no Kafka instance sizes for the instance type, nil is returned.

func (*KafkaInstanceType) GetKafkaInstanceSizeByID

func (kp *KafkaInstanceType) GetKafkaInstanceSizeByID(sizeId string) (*KafkaInstanceSize, error)

func (*KafkaInstanceType) GetKafkaSupportedBillingModelByID

func (kp *KafkaInstanceType) GetKafkaSupportedBillingModelByID(kafkaBillingModelID string) (*KafkaBillingModel, error)

func (*KafkaInstanceType) HasAnInstanceSizeWithLifespan

func (kp *KafkaInstanceType) HasAnInstanceSizeWithLifespan() bool

HasAnInstanceSizeWithLifespan returns true if kp contains at least one Kafka size with a non-nil LifespanSeconds value

type KafkaQuotaConfig

type KafkaQuotaConfig struct {
	Type                         string
	AllowDeveloperInstance       bool
	MaxAllowedDeveloperInstances int
}

func NewKafkaQuotaConfig

func NewKafkaQuotaConfig() *KafkaQuotaConfig

type KafkaSupportedInstanceTypesConfig

type KafkaSupportedInstanceTypesConfig struct {
	Configuration     SupportedKafkaInstanceTypesConfig
	ConfigurationFile string
}

func NewKafkaSupportedInstanceTypesConfig

func NewKafkaSupportedInstanceTypesConfig() *KafkaSupportedInstanceTypesConfig

type KafkaTLSCertificateManagementConfig

type KafkaTLSCertificateManagementConfig struct {
	CertificateAuthorityEndpoint         string
	CertificateManagementStrategy        string
	StorageType                          string
	EnableKafkaExternalCertificate       bool
	ManualCertificateManagementConfig    ManualCertificateManagementConfig
	AutomaticCertificateManagementConfig AutomaticCertificateManagementConfig
}

func NewCertificateManagementConfig

func NewCertificateManagementConfig() *KafkaTLSCertificateManagementConfig

func (*KafkaTLSCertificateManagementConfig) AddFlags

func (*KafkaTLSCertificateManagementConfig) ReadFiles

func (*KafkaTLSCertificateManagementConfig) Validate

type KasFleetshardConfig

type KasFleetshardConfig struct {
	PollInterval   string
	ResyncInterval string
}

func NewKasFleetshardConfig

func NewKasFleetshardConfig() *KasFleetshardConfig

func (*KasFleetshardConfig) AddFlags

func (c *KasFleetshardConfig) AddFlags(fs *pflag.FlagSet)

func (*KasFleetshardConfig) ReadFiles

func (c *KasFleetshardConfig) ReadFiles() error

type ManualCertificateManagementConfig

type ManualCertificateManagementConfig struct {
	KafkaTLSCert         string
	KafkaTLSKey          string
	KafkaTLSCertFilePath string `validate:"required"`
	KafkaTLSKeyFilePath  string `validate:"required"`
}

type ManualCluster

type ManualCluster struct {
	Name                  string                  `yaml:"name"`
	ClusterId             string                  `yaml:"cluster_id"`
	CloudProvider         string                  `yaml:"cloud_provider"`
	Region                string                  `yaml:"region"`
	MultiAZ               bool                    `yaml:"multi_az"`
	Schedulable           bool                    `yaml:"schedulable"`
	KafkaInstanceLimit    int                     `yaml:"kafka_instance_limit"`
	Status                api.ClusterStatus       `yaml:"status"`
	ProviderType          api.ClusterProviderType `yaml:"provider_type"`
	ClusterDNS            string                  `yaml:"cluster_dns"`
	SupportedInstanceType string                  `yaml:"supported_instance_type"`
}

ManualCluster represents a manual (data plane) cluster configuration entry.

func (*ManualCluster) UnmarshalYAML

func (c *ManualCluster) UnmarshalYAML(unmarshal func(interface{}) error) error

type MaturityStatus

type MaturityStatus string
const (
	MaturityStatusTechPreview MaturityStatus = "preview"
	MaturityStatusStable      MaturityStatus = "stable"
)

type NodePrewarmingConfig

type NodePrewarmingConfig struct {
	Configuration map[string]InstanceTypeNodePrewarmingConfig
	// contains filtered or unexported fields
}

func NewNodePrewarmingConfig

func NewNodePrewarmingConfig() NodePrewarmingConfig

func (*NodePrewarmingConfig) ForInstanceType

func (c *NodePrewarmingConfig) ForInstanceType(instanceTypeID string) (InstanceTypeNodePrewarmingConfig, bool)

type OperatorInstallationConfig

type OperatorInstallationConfig struct {
	Namespace               string
	IndexImage              string
	Package                 string
	SubscriptionChannel     string
	SubscriptionConfig      *operatorsv1alpha1.SubscriptionConfig
	SubscriptionConfigFile  string
	SubscriptionStartingCSV string
}

type Plan

type Plan string

func (Plan) GetInstanceType

func (p Plan) GetInstanceType() (string, error)

func (Plan) GetSizeID

func (p Plan) GetSizeID() (string, error)

func (Plan) String

func (p Plan) String() string

type Provider

type Provider struct {
	Name    string     `yaml:"name"`
	Default bool       `yaml:"default"`
	Regions RegionList `yaml:"regions"`
}

func (Provider) GetDefaultRegion

func (provider Provider) GetDefaultRegion() (Region, error)

func (Provider) IsRegionSupported

func (provider Provider) IsRegionSupported(regionName string) bool

func (Provider) Validate

func (provider Provider) Validate(dataplaneClusterConfig *DataplaneClusterConfig) error

type ProviderConfig

type ProviderConfig struct {
	ProvidersConfig     ProviderConfiguration
	ProvidersConfigFile string
}

func NewSupportedProvidersConfig

func NewSupportedProvidersConfig() *ProviderConfig

func (*ProviderConfig) AddFlags

func (c *ProviderConfig) AddFlags(fs *pflag.FlagSet)

func (*ProviderConfig) GetInstanceLimit

func (c *ProviderConfig) GetInstanceLimit(region string, providerName string, instanceType string) (*int, *errs.ServiceError)

func (*ProviderConfig) ReadFiles

func (c *ProviderConfig) ReadFiles() error

func (*ProviderConfig) Validate

func (c *ProviderConfig) Validate(env *environments.Env) error

type ProviderConfiguration

type ProviderConfiguration struct {
	SupportedProviders ProviderList `yaml:"supported_providers"`
}

type ProviderList

type ProviderList []Provider

func (ProviderList) GetByName

func (pl ProviderList) GetByName(providerName string) (Provider, bool)

func (ProviderList) GetDefault

func (c ProviderList) GetDefault() (Provider, error)

func (ProviderList) String

func (pl ProviderList) String() string

type Quantity

type Quantity string

func (*Quantity) IsEmpty

func (q *Quantity) IsEmpty() bool

func (*Quantity) String

func (q *Quantity) String() string

func (*Quantity) ToInt64

func (q *Quantity) ToInt64() (int64, error)

func (*Quantity) ToK8Quantity

func (q *Quantity) ToK8Quantity() (*resource.Quantity, error)

type Region

type Region struct {
	Name                   string          `yaml:"name"`
	Default                bool            `yaml:"default"`
	SupportedInstanceTypes InstanceTypeMap `yaml:"supported_instance_type"`
}

func (Region) IsInstanceTypeSupported

func (r Region) IsInstanceTypeSupported(instanceType InstanceType) bool

func (Region) RegionHasZeroOrNoLimitInstanceType

func (r Region) RegionHasZeroOrNoLimitInstanceType() bool

func (Region) Validate

func (r Region) Validate(dataplaneClusterConfig *DataplaneClusterConfig) error

type RegionList

type RegionList []Region

func (RegionList) GetByName

func (rl RegionList) GetByName(regionName string) (Region, bool)

func (RegionList) String

func (rl RegionList) String() string

type SupportedKafkaInstanceTypesConfig

type SupportedKafkaInstanceTypesConfig struct {
	SupportedKafkaInstanceTypes []KafkaInstanceType `yaml:"supported_instance_types"`
}

func (*SupportedKafkaInstanceTypesConfig) GetKafkaInstanceTypeByID

func (s *SupportedKafkaInstanceTypesConfig) GetKafkaInstanceTypeByID(instanceType string) (*KafkaInstanceType, error)

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL