kafka_nais_io_v1

package
v0.0.0-...-3aa7b29 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 30, 2024 License: MIT Imports: 13 Imported by: 7

Documentation

Overview

Package v1 contains API Schema definitions for the kafka.nais.io v1 API group +kubebuilder:object:generate=true +groupName=kafka.nais.io +versionName=v1

Index

Constants

View Source
const (
	// Kubernetes Event reasons emitted while synchronizing resources.
	EventRolloutComplete       = "RolloutComplete"
	EventFailedPrepare         = "FailedPrepare"
	EventFailedSynchronization = "FailedSynchronization"

	// RemoveDataAnnotation marks a resource whose underlying Kafka data
	// should be removed together with the resource itself (see
	// Topic.RemoveDataWhenDeleted). NOTE(review): the exact annotation
	// value that triggers removal is not visible here — confirm against
	// the implementation.
	RemoveDataAnnotation = "kafka.nais.io/removeDataWhenResourceIsDeleted"

	// Maximum lengths for the team and application name components;
	// presumably enforced when composing service user names via
	// ServiceUserNameWithSuffix — verify against its implementation.
	TeamNameLength            = 20
	AppNameLength             = 30
	// AivenSyncFailureThreshold is how long (12 hours) synchronization
	// against Aiven may keep failing before the condition is treated as
	// a persistent failure.
	AivenSyncFailureThreshold = time.Hour * 12
)

Variables

View Source
var (
	// GroupVersion is group version used to register these objects
	// (kafka.nais.io/v1).
	GroupVersion = schema.GroupVersion{Group: "kafka.nais.io", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	// Typically passed to a controller manager's scheme setup.
	AddToScheme = SchemeBuilder.AddToScheme
)

Functions

func ServiceUserNameWithSuffix

func ServiceUserNameWithSuffix(teamName, appName, suffix string) (string, error)

Types

type Config

// Config holds the per-topic Kafka configuration options exposed through the
// Topic resource. All fields are optional pointers so that "not set" can be
// distinguished from a zero value; ApplyDefaults fills in defaults where the
// nil value is not desired.
type Config struct {
	// CleanupPolicy is either "delete" or "compact" or both.
	// This designates the retention policy to use on old log segments.
	// +nais:doc:Default="delete"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_cleanup.policy"
	// +kubebuilder:validation:Enum=delete;compact;"compact,delete"
	CleanupPolicy *string `json:"cleanupPolicy,omitempty"`

	// When a producer sets acks to "all" (or "-1"), `min.insync.replicas` specifies the minimum number of replicas
	// that must acknowledge a write for the write to be considered successful.
	// +nais:doc:Default="2"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_min.insync.replicas"
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=7
	MinimumInSyncReplicas *int `json:"minimumInSyncReplicas,omitempty"`

	// The default number of log partitions per topic.
	// +nais:doc:Default="1"
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=1000000
	Partitions *int `json:"partitions,omitempty"`

	// The default replication factor for created topics.
	// +nais:doc:Default="3"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#replication"
	// +kubebuilder:validation:Minimum=2
	Replication *int `json:"replication,omitempty"`

	// Configuration controls the maximum size a partition can grow to before we will discard old log segments
	// to free up space if we are using the "delete" retention policy. By default there is no size limit, only a time limit.
	// Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes.
	// +nais:doc:Default="-1"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_retention.bytes"
	RetentionBytes *int `json:"retentionBytes,omitempty"`

	// The number of hours to keep a log file before deleting it.
	// The maximum appears to be math.MaxInt64 expressed in milliseconds,
	// converted to hours (Kafka's retention.ms is an int64) — TODO confirm.
	// +nais:doc:Default="168"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_retention.ms"
	// +kubebuilder:validation:Maximum=2562047788015
	RetentionHours *int `json:"retentionHours,omitempty"`

	// The number of hours after which Kafka will force the log to roll even if the segment file isn't full to ensure
	// that retention can delete or compact old data. Bounded above by one year (8760 hours).
	// +nais:doc:Default="168"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_segment.ms"
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=8760
	SegmentHours *int `json:"segmentHours,omitempty"`

	// The largest record batch size allowed by Kafka (after compression if compression is enabled).
	// If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased
	// so that they can fetch record batches this large. In the latest message format version, records are always grouped
	// into batches for efficiency. In previous message format versions, uncompressed records are not grouped into
	// batches and this limit only applies to a single record in that case.
	// +nais:doc:Default="1048588"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_max.message.bytes"
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=5242880
	MaxMessageBytes *int `json:"maxMessageBytes,omitempty"`

	// MinCompactionLagMs indicates the minimum time a message will remain uncompacted in the log
	// (only relevant when the "compact" cleanup policy is in use).
	// +nais:doc:Default="0"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_min.compaction.lag.ms"
	// +kubebuilder:validation:Minimum=0
	MinCompactionLagMs *int `json:"minCompactionLagMs,omitempty"`

	// MaxCompactionLagMs indicates the maximum time a message will remain ineligible for compaction in the log
	// (only relevant when the "compact" cleanup policy is in use).
	// +nais:doc:Default="Inf"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_max.compaction.lag.ms"
	// +kubebuilder:validation:Minimum=0
	MaxCompactionLagMs *int `json:"maxCompactionLagMs,omitempty"`

	// MinCleanableDirtyRatio indicates the minimum ratio of dirty log to retention size to initiate log compaction.
	// Expressed as a percentage (IntOrString), unlike Kafka's native 0.0-1.0 ratio — presumably converted
	// before being applied; verify against the implementation.
	// +nais:doc:Default="50%"
	// +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_min.cleanable.dirty.ratio"
	MinCleanableDirtyRatioPercent *intstr.IntOrString `json:"minCleanableDirtyRatioPercent,omitempty"`
}

func (*Config) ApplyDefaults

func (cfg *Config) ApplyDefaults()

ApplyDefaults applies default values to the Topic Config where the nil value is not what we want.

func (*Config) DeepCopy

func (in *Config) DeepCopy() *Config

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.

func (*Config) DeepCopyInto

func (in *Config) DeepCopyInto(out *Config)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Stream

// Stream is the Kubernetes resource representing a Kafka Streams application's
// topic namespace; it carries the standard type/object metadata plus a spec
// and an optional status.
type Stream struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired state; required (no omitempty).
	Spec              StreamSpec    `json:"spec"`
	// Status is the last observed state; a pointer so it can be absent
	// before the first synchronization.
	Status            *StreamStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.synchronizationState"

func (*Stream) ACL

func (in *Stream) ACL() TopicACL

func (*Stream) DeepCopy

func (in *Stream) DeepCopy() *Stream

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stream.

func (*Stream) DeepCopyInto

func (in *Stream) DeepCopyInto(out *Stream)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Stream) DeepCopyObject

func (in *Stream) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*Stream) Hash

func (in *Stream) Hash() (string, error)

func (*Stream) NeedsSynchronization

func (in *Stream) NeedsSynchronization(hash string) bool

func (*Stream) TopicPrefix

func (in *Stream) TopicPrefix() string

func (*Stream) TopicWildcard

func (in *Stream) TopicWildcard() string

type StreamList

// StreamList is the standard Kubernetes list wrapper for Stream resources,
// as returned by list/watch operations.
type StreamList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Stream `json:"items"`
}

+kubebuilder:object:root=true

func (*StreamList) DeepCopy

func (in *StreamList) DeepCopy() *StreamList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamList.

func (*StreamList) DeepCopyInto

func (in *StreamList) DeepCopyInto(out *StreamList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*StreamList) DeepCopyObject

func (in *StreamList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type StreamSpec

// StreamSpec is the desired state of a Stream.
type StreamSpec struct {
	// Pool names the Kafka pool (Aiven project/service) the stream belongs to; required.
	Pool string `json:"pool"`
}

func (*StreamSpec) DeepCopy

func (in *StreamSpec) DeepCopy() *StreamSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSpec.

func (*StreamSpec) DeepCopyInto

func (in *StreamSpec) DeepCopyInto(out *StreamSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type StreamStatus

// StreamStatus records the outcome of the most recent synchronization of a
// Stream. All fields are optional and filled in by the controller.
type StreamStatus struct {
	SynchronizationState      string   `json:"synchronizationState,omitempty"`
	// SynchronizationHash is the hash of the last successfully applied spec
	// (compare Stream.Hash / NeedsSynchronization).
	SynchronizationHash       string   `json:"synchronizationHash,omitempty"`
	// SynchronizationTime is stored as a string; timestamp format is not
	// visible here — confirm against the controller.
	SynchronizationTime       string   `json:"synchronizationTime,omitempty"`
	Errors                    []string `json:"errors,omitempty"`
	Message                   string   `json:"message,omitempty"`
	// FullyQualifiedTopicPrefix is the resolved prefix for topics owned by
	// this stream (compare Stream.TopicPrefix).
	FullyQualifiedTopicPrefix string   `json:"fullyQualifiedTopicPrefix,omitempty"`
}

func (*StreamStatus) DeepCopy

func (in *StreamStatus) DeepCopy() *StreamStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamStatus.

func (*StreamStatus) DeepCopyInto

func (in *StreamStatus) DeepCopyInto(out *StreamStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Topic

// Topic is the Kubernetes resource representing a Kafka topic; it carries the
// standard type/object metadata plus a spec and an optional status.
type Topic struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired state; required (no omitempty).
	Spec              TopicSpec    `json:"spec"`
	// Status is the last observed state; a pointer so it can be absent
	// before the first synchronization.
	Status            *TopicStatus `json:"status,omitempty"`
}

+kubebuilder:object:root=true +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.synchronizationState" +kubebuilder:printcolumn:name="Fully Qualified Name",type="string",JSONPath=".status.fullyQualifiedName"

func ExampleTopicForDocumentation

func ExampleTopicForDocumentation() *Topic

func (*Topic) ApplyDefaults

func (in *Topic) ApplyDefaults() error

func (*Topic) DeepCopy

func (in *Topic) DeepCopy() *Topic

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topic.

func (*Topic) DeepCopyInto

func (in *Topic) DeepCopyInto(out *Topic)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Topic) DeepCopyObject

func (in *Topic) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (Topic) FullName

func (in Topic) FullName() string

func (*Topic) Hash

func (in *Topic) Hash() (string, error)

func (*Topic) NeedsSynchronization

func (in *Topic) NeedsSynchronization(hash string) bool

func (Topic) RemoveDataWhenDeleted

func (in Topic) RemoveDataWhenDeleted() bool

type TopicACL

type TopicACL struct {
	// Access type granted for an application.
	// Defaults to `readwrite`.
	// +kubebuilder:validation:Enum=read;write;readwrite
	Access string `json:"access"`
	// The name of the specified application
	Application string `json:"application"`
	// The team of the specified application
	Team string `json:"team"`
}

TopicACL describes the access granted for the topic.

func (*TopicACL) DeepCopy

func (in *TopicACL) DeepCopy() *TopicACL

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicACL.

func (*TopicACL) DeepCopyInto

func (in *TopicACL) DeepCopyInto(out *TopicACL)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (TopicACL) ServiceUserNameWithSuffix

func (in TopicACL) ServiceUserNameWithSuffix(suffix string) (string, error)

Generate name to use for ServiceUser. Suffix should be "*" in ACLs, or a counter (generation) % 100 for actual usernames.

type TopicACLs

type TopicACLs []TopicACL

func (TopicACLs) DeepCopy

func (in TopicACLs) DeepCopy() TopicACLs

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicACLs.

func (TopicACLs) DeepCopyInto

func (in TopicACLs) DeepCopyInto(out *TopicACLs)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type TopicList

// TopicList is the standard Kubernetes list wrapper for Topic resources,
// as returned by list/watch operations.
type TopicList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Topic `json:"items"`
}

+kubebuilder:object:root=true

func (*TopicList) DeepCopy

func (in *TopicList) DeepCopy() *TopicList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicList.

func (*TopicList) DeepCopyInto

func (in *TopicList) DeepCopyInto(out *TopicList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*TopicList) DeepCopyObject

func (in *TopicList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

type TopicSpec

// TopicSpec is a specification of the desired behavior of the topic.
type TopicSpec struct {
	// Pool names the Kafka pool (Aiven project/service) the topic lives in; required.
	Pool   string    `json:"pool"`
	// Config holds optional per-topic Kafka settings; nil means all defaults.
	Config *Config   `json:"config,omitempty"`
	// ACL lists the applications granted access to this topic; required.
	ACL    TopicACLs `json:"acl"`
}

TopicSpec is a specification of the desired behavior of the topic.

func (*TopicSpec) DeepCopy

func (in *TopicSpec) DeepCopy() *TopicSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSpec.

func (*TopicSpec) DeepCopyInto

func (in *TopicSpec) DeepCopyInto(out *TopicSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type TopicStatus

// TopicStatus records the outcome of the most recent synchronization of a
// Topic. All fields are optional and filled in by the controller.
type TopicStatus struct {
	SynchronizationState   string   `json:"synchronizationState,omitempty"`
	// SynchronizationHash is the hash of the last successfully applied spec
	// (compare Topic.Hash / NeedsSynchronization).
	SynchronizationHash    string   `json:"synchronizationHash,omitempty"`
	// Timestamps are stored as strings; format is not visible here —
	// confirm against the controller.
	SynchronizationTime    string   `json:"synchronizationTime,omitempty"`
	CredentialsExpiryTime  string   `json:"credentialsExpiryTime,omitempty"`
	Errors                 []string `json:"errors,omitempty"`
	Message                string   `json:"message,omitempty"`
	// FullyQualifiedName is the resolved topic name (compare Topic.FullName).
	FullyQualifiedName     string   `json:"fullyQualifiedName,omitempty"`
	// LatestAivenSyncFailure presumably relates to AivenSyncFailureThreshold
	// (12h) — verify against the controller.
	LatestAivenSyncFailure string   `json:"latestAivenSyncFailure,omitempty"`
}

func (*TopicStatus) DeepCopy

func (in *TopicStatus) DeepCopy() *TopicStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicStatus.

func (*TopicStatus) DeepCopyInto

func (in *TopicStatus) DeepCopyInto(out *TopicStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type User

// User pairs a generated Kafka service username with the application and team
// it was derived from. No JSON tags: this type is not part of the CRD schema.
type User struct {
	Username    string
	Application string
	Team        string
}

func (*User) DeepCopy

func (in *User) DeepCopy() *User

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User.

func (*User) DeepCopyInto

func (in *User) DeepCopyInto(out *User)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL