Documentation ¶
Overview ¶
Package v1 contains API Schema definitions for the kafka.nais.io v1 API group +kubebuilder:object:generate=true +groupName=kafka.nais.io +versionName=v1
Index ¶
- Constants
- Variables
- func ServiceUserNameWithSuffix(teamName, appName, suffix string) (string, error)
- type Config
- type Stream
- func (in *Stream) ACL() TopicACL
- func (in *Stream) DeepCopy() *Stream
- func (in *Stream) DeepCopyInto(out *Stream)
- func (in *Stream) DeepCopyObject() runtime.Object
- func (in *Stream) Hash() (string, error)
- func (in *Stream) NeedsSynchronization(hash string) bool
- func (in *Stream) TopicPrefix() string
- func (in *Stream) TopicWildcard() string
- type StreamList
- type StreamSpec
- type StreamStatus
- type Topic
- func (in *Topic) ApplyDefaults() error
- func (in *Topic) DeepCopy() *Topic
- func (in *Topic) DeepCopyInto(out *Topic)
- func (in *Topic) DeepCopyObject() runtime.Object
- func (in Topic) FullName() string
- func (in *Topic) Hash() (string, error)
- func (in *Topic) NeedsSynchronization(hash string) bool
- func (in Topic) RemoveDataWhenDeleted() bool
- type TopicACL
- type TopicACLs
- type TopicList
- type TopicSpec
- type TopicStatus
- type User
Constants ¶
const ( EventRolloutComplete = "RolloutComplete" EventFailedPrepare = "FailedPrepare" EventFailedSynchronization = "FailedSynchronization" RemoveDataAnnotation = "kafka.nais.io/removeDataWhenResourceIsDeleted" TeamNameLength = 20 AppNameLength = 30 AivenSyncFailureThreshold = time.Hour * 12 )
Variables ¶
var ( // GroupVersion is group version used to register these objects GroupVersion = schema.GroupVersion{Group: "kafka.nais.io", Version: "v1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme )
Functions ¶
Types ¶
type Config ¶
type Config struct { // CleanupPolicy is either "delete" or "compact" or both. // This designates the retention policy to use on old log segments. // +nais:doc:Default="delete" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_cleanup.policy" // +kubebuilder:validation:Enum=delete;compact;"compact,delete" CleanupPolicy *string `json:"cleanupPolicy,omitempty"` // When a producer sets acks to "all" (or "-1"), `min.insync.replicas` specifies the minimum number of replicas // that must acknowledge a write for the write to be considered successful. // +nais:doc:Default="2" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_min.insync.replicas" // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=7 MinimumInSyncReplicas *int `json:"minimumInSyncReplicas,omitempty"` // The default number of log partitions per topic. // +nais:doc:Default="1" // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=1000000 Partitions *int `json:"partitions,omitempty"` // The default replication factor for created topics. // +nais:doc:Default="3" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#replication" // +kubebuilder:validation:Minimum=2 Replication *int `json:"replication,omitempty"` // Configuration controls the maximum size a partition can grow to before we will discard old log segments // to free up space if we are using the "delete" retention policy. By default there is no size limit only a time limit. // Since this limit is enforced at the partition level, multiply it by the number of partitions to compute the topic retention in bytes. // +nais:doc:Default="-1" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_retention.bytes" RetentionBytes *int `json:"retentionBytes,omitempty"` // The number of hours to keep a log file before deleting it. 
// +nais:doc:Default="168" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_retention.ms" // +kubebuilder:validation:Maximum=2562047788015 RetentionHours *int `json:"retentionHours,omitempty"` // The number of hours after which Kafka will force the log to roll even if the segment file isn't full to ensure // that retention can delete or compact old data. // +nais:doc:Default="168" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_segment.ms" // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=8760 SegmentHours *int `json:"segmentHours,omitempty"` // The largest record batch size allowed by Kafka (after compression if compression is enabled). // If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased // so that they can fetch record batches this large. In the latest message format version, records are always grouped // into batches for efficiency. In previous message format versions, uncompressed records are not grouped into // batches and this limit only applies to a single record in that case. 
// +nais:doc:Default="1048588" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_max.message.bytes" // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=5242880 MaxMessageBytes *int `json:"maxMessageBytes,omitempty"` // MinCompactionLagMs indicates the minimum time a message will remain uncompacted in the log // +nais:doc:Default="0" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_min.compaction.lag.ms" // +kubebuilder:validation:Minimum=0 MinCompactionLagMs *int `json:"minCompactionLagMs,omitempty"` // MaxCompactionLagMs indicates the maximum time a message will remain ineligible for compaction in the log // +nais:doc:Default="Inf" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_max.compaction.lag.ms" // +kubebuilder:validation:Minimum=0 MaxCompactionLagMs *int `json:"maxCompactionLagMs,omitempty"` // MinCleanableDirtyRatio indicates the minimum ratio of dirty log to retention size to initiate log compaction // +nais:doc:Default="50%" // +nais:doc:Link="https://kafka.apache.org/33/documentation.html#topicconfigs_min.cleanable.dirty.ratio" MinCleanableDirtyRatioPercent *intstr.IntOrString `json:"minCleanableDirtyRatioPercent,omitempty"` }
func (*Config) ApplyDefaults ¶
func (cfg *Config) ApplyDefaults()
Apply default values to Topic Config where the nil-value is not what we want
func (*Config) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (*Config) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Stream ¶
type Stream struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec StreamSpec `json:"spec"` Status *StreamStatus `json:"status,omitempty"` }
+kubebuilder:object:root=true +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.synchronizationState"
func (*Stream) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stream.
func (*Stream) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Stream) DeepCopyObject ¶
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*Stream) NeedsSynchronization ¶
func (*Stream) TopicPrefix ¶
func (*Stream) TopicWildcard ¶
type StreamList ¶
type StreamList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []Stream `json:"items"` }
+kubebuilder:object:root=true
func (*StreamList) DeepCopy ¶
func (in *StreamList) DeepCopy() *StreamList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamList.
func (*StreamList) DeepCopyInto ¶
func (in *StreamList) DeepCopyInto(out *StreamList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*StreamList) DeepCopyObject ¶
func (in *StreamList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type StreamSpec ¶
type StreamSpec struct {
Pool string `json:"pool"`
}
func (*StreamSpec) DeepCopy ¶
func (in *StreamSpec) DeepCopy() *StreamSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSpec.
func (*StreamSpec) DeepCopyInto ¶
func (in *StreamSpec) DeepCopyInto(out *StreamSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type StreamStatus ¶
type StreamStatus struct { SynchronizationState string `json:"synchronizationState,omitempty"` SynchronizationHash string `json:"synchronizationHash,omitempty"` SynchronizationTime string `json:"synchronizationTime,omitempty"` Errors []string `json:"errors,omitempty"` Message string `json:"message,omitempty"` FullyQualifiedTopicPrefix string `json:"fullyQualifiedTopicPrefix,omitempty"` }
func (*StreamStatus) DeepCopy ¶
func (in *StreamStatus) DeepCopy() *StreamStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamStatus.
func (*StreamStatus) DeepCopyInto ¶
func (in *StreamStatus) DeepCopyInto(out *StreamStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Topic ¶
type Topic struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec TopicSpec `json:"spec"` Status *TopicStatus `json:"status,omitempty"` }
+kubebuilder:object:root=true +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.synchronizationState" +kubebuilder:printcolumn:name="Fully Qualified Name",type="string",JSONPath=".status.fullyQualifiedName"
func ExampleTopicForDocumentation ¶
func ExampleTopicForDocumentation() *Topic
func (*Topic) ApplyDefaults ¶
func (*Topic) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topic.
func (*Topic) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Topic) DeepCopyObject ¶
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*Topic) NeedsSynchronization ¶
func (Topic) RemoveDataWhenDeleted ¶
type TopicACL ¶
type TopicACL struct { // Access type granted for an application. // Defaults to `readwrite`. // +kubebuilder:validation:Enum=read;write;readwrite Access string `json:"access"` // The name of the specified application Application string `json:"application"` // The team of the specified application Team string `json:"team"` }
TopicACL describes the access granted for the topic.
func (*TopicACL) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicACL.
func (*TopicACL) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type TopicACLs ¶
type TopicACLs []TopicACL
func (TopicACLs) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicACLs.
func (TopicACLs) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type TopicList ¶
type TopicList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []Topic `json:"items"` }
+kubebuilder:object:root=true
func (*TopicList) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicList.
func (*TopicList) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*TopicList) DeepCopyObject ¶
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type TopicSpec ¶
type TopicSpec struct { Pool string `json:"pool"` Config *Config `json:"config,omitempty"` ACL TopicACLs `json:"acl"` }
TopicSpec is a specification of the desired behavior of the topic.
func (*TopicSpec) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicSpec.
func (*TopicSpec) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type TopicStatus ¶
type TopicStatus struct { SynchronizationState string `json:"synchronizationState,omitempty"` SynchronizationHash string `json:"synchronizationHash,omitempty"` SynchronizationTime string `json:"synchronizationTime,omitempty"` CredentialsExpiryTime string `json:"credentialsExpiryTime,omitempty"` Errors []string `json:"errors,omitempty"` Message string `json:"message,omitempty"` FullyQualifiedName string `json:"fullyQualifiedName,omitempty"` LatestAivenSyncFailure string `json:"latestAivenSyncFailure,omitempty"` }
func (*TopicStatus) DeepCopy ¶
func (in *TopicStatus) DeepCopy() *TopicStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopicStatus.
func (*TopicStatus) DeepCopyInto ¶
func (in *TopicStatus) DeepCopyInto(out *TopicStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.