package pkg

v0.0.0-...-0937915

Published: Apr 12, 2024 License: Apache-2.0 Imports: 27 Imported by: 0

Documentation

Constants

const (
	SERVICE_CODE_KEY = "service_code"
	QUOTA_CODE_KEY   = "quota_code"
)
const (
	QUOTA_VPCS_PER_REGION                 string = "L-F678F1CE"
	QUOTA_SUBNETS_PER_VPC                 string = "L-407747CB"
	QUOTA_ROUTES_PER_ROUTE_TABLE          string = "L-93826ACB"
	QUOTA_INTERFACE_VPC_ENDPOINTS_PER_VPC string = "L-29B6F2EB"
	QUOTA_ROUTE_TABLES_PER_VPC            string = "L-589F43AA"
	QUOTA_IPV4_BLOCKS_PER_VPC             string = "L-83CA0A9D"
	SERVICE_CODE_VPC                      string = "vpc"
)
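
These service and quota codes are the inputs to the AWS Service Quotas API. Below is a minimal sketch of fetching one quota value with aws-sdk-go v1; the region and printed message are illustrative, and credentials are assumed to come from the environment:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicequotas"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := servicequotas.New(sess)

	// "vpc" and "L-F678F1CE" mirror SERVICE_CODE_VPC and QUOTA_VPCS_PER_REGION above.
	out, err := client.GetServiceQuota(&servicequotas.GetServiceQuotaInput{
		ServiceCode: aws.String("vpc"),
		QuotaCode:   aws.String("L-F678F1CE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("VPCs per region quota: %v\n", aws.Float64Value(out.Quota.Value))
}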

Variables

var AllocatedStorage *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_allocatedstorage"),
	"The amount of allocated storage in bytes.",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var DBInstanceClass *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_dbinstanceclass"),
	"The DB instance class (type).",
	[]string{"aws_region", "dbinstance_identifier", "instance_class"},
	nil,
)
var DBInstanceStatus *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_dbinstancestatus"),
	"The instance status.",
	[]string{"aws_region", "dbinstance_identifier", "instance_status"},
	nil,
)
var DBMaxConnections = map[string]map[string]int64{
	"db.t2.micro": map[string]int64{
		"default": 87,
	},
	"db.t2.small": map[string]int64{
		"default":          150,
		"default.mysql5.7": 150,
	},
	"db.t3.micro": map[string]int64{
		"default":            112,
		"default.postgres10": 112,
		"default.postgres11": 112,
		"default.postgres12": 112,
		"default.postgres13": 112,
		"default.postgres14": 112,
	},
	"db.t3.small": map[string]int64{
		"default":            225,
		"default.postgres10": 225,
		"default.postgres11": 225,
		"default.postgres12": 225,
		"default.postgres13": 225,
		"default.postgres14": 225,
	},
	"db.t3.medium": map[string]int64{
		"default":            550,
		"default.postgres10": 550,
		"default.postgres11": 550,
		"default.postgres12": 550,
		"default.postgres13": 550,
		"default.postgres14": 550,
	},
	"db.m3.medium": map[string]int64{
		"default": 392,
	},
	"db.m3.large": map[string]int64{
		"default": 801,
	},
	"db.m3.2xlarge": map[string]int64{
		"default": 3379,
	},
	"db.m4.large": map[string]int64{
		"default":            823,
		"default.postgres10": 823,
		"default.postgres11": 823,
		"default.postgres12": 823,
		"default.postgres13": 823,
		"default.postgres14": 823,
	},
	"db.m5.large": map[string]int64{
		"default":            823,
		"default.postgres10": 823,
		"default.postgres11": 823,
		"default.postgres12": 823,
		"default.postgres13": 823,
		"default.postgres14": 823,
	},
	"db.m5.xlarge": map[string]int64{
		"default":            1646,
		"default.postgres10": 1646,
		"default.postgres11": 1646,
		"default.postgres12": 1646,
		"default.postgres13": 1646,
		"default.postgres14": 1646,
	},
	"db.m5.2xlarge": map[string]int64{
		"default":            3429,
		"default.postgres10": 3429,
		"default.postgres11": 3429,
		"default.postgres12": 3429,
		"default.postgres13": 3429,
		"default.postgres14": 3429,
	},
	"db.m5.4xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.m5.16xlarge": map[string]int64{
		"default":                 6000,
		"default.aurora-mysql5.7": 6000,
	},
	"db.m5d.large": map[string]int64{
		"default":            823,
		"default.postgres11": 823,
		"default.postgres12": 823,
		"default.postgres13": 823,
		"default.postgres14": 823,
	},
	"db.m5d.xlarge": map[string]int64{
		"default":            1646,
		"default.postgres11": 1646,
		"default.postgres12": 1646,
		"default.postgres13": 1646,
		"default.postgres14": 1646,
	},
	"db.m5d.2xlarge": map[string]int64{
		"default":            3429,
		"default.postgres11": 3429,
		"default.postgres12": 3429,
		"default.postgres13": 3429,
		"default.postgres14": 3429,
	},
	"db.m5d.4xlarge": map[string]int64{
		"default":            5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.r4.large": map[string]int64{
		"default":          1301,
		"default.mysql5.7": 1301,
	},
	"db.r4.4xlarge": map[string]int64{
		"default":          10410,
		"default.mysql5.7": 10410,
	},

	"db.r4.8xlarge": map[string]int64{
		"default":          20820,
		"default.mysql5.7": 20820,
	},
	"db.r5.large": map[string]int64{
		"default":            1802,
		"default.postgres10": 1802,
		"default.postgres11": 1802,
		"default.postgres12": 1802,
		"default.postgres13": 1802,
		"default.postgres14": 1802,
	},
	"db.r5.xlarge": map[string]int64{
		"default":            2730,
		"default.mysql5.7":   2730,
		"default.postgres10": 3604,
		"default.postgres11": 3604,
		"default.postgres12": 3604,
		"default.postgres13": 3604,
		"default.postgres14": 3604,
	},
	"db.r5.2xlarge": map[string]int64{
		"default":                 3000,
		"default.aurora-mysql5.7": 3000,
	},
	"db.r5.4xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.r5.8xlarge": map[string]int64{
		"default":          16000,
		"default.mysql5.7": 16000,
	},
	"db.r5.12xlarge": map[string]int64{
		"default":          16000,
		"default.mysql5.7": 16000,
	},
	"db.r5.16xlarge": map[string]int64{
		"default":          16000,
		"default.mysql5.7": 16000,
	},
	"db.r5.24xlarge": map[string]int64{
		"default":          16000,
		"default.mysql5.7": 16000,
	},
	"db.m6g.large": map[string]int64{
		"default":            901,
		"default.postgres12": 901,
		"default.postgres13": 901,
		"default.postgres14": 901,
	},
	"db.m6g.xlarge": map[string]int64{
		"default":            1705,
		"default.postgres10": 1705,
		"default.postgres11": 1705,
		"default.postgres12": 1705,
		"default.postgres13": 1705,
		"default.postgres14": 1705,
	},
	"db.m6g.2xlarge": map[string]int64{
		"default":            3410,
		"default.postgres10": 3410,
		"default.postgres11": 3410,
		"default.postgres12": 3410,
		"default.postgres13": 3410,
		"default.postgres14": 3410,
	},
	"db.m6g.4xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.m6g.8xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.m6i.2xlarge": map[string]int64{
		"default":            3410,
		"default.postgres10": 3410,
		"default.postgres11": 3410,
		"default.postgres12": 3410,
		"default.postgres13": 3410,
		"default.postgres14": 3410,
	},
	"db.r6i.16xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.m7g.large": map[string]int64{
		"default":            901,
		"default.postgres12": 901,
		"default.postgres13": 901,
		"default.postgres14": 901,
	},
	"db.m7g.xlarge": map[string]int64{
		"default":            1705,
		"default.postgres10": 1705,
		"default.postgres11": 1705,
		"default.postgres12": 1705,
		"default.postgres13": 1705,
		"default.postgres14": 1705,
	},
	"db.m7g.2xlarge": map[string]int64{
		"default":            3410,
		"default.postgres10": 3410,
		"default.postgres11": 3410,
		"default.postgres12": 3410,
		"default.postgres13": 3410,
		"default.postgres14": 3410,
	},
	"db.m7g.4xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
	"db.m7g.8xlarge": map[string]int64{
		"default":            5000,
		"default.postgres10": 5000,
		"default.postgres11": 5000,
		"default.postgres12": 5000,
		"default.postgres13": 5000,
		"default.postgres14": 5000,
	},
}

DBMaxConnections is a hardcoded map from instance type and DB parameter group name to the max_connections value. It is a crude workaround: by default the DB parameter group defines `max_connections` as a formula that is hard to parse and process in code, and the formula contains a variable whose value is unknown to us (DBInstanceClassMemory). AWS has no API that returns the actual `max_connections` value. For Aurora see: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Managing.Performance.html For MySQL: {DBInstanceClassMemory/12582880} --> Memory (in GiB) * 1024 * 1024 * 1024 / 12582880
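
As a sketch of how this map might be consulted, assuming the intended lookup order is the exact parameter group first, then the "default" entry (the helper name is hypothetical):

// lookupMaxConnections is a hypothetical helper around DBMaxConnections.
// For MySQL the underlying formula is {DBInstanceClassMemory/12582880},
// e.g. an 8 GiB instance gives 8*1024*1024*1024/12582880 ≈ 682 connections.
func lookupMaxConnections(instanceClass, parameterGroup string) (int64, bool) {
	groups, ok := DBMaxConnections[instanceClass]
	if !ok {
		return 0, false // unmapped instance class; see MaxConnectionsMappingError
	}
	if v, ok := groups[parameterGroup]; ok {
		return v, true
	}
	v, ok := groups["default"]
	return v, ok
}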

var EOLInfos *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_eol_info"),
	"The EOL date and status for the DB engine type and version.",
	[]string{"aws_region", "dbinstance_identifier", "engine", "engine_version", "eol_date", "eol_status"},
	nil,
)
var EngineVersion *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_engineversion"),
	"The DB engine type and version.",
	[]string{"aws_region", "dbinstance_identifier", "engine", "engine_version", "aws_account_id"},
	nil,
)
var LatestRestorableTime *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_latestrestorabletime"),
	"Latest restorable time (UTC date timestamp).",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var LogsAmount *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_logs_amount"),
	"The amount of existent log files",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var LogsStorageSize *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_logsstorage_size_bytes"),
	"The amount of storage consumed by log files (in bytes)",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var MSKInfos *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "msk_eol_info"),
	"The MSK eol date and status for the version.",
	[]string{"aws_region", "cluster_name", "msk_version", "eol_date", "eol_status"},
	nil,
)
var MaxConnections *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_maxconnections"),
	"The DB's max_connections value",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var MaxConnectionsMappingError *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_maxconnections_error"),
	"Indicates no mapping found for instance/parameter group.",
	[]string{"aws_region", "dbinstance_identifier", "instance_class"},
	nil,
)
var PendingMaintenanceActions *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_pendingmaintenanceactions"),
	"Pending maintenance actions for a RDS instance. 0 indicates no available maintenance and a separate metric with a value of 1 will be published for every separate action.",
	[]string{"aws_region", "dbinstance_identifier", "action", "auto_apply_after", "current_apply_date", "description"},
	nil,
)
var PubliclyAccessible *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_publiclyaccessible"),
	"Indicates if the DB is publicly accessible",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var RDS_LOGS_METRICS_TTL = "LOGS_METRICS_TTL"

RDS_LOGS_METRICS_TTL names the environment variable that sets the TTL for RDS log-related metrics. Fetching the log metrics requires an API call per instance; since this can cause rate-limit problems with the AWS API, these metrics are cached for this amount of time before being requested again.

var RDS_LOGS_METRICS_TTL_DEFAULT = 300
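
A sketch of resolving the effective TTL, assuming GetEnvIntValue (below) yields a nil pointer or an error when the variable is unset:

// resolveLogsMetricsTTL is a hypothetical helper combining the variable
// name and its default: LOGS_METRICS_TTL overrides 300 when set.
func resolveLogsMetricsTTL() int {
	if v, err := GetEnvIntValue(RDS_LOGS_METRICS_TTL); err == nil && v != nil {
		return *v
	}
	return RDS_LOGS_METRICS_TTL_DEFAULT
}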
var RDS_LOGS_METRICS_WORKERS = "LOGS_METRICS_WORKERS"

RDS log metrics are requested in parallel by a worker pool; this variable names the environment variable that sets the number of workers.

var RDS_LOGS_METRICS_WORKERS_DEFAULT = 10
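
A minimal worker-pool sketch of the fan-out described above (imports "sync"; the instance identifiers and the per-instance fetch function are placeholders, not the exporter's actual internals):

// fetchLogsMetrics fans instance identifiers out to a fixed pool of
// workers; fetchOne stands in for the per-instance log API call.
func fetchLogsMetrics(instances []string, workers int, fetchOne func(string)) {
	jobs := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range jobs {
				fetchOne(id)
			}
		}()
	}
	for _, id := range instances {
		jobs <- id
	}
	close(jobs)
	wg.Wait()
}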
var RedisVersion *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "elasticache_redisversion"),
	"The ElastiCache engine type and version.",
	[]string{"aws_region", "replication_group_id", "engine", "engine_version", "aws_account_id"},
	nil,
)
var StorageEncrypted *prometheus.Desc = prometheus.NewDesc(
	prometheus.BuildFQName(namespace, "", "rds_storageencrypted"),
	"Indicates if the DB storage is encrypted",
	[]string{"aws_region", "dbinstance_identifier"},
	nil,
)
var TransitGatewaysQuota *prometheus.Desc
var TransitGatewaysUsage *prometheus.Desc

Functions

func GetEOLStatus

func GetEOLStatus(eol string, thresholds []Threshold) (string, error)

GetEOLStatus determines the EOL status from the number of days remaining until the given EOL date.
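
A usage sketch; the threshold names, day counts, and date format are illustrative assumptions:

thresholds := []Threshold{
	{Name: "red", Days: 30},
	{Name: "yellow", Days: 90},
	{Name: "green", Days: 365},
}
status, err := GetEOLStatus("2025-06-30", thresholds)
if err != nil {
	// e.g. the EOL date could not be parsed
}
_ = status // under these assumptions, "red" when EOL is within 30 days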

func GetEnvIntValue

func GetEnvIntValue(envname string) (*int, error)

func GetHostedZoneLimitWithBackoff

func GetHostedZoneLimitWithBackoff(client awsclient.Client, ctx context.Context, hostedZoneId *string, maxTries int, logger log.Logger) (*route53.GetHostedZoneLimitOutput, error)

func ListHostedZonesWithBackoff

func ListHostedZonesWithBackoff(client awsclient.Client, ctx context.Context, input *route53.ListHostedZonesInput, maxTries int, logger log.Logger) (*route53.ListHostedZonesOutput, error)

func WithKeyValue

func WithKeyValue(m map[string]string, key string, value string) map[string]string

WithKeyValue adds a new key/value pair to the map and returns the resulting map.
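
For example (the label names follow the metric descriptors above; the values are illustrative):

labels := map[string]string{"aws_region": "us-east-1"}
labels = WithKeyValue(labels, "dbinstance_identifier", "orders-db")
// labels: map[aws_region:us-east-1 dbinstance_identifier:orders-db]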

Types

type BaseConfig

type BaseConfig struct {
	Enabled  bool           `yaml:"enabled"`
	Interval *time.Duration `yaml:"interval"`
	Timeout  *time.Duration `yaml:"timeout"`
	CacheTTL *time.Duration `yaml:"cache_ttl"`
}

type Config

type Config struct {
	RdsConfig         RDSConfig         `yaml:"rds"`
	VpcConfig         VPCConfig         `yaml:"vpc"`
	Route53Config     Route53Config     `yaml:"route53"`
	EC2Config         EC2Config         `yaml:"ec2"`
	ElastiCacheConfig ElastiCacheConfig `yaml:"elasticache"`
	MskConfig         MSKConfig         `yaml:"msk"`
}

func LoadExporterConfiguration

func LoadExporterConfiguration(logger log.Logger, configFile string) (*Config, error)
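
A loading sketch; the file name is illustrative, the YAML keys are inferred from the struct tags in this section, and logger is assumed to be an already-constructed go-kit log.Logger:

// config.yaml (plausible shape, derived from the yaml struct tags):
//
//	rds:
//	  enabled: true
//	  interval: 5m
//	  regions: ["us-east-1"]
cfg, err := LoadExporterConfiguration(logger, "config.yaml")
if err != nil {
	// handle the error
}
if cfg.RdsConfig.Enabled {
	// RDS collection will run for cfg.RdsConfig.Regions
}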

type EC2Config

type EC2Config struct {
	BaseConfig `yaml:"base,inline"`
	Regions    []string `yaml:"regions"`
}

type EC2Exporter

type EC2Exporter struct {
	// contains filtered or unexported fields
}

func NewEC2Exporter

func NewEC2Exporter(sessions []*session.Session, logger log.Logger, config EC2Config, awsAccountId string) *EC2Exporter

func (*EC2Exporter) Collect

func (e *EC2Exporter) Collect(ch chan<- prometheus.Metric)

func (*EC2Exporter) CollectLoop

func (e *EC2Exporter) CollectLoop()

func (*EC2Exporter) Describe

func (e *EC2Exporter) Describe(ch chan<- *prometheus.Desc)
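
The exporters in this package share a lifecycle: construct with AWS sessions, start CollectLoop in a goroutine, and register with Prometheus. A sketch using the EC2 exporter, where the session, logger, config, and account ID are illustrative and the serve-from-cache behavior is an assumption based on CollectLoop:

sess := session.Must(session.NewSession())
exporter := NewEC2Exporter([]*session.Session{sess}, logger, ec2Config, "123456789012")
go exporter.CollectLoop()         // periodically refreshes the cached metrics
prometheus.MustRegister(exporter) // Collect/Describe serve from that cache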

type EOLInfo

type EOLInfo struct {
	Engine  string `yaml:"engine"`
	EOL     string `yaml:"eol"`
	Version string `yaml:"version"`
}

type EOLKey

type EOLKey struct {
	Engine  string
	Version string
}

type ElastiCacheConfig

type ElastiCacheConfig struct {
	BaseConfig `yaml:"base,inline"`
	Regions    []string `yaml:"regions"`
}

type ElastiCacheExporter

type ElastiCacheExporter struct {
	// contains filtered or unexported fields
}

func NewElastiCacheExporter

func NewElastiCacheExporter(sessions []*session.Session, logger log.Logger, config ElastiCacheConfig, awsAccountId string) *ElastiCacheExporter

NewElastiCacheExporter creates a new ElastiCacheExporter instance

func (*ElastiCacheExporter) Collect

func (e *ElastiCacheExporter) Collect(ch chan<- prometheus.Metric)

func (*ElastiCacheExporter) CollectLoop

func (e *ElastiCacheExporter) CollectLoop()

func (*ElastiCacheExporter) Describe

func (e *ElastiCacheExporter) Describe(ch chan<- *prometheus.Desc)

type MSKConfig

type MSKConfig struct {
	BaseConfig `yaml:"base,inline"`
	Regions    []string    `yaml:"regions"`
	MSKInfos   []MSKInfo   `yaml:"msk_info"`
	Thresholds []Threshold `yaml:"thresholds"`
}

type MSKExporter

type MSKExporter struct {
	// contains filtered or unexported fields
}

func NewMSKExporter

func NewMSKExporter(sessions []*session.Session, logger log.Logger, config MSKConfig, awsAccountId string) *MSKExporter

NewMSKExporter creates a new MSKExporter instance

func (*MSKExporter) Collect

func (e *MSKExporter) Collect(ch chan<- prometheus.Metric)

func (*MSKExporter) CollectLoop

func (e *MSKExporter) CollectLoop()

func (*MSKExporter) Describe

func (e *MSKExporter) Describe(ch chan<- *prometheus.Desc)

type MSKInfo

type MSKInfo struct {
	EOL     string `yaml:"eol"`
	Version string `yaml:"version"`
}

type MetricProxy

type MetricProxy struct {
	// contains filtered or unexported fields
}

func NewMetricProxy

func NewMetricProxy() *MetricProxy

func (*MetricProxy) GetMetricById

func (mp *MetricProxy) GetMetricById(id string) (*MetricProxyItem, error)

func (*MetricProxy) StoreMetricById

func (mp *MetricProxy) StoreMetricById(id string, value interface{}, ttl int)
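
A usage sketch (the id and value are illustrative, and the ttl is assumed to be in seconds):

mp := NewMetricProxy()
mp.StoreMetricById("rds-logs-orders-db", 42, 300)
if item, err := mp.GetMetricById("rds-logs-orders-db"); err == nil {
	_ = item // a *MetricProxyItem wrapping the stored value
}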

type MetricProxyItem

type MetricProxyItem struct {
	// contains filtered or unexported fields
}

type MetricsCache

type MetricsCache struct {
	// contains filtered or unexported fields
}

func NewMetricsCache

func NewMetricsCache(ttl time.Duration) *MetricsCache

func (*MetricsCache) AddMetric

func (mc *MetricsCache) AddMetric(metric prometheus.Metric)

AddMetric adds a metric to the cache

func (*MetricsCache) GetAllMetrics

func (mc *MetricsCache) GetAllMetrics() []prometheus.Metric

GetAllMetrics iterates over all cached metrics, discards expired ones, and returns the remainder.
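
A sketch of the cache's role inside a collector, assuming the standard time and prometheus imports (the storage value and label values are illustrative):

mc := NewMetricsCache(5 * time.Minute)
mc.AddMetric(prometheus.MustNewConstMetric(
	AllocatedStorage, prometheus.GaugeValue,
	107374182400, // 100 GiB in bytes
	"us-east-1", "orders-db", // aws_region, dbinstance_identifier
))
for _, m := range mc.GetAllMetrics() {
	ch <- m // ch is the chan<- prometheus.Metric passed to Collect
}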

type RDSConfig

type RDSConfig struct {
	BaseConfig `yaml:"base,inline"`
	Regions    []string    `yaml:"regions"`
	EOLInfos   []EOLInfo   `yaml:"eol_info"`
	Thresholds []Threshold `yaml:"thresholds"`
}

type RDSExporter

type RDSExporter struct {
	// contains filtered or unexported fields
}

RDSExporter defines an instance of the RDS Exporter

func NewRDSExporter

func NewRDSExporter(sessions []*session.Session, logger log.Logger, config RDSConfig, awsAccountId string) *RDSExporter

NewRDSExporter creates a new RDSExporter instance

func (*RDSExporter) Collect

func (e *RDSExporter) Collect(ch chan<- prometheus.Metric)

Collect is used by the Prometheus client to collect and return the metric values

func (*RDSExporter) CollectLoop

func (e *RDSExporter) CollectLoop()

func (*RDSExporter) Describe

func (e *RDSExporter) Describe(ch chan<- *prometheus.Desc)

Describe is used by the Prometheus client to return a description of the metrics

type RDSLogsMetrics

type RDSLogsMetrics struct {
	// contains filtered or unexported fields
}

RDSLogsMetrics stores log file data for RDS instances. It is used to store that data in the MetricProxy.

type Route53Config

type Route53Config struct {
	BaseConfig `yaml:"base,inline"`
	Region     string `yaml:"region"` // Use only a single Region for now, as the current metric is global
}

type Route53Exporter

type Route53Exporter struct {
	RecordsPerHostedZoneQuota  *prometheus.Desc
	RecordsPerHostedZoneUsage  *prometheus.Desc
	HostedZonesPerAccountQuota *prometheus.Desc
	HostedZonesPerAccountUsage *prometheus.Desc
	LastUpdateTime             *prometheus.Desc
	Cancel                     context.CancelFunc
	// contains filtered or unexported fields
}

func NewRoute53Exporter

func NewRoute53Exporter(sess *session.Session, logger log.Logger, config Route53Config, awsAccountId string) *Route53Exporter

func (*Route53Exporter) Collect

func (e *Route53Exporter) Collect(ch chan<- prometheus.Metric)

func (*Route53Exporter) CollectLoop

func (e *Route53Exporter) CollectLoop()

CollectLoop runs indefinitely to collect the Route53 metrics into a cache. Metrics are only written to the cache once all have been collected, to ensure we never expose a partial collection.
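
A sketch of that all-or-nothing idea, with illustrative types rather than the exporter's actual fields (imports "sync" and the prometheus client):

// snapshotCache illustrates the pattern: build the complete batch first,
// then swap it in under a lock, so readers never see a partial collection.
type snapshotCache struct {
	mu      sync.Mutex
	metrics []prometheus.Metric
}

func (c *snapshotCache) update(collect func() ([]prometheus.Metric, error)) error {
	batch, err := collect()
	if err != nil {
		return err // keep serving the previous complete snapshot
	}
	c.mu.Lock()
	c.metrics = batch
	c.mu.Unlock()
	return nil
}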

func (*Route53Exporter) Describe

func (e *Route53Exporter) Describe(ch chan<- *prometheus.Desc)

type Threshold

type Threshold struct {
	Name string `yaml:"name"`
	Days int    `yaml:"days"`
}

type VPCCollector

type VPCCollector struct {
	// contains filtered or unexported fields
}

type VPCConfig

type VPCConfig struct {
	BaseConfig `yaml:"base,inline"`
	Regions    []string `yaml:"regions"`
}

type VPCExporter

type VPCExporter struct {
	VpcsPerRegionQuota               *prometheus.Desc
	VpcsPerRegionUsage               *prometheus.Desc
	SubnetsPerVpcQuota               *prometheus.Desc
	SubnetsPerVpcUsage               *prometheus.Desc
	RoutesPerRouteTableQuota         *prometheus.Desc
	RoutesPerRouteTableUsage         *prometheus.Desc
	InterfaceVpcEndpointsPerVpcQuota *prometheus.Desc
	InterfaceVpcEndpointsPerVpcUsage *prometheus.Desc
	RouteTablesPerVpcQuota           *prometheus.Desc
	RouteTablesPerVpcUsage           *prometheus.Desc
	IPv4BlocksPerVpcQuota            *prometheus.Desc
	IPv4BlocksPerVpcUsage            *prometheus.Desc
	// contains filtered or unexported fields
}

func NewVPCExporter

func NewVPCExporter(sess []*session.Session, logger log.Logger, config VPCConfig, awsAccountId string) *VPCExporter

func (*VPCExporter) Collect

func (e *VPCExporter) Collect(ch chan<- prometheus.Metric)

func (*VPCExporter) CollectInRegion

func (e *VPCExporter) CollectInRegion(session *session.Session, region *string, wg *sync.WaitGroup)

func (*VPCExporter) CollectLoop

func (e *VPCExporter) CollectLoop()

func (*VPCExporter) Describe

func (e *VPCExporter) Describe(ch chan<- *prometheus.Desc)

func (*VPCExporter) GetQuotaValue

func (e *VPCExporter) GetQuotaValue(client *servicequotas.ServiceQuotas, serviceCode string, quotaCode string) (float64, error)

Directories

Path Synopsis
mock
Package mock is a generated GoMock package.
