bosun.org: bosun.org/cmd/scollector/collectors Index | Files

package collectors

import "bosun.org/cmd/scollector/collectors"

Index

Package Files

apache_mod_info_linux.go aws.go awsBilling.go azureeabilling.go cadvisor.go cassandra_unix.go chef_linux.go collectors.go conntrack_linux.go dell_hw.go disk.go disk_linux.go elasticsearch.go exim_linux.go extrahop.go fake.go fastly.go github.go google_analytics.go google_webmaster.go haproxy_unix.go hbase_unix.go hi.go httpunit.go icmp.go ifstat_linux.go interval.go keepalived_linux.go local_listener.go memcached_unix.go metadata_linux.go netbackup.go nexpose.go ntp_unix.go opentsdb.go oracle.go processes_linux.go procstats_linux.go program.go program_linux.go puppet.go puppet_linux.go rabbitmq.go railgun_linux.go redis_counters.go redis_linux.go riak.go snmp.go snmp_bridge.go snmp_cisco.go snmp_ciscobgp.go snmp_fortinet.go snmp_ifaces.go snmp_ips.go snmp_lag.go snmp_sys.go stream.go systemd_linux.go tag_override.go varnish_unix.go vsphere.go yum_update_linux.go

Constants

const (
    VRRPInstanceTable = ".1.3.6.1.4.1.9586.100.5.2.3.1"
    VRRPAddressTable  = ".1.3.6.1.4.1.9586.100.5.2.6.1"
)
const (
    DescRmqBackingQueueStatusAvgAckEgressRate  = "Rate at which unacknowledged message records leave RAM, e.g. because acks arrive or unacked messages are paged out"
    DescRmqBackingQueueStatusAvgAckIngressRate = "Rate at which unacknowledged message records enter RAM, e.g. because messages are delivered requiring acknowledgement"
    DescRmqBackingQueueStatusAvgEgressRate     = "Average egress (outbound) rate, not including messages that go straight through to auto-acking consumers."
    DescRmqBackingQueueStatusAvgIngressRate    = "Average ingress (inbound) rate, not including messages that go straight through to auto-acking consumers."
    DescRmqBackingQueueStatusLen               = "Total backing queue length."
    DescRmqConsumers                           = "Number of consumers."
    DescRmqConsumerUtilisation                 = "" /* 205 byte string literal not displayed */
    DescRmqDiskFreeAlarm                       = "Whether the disk alarm has gone off."
    DescRmqDiskFree                            = "Disk free space in bytes."
    DescRmqDiskFreeLimit                       = "Point at which the disk alarm will go off."
    DescRmqDownSlaveNodes                      = "Count of down nodes having a copy of the queue."
    DescRmqFDTotal                             = "File descriptors available."
    DescRmqFDUsed                              = "Used file descriptors."
    DescRmqIOReadAvgTime                       = "Average wall time (milliseconds) for each disk read operation in the last statistics interval."
    DescRmqIOReadBytes                         = "Total number of bytes read from disk by the persister."
    DescRmqIOReadCount                         = "Total number of read operations by the persister."
    DescRmqIOReopenCount                       = "" /* 250 byte string literal not displayed */
    DescRmqIOSeekAvgTime                       = "Average wall time (milliseconds) for each seek operation in the last statistics interval."
    DescRmqIOSeekCount                         = "Total number of seek operations by the persister."
    DescRmqIOSyncAvgTime                       = "Average wall time (milliseconds) for each sync operation in the last statistics interval."
    DescRmqIOSyncCount                         = "Total number of fsync() operations by the persister."
    DescRmqIOWriteAvgTime                      = "Average wall time (milliseconds) for each write operation in the last statistics interval."
    DescRmqIOWriteBytes                        = "Total number of bytes written to disk by the persister."
    DescRmqIOWriteCount                        = "Total number of write operations by the persister."
    DescRmqMemAlarm                            = ""
    DescRmqMemLimit                            = "Point at which the memory alarm will go off."
    DescRmqMemory                              = "Bytes of memory consumed by the Erlang process associated with the queue, including stack, heap and internal structures."
    DescRmqMemUsed                             = "Memory used in bytes."
    DescRmqMessageBytesPersistent              = "Like messageBytes but counting only those messages which are persistent."
    DescRmqMessageBytesRAM                     = "Like messageBytes but counting only those messages which are in RAM."
    DescRmqMessageBytesReady                   = "Like messageBytes but counting only those messages ready to be delivered to clients."
    DescRmqMessageBytes                        = "" /* 135 byte string literal not displayed */
    DescRmqMessageBytesUnacknowledged          = "Like messageBytes but counting only those messages delivered to clients but not yet acknowledged."
    DescRmqMessagesPersistent                  = "Total number of persistent messages in the queue (will always be 0 for transient queues)."
    DescRmqMessagesRAM                         = "Total number of messages which are resident in ram."
    DescRmqMessagesReady                       = "Number of messages ready to be delivered to clients."
    DescRmqMessagesReadyRAM                    = "Number of messages from messagesReady which are resident in ram."
    DescRmqMessages                            = "Sum of ready and unacknowledged messages (queue depth)."
    DescRmqMessageStatsAck                     = "Count of acknowledged messages."
    DescRmqMessageStatsConfirm                 = "Count of messages confirmed."
    DescRmqMessageStatsDeliver                 = "Count of messages delivered in acknowledgement mode to consumers."
    DescRmqMessageStatsDeliverGet              = "Sum of deliver, deliverNoack, get, getNoack."
    DescRmqMessageStatsDeliverNoAck            = "Count of messages delivered in no-acknowledgement mode to consumers."
    DescRmqMessageStatsGet                     = "Count of messages delivered in acknowledgement mode in response to basic.get."
    DescRmqMessageStatsGetNoack                = "Count of messages delivered in no-acknowledgement mode in response to basic.get."
    DescRmqMessageStatsPublish                 = "Count of messages published."
    DescRmqMessageStatsPublishIn               = "Count of messages published \"in\" to an exchange, i.e. not taking account of routing."
    DescRmqMessageStatsPublishOut              = "Count of messages published \"out\" of an exchange, i.e. taking account of routing."
    DescRmqMessageStatsRedeliver               = "Count of subset of messages in deliverGet which had the redelivered flag set."
    DescRmqMessageStatsReturn                  = "Count of messages returned to publisher as unroutable."
    DescRmqMessagesUnacknowledged              = "Number of messages delivered to clients but not yet acknowledged."
    DescRmqMessagesUnacknowledgedRAM           = "Number of messages from messagesUnacknowledged which are resident in ram."
    DescRmqMnesiaDiskTxCount                   = "" /* 182 byte string literal not displayed */
    DescRmqMnesiaRAMTxCount                    = "" /* 191 byte string literal not displayed */
    DescRmqMsgStoreReadCount                   = "Number of messages which have been read from the message store."
    DescRmqMsgStoreWriteCount                  = "Number of messages which have been written to the message store."
    DescRmqObjecttotalsChannels                = "Overall number of channels."
    DescRmqObjectTotalsConnections             = "Overall number of connections."
    DescRmqObjectTotalsConsumers               = "Overall number of consumers."
    DescRmqObjectTotalsExchanges               = "Overall number of exchanges."
    DescRmqObjectTotalsQueues                  = "Overall number of queues."
    DescRmqPartitions                          = "Count of network partitions this node is seeing."
    DescRmqProcessors                          = "Number of cores detected and usable by Erlang."
    DescRmqProcTotal                           = "Maximum number of Erlang processes."
    DescRmqProcUsed                            = "Number of Erlang processes in use."
    DescRmqQueueIndexJournalWriteCount         = "" /* 181 byte string literal not displayed */
    DescRmqQueueIndexReadCount                 = "Number of records read from the queue index."
    DescRmqQueueIndexWriteCount                = "Number of records written to the queue index."
    DescRmqQueueTotalsMessages                 = "Overall sum of ready and unacknowledged messages (queue depth)."
    DescRmqQueueTotalsMessagesReady            = "Overall number of messages ready to be delivered to clients."
    DescRmqQueueTotalsMessagesUnacknowledged   = "Overall number of messages delivered to clients but not yet acknowledged."
    DescRmqRunning                             = "Boolean for whether this node is up. Obviously if this is false, most other stats will be missing."
    DescRmqRunQueue                            = "Average number of Erlang processes waiting to run."
    DescRmqSlaveNodes                          = "Count of nodes having a copy of the queue."
    DescRmqSocketsTotal                        = "File descriptors available for use as sockets."
    DescRmqSocketsUsed                         = "File descriptors used as sockets."
    DescRmqState                               = "The state of the queue. Unknown=> -1, Running=> 0, Syncing=> 1, Flow=> 2, Down=> 3"
    DescRmqSynchronisedSlaveNodes              = "Count of nodes having synchronised copy of the queue."
    DescRmqSyncMessages                        = "Count of already synchronised messages on a slave node."
    DescRmqUptime                              = "Node uptime in seconds."
)

Variables

var (
    // DefaultFreq is the duration between collection intervals if none is
    // specified.
    DefaultFreq = time.Second * 15

    AddTags opentsdb.TagSet

    AddProcessDotNetConfig = func(params conf.ProcessDotNet) error {
        return fmt.Errorf("process_dotnet watching not implemented on this platform")
    }
    WatchProcessesDotNet = func() {}

    KeepalivedCommunity = ""

    //TotalScollectorMemory stores the total memory used by Scollector (including CGO and WMI)
    TotalScollectorMemoryMB uint64

    MetricPrefix = ""
)
var CPU_FIELDS = []string{
    "user",
    "nice",
    "system",
    "idle",
    "iowait",
    "irq",
    "softirq",
    "steal",
    "guest",
    "guest_nice",
}

func AWS Uses

func AWS(accessKey, secretKey, region, productCodes, bucketName, bucketPath string, purgeDays int) error

func Add Uses

func Add(md *opentsdb.MultiDataPoint, name string, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string)

Add appends a new data point with given metric name, value, and tags. Tags may be nil. If tags is nil or does not contain a host key, it will be automatically added. If the value of the host key is the empty string, it will be removed (use this to prevent the normal auto-adding of the host tag).

func AddElasticIndexFilter Uses

func AddElasticIndexFilter(s string, exclude bool) error

func AddMetricFilters Uses

func AddMetricFilters(s string) error

AddMetricFilters adds the metric filters provided by the conf.

func AddProcessConfig Uses

func AddProcessConfig(params conf.ProcessParams) error

func AddSystemdServiceConfig Uses

func AddSystemdServiceConfig(params conf.ServiceParams) error

func AddTS Uses

func AddTS(md *opentsdb.MultiDataPoint, name string, ts int64, value interface{}, t opentsdb.TagSet, rate metadata.RateType, unit metadata.Unit, desc string)

AddTS is the same as Add but lets you specify the timestamp

func AddTagOverrides Uses

func AddTagOverrides(s []Collector, tagOverride []conf.TagOverride) error

AddTagOverrides adds configured tag overrides to all matching collectors.

func ExtraHop Uses

func ExtraHop(host, apikey, filterby string, filterpercent int, customMetrics []string, certMatch string, certActivityGroup int) error

ExtraHop collection registration

func GenericSnmp Uses

func GenericSnmp(cfg conf.SNMP, mib conf.MIB) (opentsdb.MultiDataPoint, error)

func HTTPUnitHiera Uses

func HTTPUnitHiera(filename string, freq time.Duration) error

func HTTPUnitPlans Uses

func HTTPUnitPlans(name string, plans *httpunit.Plans, freq time.Duration)

func HTTPUnitTOML Uses

func HTTPUnitTOML(filename string, freq time.Duration) error

func ICMP Uses

func ICMP(host string) error

ICMP registers an ICMP collector for a given host.

func InContainer Uses

func InContainer(pid string) bool

InContainer detects if a process is running in a Linux container.

func Init Uses

func Init(c *conf.Conf)

func InitFake Uses

func InitFake(fake int)

func InitPrograms Uses

func InitPrograms(cpath string)

func IsAlNum Uses

func IsAlNum(s string) bool

IsAlNum returns true if s is alphanumeric.

func IsDigit Uses

func IsDigit(s string) bool

IsDigit returns true if s consists of decimal digits.

func RabbitMQ Uses

func RabbitMQ(url string) error

RabbitMQ registers a RabbitMQ collector.

func Riak Uses

func Riak(s string) error

func Run Uses

func Run(cs []Collector) (chan *opentsdb.DataPoint, chan struct{})

Run runs specified collectors. Use nil for all collectors.

func SNMP Uses

func SNMP(cfg conf.SNMP, mibs map[string]conf.MIB) error

func SNMPBridge Uses

func SNMPBridge(cfg conf.SNMP)

SNMPBridge registers a SNMP bridge collector for the given community and host.

func SNMPCiscoASA Uses

func SNMPCiscoASA(cfg conf.SNMP)

SNMPCiscoASA registers a SNMP Cisco ASA collector for the given community and host.

func SNMPCiscoBGP Uses

func SNMPCiscoBGP(cfg conf.SNMP)

func SNMPCiscoIOS Uses

func SNMPCiscoIOS(cfg conf.SNMP)

SNMPCiscoIOS registers a SNMP CISCO IOS collector for the given community and host.

func SNMPCiscoNXOS Uses

func SNMPCiscoNXOS(cfg conf.SNMP)

SNMPCiscoNXOS registers a SNMP Cisco's NXOS collector (i.e. nexus switches) for the given community and host.

func SNMPFortinet Uses

func SNMPFortinet(cfg conf.SNMP)

SNMPFortinet registers a SNMP Fortinet collector for the given community and host.

func SNMPIPAddresses Uses

func SNMPIPAddresses(cfg conf.SNMP)

SNMPIPAddresses registers a SNMP IP addresses collector for the given community and host.

func SNMPIfaces Uses

func SNMPIfaces(cfg conf.SNMP)

SNMPIfaces registers a SNMP Interfaces collector for the given community and host.

func SNMPLag Uses

func SNMPLag(cfg conf.SNMP)

SNMPLag registers a SNMP link aggregation (LAG) collector for the given community and host.

func SNMPSys Uses

func SNMPSys(cfg conf.SNMP)

SNMPSys registers a SNMP system data collector for the given community and host.

func TSys100NStoEpoch Uses

func TSys100NStoEpoch(nsec uint64) int64

func Vsphere Uses

func Vsphere(user, pwd, host string) error

Vsphere registers a vSphere collector.

func WatchProcesses Uses

func WatchProcesses()

type ByLastModified Uses

type ByLastModified []*s3.Object

ByLastModified implements a sorter for S3 data by last modified date.

func (ByLastModified) Len Uses

func (a ByLastModified) Len() int

func (ByLastModified) Less Uses

func (a ByLastModified) Less(i, j int) bool

func (ByLastModified) Swap Uses

func (a ByLastModified) Swap(i, j int)

type Collector Uses

type Collector interface {
    Run(chan<- *opentsdb.DataPoint, <-chan struct{})
    Name() string
    Init()
    AddTagOverrides(map[string]string, opentsdb.TagSet) error
    ApplyTagOverrides(opentsdb.TagSet)
}
func Search(s []string) []Collector

Search returns all collectors matching the pattern s.

type ElasticBreakersStat Uses

type ElasticBreakersStat struct {
    EstimatedSize        string  `json:"estimated_size"`
    EstimatedSizeInBytes int     `json:"estimated_size_in_bytes"`
    LimitSize            string  `json:"limit_size"`
    LimitSizeInBytes     int     `json:"limit_size_in_bytes"`
    Overhead             float64 `json:"overhead"`
    Tripped              int     `json:"tripped"`
}

type ElasticClusterState Uses

type ElasticClusterState struct {
    MasterNode string `json:"master_node"`
}

type ElasticClusterStats Uses

type ElasticClusterStats struct {
    ClusterName string `json:"cluster_name"`
    Nodes       map[string]struct {
        Attributes struct {
            Master string `json:"master"`
        }   `json:"attributes"`
        Breakers struct {
            Fielddata ElasticBreakersStat `json:"fielddata"`
            Parent    ElasticBreakersStat `json:"parent"`
            Request   ElasticBreakersStat `json:"request"`
        }   `json:"breakers" exclude:"true"`
        FS  struct {
            Data []struct {
                AvailableInBytes     int    `json:"available_in_bytes"`
                Dev                  string `json:"dev" version:"1"`                      // 1.0 only
                DiskIoOp             int    `json:"disk_io_op" version:"1"`               // 1.0 only
                DiskIoSizeInBytes    int    `json:"disk_io_size_in_bytes" version:"1"`    // 1.0 only
                DiskQueue            string `json:"disk_queue" version:"1"`               // 1.0 only
                DiskReadSizeInBytes  int    `json:"disk_read_size_in_bytes" version:"1"`  // 1.0 only
                DiskReads            int    `json:"disk_reads" version:"1"`               // 1.0 only
                DiskServiceTime      string `json:"disk_service_time" version:"1"`        // 1.0 only
                DiskWriteSizeInBytes int    `json:"disk_write_size_in_bytes" version:"1"` // 1.0 only
                DiskWrites           int    `json:"disk_writes" version:"1"`              // 1.0 only
                FreeInBytes          int    `json:"free_in_bytes"`
                Mount                string `json:"mount"`
                Path                 string `json:"path"`
                TotalInBytes         int    `json:"total_in_bytes"`
                Type                 string `json:"type" version:"2"` // 2.0 only
            }   `json:"data"`
            Timestamp int `json:"timestamp"`
            Total     struct {
                AvailableInBytes     int    `json:"available_in_bytes"`
                DiskIoOp             int    `json:"disk_io_op" version:"1"`               // 1.0 only
                DiskIoSizeInBytes    int    `json:"disk_io_size_in_bytes" version:"1"`    // 1.0 only
                DiskQueue            string `json:"disk_queue" version:"1"`               // 1.0 only
                DiskReadSizeInBytes  int    `json:"disk_read_size_in_bytes" version:"1"`  // 1.0 only
                DiskReads            int    `json:"disk_reads" version:"1"`               // 1.0 only
                DiskServiceTime      string `json:"disk_service_time" version:"1"`        // 1.0 only
                DiskWriteSizeInBytes int    `json:"disk_write_size_in_bytes" version:"1"` // 1.0 only
                DiskWrites           int    `json:"disk_writes" version:"1"`              // 1.0 only
                FreeInBytes          int    `json:"free_in_bytes"`
                TotalInBytes         int    `json:"total_in_bytes"`
            }   `json:"total"`
        }   `json:"fs" exclude:"true"`
        Host string `json:"host"`
        HTTP struct {
            CurrentOpen int `json:"current_open"`
            TotalOpened int `json:"total_opened"`
        }   `json:"http"`
        Indices ElasticIndexDetails `json:"indices" exclude:"true"` // Stored under elastic.indices.local namespace.
        //IP      []string            `json:"ip" exclude:"true"`	// Incompatible format between 5.x and previous, and not used in collector
        JVM struct {
            BufferPools struct {
                Direct struct {
                    Count                int `json:"count"`
                    TotalCapacityInBytes int `json:"total_capacity_in_bytes"`
                    UsedInBytes          int `json:"used_in_bytes"`
                }   `json:"direct"`
                Mapped struct {
                    Count                int `json:"count"`
                    TotalCapacityInBytes int `json:"total_capacity_in_bytes"`
                    UsedInBytes          int `json:"used_in_bytes"`
                }   `json:"mapped"`
            }   `json:"buffer_pools"`
            Classes struct {
                CurrentLoadedCount int `json:"current_loaded_count" version:"2"` // 2.0 only
                TotalLoadedCount   int `json:"total_loaded_count" version:"2"`   // 2.0 only
                TotalUnloadedCount int `json:"total_unloaded_count" version:"2"` // 2.0 only
            }   `json:"classes"`
            GC  struct {
                Collectors struct {
                    Old struct {
                        CollectionCount        int `json:"collection_count"`
                        CollectionTimeInMillis int `json:"collection_time_in_millis"`
                    }   `json:"old"`
                    Young struct {
                        CollectionCount        int `json:"collection_count"`
                        CollectionTimeInMillis int `json:"collection_time_in_millis"`
                    }   `json:"young"`
                } `json:"collectors"`
            }   `json:"gc" exclude:"true"` // This is recorded manually so we can tag the GC collector type.
            Mem struct {
                HeapCommittedInBytes    int `json:"heap_committed_in_bytes" metric:"heap_committed"`
                HeapMaxInBytes          int `json:"heap_max_in_bytes"`
                HeapUsedInBytes         int `json:"heap_used_in_bytes" metric:"heap_used"`
                HeapUsedPercent         int `json:"heap_used_percent"`
                NonHeapCommittedInBytes int `json:"non_heap_committed_in_bytes"`
                NonHeapUsedInBytes      int `json:"non_heap_used_in_bytes"`
                Pools                   struct {
                    Old struct {
                        MaxInBytes      int `json:"max_in_bytes"`
                        PeakMaxInBytes  int `json:"peak_max_in_bytes"`
                        PeakUsedInBytes int `json:"peak_used_in_bytes"`
                        UsedInBytes     int `json:"used_in_bytes"`
                    }   `json:"old"`
                    Survivor struct {
                        MaxInBytes      int `json:"max_in_bytes"`
                        PeakMaxInBytes  int `json:"peak_max_in_bytes"`
                        PeakUsedInBytes int `json:"peak_used_in_bytes"`
                        UsedInBytes     int `json:"used_in_bytes"`
                    }   `json:"survivor"`
                    Young struct {
                        MaxInBytes      int `json:"max_in_bytes"`
                        PeakMaxInBytes  int `json:"peak_max_in_bytes"`
                        PeakUsedInBytes int `json:"peak_used_in_bytes"`
                        UsedInBytes     int `json:"used_in_bytes"`
                    }   `json:"young"`
                }   `json:"pools" exclude:"true"`
            }   `json:"mem"`
            Threads struct {
                Count     int `json:"count"`
                PeakCount int `json:"peak_count"`
            }   `json:"threads"`
            Timestamp      int `json:"timestamp"`
            UptimeInMillis int `json:"uptime_in_millis"`
        }   `json:"jvm"`
        Name    string `json:"name"`
        Network struct {
            TCP struct {
                ActiveOpens  int `json:"active_opens" version:"1"`  // 1.0 only
                AttemptFails int `json:"attempt_fails" version:"1"` // 1.0 only
                CurrEstab    int `json:"curr_estab" version:"1"`    // 1.0 only
                EstabResets  int `json:"estab_resets" version:"1"`  // 1.0 only
                InErrs       int `json:"in_errs" version:"1"`       // 1.0 only
                InSegs       int `json:"in_segs" version:"1"`       // 1.0 only
                OutRsts      int `json:"out_rsts" version:"1"`      // 1.0 only
                OutSegs      int `json:"out_segs" version:"1"`      // 1.0 only
                PassiveOpens int `json:"passive_opens" version:"1"` // 1.0 only
                RetransSegs  int `json:"retrans_segs" version:"1"`  // 1.0 only
            } `json:"tcp"`
        }   `json:"network"`
        OS  struct {
            CPU struct {
                Idle   int `json:"idle" version:"1"`   // 1.0 only
                Stolen int `json:"stolen" version:"1"` // 1.0 only
                Sys    int `json:"sys" version:"1"`    // 1.0 only
                Usage  int `json:"usage" version:"1"`  // 1.0 only
                User   int `json:"user" version:"1"`   // 1.0 only
            }   `json:"cpu"`
            //			LoadAverage []float64 `json:"load_average"` // 1.0 only
            //			LoadAverage float64 `json:"load_average"` // 2.0 only
            Mem struct {
                ActualFreeInBytes int `json:"actual_free_in_bytes" version:"1"` // 1.0 only
                ActualUsedInBytes int `json:"actual_used_in_bytes" version:"1"` // 1.0 only
                FreeInBytes       int `json:"free_in_bytes"`
                FreePercent       int `json:"free_percent"`
                TotalInBytes      int `json:"total_in_bytes" version:"2"` // 2.0 only
                UsedInBytes       int `json:"used_in_bytes"`
                UsedPercent       int `json:"used_percent"`
            }   `json:"mem"`
            Swap struct {
                FreeInBytes  int `json:"free_in_bytes"`
                TotalInBytes int `json:"total_in_bytes" version:"2"` // 2.0 only
                UsedInBytes  int `json:"used_in_bytes"`
            }   `json:"swap"`
            Timestamp      int `json:"timestamp"`
            UptimeInMillis int `json:"uptime_in_millis"`
        }   `json:"os" exclude:"true"` // These are OS-wide stats, and are already gathered by other collectors.
        Process struct {
            CPU struct {
                Percent       int `json:"percent" exclude:"true"`
                SysInMillis   int `json:"sys_in_millis" version:"1"` // 1.0 only
                TotalInMillis int `json:"total_in_millis"`
                UserInMillis  int `json:"user_in_millis" version:"1"` // 1.0 only
            }   `json:"cpu"`
            MaxFileDescriptors int `json:"max_file_descriptors" version:"2"` // 2.0 only
            Mem                struct {
                ResidentInBytes     int `json:"resident_in_bytes" metric:"resident" version:"1"` // 1.0 only
                ShareInBytes        int `json:"share_in_bytes" metric:"shared" version:"1"`      // 1.0 only
                TotalVirtualInBytes int `json:"total_virtual_in_bytes" metric:"total_virtual"`
            }   `json:"mem"`
            OpenFileDescriptors int `json:"open_file_descriptors"`
            Timestamp           int `json:"timestamp" exclude:"true"`
        }   `json:"process"`
        Script struct {
            CacheEvictions int `json:"cache_evictions" version:"2"` // 2.0 only
            Compilations   int `json:"compilations" version:"2"`    // 2.0 only
        }   `json:"script"`
        ThreadPool struct {
            Bulk              ElasticThreadPoolStat `json:"bulk"`
            FetchShardStarted ElasticThreadPoolStat `json:"fetch_shard_started" version:"2"` // 2.0 only
            FetchShardStore   ElasticThreadPoolStat `json:"fetch_shard_store" version:"2"`   // 2.0 only
            Flush             ElasticThreadPoolStat `json:"flush"`
            Generic           ElasticThreadPoolStat `json:"generic"`
            Get               ElasticThreadPoolStat `json:"get"`
            Index             ElasticThreadPoolStat `json:"index"`
            Listener          ElasticThreadPoolStat `json:"listener"`
            Management        ElasticThreadPoolStat `json:"management"`
            Merge             ElasticThreadPoolStat `json:"merge" version:"1"` // 1.0 only
            Optimize          ElasticThreadPoolStat `json:"optimize"`
            Percolate         ElasticThreadPoolStat `json:"percolate"`
            Refresh           ElasticThreadPoolStat `json:"refresh"`
            Search            ElasticThreadPoolStat `json:"search"`
            Snapshot          ElasticThreadPoolStat `json:"snapshot"`
            Suggest           ElasticThreadPoolStat `json:"suggest"`
            Warmer            ElasticThreadPoolStat `json:"warmer"`
        }   `json:"thread_pool" exclude:"true"`
        Timestamp int `json:"timestamp"`
        Transport struct {
            RxCount       int `json:"rx_count"`
            RxSizeInBytes int `json:"rx_size_in_bytes"`
            ServerOpen    int `json:"server_open"`
            TxCount       int `json:"tx_count"`
            TxSizeInBytes int `json:"tx_size_in_bytes"`
        }   `json:"transport"`
        TransportAddress string `json:"transport_address"`
    }   `json:"nodes"`
}

type ElasticHealth Uses

type ElasticHealth struct {
    ActivePrimaryShards         int                           `json:"active_primary_shards" desc:"The number of active primary shards. Each document is stored in a single primary shard and then when it is indexed it is copied the replicas of that shard."`
    ActiveShards                int                           `json:"active_shards" desc:"The number of active shards."`
    ActiveShardsPercentAsNumber float64                       `json:"active_shards_percent_as_number" version:"2"` // 2.0 only
    ClusterName                 string                        `json:"cluster_name"`
    DelayedUnassignedShards     int                           `json:"delayed_unassigned_shards" version:"2"` // 2.0 only
    Indices                     map[string]ElasticIndexHealth `json:"indices" exclude:"true"`
    InitializingShards          int                           `json:"initializing_shards" desc:"The number of initalizing shards."`
    NumberOfDataNodes           int                           `json:"number_of_data_nodes"`
    NumberOfInFlightFetch       int                           `json:"number_of_in_flight_fetch" version:"2"` // 2.0 only
    NumberOfNodes               int                           `json:"number_of_nodes"`
    NumberOfPendingTasks        int                           `json:"number_of_pending_tasks"`
    RelocatingShards            int                           `json:"relocating_shards" desc:"The number of shards relocating."`
    Status                      string                        `json:"status" desc:"The current status of the cluster. 0: green, 1: yellow, 2: red."`
    TaskMaxWaitingInQueueMillis int                           `json:"task_max_waiting_in_queue_millis" version:"2"` // 2.0 only
    TimedOut                    bool                          `json:"timed_out" exclude:"true"`
    UnassignedShards            int                           `json:"unassigned_shards" version:"2"` // 2.0 only
}

type ElasticIndex Uses

type ElasticIndex struct {
    Primaries ElasticIndexDetails `json:"primaries"`
    Total     ElasticIndexDetails `json:"total"`
}

type ElasticIndexDetails Uses

type ElasticIndexDetails struct {
    Completion struct {
        SizeInBytes int `json:"size_in_bytes" desc:"Size of the completion index (used for auto-complete functionallity)."`
    }   `json:"completion"`
    Docs struct {
        Count   int `json:"count" rate:"gauge" rate:"gauge" unit:"documents" desc:"The number of documents in the index."`
        Deleted int `json:"deleted" rate:"gauge" unit:"documents" desc:"The number of deleted documents in the index."`
    }   `json:"docs"`
    Fielddata struct {
        Evictions         int `json:"evictions" rate:"counter" unit:"evictions" desc:"The number of cache evictions for field data."`
        MemorySizeInBytes int `json:"memory_size_in_bytes" desc:"The amount of memory used for field data."`
    }   `json:"fielddata"`
    FilterCache struct {
        Evictions         int `json:"evictions" version:"1" rate:"counter" unit:"evictions" desc:"The number of cache evictions for filter data."` // 1.0 only
        MemorySizeInBytes int `json:"memory_size_in_bytes" version:"1" desc:"The amount of memory used for filter data."`                          // 1.0 only
    }   `json:"filter_cache"`
    Flush struct {
        Total             int `json:"total" rate:"counter" unit:"flushes" desc:"The number of flush operations. The flush process of an index basically frees memory from the index by flushing data to the index storage and clearing the internal transaction log."`
        TotalTimeInMillis int `json:"total_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent on flush operations. The flush process of an index basically frees memory from the index by flushing data to the index storage and clearing the internal transaction log."`
    }   `json:"flush"`
    Get struct {
        Current             int `json:"current" rate:"gauge" unit:"gets" desc:"The current number of get operations. Gets get a typed JSON document from the index based on its id."`
        ExistsTimeInMillis  int `json:"exists_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent on get exists operations. Gets exists sees if a document exists."`
        ExistsTotal         int `json:"exists_total" rate:"counter" unit:"get exists" desc:"The total number of get exists operations. Gets exists sees if a document exists."`
        MissingTimeInMillis int `json:"missing_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent trying to get documents that turned out to be missing."`
        MissingTotal        int `json:"missing_total" rate:"counter" unit:"operations" desc:"The total number of operations that tried to get a document that turned out to be missing."`
        TimeInMillis        int `json:"time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent on get operations. Gets get a typed JSON document from the index based on its id."`
        Total               int `json:"total" rate:"counter" unit:"operations" desc:"The total number of get operations. Gets get a typed JSON document from the index based on its id."`
    }   `json:"get"`
    IDCache struct {
        MemorySizeInBytes int `json:"memory_size_in_bytes" version:"1" desc:"The size of the id cache."` // 1.0 only
    }   `json:"id_cache"`
    Indexing struct {
        DeleteCurrent        int  `json:"delete_current" rate:"gauge" unit:"documents" desc:"The current number of documents being deleted via indexing commands (such as a delete query)."`
        DeleteTimeInMillis   int  `json:"delete_time_in_millis" rate:"counter" unit:"seconds" desc:"The time spent deleting documents."`
        DeleteTotal          int  `json:"delete_total" rate:"counter" unit:"documents" desc:"The total number of documents deleted."`
        IndexCurrent         int  `json:"index_current" rate:"gauge" unit:"documents" desc:"The current number of documents being indexed."`
        IndexTimeInMillis    int  `json:"index_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent indexing documents."`
        IndexTotal           int  `json:"index_total" rate:"counter" unit:"documents" desc:"The total number of documents indexed."`
        IsThrottled          bool `json:"is_throttled" exclude:"true"`
        NoopUpdateTotal      int  `json:"noop_update_total"`
        ThrottleTimeInMillis int  `json:"throttle_time_in_millis"`
    }   `json:"indexing"`
    Merges struct {
        Current                    int `json:"current" rate:"gauge" unit:"merges" desc:"The current number of merge operations. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
        CurrentDocs                int `json:"current_docs" rate:"gauge" unit:"documents" desc:"The current number of documents that have an underlying merge operation going on. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
        CurrentSizeInBytes         int `json:"current_size_in_bytes" desc:"The current number of bytes being merged. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
        Total                      int `json:"total" rate:"counter" unit:"merges" desc:"The total number of merges. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
        TotalAutoThrottleInBytes   int `json:"total_auto_throttle_in_bytes" version:"2"` // 2.0 only
        TotalDocs                  int `json:"total_docs" rate:"counter" unit:"documents" desc:"The total number of documents that have had an underlying merge operation. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
        TotalSizeInBytes           int `json:"total_size_in_bytes" desc:"The total number of bytes merged. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
        TotalStoppedTimeInMillis   int `json:"total_stopped_time_in_millis" version:"2"`   // 2.0 only
        TotalThrottledTimeInMillis int `json:"total_throttled_time_in_millis" version:"2"` // 2.0 only
        TotalTimeInMillis          int `json:"total_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent on merge operations. In elastic Lucene segments are merged behind the scenes. It is possible these can impact search performance."`
    }   `json:"merges"`
    Percolate struct {
        Current           int    `json:"current" rate:"gauge" unit:"operations" desc:"The current number of percolate operations."`
        MemorySize        string `json:"memory_size"`
        MemorySizeInBytes int    `json:"memory_size_in_bytes" desc:"The amount of memory used for the percolate index. Percolate is a reverse query to document operation."`
        Queries           int    `json:"queries" rate:"counter" unit:"queries" desc:"The total number of percolate queries. Percolate is a reverse query to document operation."`
        TimeInMillis      int    `json:"time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent on percolating. Percolate is a reverse query to document operation."`
        Total             int    `json:"total" rate:"gauge" unit:"operations" desc:"The total number of percolate operations. Percolate is a reverse query to document operation."`
    }   `json:"percolate"`
    QueryCache struct {
        CacheCount        int `json:"cache_count" version:"2"` // 2.0 only
        CacheSize         int `json:"cache_size" version:"2"`  // 2.0 only
        Evictions         int `json:"evictions"`
        HitCount          int `json:"hit_count"`
        MemorySizeInBytes int `json:"memory_size_in_bytes"`
        MissCount         int `json:"miss_count"`
        TotalCount        int `json:"total_count" version:"2"` // 2.0 only
    }   `json:"query_cache"`
    Recovery struct {
        CurrentAsSource      int `json:"current_as_source"`
        CurrentAsTarget      int `json:"current_as_target"`
        ThrottleTimeInMillis int `json:"throttle_time_in_millis"`
    }   `json:"recovery"`
    Refresh struct {
        Total             int `json:"total" rate:"counter" unit:"refresh" desc:"The total number of refreshes. Refreshing makes all operations performed since the last search available."`
        TotalTimeInMillis int `json:"total_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent on refreshes. Refreshing makes all operations performed since the last search available."`
    }   `json:"refresh"`
    RequestCache struct {
        Evictions         int `json:"evictions" version:"2"`            // 2.0 only
        HitCount          int `json:"hit_count" version:"2"`            // 2.0 only
        MemorySizeInBytes int `json:"memory_size_in_bytes" version:"2"` // 2.0 only
        MissCount         int `json:"miss_count" version:"2"`           // 2.0 only
    }   `json:"request_cache"`
    Search struct {
        FetchCurrent       int `json:"fetch_current" rate:"gauge" unit:"documents" desc:"The current number of documents being fetched. Fetching is a phase of querying in a distributed search."`
        FetchTimeInMillis  int `json:"fetch_time_in_millis" rate:"counter" unit:"seconds" desc:"The total time spent fetching documents. Fetching is a phase of querying in a distributed search."`
        FetchTotal         int `json:"fetch_total" rate:"counter" unit:"documents" desc:"The total number of documents fetched. Fetching is a phase of querying in a distributed search."`
        OpenContexts       int `json:"open_contexts" rate:"gauge" unit:"contexts" desc:"The current number of open contexts. A search is left open when srolling (i.e. pagination)."`
        QueryCurrent       int `json:"query_current" rate:"gauge" unit:"queries" desc:"The current number of queries."`
        QueryTimeInMillis  int `json:"query_time_in_millis" rate:"counter" unit:"seconds" desc:"The total amount of time spent querying."`
        QueryTotal         int `json:"query_total" rate:"counter" unit:"queries" desc:"The total number of queries."`
        ScrollCurrent      int `json:"scroll_current" version:"2"`        // 2.0 only
        ScrollTimeInMillis int `json:"scroll_time_in_millis" version:"2"` // 2.0 only
        ScrollTotal        int `json:"scroll_total" version:"2"`          // 2.0 only
    }   `json:"search"`
    Segments struct {
        Count                       int `json:"count" rate:"counter" unit:"segments" desc:"The number of segments that make up the index."`
        DocValuesMemoryInBytes      int `json:"doc_values_memory_in_bytes" version:"2"` // 2.0 only
        FixedBitSetMemoryInBytes    int `json:"fixed_bit_set_memory_in_bytes"`
        IndexWriterMaxMemoryInBytes int `json:"index_writer_max_memory_in_bytes"`
        IndexWriterMemoryInBytes    int `json:"index_writer_memory_in_bytes"`
        MemoryInBytes               int `json:"memory_in_bytes" desc:"The total amount of memory used for Lucene segments."`
        NormsMemoryInBytes          int `json:"norms_memory_in_bytes" version:"2"`         // 2.0 only
        StoredFieldsMemoryInBytes   int `json:"stored_fields_memory_in_bytes" version:"2"` // 2.0 only
        TermVectorsMemoryInBytes    int `json:"term_vectors_memory_in_bytes" version:"2"`  // 2.0 only
        TermsMemoryInBytes          int `json:"terms_memory_in_bytes" version:"2"`         // 2.0 only
        VersionMapMemoryInBytes     int `json:"version_map_memory_in_bytes"`
    }   `json:"segments"`
    Store struct {
        SizeInBytes          int `json:"size_in_bytes" unit:"bytes" desc:"The current size of the store."`
        ThrottleTimeInMillis int `json:"throttle_time_in_millis" rate:"gauge" unit:"seconds" desc:"The amount of time that merges where throttled."`
    }   `json:"store"`
    Suggest struct {
        Current      int `json:"current" rate:"gauge" unit:"suggests" desc:"The current number of suggest operations."`
        TimeInMillis int `json:"time_in_millis" rate:"gauge" unit:"seconds" desc:"The total amount of time spent on suggest operations."`
        Total        int `json:"total" rate:"gauge" unit:"suggests" desc:"The total number of suggest operations."`
    }   `json:"suggest"`
    Translog struct {
        Operations  int `json:"operations" rate:"gauge" unit:"operations" desc:"The total number of translog operations. The transaction logs (or write ahead logs) ensure atomicity of operations."`
        SizeInBytes int `json:"size_in_bytes" desc:"The current size of transaction log. The transaction log (or write ahead log) ensure atomicity of operations."`
    }   `json:"translog"`
    Warmer struct {
        Current           int `json:"current" rate:"gauge" unit:"operations" desc:"The current number of warmer operations. Warming registers search requests in the background to speed up actual search requests."`
        Total             int `json:"total" rate:"gauge" unit:"operations" desc:"The total number of warmer operations. Warming registers search requests in the background to speed up actual search requests."`
        TotalTimeInMillis int `json:"total_time_in_millis" rate:"gauge" unit:"seconds" desc:"The total time spent on warmer operations. Warming registers search requests in the background to speed up actual search requests."`
    }   `json:"warmer"`
}

type ElasticIndexHealth Uses

type ElasticIndexHealth struct {
    ActivePrimaryShards int    `json:"active_primary_shards" desc:"The number of active primary shards. Each document is stored in a single primary shard and then when it is indexed it is copied the replicas of that shard."`
    ActiveShards        int    `json:"active_shards" desc:"The number of active shards."`
    InitializingShards  int    `json:"initializing_shards" desc:"The number of initalizing shards."`
    NumberOfReplicas    int    `json:"number_of_replicas" desc:"The number of replicas."`
    NumberOfShards      int    `json:"number_of_shards" desc:"The number of shards."`
    RelocatingShards    int    `json:"relocating_shards" desc:"The number of shards relocating."`
    Status              string `json:"status" desc:"The current status of the index. 0: green, 1: yellow, 2: red."`
    UnassignedShards    int    `json:"unassigned_shards"`
}

type ElasticIndexStats Uses

// ElasticIndexStats mirrors the top level of the Elasticsearch index-stats
// API response: aggregate stats under "_all", shard success/failure counts
// under "_shards", and per-index stats keyed by index name.
type ElasticIndexStats struct {
    All    ElasticIndex `json:"_all"`
    Shards struct {
        Failed     float64 `json:"failed"`
        Successful float64 `json:"successful"`
        Total      float64 `json:"total"`
    }   `json:"_shards"`
    Indices map[string]ElasticIndex `json:"indices"`
}

type ElasticStatus Uses

// ElasticStatus captures status, node name, and version number from an
// Elasticsearch JSON response — presumably the root ("/") endpoint, used to
// pick between the version:"1"/version:"2" tagged fields elsewhere in this
// file; confirm in elasticsearch.go.
type ElasticStatus struct {
    Status  int    `json:"status"`
    Name    string `json:"name"`
    Version struct {
        Number string `json:"number"`
    }   `json:"version"`
}

type ElasticThreadPoolStat Uses

// ElasticThreadPoolStat holds the per-thread-pool counters decoded from an
// Elasticsearch stats response (active/queued/rejected work and thread
// counts) — presumably the node-stats thread_pool section; confirm in
// elasticsearch.go.
type ElasticThreadPoolStat struct {
    Active    int `json:"active"`
    Completed int `json:"completed"`
    Largest   int `json:"largest"`
    Queue     int `json:"queue"`
    Rejected  int `json:"rejected"`
    Threads   int `json:"threads"`
}

type IntervalCollector Uses

// IntervalCollector runs a collection function F on a fixed schedule.
type IntervalCollector struct {
    // F produces the datapoints for one collection pass.
    F        func() (opentsdb.MultiDataPoint, error)
    Interval time.Duration // defaults to DefaultFreq if unspecified
    // Enable, when set, presumably gates whether this collector runs
    // (see the Enabled method) — confirm in interval.go.
    Enable   func() bool

    // internal use
    sync.Mutex

    TagOverride
    // contains filtered or unexported fields
}

func (*IntervalCollector) Enabled Uses

func (c *IntervalCollector) Enabled() bool

func (*IntervalCollector) Init Uses

func (c *IntervalCollector) Init()

func (*IntervalCollector) Name Uses

func (c *IntervalCollector) Name() string

func (*IntervalCollector) Run Uses

func (c *IntervalCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})

type L7Stats Uses

// L7Stats holds the metric metadata (rate type, unit, and description) for
// one L7 statistic — presumably used by the extrahop collector; confirm at
// the call sites.
type L7Stats struct {
    Rate        metadata.RateType
    Unit        metadata.Unit
    Description string
}

type MetricMeta Uses

// MetricMeta bundles the identity and metadata of one metric: its name,
// tag set, rate type, unit, and description.
type MetricMeta struct {
    Metric   string
    TagSet   opentsdb.TagSet
    RateType metadata.RateType
    Unit     metadata.Unit
    Desc     string
}

type MetricMetaHAProxy Uses

// MetricMetaHAProxy is a super-structure which adds a friendly Name, as
// well as an indicator on if a metric is to be ignored.
type MetricMetaHAProxy struct {
    Name   string
    Ignore bool
    MetricMeta
}

MetricMetaHAProxy is a super-structure which adds a friendly Name, as well as an indicator on if a metric is to be ignored.

type MetricSet Uses

// MetricSet is a generic string-to-string map; the semantics of its keys
// and values are not visible in this listing — confirm at the call sites.
type MetricSet map[string]string

type PRReport Uses

// PRReport is one status entry decoded from YAML — presumably a Puppet run
// report (the package includes puppet.go/puppet_linux.go); confirm there.
type PRReport struct {
    Status string `yaml:"status"`
    Time   string `yaml:"time"` // 2006-01-02 15:04:05.999999 -07:00
}

type PRSummary Uses

// PRSummary mirrors a YAML run summary with change, event, and resource
// counters plus timing and version info — presumably Puppet's
// last_run_summary.yaml; confirm in puppet_linux.go.
type PRSummary struct {
    Changes struct {
        Total float64 `yaml:"total"`
    }   `yaml:"changes"`
    Events struct {
        Failure float64 `yaml:"failure"`
        Success float64 `yaml:"success"`
        Total   float64 `yaml:"total"`
    }   `yaml:"events"`
    Resources struct {
        Changed         float64 `yaml:"changed"`
        Failed          float64 `yaml:"failed"`
        FailedToRestart float64 `yaml:"failed_to_restart"`
        OutOfSync       float64 `yaml:"out_of_sync"`
        Restarted       float64 `yaml:"restarted"`
        Scheduled       float64 `yaml:"scheduled"`
        Skipped         float64 `yaml:"skipped"`
        Total           float64 `yaml:"total"`
    }   `yaml:"resources"`
    Time    map[string]string `yaml:"time"`
    Version struct {
        Config string `yaml:"config"`
        Puppet string `yaml:"puppet"`
    }   `yaml:"version"`
}

type Process Uses

// Process identifies a single OS process: its pid (kept as a string),
// command, arguments, and start time.
type Process struct {
    Pid       string
    Command   string
    Arguments string
    Started   time.Time
}

type ProgramCollector Uses

// ProgramCollector runs an external program at Path on the given Interval
// and presumably converts its output into datapoints (see Run) — confirm
// in program.go.
type ProgramCollector struct {
    Path     string
    Interval time.Duration

    TagOverride
}

func (*ProgramCollector) Init Uses

func (c *ProgramCollector) Init()

func (*ProgramCollector) Name Uses

func (c *ProgramCollector) Name() string

func (*ProgramCollector) Run Uses

func (c *ProgramCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})

type StreamCollector Uses

// StreamCollector wraps a function F that emits batches of datapoints on a
// channel, for collectors that push data as it arrives rather than on a
// fixed interval.
type StreamCollector struct {
    F   func() <-chan *opentsdb.MultiDataPoint

    TagOverride
    // contains filtered or unexported fields
}

func (*StreamCollector) Enabled Uses

func (s *StreamCollector) Enabled() bool

func (*StreamCollector) Init Uses

func (s *StreamCollector) Init()

func (*StreamCollector) Name Uses

func (s *StreamCollector) Name() string

func (*StreamCollector) Run Uses

func (s *StreamCollector) Run(dpchan chan<- *opentsdb.DataPoint, quit <-chan struct{})

type TagOverride Uses

// TagOverride stores tag-override state for a collector; overrides are
// registered with AddTagOverrides and applied to a tag set with
// ApplyTagOverrides.
type TagOverride struct {
    // contains filtered or unexported fields
}

func (*TagOverride) AddTagOverrides Uses

func (to *TagOverride) AddTagOverrides(sources map[string]string, t opentsdb.TagSet) error

func (*TagOverride) ApplyTagOverrides Uses

func (to *TagOverride) ApplyTagOverrides(t opentsdb.TagSet)

type TeamPort Uses

type TeamPort struct {
    Ifinfo struct {
        DevAddr    string  `json:"dev_addr"`
        DevAddrLen float64 `json:"dev_addr_len"`
        Ifindex    float64 `json:"ifindex"`
        Ifname     string  `json:"ifname"`
    }
    Link struct {
        Duplex string  `json:"duplex"`
        Speed  float64 `json:"speed"`
        Up     bool    `json:"up"`
    }   `json:"link"`
    LinkWatches struct {
        List struct {
            LinkWatch0 struct {
                DelayDown float64 `json:"delay_down"`
                DelayUp   float64 `json:"delay_up"`
                Name      string  `json:"name"`
                Up        bool    `json:"up"`
            } `json:"link_watch_0"`
        }   `json:"list"`
        Up  bool `json:"up"`
    }   `json:"link_watches"`
    Runner struct {
        ActorLacpduInfo struct {
            Key            float64 `json:"key"`
            Port           float64 `json:"port"`
            PortPriority   float64 `json:"port_priority"`
            State          float64 `json:"state"`
            System         string  `json:"system"`
            SystemPriority float64 `json:"system_priority"`
        }   `json:"actor_lacpdu_info"`
        Aggregator struct {
            ID       float64 `json:"id"`
            Selected bool    `json:"selected"`
        }   `json:"aggregator"`
        Key               float64 `json:"key"`
        PartnerLacpduInfo struct {
            Key            float64 `json:"key"`
            Port           float64 `json:"port"`
            PortPriority   float64 `json:"port_priority"`
            State          float64 `json:"state"`
            System         string  `json:"system"`
            SystemPriority float64 `json:"system_priority"`
        }   `json:"partner_lacpdu_info"`
        Prio     float64 `json:"prio"`
        Selected bool    `json:"selected"`
        State    string  `json:"state"`
    }   `json:"runner"`
}

type TeamState Uses

// TeamState mirrors the top level of a Linux bonding "team" state dump
// (presumably `teamdctl <dev> state dump` JSON — confirm against the
// caller): per-port state keyed by port name, runner settings, daemon
// setup, and the team device's own interface info.
type TeamState struct {
    TeamPorts map[string]TeamPort `json:"ports"`
    Runner    struct {
        Active       bool    `json:"active"`
        FastRate     bool    `json:"fast_rate"`
        SelectPolicy string  `json:"select_policy"`
        SysPrio      float64 `json:"sys_prio"`
    }   `json:"runner"`
    Setup struct {
        Daemonized         bool    `json:"daemonized"`
        DbusEnabled        bool    `json:"dbus_enabled"`
        DebugLevel         float64 `json:"debug_level"`
        KernelTeamModeName string  `json:"kernel_team_mode_name"`
        Pid                float64 `json:"pid"`
        PidFile            string  `json:"pid_file"`
        RunnerName         string  `json:"runner_name"`
        ZmqEnabled         bool    `json:"zmq_enabled"`
    }   `json:"setup"`
    TeamDevice struct {
        Ifinfo struct {
            DevAddr    string  `json:"dev_addr"`
            DevAddrLen float64 `json:"dev_addr_len"`
            Ifindex    float64 `json:"ifindex"`
            Ifname     string  `json:"ifname"`
        } `json:"ifinfo"`
    }   `json:"team_device"`
}

type VRRPAddressEntry Uses

// VRRPAddressEntry is one row of the VRRP address SNMP table (see
// VRRPAddressTable); presumably from the Keepalived MIB — confirm in
// keepalived_linux.go. The `snmp:"octet"` tags mark columns decoded as
// octet strings.
type VRRPAddressEntry struct {
    VRRPAddressIndex       int64
    VRRPAddressType        int64
    VRRPAddressValue       string `snmp:"octet"`
    VRRPAddressBroadcast   string `snmp:"octet"`
    VRRPAddressMask        int64
    VRRPAddressScope       int64
    VRRPAddressIfIndex     int64
    VRRPAddressIfName      string
    VRRPAddressIfAlias     string
    VRRPAddressStatus      int64
    VRRPAddressAdvertising int64
}

type VRRPInstanceEntry Uses

// VRRPInstanceEntry is one row of the VRRP instance SNMP table (see
// VRRPInstanceTable); presumably from the Keepalived MIB — confirm in
// keepalived_linux.go. Fields mirror the table's columns (state,
// priorities, interfaces, notification scripts, etc.).
type VRRPInstanceEntry struct {
    VInstanceIndex             int64
    VInstanceName              string
    VInstanceVirtualRouterId   int64
    VInstanceState             int64
    VInstanceInitialState      int64
    VInstanceWantedState       int64
    VInstanceBasePriority      int64
    VInstanceEffectivePriority int64
    VInstanceVipsStatus        int64
    VInstancePrimaryInterface  string
    VInstanceTrackPrimaryIf    int64
    VInstanceAdvertisementsInt int64
    VInstancePreempt           int64
    VInstancePreemptDelay      int64
    VInstanceAuthType          int64
    VInstanceLvsSyncDaemon     int64
    VInstanceLvsSyncInterface  string
    VInstanceSyncGroup         string
    VInstanceGarpDelay         int64
    VInstanceSmtpAlert         int64
    VInstanceNotifyExec        int64
    VInstanceScriptMaster      string
    VInstanceScriptBackup      string
    VInstanceScriptFault       string
    VInstanceScriptStop        string
    VInstanceScript            string
    VInstanceAccept            int64
}

type WatchedProc Uses

// WatchedProc describes a monitored process pattern: Command (and,
// optionally, ArgMatch) select matching processes, and Processes maps each
// matched Process to a unique id assigned by Check.
type WatchedProc struct {
    Command      *regexp.Regexp
    Name         string
    IncludeCount bool
    Processes    map[Process]int
    ArgMatch     *regexp.Regexp
    // contains filtered or unexported fields
}

func NewWatchedProc Uses

func NewWatchedProc(params conf.ProcessParams) (*WatchedProc, error)

NewWatchedProc takes a configuration block [[Process]] from conf

func (*WatchedProc) Check Uses

func (w *WatchedProc) Check(procs []*Process)

Check finds all matching processes and assigns them a new unique id. If WatchedProc has processes that no longer exist, it removes them from WatchedProc.Processes.

func (*WatchedProc) Remove Uses

func (w *WatchedProc) Remove(proc Process)

Package collectors imports 72 packages (graph) and is imported by 9 packages. Updated 2019-05-09. Refresh now. Tools for package owners.