newrelic

package
v0.0.0-...-4f470da

Published: Apr 29, 2024 License: Apache-2.0 Imports: 29 Imported by: 0

Documentation

Constants

This section is empty.

Variables

var (
	ErrLocked     = errors.New("pid file lock held by another process")
	ErrRetryLimit = errors.New("max retries exceeded trying to create pid file")
)

Functions

func DefaultListenSocket

func DefaultListenSocket() string

DefaultListenSocket is the default location for the agent daemon communication socket. Note that this should match the agent's NR_PHP_INI_DEFAULT_PORT value.

Linux systems use the abstract socket "@newrelic" by default, MacOS and FreeBSD use the socket file "/tmp/.newrelic.sock".
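
As an illustrative sketch only (the helper name dialDaemon and the fatal error handling are placeholders; the code is assumed to sit alongside this package with the log and net packages imported), an agent-side caller might combine DefaultListenSocket with OpenClientConnection, documented below:

// dialDaemon is a hypothetical helper that dials the daemon at its default
// communication socket.
func dialDaemon() net.Conn {
	addr := DefaultListenSocket() // "@newrelic" on Linux, "/tmp/.newrelic.sock" on MacOS/FreeBSD
	conn, err := OpenClientConnection(addr)
	if err != nil {
		log.Fatalf("unable to reach daemon at %s: %v", addr, err)
	}
	return conn
}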

func EncodePayload

func EncodePayload(payload interface{}) ([]byte, error)

func IntegrationData

func IntegrationData(p PayloadCreator, id AgentRunID, harvestStart time.Time) ([]byte, error)

func MarshalAppInfoReply

func MarshalAppInfoReply(reply AppInfoReply) []byte

func OpenClientConnection

func OpenClientConnection(addr string) (net.Conn, error)

func OrderScrubMetrics

func OrderScrubMetrics(metrics []byte, scrub []*regexp.Regexp) ([]byte, error)

OrderScrubMetrics is used to sort the metric JSON for the collector for deterministic tests.
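
A sketch of test usage (the helper name, the Supportability pattern, and the golden-file framing are placeholders; regexp is assumed to be imported):

// scrubForGolden is a hypothetical test helper: it sorts the metric JSON and
// scrubs volatile metric names so the result can be compared against a
// golden file.
func scrubForGolden(metricJSON []byte) ([]byte, error) {
	scrub := []*regexp.Regexp{
		regexp.MustCompile(`Supportability/.*`), // example pattern only
	}
	return OrderScrubMetrics(metricJSON, scrub)
}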

func TestHarvestTriggerCustomBuilder

func TestHarvestTriggerCustomBuilder(t *testing.T)

Due to https://github.com/golang/go/issues/20903, this test is flaky on Go 1.8 and older.

Types

type AgentDataHandler

type AgentDataHandler interface {
	IncomingTxnData(id AgentRunID, sample AggregaterInto)
	IncomingSpanBatch(batch SpanBatch)
	IncomingAppInfo(id *AgentRunID, info *AppInfo) AppInfoReply
}

type AgentPolicies

type AgentPolicies struct {
	Policies map[string]SecurityPolicyAgent `json:"agent_policies"`
}

type AgentRunID

type AgentRunID string

AgentRunID is a string as of agent listener protocol version 14.

func (AgentRunID) String

func (id AgentRunID) String() string

type AggregaterInto

type AggregaterInto interface {
	AggregateInto(h *Harvest)
}

type AnalyticsEvent

type AnalyticsEvent struct {
	// contains filtered or unexported fields
}

AnalyticsEvent represents an analytics event reported by an agent.

type App

type App struct {
	RawSecurityPolicies []byte
	RawConnectReply     []byte
	HarvestTrigger      HarvestTriggerFunc
	LastActivity        time.Time
	Rules               MetricRules
	PhpPackages         map[PhpPackagesKey]struct{}
	// contains filtered or unexported fields
}

An App represents the state of an application.

func NewApp

func NewApp(info *AppInfo) *App

func (*App) Inactive

func (app *App) Inactive(threshold time.Duration) bool

Inactive determines whether the elapsed time since app last had activity exceeds a threshold.

func (*App) Key

func (app *App) Key() AppKey

func (*App) NeedsConnectAttempt

func (app *App) NeedsConnectAttempt(now time.Time, backoff time.Duration) bool

func (*App) String

func (app *App) String() string

type AppHarvest

type AppHarvest struct {
	*App
	*Harvest
	*infinite_tracing.TraceObserver
	// contains filtered or unexported fields
}

An AppHarvest takes the HarvestType values sent from an application's harvest trigger function, decorates them with the application, run ID, and harvest, and then sends them to a processor as ProcessorHarvest messages. Whenever an AppHarvest is closed, an event is sent on the cancel channel so that the harvest trigger function can also be shut down.

func NewAppHarvest

func NewAppHarvest(id AgentRunID, app *App, harvest *Harvest, ph chan ProcessorHarvest) *AppHarvest

func (*AppHarvest) Close

func (ah *AppHarvest) Close() error

func (*AppHarvest) NewProcessorHarvestEvent

func (ah *AppHarvest) NewProcessorHarvestEvent(id AgentRunID, t HarvestType) ProcessorHarvest

type AppInfo

type AppInfo struct {
	License                   collector.LicenseKey
	Appname                   string
	AgentLanguage             string
	AgentVersion              string
	HostDisplayName           string
	Settings                  map[string]interface{}
	Environment               JSONString
	HighSecurity              bool
	Labels                    JSONString
	Metadata                  JSONString
	RedirectCollector         string
	SecurityPolicyToken       string
	SupportedSecurityPolicies AgentPolicies
	Hostname                  string
	TraceObserverHost         string
	TraceObserverPort         uint16
	SpanQueueSize             uint64
	AgentEventLimits          collector.EventConfigs
	DockerId                  string
}

AppInfo encapsulates information provided by an agent about an application. The information is used to construct part of the connect message sent to the collector, and the fields should not be modified.

func UnmarshalAppInfo

func UnmarshalAppInfo(tbl flatbuffers.Table) *AppInfo

func (*AppInfo) ConnectPayload

func (info *AppInfo) ConnectPayload(util *utilization.Data) *RawConnectPayload

ConnectPayload creates the JSON of a connect request to be sent to the New Relic backend.

Utilization is always expected to be present.
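
A minimal sketch of how an AppInfo feeds into the connect request (the helper name is hypothetical; util is assumed to be a *utilization.Data gathered elsewhere in the daemon, and errors is assumed to be imported):

// buildConnect is a hypothetical helper: info comes from an agent's appinfo
// message and util is utilization data collected separately.
func buildConnect(info *AppInfo, util *utilization.Data) (*RawConnectPayload, error) {
	payload := info.ConnectPayload(util)
	if len(payload.AppName) == 0 {
		return nil, errors.New("appinfo contained no application names")
	}
	return payload, nil
}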

func (*AppInfo) ConnectPayloadInternal

func (info *AppInfo) ConnectPayloadInternal(pid int, util *utilization.Data) *RawConnectPayload

func (*AppInfo) Key

func (info *AppInfo) Key() AppKey

func (*AppInfo) String

func (info *AppInfo) String() string

type AppInfoMessage

type AppInfoMessage struct {
	ID         *AgentRunID
	Info       *AppInfo
	ResultChan chan AppInfoReply
}

type AppInfoReply

type AppInfoReply struct {
	RunIDValid       bool
	State            AppState
	ConnectReply     []byte
	SecurityPolicies []byte
	ConnectTimestamp uint64
	HarvestFrequency uint16
	SamplingTarget   uint16
}

type AppKey

type AppKey struct {
	License           collector.LicenseKey
	Appname           string
	RedirectCollector string
	HighSecurity      bool
	AgentLanguage     string
	AgentPolicies     string
	AgentHostname     string
	TraceObserverHost string
	TraceObserverPort uint16
}

An AppKey uniquely identifies an application.

type AppState

type AppState int
const (
	AppStateUnknown AppState = iota
	AppStateConnected
	AppStateDisconnected
	AppStateRestart
	AppStateInvalidLicense
	AppStateInvalidSecurityPolicies
)

type Client

type Client collector.Client

func NewClient

func NewClient(cfg *ClientConfig) (Client, error)

NewClient wraps collector.NewClient in order to ensure that the constants MaxOutboundConns and HarvestTimeout are used. This wrapper allows for these constants to be kept in this package alongside the other limits.

type ClientConfig

type ClientConfig struct {
	CAFile string
	CAPath string
	Proxy  string
}
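
A sketch of wiring these two pieces together (the helper name is hypothetical; the empty strings are placeholders for optional settings):

// newCollectorClient is a hypothetical helper that builds a collector client
// honoring this package's connection and timeout limits.
func newCollectorClient() (Client, error) {
	cfg := &ClientConfig{
		CAFile: "", // optional path to a custom CA bundle
		CAPath: "", // optional directory of CA certificates
		Proxy:  "", // optional proxy URL
	}
	return NewClient(cfg)
}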

type CommandsHandler

type CommandsHandler struct {
	Processor AgentDataHandler
}

func (CommandsHandler) HandleMessage

func (h CommandsHandler) HandleMessage(msg RawMessage) ([]byte, error)

type ConnectArgs

type ConnectArgs struct {
	RedirectCollector            string
	Payload                      []byte
	PayloadRaw                   *RawConnectPayload
	License                      collector.LicenseKey
	SecurityPolicyToken          string
	HighSecurity                 bool
	Client                       collector.Client
	AppKey                       AppKey
	AgentLanguage                string
	AgentVersion                 string
	AgentEventLimits             collector.EventConfigs
	PayloadPreconnect            []byte
	AppSupportedSecurityPolicies AgentPolicies
}

type ConnectAttempt

type ConnectAttempt struct {
	Key                 AppKey
	Collector           string
	Reply               *ConnectReply
	RawReply            collector.RPMResponse
	Err                 error
	RawSecurityPolicies []byte
}

func ConnectApplication

func ConnectApplication(args *ConnectArgs) ConnectAttempt

type ConnectReply

type ConnectReply struct {
	ID                     *AgentRunID                      `json:"agent_run_id"`
	MetricRules            MetricRules                      `json:"metric_name_rules"`
	SamplingFrequency      int                              `json:"sampling_target_period_in_seconds"`
	SamplingTarget         int                              `json:"sampling_target"`
	EventHarvestConfig     collector.EventHarvestConfig     `json:"event_harvest_config"`
	SpanEventHarvestConfig collector.SpanEventHarvestConfig `json:"span_event_harvest_config"`
	RequestHeadersMap      map[string]string                `json:"request_headers_map"`
	MaxPayloadSizeInBytes  int                              `json:"max_payload_size_in_bytes"`
}

ConnectReply contains all of the fields from the app connect command reply that are used in the daemon. The reply contains many more fields, but most of them are used in the agent.
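
Because the struct tags above mirror the collector's field names, a reply body can be decoded with encoding/json. A minimal sketch (the helper name is hypothetical; encoding/json is assumed to be imported):

// parseConnectReply decodes the daemon-relevant fields of a connect reply.
func parseConnectReply(raw []byte) (*ConnectReply, error) {
	var reply ConnectReply
	if err := json.Unmarshal(raw, &reply); err != nil {
		return nil, err
	}
	return &reply, nil
}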

type CustomEvents

type CustomEvents struct {
	// contains filtered or unexported fields
}

CustomEvents is a wrapper over AnalyticsEvents created for additional type safety and proper FailedHarvest behavior.

func NewCustomEvents

func NewCustomEvents(max int) *CustomEvents

NewCustomEvents returns a new analytics event reservoir with capacity max.

func (CustomEvents) AddEvent

func (events CustomEvents) AddEvent(e AnalyticsEvent)

AddEvent observes the occurrence of an analytics event. If the reservoir is full, sampling occurs. Note, when sampling occurs, it is possible the event may be discarded instead of added.

func (*CustomEvents) AddEventFromData

func (events *CustomEvents) AddEventFromData(data []byte, priority SamplingPriority)

AddEventFromData observes the occurrence of a custom analytics event. If the reservoir is full, sampling occurs. Note: when sampling occurs, it is possible the new event may be discarded.
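
A sketch of reservoir usage (the capacity, priority, and helper name are placeholders):

// collectCustomEvents adds pre-encoded agent events to a reservoir. Events
// beyond the capacity are sampled rather than appended.
func collectCustomEvents(payloads [][]byte) *CustomEvents {
	events := NewCustomEvents(10000)
	for _, p := range payloads {
		events.AddEventFromData(p, SamplingPriority(0.5))
	}
	// NumSeen counts every observation; NumSaved counts what survived sampling.
	_ = events.NumSeen()
	_ = events.NumSaved()
	return events
}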

func (CustomEvents) Audit

func (events CustomEvents) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For analytics events, the audit schema is the same as the schema expected by the collector.

func (*CustomEvents) Cmd

func (x *CustomEvents) Cmd() string

func (CustomEvents) CollectorJSON

func (events CustomEvents) CollectorJSON(id AgentRunID) ([]byte, error)

CollectorJSON marshals events to JSON according to the schema expected by the collector.

func (CustomEvents) Data

func (events CustomEvents) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (CustomEvents) Empty

func (events CustomEvents) Empty() bool

Empty returns true if the collection is empty.

func (*CustomEvents) FailedHarvest

func (events *CustomEvents) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver events to the collector fails. After a failed delivery attempt, the events are merged into the upcoming harvest, possibly with random sampling.

func (CustomEvents) Merge

func (events CustomEvents) Merge(other *analyticsEvents)

Merge merges the analytics events contained in other into events. If the combined number of events exceeds the maximum capacity of events, reservoir sampling with uniform distribution is performed.

func (CustomEvents) MergeFailed

func (events CustomEvents) MergeFailed(other *analyticsEvents)

MergeFailed merges the analytics events contained in other into events after a failed delivery attempt. If FailedEventsAttemptsLimit attempts have been made, the events in other are discarded. If events is full, reservoir sampling is performed.

func (CustomEvents) NumFailedAttempts

func (events CustomEvents) NumFailedAttempts() float64

NumFailedAttempts returns the number of unsuccessful attempts made to deliver data to this endpoint. The count does not include the eventual successful attempt, and the corresponding supportability metric MUST NOT be generated if only one attempt was made.

func (CustomEvents) NumSaved

func (events CustomEvents) NumSaved() float64

NumSaved returns the number of analytics events in the reservoir.

func (CustomEvents) NumSeen

func (events CustomEvents) NumSeen() float64

NumSeen returns the total number of analytics events observed.

func (CustomEvents) Split

func (events CustomEvents) Split() (*analyticsEvents, *analyticsEvents)

Split splits the events into two. NOTE! The two event pools are not valid priority queues, and should only be used to create JSON, not for adding any events.

type Error

type Error struct {
	// Priority indicates which errors should be saved in the event that
	// the number of errors is larger than the limit.  Higher numbers have
	// priority.  This field is not sent to the collector.
	Priority int
	// Data contains the error JSON as it is expected by the collector.
	Data JSONString
}

Error is the datatype representing an error or exception captured by the instrumented application. Errors are instance data and are not aggregated together in any way. Therefore, the final JSON expected by the collector is created by the agent and is sent to the daemon complete.

func (*Error) MarshalJSON

func (e *Error) MarshalJSON() ([]byte, error)

MarshalJSON marshals e to JSON according to the schema expected by the collector.

type ErrorEvents

type ErrorEvents struct {
	// contains filtered or unexported fields
}

ErrorEvents is a wrapper over AnalyticsEvents created for additional type safety and proper FailedHarvest behavior.

func NewErrorEvents

func NewErrorEvents(max int) *ErrorEvents

NewErrorEvents returns a new error event reservoir with capacity max.

func (ErrorEvents) AddEvent

func (events ErrorEvents) AddEvent(e AnalyticsEvent)

AddEvent observes the occurrence of an analytics event. If the reservoir is full, sampling occurs. Note, when sampling occurs, it is possible the event may be discarded instead of added.

func (*ErrorEvents) AddEventFromData

func (events *ErrorEvents) AddEventFromData(data []byte, priority SamplingPriority)

AddEventFromData observes the occurrence of an error event. If the reservoir is full, sampling occurs. Note: when sampling occurs, it is possible the new event may be discarded.

func (ErrorEvents) Audit

func (events ErrorEvents) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For analytics events, the audit schema is the same as the schema expected by the collector.

func (*ErrorEvents) Cmd

func (x *ErrorEvents) Cmd() string

func (ErrorEvents) CollectorJSON

func (events ErrorEvents) CollectorJSON(id AgentRunID) ([]byte, error)

CollectorJSON marshals events to JSON according to the schema expected by the collector.

func (ErrorEvents) Data

func (events ErrorEvents) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (ErrorEvents) Empty

func (events ErrorEvents) Empty() bool

Empty returns true if the collection is empty.

func (*ErrorEvents) FailedHarvest

func (events *ErrorEvents) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the contents of events to the collector fails. After a failed delivery attempt, events is merged into the upcoming harvest. This may result in sampling.

func (ErrorEvents) Merge

func (events ErrorEvents) Merge(other *analyticsEvents)

Merge merges the analytics events contained in other into events. If the combined number of events exceeds the maximum capacity of events, reservoir sampling with uniform distribution is performed.

func (ErrorEvents) MergeFailed

func (events ErrorEvents) MergeFailed(other *analyticsEvents)

MergeFailed merges the analytics events contained in other into events after a failed delivery attempt. If FailedEventsAttemptsLimit attempts have been made, the events in other are discarded. If events is full, reservoir sampling is performed.

func (ErrorEvents) NumFailedAttempts

func (events ErrorEvents) NumFailedAttempts() float64

NumFailedAttempts returns the number of unsuccessful attempts made to deliver data to this endpoint. The count does not include the eventual successful attempt, and the corresponding supportability metric MUST NOT be generated if only one attempt was made.

func (ErrorEvents) NumSaved

func (events ErrorEvents) NumSaved() float64

NumSaved returns the number of analytics events in the reservoir.

func (ErrorEvents) NumSeen

func (events ErrorEvents) NumSeen() float64

NumSeen returns the total number of analytics events observed.

func (ErrorEvents) Split

func (events ErrorEvents) Split() (*analyticsEvents, *analyticsEvents)

Split splits the events into two. NOTE! The two event pools are not valid priority queues, and should only be used to create JSON, not for adding any events.

type ErrorHeap

type ErrorHeap []*Error

ErrorHeap is a bounded collection of Errors captured by an instrumented application. Once the collection is full, replacement occurs based on the relative priorities of the errors.

func NewErrorHeap

func NewErrorHeap(max int) *ErrorHeap

NewErrorHeap returns a new ErrorHeap with maximum capacity max.

func (*ErrorHeap) AddError

func (h *ErrorHeap) AddError(priority int, dataNeedsCopy []byte)

AddError observes an error captured by an application. If the collection is full, replacement is performed.
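
A sketch of typical use (the capacity and priority are placeholders; the error JSON is produced by the agent, not constructed here):

// recordAgentError stores one agent-built error JSON payload in a bounded heap.
func recordAgentError(errorJSON []byte) *ErrorHeap {
	h := NewErrorHeap(20)
	h.AddError(1, errorJSON) // higher priorities win once the heap is full
	return h
}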

func (*ErrorHeap) Audit

func (h *ErrorHeap) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For traced errors, the audit schema is the same as the schema expected by the collector.

func (*ErrorHeap) Cmd

func (x *ErrorHeap) Cmd() string

func (*ErrorHeap) Data

func (h *ErrorHeap) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (*ErrorHeap) Empty

func (h *ErrorHeap) Empty() bool

Empty returns true if the collection is empty.

func (*ErrorHeap) FailedHarvest

func (h *ErrorHeap) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the collection to the collector fails. Traced errors are discarded after one failed attempt.

func (ErrorHeap) Len

func (h ErrorHeap) Len() int

func (ErrorHeap) Less

func (h ErrorHeap) Less(i, j int) bool

func (*ErrorHeap) Pop

func (h *ErrorHeap) Pop() interface{}

Pop removes the lowest priority element from the error heap.

func (*ErrorHeap) Push

func (h *ErrorHeap) Push(x interface{})

Push appends x to the collection. This method should not be called directly because it does not enforce the maximum capacity. Use AddError instead.

func (ErrorHeap) Swap

func (h ErrorHeap) Swap(i, j int)

type FailedHarvestSaver

type FailedHarvestSaver interface {
	FailedHarvest(*Harvest)
}

type FlatTxn

type FlatTxn []byte

func (FlatTxn) AggregateInto

func (t FlatTxn) AggregateInto(h *Harvest)

type Harvest

type Harvest struct {
	Metrics      *MetricTable
	Errors       *ErrorHeap
	SlowSQLs     *SlowSQLs
	TxnTraces    *TxnTraces
	TxnEvents    *TxnEvents
	CustomEvents *CustomEvents
	ErrorEvents  *ErrorEvents
	SpanEvents   *SpanEvents
	LogEvents    *LogEvents
	PhpPackages  *PhpPackages
	// contains filtered or unexported fields
}

func NewHarvest

func NewHarvest(now time.Time, hl collector.EventConfigs) *Harvest

func (*Harvest) IncrementHttpErrors

func (h *Harvest) IncrementHttpErrors(statusCode int)

IncrementHttpErrors updates the HTTP error counts for the given status code.

type HarvestError

type HarvestError struct {
	Reply collector.RPMResponse
	// contains filtered or unexported fields
}

type HarvestTriggerFunc

type HarvestTriggerFunc func(trigger chan HarvestType, cancel chan bool)

A HarvestTriggerFunc is a harvest trigger function. Each App has one, and it sends HarvestType messages to the given trigger channel to initiate a harvest for that app. When an AppHarvest is closed, a `true` value is sent over the trigger function's cancel channel.
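
A sketch of a custom trigger built on a fixed interval (hypothetical; the daemon derives its real triggers from the connect reply, and the real builder may acknowledge cancellation differently; time is assumed to be imported):

// fixedIntervalTrigger returns a HarvestTriggerFunc that requests a full
// harvest on a fixed interval until the cancel channel fires.
func fixedIntervalTrigger(interval time.Duration) HarvestTriggerFunc {
	return func(trigger chan HarvestType, cancel chan bool) {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				trigger <- HarvestAll
			case <-cancel:
				return
			}
		}
	}
}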

type HarvestType

type HarvestType uint16
const (
	HarvestMetrics      HarvestType = (1 << 0)
	HarvestErrors       HarvestType = (1 << 1)
	HarvestSlowSQLs     HarvestType = (1 << 2)
	HarvestTxnTraces    HarvestType = (1 << 3)
	HarvestTxnEvents    HarvestType = (1 << 4)
	HarvestCustomEvents HarvestType = (1 << 5)
	HarvestErrorEvents  HarvestType = (1 << 6)
	HarvestSpanEvents   HarvestType = (1 << 7)
	HarvestLogEvents    HarvestType = (1 << 8)
	HarvestPhpPackages  HarvestType = (1 << 9)
	HarvestDefaultData  HarvestType = HarvestMetrics | HarvestErrors | HarvestSlowSQLs | HarvestTxnTraces | HarvestPhpPackages
	HarvestAll          HarvestType = HarvestDefaultData | HarvestTxnEvents | HarvestCustomEvents | HarvestErrorEvents | HarvestSpanEvents | HarvestLogEvents
)
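
HarvestType is a bit mask, so individual data types are tested and combined with bitwise operators. A small sketch (the helper name is hypothetical):

// harvestWants reports whether the harvest type t includes the given kind.
func harvestWants(t, kind HarvestType) bool {
	return t&kind != 0
}

For example, harvestWants(HarvestAll, HarvestSpanEvents) is true, while harvestWants(HarvestDefaultData, HarvestTxnEvents) is false.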

type JSONString

type JSONString []byte

func (JSONString) MarshalJSON

func (js JSONString) MarshalJSON() ([]byte, error)

func (*JSONString) UnmarshalJSON

func (js *JSONString) UnmarshalJSON(data []byte) error

type Listener

type Listener struct {
	// contains filtered or unexported fields
}

func Listen

func Listen(network, addr string) (*Listener, error)

func (*Listener) Close

func (l *Listener) Close() error

func (*Listener) Serve

func (l *Listener) Serve(h MessageHandler) error

type LogEvents

type LogEvents struct {
	// contains filtered or unexported fields
}

LogEvents is a wrapper over AnalyticsEvents created for additional type safety and proper FailedHarvest behavior.

func NewLogEvents

func NewLogEvents(max int) *LogEvents

NewLogEvents returns a new Log event reservoir with capacity max.

func (LogEvents) AddEvent

func (events LogEvents) AddEvent(e AnalyticsEvent)

AddEvent observes the occurrence of an analytics event. If the reservoir is full, sampling occurs. Note, when sampling occurs, it is possible the event may be discarded instead of added.

func (*LogEvents) AddEventFromData

func (events *LogEvents) AddEventFromData(data []byte, priority SamplingPriority)

AddEventFromData observes the occurrence of an Log event. If the reservoir is full, sampling occurs. Note: when sampling occurs, it is possible the new event may be discarded.

func (*LogEvents) Audit

func (events *LogEvents) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For analytics events, the audit schema is the same as the schema expected by the collector.

func (*LogEvents) Cmd

func (x *LogEvents) Cmd() string

func (*LogEvents) CollectorJSON

func (events *LogEvents) CollectorJSON(id AgentRunID) ([]byte, error)

CollectorJSON marshals events to JSON according to the schema expected by the collector.

func (*LogEvents) Data

func (events *LogEvents) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (LogEvents) Empty

func (events LogEvents) Empty() bool

Empty returns true if the collection is empty.

func (*LogEvents) FailedHarvest

func (events *LogEvents) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the contents of events to the collector fails. After a failed delivery attempt, events is merged into the upcoming harvest. This may result in sampling.

func (LogEvents) Merge

func (events LogEvents) Merge(other *analyticsEvents)

Merge merges the analytics events contained in other into events. If the combined number of events exceeds the maximum capacity of events, reservoir sampling with uniform distribution is performed.

func (LogEvents) MergeFailed

func (events LogEvents) MergeFailed(other *analyticsEvents)

MergeFailed merges the analytics events contained in other into events after a failed delivery attempt. If FailedEventsAttemptsLimit attempts have been made, the events in other are discarded. If events is full, reservoir sampling is performed.

func (LogEvents) NumFailedAttempts

func (events LogEvents) NumFailedAttempts() float64

NumFailedAttempts returns the number of unsuccessful attempts made to deliver data to this endpoint. The count does not include the eventual successful attempt, and the corresponding supportability metric MUST NOT be generated if only one attempt was made.

func (LogEvents) NumSaved

func (events LogEvents) NumSaved() float64

NumSaved returns the number of analytics events in the reservoir.

func (LogEvents) NumSeen

func (events LogEvents) NumSeen() float64

NumSeen returns the total number of analytics events observed.

func (LogEvents) Split

func (events LogEvents) Split() (*analyticsEvents, *analyticsEvents)

Split splits the events into two. NOTE! The two event pools are not valid priority queues, and should only be used to create JSON, not for adding any events.

type MessageHandler

type MessageHandler interface {
	HandleMessage(RawMessage) ([]byte, error)
}

type MessageType

type MessageType uint32

MessageType identifies the encoding for a message body.

const (
	MessageTypeRaw MessageType = iota
	MessageTypeJSON
	MessageTypeBinary
)

func (MessageType) String

func (mt MessageType) String() string

type MessageWriter

type MessageWriter struct {
	W    io.Writer   // underlying writer
	Type MessageType // message encoding
	// contains filtered or unexported fields
}

A MessageWriter writes data to a stream as messages.

func (*MessageWriter) Write

func (mw *MessageWriter) Write(p []byte) (n int, err error)

Write writes len(p) bytes from p to the underlying data stream as a single message. It returns the number of bytes written and any error encountered that caused the write to stop early. When write fails to write the complete message, the underlying data stream should be assumed to be out of sync.

func (*MessageWriter) WriteString

func (mw *MessageWriter) WriteString(s string) (n int, err error)

WriteString writes the contents of the string s to mw as a message. If the underlying writer implements a WriteString method, it is invoked directly.
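
A sketch of framing a JSON body as a single message (the helper name is hypothetical; io is assumed to be imported):

// writeJSONMessage writes body to w as one message with a JSON type header.
func writeJSONMessage(w io.Writer, body []byte) error {
	mw := &MessageWriter{W: w, Type: MessageTypeJSON}
	_, err := mw.Write(body)
	return err
}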

type MetricForce

type MetricForce int

MetricForce describes the kind of a metric. Metrics can be either forced or unforced.

const (
	// Forced indicates a metric that is critical to quality in some way.
	// For example, the WebTransaction metric that powers the overview
	// charts. Forced metrics are not subject to per-harvest metric limits.
	Forced MetricForce = iota

	// Unforced indicates a metric that is not critical. Unforced metrics
	// are safe to discard when the maximum number of unique metrics per
	// harvest is reached.
	Unforced
)

type MetricRule

type MetricRule struct {
	// 'Ignore' indicates if the entire transaction should be discarded if
	// there is a match.  This field is only used by "url_rules" and
	// "transaction_name_rules", not "metric_name_rules".
	Ignore              bool   `json:"ignore"`
	EachSegment         bool   `json:"each_segment"`
	ReplaceAll          bool   `json:"replace_all"`
	Terminate           bool   `json:"terminate_chain"`
	Order               int    `json:"eval_order"`
	OriginalReplacement string `json:"replacement"`
	RawExpr             string `json:"match_expression"`

	// Go's regexp backreferences use '${1}' instead of the Perlish '\1', so
	// we transform the replacement string into the Go syntax and store it here.
	TransformedReplacement string
	// contains filtered or unexported fields
}

func (*MetricRule) Apply

func (r *MetricRule) Apply(s string) (MetricRuleResult, string)

type MetricRuleResult

type MetricRuleResult int
const (
	RuleResultMatched MetricRuleResult = iota
	RuleResultUnmatched
	RuleResultIgnore
)

type MetricRules

type MetricRules []*MetricRule

func NewMetricRulesFromJSON

func NewMetricRulesFromJSON(data []byte) MetricRules

func (MetricRules) Apply

func (rules MetricRules) Apply(s string) (MetricRuleResult, string)

func (MetricRules) Len

func (rules MetricRules) Len() int

func (MetricRules) Less

func (rules MetricRules) Less(i, j int) bool

Less orders rules by their evaluation order; rules should be applied in increasing order.
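
A sketch of applying the rules delivered in a connect reply (the helper name is hypothetical):

// renameMetric applies the "metric_name_rules" JSON from the connect reply to
// a metric name, reporting whether the name should be kept.
func renameMetric(rulesJSON []byte, name string) (string, bool) {
	rules := NewMetricRulesFromJSON(rulesJSON)
	result, renamed := rules.Apply(name)
	if result == RuleResultIgnore {
		return "", false
	}
	return renamed, true
}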

func (MetricRules) Swap

func (rules MetricRules) Swap(i, j int)

func (*MetricRules) UnmarshalJSON

func (rules *MetricRules) UnmarshalJSON(b []byte) (err error)

type MetricTable

type MetricTable struct {
	// contains filtered or unexported fields
}

A MetricTable represents an aggregate of metrics reported by agents during a harvest period. Each metric table enforces a limit on the maximum number of unique metrics that can be recorded. However, this is a soft limit. Some metrics are critical and must be delivered. These are called forced metrics, and the maximum number of unique, forced metrics is unlimited.

func NewMetricTable

func NewMetricTable(maxTableSize int, now time.Time) *MetricTable

NewMetricTable returns a new metric table with capacity maxTableSize.

func (*MetricTable) AddCount

func (mt *MetricTable) AddCount(name, scope string, count float64,
	force MetricForce)

AddCount adds a metric with the given call count to mt. If mt is full, and the metric is unforced, the metric will not be added.
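
A sketch of recording metrics (the table size, metric names, and values are placeholders; AddValue is documented below, and time is assumed to be imported):

// recordExampleMetrics shows the difference between forced and unforced
// metrics: the forced count is always kept, while the unforced value may be
// dropped once the table is full.
func recordExampleMetrics() *MetricTable {
	mt := NewMetricTable(2000, time.Now())
	mt.AddCount("Supportability/Example/Seen", "", 1, Forced)
	mt.AddValue("Custom/Example/Duration", "", 0.25, Unforced)
	return mt
}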

func (*MetricTable) AddRaw

func (mt *MetricTable) AddRaw(nameSlice []byte, nameString, scope string,
	data [6]float64, force MetricForce)

AddRaw adds a metric to mt. If mt is full, and the metric is unforced, the metric will not be added.

func (*MetricTable) AddValue

func (mt *MetricTable) AddValue(name, scope string, value float64,
	force MetricForce)

AddValue adds a metric with the given duration to mt. If mt is full, and the metric is unforced, the metric will not be added.

func (*MetricTable) ApplyRules

func (mt *MetricTable) ApplyRules(rules MetricRules) *MetricTable

ApplyRules returns a new MetricTable containing the results of applying the given metric rename rules to mt.

func (*MetricTable) Audit

func (mt *MetricTable) Audit(id AgentRunID, harvestStart time.Time) ([]byte,
	error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For metrics, the audit schema is the same as the schema expected by the collector.

func (*MetricTable) Cmd

func (x *MetricTable) Cmd() string

func (*MetricTable) CollectorJSON

func (mt *MetricTable) CollectorJSON(id AgentRunID, now time.Time) ([]byte,
	error)

CollectorJSON marshals the metric table to JSON according to the schema expected by the collector.

func (*MetricTable) CollectorJSONSorted

func (mt *MetricTable) CollectorJSONSorted(id AgentRunID,
	now time.Time) ([]byte, error)

CollectorJSONSorted marshals the metric table to JSON according to the schema expected by the collector. The metrics are ordered by name and scope.

func (*MetricTable) Data

func (mt *MetricTable) Data(id AgentRunID, harvestStart time.Time) ([]byte,
	error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (*MetricTable) DebugJSON

func (mt *MetricTable) DebugJSON() string

DebugJSON marshals the metrics to JSON in a format useful for debugging.

func (*MetricTable) Empty

func (mt *MetricTable) Empty() bool

Empty returns true if the metric table is empty.

func (*MetricTable) FailedHarvest

func (mt *MetricTable) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the contents of mt to the collector fails. After a failed delivery attempt, mt is merged into the upcoming harvest. This may result in some unforced metrics being discarded.

func (*MetricTable) Has

func (mt *MetricTable) Has(name string) bool

Has returns true if the given metric exists in the metric table (regardless of scope).

func (*MetricTable) Merge

func (mt *MetricTable) Merge(from *MetricTable)

Merge merges the given metric table into mt.

func (*MetricTable) MergeFailed

func (mt *MetricTable) MergeFailed(from *MetricTable)

MergeFailed merges the given metrics into mt after a failed delivery attempt. If FailedMetricAttemptsLimit attempts have been made, the metrics in from are discarded. Unforced metrics in from may be discarded if mt is full.

func (*MetricTable) NumFailedAttempts

func (mt *MetricTable) NumFailedAttempts() float64

NumFailedAttempts returns the number of unsuccessful attempts made to deliver data to this endpoint. The count does not include the eventual successful attempt, and the corresponding supportability metric MUST NOT be generated if only one attempt was made.

type PayloadCreator

type PayloadCreator interface {
	FailedHarvestSaver
	Empty() bool
	Data(id AgentRunID, harvestStart time.Time) ([]byte, error)
	// For many data types, the audit version is the same as the data. Those
	// data types return nil from Audit.
	Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)
	Cmd() string
}

type PhpPackages

type PhpPackages struct {
	// contains filtered or unexported fields
}

PhpPackages represents all detected PHP packages reported by an agent.

func NewPhpPackages

func NewPhpPackages() *PhpPackages

NewPhpPackages returns a new, empty PhpPackages struct.

func (*PhpPackages) AddPhpPackagesFromData

func (packages *PhpPackages) AddPhpPackagesFromData(data []byte) error

AddPhpPackagesFromData observes the PHP packages info from the agent.

func (*PhpPackages) Audit

func (packages *PhpPackages) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For PHP packages, the audit schema is the same as the schema expected by the collector.

func (*PhpPackages) Cmd

func (x *PhpPackages) Cmd() string

func (*PhpPackages) CollectorJSON

func (packages *PhpPackages) CollectorJSON(id AgentRunID) ([]byte, error)

CollectorJSON marshals events to JSON according to the schema expected by the collector.

func (*PhpPackages) Data

func (packages *PhpPackages) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (*PhpPackages) Empty

func (packages *PhpPackages) Empty() bool

Empty returns true if the collection is empty.

func (*PhpPackages) FailedHarvest

func (packages *PhpPackages) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the contents of events to the collector fails. After a failed delivery attempt, the package info is currently dropped.

func (*PhpPackages) NumSaved

func (packages *PhpPackages) NumSaved() float64

NumSaved returns the number of PHP package payloads stored. This should always be 0 or 1, because the agent reports all of its PHP packages as a single JSON string.

func (*PhpPackages) SetPhpPackages

func (packages *PhpPackages) SetPhpPackages(data []byte) error

SetPhpPackages sets the observed package list.

type PhpPackagesKey

type PhpPackagesKey struct {
	Name    string
	Version string
}

type PidFile

type PidFile struct {
	// contains filtered or unexported fields
}

PidFile represents an open file descriptor to a pid file.

A few words about pid files. The daemon requires a pid file to prevent a race condition where many agents attempt to spawn a daemon at the same time. To fully close the race condition the daemon that holds the pid file lock must retain it for its entire lifetime. For that reason we do not close the pid file and we do not delete the pid file on exit.

func CreatePidFile

func CreatePidFile(name string) (*PidFile, error)

CreatePidFile opens the given pid file and acquires an exclusive write lock.
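
A startup sketch (the helper name is hypothetical; errors is assumed to be imported):

// acquirePidLock takes the daemon's pid file lock and records the current pid.
// A lock held by another daemon is reported as ErrLocked (see the package
// variables).
func acquirePidLock(path string) (*PidFile, error) {
	pf, err := CreatePidFile(path)
	if err != nil {
		if errors.Is(err, ErrLocked) {
			return nil, err // another daemon owns the lock; let it run
		}
		return nil, err
	}
	if _, err := pf.Write(); err != nil {
		return nil, err
	}
	return pf, nil
}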

func (*PidFile) Close

func (f *PidFile) Close() error

Close closes the pid file, releasing its write lock and rendering it unusable for I/O. It returns an error, if any. Note, closing the pid file does not cause it to be removed.

func (*PidFile) Name

func (f *PidFile) Name() string

Name returns the name of the pid file as presented to CreatePidFile.

func (*PidFile) Remove

func (f *PidFile) Remove() error

Remove attempts to remove the pid file. Removing a pid file releases its write lock and renders the PidFile unusable for I/O. For a daemon process, removing the pid file should be the very last step before exiting.

func (*PidFile) Write

func (f *PidFile) Write() (n int, err error)

Write writes the process id of the current process to f replacing its contents. It returns the number of bytes written and an error, if any.

type PreconnectReply

type PreconnectReply struct {
	Collector        string                    `json:"redirect_host"`
	SecurityPolicies map[string]SecurityPolicy `json:"security_policies"`
}

PreconnectReply contains all of the fields from the app preconnect command reply that are used in the daemon.

type Processor

type Processor struct {
	// contains filtered or unexported fields
}

func NewProcessor

func NewProcessor(cfg ProcessorConfig) *Processor

func (*Processor) CleanExit

func (p *Processor) CleanExit()

CleanExit terminates p.Run()'s loop and iterates over the processor's harvests, explicitly calling doHarvest on each. By setting the ProcessorHarvest's Type to HarvestFinal, the downstream functions avoid spawning goroutines, so this function returns only after all harvests complete.

func (*Processor) IncomingAppInfo

func (p *Processor) IncomingAppInfo(id *AgentRunID, info *AppInfo) AppInfoReply

func (*Processor) IncomingSpanBatch

func (p *Processor) IncomingSpanBatch(batch SpanBatch)

func (*Processor) IncomingTxnData

func (p *Processor) IncomingTxnData(id AgentRunID, sample AggregaterInto)

func (*Processor) Run

func (p *Processor) Run() error

type ProcessorConfig

type ProcessorConfig struct {
	Client          collector.Client
	IntegrationMode bool
	UtilConfig      utilization.Config
	AppTimeout      time.Duration
}

type ProcessorHarvest

type ProcessorHarvest struct {
	*AppHarvest
	ID       AgentRunID
	Type     HarvestType
	Blocking bool
}

ProcessorHarvest represents a processor harvest event: when this is received by a processor, it indicates that a harvest should be performed for the harvest and run ID contained within. The word "event" is omitted from the type name only to avoid confusion with analytics events.

type RawConnectPayload

type RawConnectPayload struct {
	Pid                int                          `json:"pid"`
	Language           string                       `json:"language"`
	Version            string                       `json:"agent_version"`
	Host               string                       `json:"host"`
	HostDisplayName    string                       `json:"display_host,omitempty"`
	Settings           map[string]interface{}       `json:"settings"`
	AppName            []string                     `json:"app_name"`
	HighSecurity       bool                         `json:"high_security"`
	Labels             JSONString                   `json:"labels"`
	Environment        JSONString                   `json:"environment"`
	Metadata           JSONString                   `json:"metadata"`
	Identifier         string                       `json:"identifier"`
	Util               *utilization.Data            `json:"utilization,omitempty"`
	SecurityPolicies   map[string]SecurityPolicy    `json:"security_policies,omitempty"`
	EventHarvestConfig collector.EventHarvestConfig `json:"event_harvest_config"`
}

type RawMessage

type RawMessage struct {
	Type  MessageType
	Bytes []byte
}

RawMessage contains a single message's contents: Bytes does not contain the message header.

func ReadMessage

func ReadMessage(r io.Reader) (RawMessage, error)
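
A sketch of a read loop built on ReadMessage (the helper name is hypothetical; io and net are assumed to be imported, io.EOF is treated as a clean shutdown, and reply bytes are ignored for brevity):

// serveMessages reads framed messages from conn and dispatches each one to h
// until the peer closes the connection.
func serveMessages(conn net.Conn, h MessageHandler) error {
	for {
		msg, err := ReadMessage(conn)
		if err != nil {
			if err == io.EOF {
				return nil // clean shutdown by the peer
			}
			return err
		}
		if _, err := h.HandleMessage(msg); err != nil {
			return err
		}
	}
}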

type RawPreconnectPayload

type RawPreconnectPayload struct {
	SecurityPolicyToken string `json:"security_policies_token,omitempty"`
	HighSecurity        bool   `json:"high_security"`
}

type SQLId

type SQLId uint32

An SQLId is a unique identifier for an SQL statement. The agent is responsible for generating these values using an implementation-defined algorithm.

type SamplingPriority

type SamplingPriority float64

The SamplingPriority type is used by the daemon wherever it must decide which data to replace when pool limits are reached during a single harvest.

func (SamplingPriority) IsLowerPriority

func (x SamplingPriority) IsLowerPriority(y SamplingPriority) bool

type SecurityPolicy

type SecurityPolicy struct {
	Enabled  bool `json:"enabled"`
	Required bool `json:"required,omitempty"`
}

SecurityPolicy is the structure of the security policies used in the preconnect and connect commands.

type SecurityPolicyAgent

type SecurityPolicyAgent struct {
	Enabled   bool `json:"enabled"`
	Supported bool `json:"supported"`
}

type SlowSQL

type SlowSQL struct {
	ID          SQLId  // unique identifier generated by the observing agent
	Count       int32  // number of times the query has been observed
	TotalMicros uint64 // cumulative duration (usecs)
	MinMicros   uint64 // minimum observed duration (usecs)
	MaxMicros   uint64 // maximum observed duration (usecs)

	MetricName string // the datastore metric derived from the statement
	Query      string // the SQL statement
	TxnName    string // the name of the originating transaction
	TxnURL     string // the URI of the originating request

	// Params is a JSON-encoded object containing additional metadata
	// associated with the SQL statement. This includes attributes
	// generated by the agent such as the backtrace, as well as attributes
	// generated by the application and added via an API.
	Params JSONString
}

A SlowSQL aggregates information about occurrences of an SQL statement based on observations reported by an agent. Typically, agents do not report all occurrences, so the Count, TotalMicros and MinMicros fields should be taken with a grain of salt.

type SlowSQLs

type SlowSQLs struct {
	// contains filtered or unexported fields
}

SlowSQLs represents a bounded collection of SQL statements built from observations reported by agents. When the collection is full, the maximum observed duration for each SQL statement is used to implement the replacement strategy.

func NewSlowSQLs

func NewSlowSQLs(max int) *SlowSQLs

NewSlowSQLs returns a new, empty collection of SQL statements with maximum capacity max.

func (*SlowSQLs) Audit

func (slows *SlowSQLs) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection of slow SQL statements into JSON according to the schema expected by the audit log. This is the same schema expected by the collector, except that compression and base64 encoding are disabled to aid readability.

func (*SlowSQLs) Cmd

func (x *SlowSQLs) Cmd() string

func (*SlowSQLs) CollectorJSON

func (slows *SlowSQLs) CollectorJSON(compressEncode bool) ([]byte, error)

CollectorJSON marshals the collection of slow SQL statements into JSON according to the schema expected by the collector.

Note: This JSON does not contain the agentRunID. This is for historical reasons. Since the agentRunID is included in the url, its use in the other commands' JSON is admittedly redundant, although required.

func (*SlowSQLs) Data

func (slows *SlowSQLs) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection of slow SQL statements into JSON according to the schema expected by the collector.

func (*SlowSQLs) Empty

func (slows *SlowSQLs) Empty() bool

Empty returns true if the collection is empty.

func (*SlowSQLs) FailedHarvest

func (slows *SlowSQLs) FailedHarvest(newHarvest *Harvest)

FailedHarvest discards the collection of slow SQL statements after an attempt to send them to the collector fails.

func (*SlowSQLs) Observe

func (slows *SlowSQLs) Observe(slow *SlowSQL)

Observe aggregates an SQL statement into the collection if the query has previously been observed or the collection has sufficient capacity to add it. Otherwise, the SQL statement is added conditionally based on the collection's replacement strategy.
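
A sketch with placeholder values (the metric name, query, timings, and params shown here are illustrative only):

// observeSlowQuery records one agent-reported slow SQL observation.
func observeSlowQuery(slows *SlowSQLs) {
	slows.Observe(&SlowSQL{
		ID:          SQLId(42),
		Count:       1,
		TotalMicros: 12500,
		MinMicros:   12500,
		MaxMicros:   12500,
		MetricName:  "Datastore/statement/MySQL/users/select",
		Query:       "SELECT * FROM users WHERE id = ?",
		TxnName:     "WebTransaction/Action/users",
		TxnURL:      "/users",
		Params:      JSONString(`{"backtrace":[]}`),
	})
}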

type SpanBatch

type SpanBatch struct {
	// contains filtered or unexported fields
}

type SpanEvents

type SpanEvents struct {
	// contains filtered or unexported fields
}

SpanEvents is a wrapper over AnalyticsEvents created for additional type safety and proper FailedHarvest behavior.

func NewSpanEvents

func NewSpanEvents(max int) *SpanEvents

NewSpanEvents returns a new span event reservoir with capacity max.

func (SpanEvents) AddEvent

func (events SpanEvents) AddEvent(e AnalyticsEvent)

AddEvent observes the occurrence of an analytics event. If the reservoir is full, sampling occurs. Note, when sampling occurs, it is possible the event may be discarded instead of added.

func (*SpanEvents) AddEventFromData

func (events *SpanEvents) AddEventFromData(data []byte, priority SamplingPriority)

AddEventFromData observes the occurrence of a span event. If the reservoir is full, sampling occurs. Note: when sampling occurs, it is possible the new event may be discarded.

func (SpanEvents) Audit

func (events SpanEvents) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For analytics events, the audit schema is the same as the schema expected by the collector.

func (*SpanEvents) Cmd

func (x *SpanEvents) Cmd() string

func (SpanEvents) CollectorJSON

func (events SpanEvents) CollectorJSON(id AgentRunID) ([]byte, error)

CollectorJSON marshals events to JSON according to the schema expected by the collector.

func (SpanEvents) Data

func (events SpanEvents) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (SpanEvents) Empty

func (events SpanEvents) Empty() bool

Empty returns true if the collection is empty.

func (*SpanEvents) FailedHarvest

func (events *SpanEvents) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the contents of events to the collector fails. After a failed delivery attempt, events is merged into the upcoming harvest. This may result in sampling.

func (SpanEvents) Merge

func (events SpanEvents) Merge(other *analyticsEvents)

Merge merges the analytics events contained in other into events. If the combined number of events exceeds the maximum capacity of events, reservoir sampling with uniform distribution is performed.

func (SpanEvents) MergeFailed

func (events SpanEvents) MergeFailed(other *analyticsEvents)

MergeFailed merges the analytics events contained in other into events after a failed delivery attempt. If FailedEventsAttemptsLimit attempts have been made, the events in other are discarded. If events is full, reservoir sampling is performed.

func (SpanEvents) NumFailedAttempts

func (events SpanEvents) NumFailedAttempts() float64

NumFailedAttempts returns the number of unsuccessful attempts made to deliver data to this endpoint. The count does not include the eventual successful attempt, and the corresponding supportability metric MUST NOT be generated if only one attempt was made.

func (SpanEvents) NumSaved

func (events SpanEvents) NumSaved() float64

NumSaved returns the number of analytics events in the reservoir.

func (SpanEvents) NumSeen

func (events SpanEvents) NumSeen() float64

NumSeen returns the total number of analytics events observed.

func (SpanEvents) Split

func (events SpanEvents) Split() (*analyticsEvents, *analyticsEvents)

Split splits the events into two. NOTE! The two event pools are not valid priority queues, and should only be used to create JSON, not for adding any events.

type TxnData

type TxnData struct {
	ID     AgentRunID
	Sample AggregaterInto
}

type TxnEvents

type TxnEvents struct {
	// contains filtered or unexported fields
}

TxnEvents is a wrapper over AnalyticsEvents created for additional type safety and proper FailedHarvest behavior.

func NewTxnEvents

func NewTxnEvents(max int) *TxnEvents

NewTxnEvents returns a new transaction event reservoir with capacity max.

func (TxnEvents) AddEvent

func (events TxnEvents) AddEvent(e AnalyticsEvent)

AddEvent observes the occurrence of an analytics event. If the reservoir is full, sampling occurs. Note, when sampling occurs, it is possible the event may be discarded instead of added.

func (*TxnEvents) AddSyntheticsEvent

func (events *TxnEvents) AddSyntheticsEvent(data []byte, priority SamplingPriority)

AddSyntheticsEvent observes the occurrence of a Synthetics transaction event. If the reservoir is full, sampling occurs. Note: when sampling occurs, it is possible the new event may be discarded.

func (*TxnEvents) AddTxnEvent

func (events *TxnEvents) AddTxnEvent(data []byte, priority SamplingPriority)

AddTxnEvent observes the occurrence of a transaction event. If the reservoir is full, sampling occurs. Note: when sampling occurs, it is possible the new event may be discarded.

func (TxnEvents) Audit

func (events TxnEvents) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

Audit marshals the collection to JSON according to the schema expected by the audit log. For analytics events, the audit schema is the same as the schema expected by the collector.

func (*TxnEvents) Cmd

func (x *TxnEvents) Cmd() string

func (TxnEvents) CollectorJSON

func (events TxnEvents) CollectorJSON(id AgentRunID) ([]byte, error)

CollectorJSON marshals events to JSON according to the schema expected by the collector.

func (TxnEvents) Data

func (events TxnEvents) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

Data marshals the collection to JSON according to the schema expected by the collector.

func (TxnEvents) Empty

func (events TxnEvents) Empty() bool

Empty returns true if the collection is empty.

func (*TxnEvents) FailedHarvest

func (events *TxnEvents) FailedHarvest(newHarvest *Harvest)

FailedHarvest is a callback invoked by the processor when an attempt to deliver the contents of events to the collector fails. After a failed delivery attempt, events is merged into the upcoming harvest. This may result in sampling.

func (TxnEvents) Merge

func (events TxnEvents) Merge(other *analyticsEvents)

Merge merges the analytics events contained in other into events. If the combined number of events exceeds the maximum capacity of events, reservoir sampling with uniform distribution is performed.

func (TxnEvents) MergeFailed

func (events TxnEvents) MergeFailed(other *analyticsEvents)

MergeFailed merges the analytics events contained in other into events after a failed delivery attempt. If FailedEventsAttemptsLimit attempts have been made, the events in other are discarded. If events is full, reservoir sampling is performed.

func (TxnEvents) NumFailedAttempts

func (events TxnEvents) NumFailedAttempts() float64

NumFailedAttempts returns the number of unsuccessful attempts made to deliver data to this endpoint. The count does not include the eventual successful attempt, and the corresponding supportability metric MUST NOT be generated if only one attempt was made.

func (TxnEvents) NumSaved

func (events TxnEvents) NumSaved() float64

NumSaved returns the number of analytics events in the reservoir.

func (TxnEvents) NumSeen

func (events TxnEvents) NumSeen() float64

NumSeen returns the total number of analytics events observed.

func (TxnEvents) Split

func (events TxnEvents) Split() (*analyticsEvents, *analyticsEvents)

Split splits the events into two. NOTE! The two event pools are not valid priority queues, and should only be used to create JSON, not for adding any events.

type TxnTrace

type TxnTrace struct {
	MetricName           string
	RequestURI           string
	UnixTimestampMillis  float64
	DurationMillis       float64
	Data                 JSONString
	GUID                 string
	ForcePersist         bool
	SyntheticsResourceID string
}

type TxnTraceHeap

type TxnTraceHeap []*TxnTrace

func NewTxnTraceHeap

func NewTxnTraceHeap(max int) *TxnTraceHeap

func (*TxnTraceHeap) AddTxnTrace

func (h *TxnTraceHeap) AddTxnTrace(t *TxnTrace)

func (*TxnTraceHeap) IsKeeper

func (h *TxnTraceHeap) IsKeeper(tt *TxnTrace) bool

func (TxnTraceHeap) Len

func (h TxnTraceHeap) Len() int

func (TxnTraceHeap) Less

func (h TxnTraceHeap) Less(i, j int) bool

func (*TxnTraceHeap) Pop

func (h *TxnTraceHeap) Pop() interface{}

func (*TxnTraceHeap) Push

func (h *TxnTraceHeap) Push(x interface{})

func (TxnTraceHeap) Swap

func (h TxnTraceHeap) Swap(i, j int)

type TxnTraces

type TxnTraces struct {
	// contains filtered or unexported fields
}

func NewTxnTraces

func NewTxnTraces() *TxnTraces

func (*TxnTraces) AddTxnTrace

func (traces *TxnTraces) AddTxnTrace(t *TxnTrace)

func (*TxnTraces) Audit

func (traces *TxnTraces) Audit(id AgentRunID, harvestStart time.Time) ([]byte, error)

func (*TxnTraces) Cmd

func (x *TxnTraces) Cmd() string

func (*TxnTraces) CollectorJSON

func (traces *TxnTraces) CollectorJSON(id AgentRunID, compressEncode bool) ([]byte, error)

func (*TxnTraces) Data

func (traces *TxnTraces) Data(id AgentRunID, harvestStart time.Time) ([]byte, error)

func (*TxnTraces) Empty

func (traces *TxnTraces) Empty() bool

func (*TxnTraces) FailedHarvest

func (traces *TxnTraces) FailedHarvest(newHarvest *Harvest)

func (*TxnTraces) IsKeeper

func (traces *TxnTraces) IsKeeper(tt *TxnTrace) bool

Directories

Path Synopsis
config	Package config implements configuration file parsing.
jsonx	Package jsonx extends the encoding/json package to encode JSON incrementally and without requiring reflection.
limits	Default limits.
