elastic: github.com/olivere/elastic Index | Examples | Files | Directories

package elastic

import "github.com/olivere/elastic"

Package elastic provides an interface to the Elasticsearch server (https://www.elastic.co/products/elasticsearch).

The first thing you do is to create a Client. If you have Elasticsearch installed and running with its default settings (i.e. available at http://127.0.0.1:9200), all you need to do is:

client, err := elastic.NewClient()
if err != nil {
	// Handle error
}

If your Elasticsearch server is running on a different IP and/or port, just provide a URL to NewClient:

// Create a client and connect to http://192.168.2.10:9201
client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
if err != nil {
  // Handle error
}

You can pass many more configuration parameters to NewClient. Review the documentation of NewClient for more information.

If no Elasticsearch server is available, services will fail when creating a new request and will return ErrNoClient.

A Client provides services. The services usually come with a variety of methods to prepare the query and a Do function to execute it against the Elasticsearch REST interface and return a response. Here is an example of the IndexExists service that checks if a given index already exists.

exists, err := client.IndexExists("twitter").Do(context.Background())
if err != nil {
	// Handle error
}
if !exists {
	// Index does not exist yet.
}

Look up the documentation for Client to get an idea of the services provided and what kinds of responses you get when executing the Do function of a service. Also see the wiki on Github for more details.

Copyright 2012-present Oliver Eilhard. All rights reserved. Use of this source code is governed by an MIT license. See http://olivere.mit-license.org/license.txt for details.

Code:

errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)

// Obtain a client. You can also provide your own HTTP client here.
client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
// Trace request and response details like this
// client, err := elastic.NewClient(elastic.SetTraceLog(log.New(os.Stdout, "", 0)))
if err != nil {
    // Handle error
    panic(err)
}

// Ping the Elasticsearch server to get e.g. the version number
info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background())
if err != nil {
    // Handle error
    panic(err)
}
fmt.Printf("Elasticsearch returned with code %d and version %s\n", code, info.Version.Number)

// Getting the ES version number is quite common, so there's a shortcut
esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
if err != nil {
    // Handle error
    panic(err)
}
fmt.Printf("Elasticsearch version %s\n", esversion)

// Use the IndexExists service to check if a specified index exists.
exists, err := client.IndexExists("twitter").Do(context.Background())
if err != nil {
    // Handle error
    panic(err)
}
if !exists {
    // Create a new index.
    mapping := `
{
	"settings":{
		"number_of_shards":1,
		"number_of_replicas":0
	},
	"mappings":{
		"doc":{
			"properties":{
				"user":{
					"type":"keyword"
				},
				"message":{
					"type":"text",
					"store": true,
					"fielddata": true
				},
				"retweets":{
					"type":"long"
				},
				"tags":{
					"type":"keyword"
				},
				"location":{
					"type":"geo_point"
				},
				"suggest_field":{
					"type":"completion"
				}
			}
		}
	}
}
`
    createIndex, err := client.CreateIndex("twitter").Body(mapping).Do(context.Background())
    if err != nil {
        // Handle error
        panic(err)
    }
    if !createIndex.Acknowledged {
        // Not acknowledged
    }
}

// Index a tweet (using JSON serialization)
tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
put1, err := client.Index().
    Index("twitter").
    Id("1").
    BodyJson(tweet1).
    Do(context.Background())
if err != nil {
    // Handle error
    panic(err)
}
fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)

// Index a second tweet (by string)
tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
put2, err := client.Index().
    Index("twitter").
    Id("2").
    BodyString(tweet2).
    Do(context.Background())
if err != nil {
    // Handle error
    panic(err)
}
fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)

// Get tweet with specified ID
get1, err := client.Get().
    Index("twitter").
    Id("1").
    Do(context.Background())
if err != nil {
    switch {
    case elastic.IsNotFound(err):
        panic(fmt.Sprintf("Document not found: %v", err))
    case elastic.IsTimeout(err):
        panic(fmt.Sprintf("Timeout retrieving document: %v", err))
    case elastic.IsConnErr(err):
        panic(fmt.Sprintf("Connection problem: %v", err))
    default:
        // Some other kind of error
        panic(err)
    }
}
fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)

// Refresh to make sure the documents are searchable.
_, err = client.Refresh().Index("twitter").Do(context.Background())
if err != nil {
    panic(err)
}

// Search with a term query
termQuery := elastic.NewTermQuery("user", "olivere")
searchResult, err := client.Search().
    Index("twitter").        // search in index "twitter"
    Query(termQuery).        // specify the query
    Sort("user", true).      // sort by "user" field, ascending
    From(0).Size(10).        // take documents 0-9
    Pretty(true).            // pretty print request and response JSON
    Do(context.Background()) // execute
if err != nil {
    // Handle error
    panic(err)
}

// searchResult is of type SearchResult and returns hits, suggestions,
// and all kinds of other information from Elasticsearch.
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)

// Each is a convenience function that iterates over hits in a search result.
// It makes sure you don't need to check for nil values in the response.
// However, it ignores errors in serialization. If you want full control
// over iterating the hits, see below.
var ttyp Tweet
for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
    t := item.(Tweet)
    fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
}
// TotalHits is another convenience function that works even when something goes wrong.
fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())

// Here's how you iterate through results with full control over each step.
if searchResult.TotalHits() > 0 {
    fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())

    // Iterate through results
    for _, hit := range searchResult.Hits.Hits {
        // hit.Index contains the name of the index

        // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
        var t Tweet
        err := json.Unmarshal(hit.Source, &t)
        if err != nil {
            // Deserialization failed
        }

        // Work with tweet
        fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
    }
} else {
    // No hits
    fmt.Print("Found no tweets\n")
}

// Update a tweet by the update API of Elasticsearch.
// We just increment the number of retweets.
script := elastic.NewScript("ctx._source.retweets += params.num").Param("num", 1)
update, err := client.Update().Index("twitter").Id("1").
    Script(script).
    Upsert(map[string]interface{}{"retweets": 0}).
    Do(context.Background())
if err != nil {
    // Handle error
    panic(err)
}
fmt.Printf("New version of tweet %q is now %d\n", update.Id, update.Version)

// ...

// Delete an index.
deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
if err != nil {
    // Handle error
    panic(err)
}
if !deleteIndex.Acknowledged {
    // Not acknowledged
}

Index

Examples

Package Files

acknowledged_response.go backoff.go bulk.go bulk_delete_request.go bulk_delete_request_easyjson.go bulk_index_request.go bulk_index_request_easyjson.go bulk_processor.go bulk_request.go bulk_update_request.go bulk_update_request_easyjson.go canonicalize.go cat_aliases.go cat_allocation.go cat_count.go cat_health.go cat_indices.go clear_scroll.go client.go cluster_health.go cluster_reroute.go cluster_state.go cluster_stats.go connection.go count.go decoder.go delete.go delete_by_query.go doc.go docvalue_field.go errors.go exists.go explain.go fetch_source_context.go field_caps.go geo_point.go get.go highlight.go index.go indices_analyze.go indices_close.go indices_create.go indices_delete.go indices_delete_template.go indices_exists.go indices_exists_template.go indices_flush.go indices_flush_synced.go indices_forcemerge.go indices_freeze.go indices_get.go indices_get_aliases.go indices_get_field_mapping.go indices_get_mapping.go indices_get_settings.go indices_get_template.go indices_open.go indices_put_alias.go indices_put_mapping.go indices_put_settings.go indices_put_template.go indices_refresh.go indices_rollover.go indices_segments.go indices_shrink.go indices_stats.go indices_unfreeze.go ingest_delete_pipeline.go ingest_get_pipeline.go ingest_put_pipeline.go ingest_simulate_pipeline.go inner_hit.go logger.go mget.go msearch.go mtermvectors.go nodes_info.go nodes_stats.go ping.go plugins.go query.go reindex.go request.go rescore.go rescorer.go response.go retrier.go retry.go script.go script_delete.go script_get.go script_put.go scroll.go search.go search_aggs.go search_aggs_bucket_adjacency_matrix.go search_aggs_bucket_children.go search_aggs_bucket_composite.go search_aggs_bucket_count_thresholds.go search_aggs_bucket_date_histogram.go search_aggs_bucket_date_range.go search_aggs_bucket_diversified_sampler.go search_aggs_bucket_filter.go search_aggs_bucket_filters.go search_aggs_bucket_geo_distance.go search_aggs_bucket_geohash_grid.go 
search_aggs_bucket_global.go search_aggs_bucket_histogram.go search_aggs_bucket_ip_range.go search_aggs_bucket_missing.go search_aggs_bucket_nested.go search_aggs_bucket_range.go search_aggs_bucket_reverse_nested.go search_aggs_bucket_sampler.go search_aggs_bucket_significant_terms.go search_aggs_bucket_significant_text.go search_aggs_bucket_terms.go search_aggs_matrix_stats.go search_aggs_metrics_avg.go search_aggs_metrics_cardinality.go search_aggs_metrics_extended_stats.go search_aggs_metrics_geo_bounds.go search_aggs_metrics_geo_centroid.go search_aggs_metrics_max.go search_aggs_metrics_min.go search_aggs_metrics_percentile_ranks.go search_aggs_metrics_percentiles.go search_aggs_metrics_scripted_metric.go search_aggs_metrics_stats.go search_aggs_metrics_sum.go search_aggs_metrics_top_hits.go search_aggs_metrics_value_count.go search_aggs_metrics_weighted_avg.go search_aggs_pipeline_avg_bucket.go search_aggs_pipeline_bucket_script.go search_aggs_pipeline_bucket_selector.go search_aggs_pipeline_bucket_sort.go search_aggs_pipeline_cumulative_sum.go search_aggs_pipeline_derivative.go search_aggs_pipeline_extended_stats_bucket.go search_aggs_pipeline_max_bucket.go search_aggs_pipeline_min_bucket.go search_aggs_pipeline_mov_avg.go search_aggs_pipeline_mov_fn.go search_aggs_pipeline_percentiles_bucket.go search_aggs_pipeline_serial_diff.go search_aggs_pipeline_stats_bucket.go search_aggs_pipeline_sum_bucket.go search_collapse_builder.go search_queries_bool.go search_queries_boosting.go search_queries_common_terms.go search_queries_constant_score.go search_queries_dis_max.go search_queries_exists.go search_queries_fsq.go search_queries_fsq_score_funcs.go search_queries_fuzzy.go search_queries_geo_bounding_box.go search_queries_geo_distance.go search_queries_geo_polygon.go search_queries_has_child.go search_queries_has_parent.go search_queries_ids.go search_queries_match.go search_queries_match_all.go search_queries_match_none.go search_queries_match_phrase.go 
search_queries_match_phrase_prefix.go search_queries_more_like_this.go search_queries_multi_match.go search_queries_nested.go search_queries_parent_id.go search_queries_percolator.go search_queries_prefix.go search_queries_query_string.go search_queries_range.go search_queries_raw_string.go search_queries_regexp.go search_queries_script.go search_queries_simple_query_string.go search_queries_slice.go search_queries_term.go search_queries_terms.go search_queries_terms_set.go search_queries_type.go search_queries_wildcard.go search_queries_wrapper.go search_request.go search_shards.go search_source.go search_terms_lookup.go snapshot_create.go snapshot_create_repository.go snapshot_delete.go snapshot_delete_repository.go snapshot_get.go snapshot_get_repository.go snapshot_restore.go snapshot_verify_repository.go sort.go suggest_field.go suggester.go suggester_completion.go suggester_context.go suggester_context_category.go suggester_context_geo.go suggester_phrase.go suggester_term.go tasks_cancel.go tasks_get_task.go tasks_list.go termvectors.go update.go update_by_query.go validate.go xpack_ilm_delete_lifecycle.go xpack_ilm_get_lifecycle.go xpack_ilm_put_lifecycle.go xpack_info.go xpack_security_change_password.go xpack_security_delete_role.go xpack_security_delete_role_mapping.go xpack_security_get_role.go xpack_security_get_role_mapping.go xpack_security_put_role.go xpack_security_put_role_mapping.go xpack_watcher_ack_watch.go xpack_watcher_activate_watch.go xpack_watcher_deactivate_watch.go xpack_watcher_delete_watch.go xpack_watcher_execute_watch.go xpack_watcher_get_watch.go xpack_watcher_put_watch.go xpack_watcher_start.go xpack_watcher_stats.go xpack_watcher_stop.go

Constants

const (
    // Version is the current version of Elastic.
    Version = "7.0.4"

    // DefaultURL is the default endpoint of Elasticsearch on the local machine.
    // It is used e.g. when initializing a new Client without a specific URL.
    DefaultURL = "http://127.0.0.1:9200"

    // DefaultScheme is the default protocol scheme to use when sniffing
    // the Elasticsearch cluster.
    DefaultScheme = "http"

    // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
    DefaultHealthcheckEnabled = true

    // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
    // for a response from Elasticsearch on startup, i.e. when creating a
    // client. After the client is started, a shorter timeout is commonly used
    // (its default is specified in DefaultHealthcheckTimeout).
    DefaultHealthcheckTimeoutStartup = 5 * time.Second

    // DefaultHealthcheckTimeout specifies the time a running client waits for
    // a response from Elasticsearch. Notice that the healthcheck timeout
    // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
    DefaultHealthcheckTimeout = 1 * time.Second

    // DefaultHealthcheckInterval is the default interval between
    // two health checks of the nodes in the cluster.
    DefaultHealthcheckInterval = 60 * time.Second

    // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
    DefaultSnifferEnabled = true

    // DefaultSnifferInterval is the interval between two sniffing procedures,
    // i.e. the lookup of all nodes in the cluster and their addition/removal
    // from the list of actual connections.
    DefaultSnifferInterval = 15 * time.Minute

    // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
    // process that is initiated while creating a new client. For subsequent
    // sniffing processes, DefaultSnifferTimeout is used (by default).
    DefaultSnifferTimeoutStartup = 5 * time.Second

    // DefaultSnifferTimeout is the default timeout after which the
    // sniffing process times out. Notice that for the initial sniffing
    // process, DefaultSnifferTimeoutStartup is used.
    DefaultSnifferTimeout = 2 * time.Second

    // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
    // a GET request with a body.
    DefaultSendGetBodyAs = "GET"

    // DefaultGzipEnabled specifies if gzip compression is enabled by default.
    DefaultGzipEnabled = false
)
const (
    // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive.
    DefaultScrollKeepAlive = "5m"
)

Variables

var (
    // ErrNoClient is raised when no Elasticsearch node is available.
    ErrNoClient = errors.New("no Elasticsearch node available")

    // ErrRetry is raised when a request cannot be executed after the configured
    // number of retries.
    ErrRetry = errors.New("cannot connect after several retries")

    // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
    // didn't return in time.
    ErrTimeout = errors.New("timeout")
)
var (
    // ErrBulkItemRetry is returned in BulkProcessor from a worker when
    // a response item needs to be retried.
    ErrBulkItemRetry = errors.New("elastic: uncommitted bulk response items")
)
var (
    // ErrResponseSize is raised if a response body exceeds the given max body size.
    ErrResponseSize = errors.New("elastic: response size too large")
)

func IsConflict Uses

func IsConflict(err interface{}) bool

IsConflict returns true if the given error indicates that the Elasticsearch operation resulted in a version conflict. This can occur in operations like `update` or `index` with `op_type=create`. The err parameter can be of type *elastic.Error, elastic.Error, *http.Response or int (indicating the HTTP status code).

func IsConnErr Uses

func IsConnErr(err error) bool

IsConnErr returns true if the error indicates that Elastic could not find an Elasticsearch host to connect to.

func IsContextErr Uses

func IsContextErr(err error) bool

IsContextErr returns true if the error is from a context that was canceled or whose deadline was exceeded.

func IsForbidden Uses

func IsForbidden(err interface{}) bool

IsForbidden returns true if the given error indicates that Elasticsearch returned HTTP status 403. This happens e.g. due to a missing license. The err parameter can be of type *elastic.Error, elastic.Error, *http.Response or int (indicating the HTTP status code).

func IsNotFound Uses

func IsNotFound(err interface{}) bool

IsNotFound returns true if the given error indicates that Elasticsearch returned HTTP status 404. The err parameter can be of type *elastic.Error, elastic.Error, *http.Response or int (indicating the HTTP status code).

func IsStatusCode Uses

func IsStatusCode(err interface{}, code int) bool

IsStatusCode returns true if the given error indicates that the Elasticsearch operation returned the specified HTTP status code. The err parameter can be of type *http.Response, *Error, Error, or int (indicating the HTTP status code).

func IsTimeout Uses

func IsTimeout(err interface{}) bool

IsTimeout returns true if the given error indicates that Elasticsearch returned HTTP status 408. The err parameter can be of type *elastic.Error, elastic.Error, *http.Response or int (indicating the HTTP status code).

func Retry Uses

func Retry(o Operation, b Backoff) error

Retry runs the operation o until it does not return an error or the backoff b stops. o is guaranteed to be run at least once. It is the caller's responsibility to reset b after Retry returns.

Retry sleeps the goroutine for the duration returned by b after a failed operation returns.

func RetryNotify Uses

func RetryNotify(operation Operation, b Backoff, notify Notify) error

RetryNotify calls notify function with the error and wait duration for each failed attempt before sleep.

type AcknowledgedResponse Uses

type AcknowledgedResponse struct {
    Acknowledged       bool   `json:"acknowledged"`
    ShardsAcknowledged bool   `json:"shards_acknowledged"`
    Index              string `json:"index,omitempty"`
}

AcknowledgedResponse is returned from various APIs. It simply indicates whether the operation is ack'd or not.

type AdjacencyMatrixAggregation Uses

type AdjacencyMatrixAggregation struct {
    // contains filtered or unexported fields
}

AdjacencyMatrixAggregation returning a form of adjacency matrix. The request provides a collection of named filter expressions, similar to the filters aggregation request. Each bucket in the response represents a non-empty cell in the matrix of intersecting filters.

For details, see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-adjacency-matrix-aggregation.html

func NewAdjacencyMatrixAggregation Uses

func NewAdjacencyMatrixAggregation() *AdjacencyMatrixAggregation

NewAdjacencyMatrixAggregation initializes a new AdjacencyMatrixAggregation.

func (*AdjacencyMatrixAggregation) Filters Uses

func (a *AdjacencyMatrixAggregation) Filters(name string, filter Query) *AdjacencyMatrixAggregation

Filters adds a filter with the given name to the aggregation.

func (*AdjacencyMatrixAggregation) Meta Uses

func (a *AdjacencyMatrixAggregation) Meta(metaData map[string]interface{}) *AdjacencyMatrixAggregation

Meta sets the meta data to be included in the aggregation response.

func (*AdjacencyMatrixAggregation) Source Uses

func (a *AdjacencyMatrixAggregation) Source() (interface{}, error)

Source returns a JSON-serializable interface.

func (*AdjacencyMatrixAggregation) SubAggregation Uses

func (a *AdjacencyMatrixAggregation) SubAggregation(name string, subAggregation Aggregation) *AdjacencyMatrixAggregation

SubAggregation adds a sub-aggregation to this aggregation.

type Aggregation Uses

type Aggregation interface {
    // Source returns a JSON-serializable aggregation that is a fragment
    // of the request sent to Elasticsearch.
    Source() (interface{}, error)
}

Aggregations can be seen as a unit of work that builds analytic information over a set of documents. It is (in many senses) the follow-up of facets in Elasticsearch. For more details about aggregations, visit: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations.html

type AggregationBucketAdjacencyMatrix Uses

type AggregationBucketAdjacencyMatrix struct {
    Aggregations

    Buckets []*AggregationBucketKeyItem //`json:"buckets"`
    Meta    map[string]interface{}      // `json:"meta,omitempty"`
}

AggregationBucketAdjacencyMatrix is a multi-bucket aggregation that is returned with a AdjacencyMatrix aggregation.

func (*AggregationBucketAdjacencyMatrix) UnmarshalJSON Uses

func (a *AggregationBucketAdjacencyMatrix) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketAdjacencyMatrix structure.

type AggregationBucketCompositeItem Uses

type AggregationBucketCompositeItem struct {
    Aggregations

    Key      map[string]interface{} //`json:"key"`
    DocCount int64                  //`json:"doc_count"`
}

AggregationBucketCompositeItem is a single bucket of an AggregationBucketCompositeItems structure.

func (*AggregationBucketCompositeItem) UnmarshalJSON Uses

func (a *AggregationBucketCompositeItem) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItem structure.

type AggregationBucketCompositeItems Uses

type AggregationBucketCompositeItems struct {
    Aggregations

    Buckets  []*AggregationBucketCompositeItem //`json:"buckets"`
    Meta     map[string]interface{}            // `json:"meta,omitempty"`
    AfterKey map[string]interface{}            // `json:"after_key,omitempty"`
}

AggregationBucketCompositeItems implements the response structure for a bucket aggregation of type composite.

func (*AggregationBucketCompositeItems) UnmarshalJSON Uses

func (a *AggregationBucketCompositeItems) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItems structure.

type AggregationBucketFilters Uses

type AggregationBucketFilters struct {
    Aggregations

    Buckets      []*AggregationBucketKeyItem          //`json:"buckets"`
    NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
    Meta         map[string]interface{}               // `json:"meta,omitempty"`
}

AggregationBucketFilters is a multi-bucket aggregation that is returned with a filters aggregation.

func (*AggregationBucketFilters) UnmarshalJSON Uses

func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.

type AggregationBucketHistogramItem Uses

type AggregationBucketHistogramItem struct {
    Aggregations

    Key         float64 //`json:"key"`
    KeyAsString *string //`json:"key_as_string"`
    DocCount    int64   //`json:"doc_count"`
}

AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.

func (*AggregationBucketHistogramItem) UnmarshalJSON Uses

func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.

type AggregationBucketHistogramItems Uses

type AggregationBucketHistogramItems struct {
    Aggregations

    Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
    Meta    map[string]interface{}            // `json:"meta,omitempty"`
}

AggregationBucketHistogramItems is a bucket aggregation that is returned with a date histogram aggregation.

func (*AggregationBucketHistogramItems) UnmarshalJSON Uses

func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.

type AggregationBucketKeyItem Uses

type AggregationBucketKeyItem struct {
    Aggregations

    Key         interface{} //`json:"key"`
    KeyAsString *string     //`json:"key_as_string"`
    KeyNumber   json.Number
    DocCount    int64 //`json:"doc_count"`
}

AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.

func (*AggregationBucketKeyItem) UnmarshalJSON Uses

func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.

type AggregationBucketKeyItems Uses

type AggregationBucketKeyItems struct {
    Aggregations

    DocCountErrorUpperBound int64                       //`json:"doc_count_error_upper_bound"`
    SumOfOtherDocCount      int64                       //`json:"sum_other_doc_count"`
    Buckets                 []*AggregationBucketKeyItem //`json:"buckets"`
    Meta                    map[string]interface{}      // `json:"meta,omitempty"`
}

AggregationBucketKeyItems is a bucket aggregation that is e.g. returned with a terms aggregation.

func (*AggregationBucketKeyItems) UnmarshalJSON Uses

func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.

type AggregationBucketKeyedHistogramItems Uses

type AggregationBucketKeyedHistogramItems struct {
    Aggregations

    Buckets map[string]*AggregationBucketHistogramItem //`json:"buckets"`
    Meta    map[string]interface{}                     // `json:"meta,omitempty"`
}

AggregationBucketKeyedHistogramItems is a bucket aggregation that is returned with a (keyed) date histogram aggregation.

func (*AggregationBucketKeyedHistogramItems) UnmarshalJSON Uses

func (a *AggregationBucketKeyedHistogramItems) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedHistogramItems structure.

type AggregationBucketKeyedRangeItems Uses

type AggregationBucketKeyedRangeItems struct {
    Aggregations

    DocCountErrorUpperBound int64                                  //`json:"doc_count_error_upper_bound"`
    SumOfOtherDocCount      int64                                  //`json:"sum_other_doc_count"`
    Buckets                 map[string]*AggregationBucketRangeItem //`json:"buckets"`
    Meta                    map[string]interface{}                 // `json:"meta,omitempty"`
}

AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned with a keyed range aggregation.

func (*AggregationBucketKeyedRangeItems) UnmarshalJSON Uses

func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure.

type AggregationBucketRangeItem Uses

type AggregationBucketRangeItem struct {
    Aggregations

    Key          string   //`json:"key"`
    DocCount     int64    //`json:"doc_count"`
    From         *float64 //`json:"from"`
    FromAsString string   //`json:"from_as_string"`
    To           *float64 //`json:"to"`
    ToAsString   string   //`json:"to_as_string"`
}

AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.

func (*AggregationBucketRangeItem) UnmarshalJSON Uses

func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.

type AggregationBucketRangeItems Uses

type AggregationBucketRangeItems struct {
    Aggregations

    DocCountErrorUpperBound int64                         //`json:"doc_count_error_upper_bound"`
    SumOfOtherDocCount      int64                         //`json:"sum_other_doc_count"`
    Buckets                 []*AggregationBucketRangeItem //`json:"buckets"`
    Meta                    map[string]interface{}        // `json:"meta,omitempty"`
}

AggregationBucketRangeItems is a bucket aggregation that is e.g. returned with a range aggregation.

func (*AggregationBucketRangeItems) UnmarshalJSON Uses

func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.

type AggregationBucketSignificantTerm Uses

type AggregationBucketSignificantTerm struct {
    Aggregations

    Key      string  //`json:"key"`
    DocCount int64   //`json:"doc_count"`
    BgCount  int64   //`json:"bg_count"`
    Score    float64 //`json:"score"`
}

AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.

func (*AggregationBucketSignificantTerm) UnmarshalJSON Uses

func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.

type AggregationBucketSignificantTerms Uses

type AggregationBucketSignificantTerms struct {
    Aggregations

    DocCount int64                               //`json:"doc_count"`
    Buckets  []*AggregationBucketSignificantTerm //`json:"buckets"`
    Meta     map[string]interface{}              // `json:"meta,omitempty"`
}

AggregationBucketSignificantTerms is a bucket aggregation returned with a significant terms aggregation.

func (*AggregationBucketSignificantTerms) UnmarshalJSON Uses

func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.

type AggregationExtendedStatsMetric Uses

type AggregationExtendedStatsMetric struct {
    Aggregations

    Count        int64                  // `json:"count"`
    Min          *float64               //`json:"min,omitempty"`
    Max          *float64               //`json:"max,omitempty"`
    Avg          *float64               //`json:"avg,omitempty"`
    Sum          *float64               //`json:"sum,omitempty"`
    SumOfSquares *float64               //`json:"sum_of_squares,omitempty"`
    Variance     *float64               //`json:"variance,omitempty"`
    StdDeviation *float64               //`json:"std_deviation,omitempty"`
    Meta         map[string]interface{} // `json:"meta,omitempty"`
}

AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.

func (*AggregationExtendedStatsMetric) UnmarshalJSON Uses

func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.

type AggregationGeoBoundsMetric Uses

type AggregationGeoBoundsMetric struct {
    Aggregations

    Bounds struct {
        TopLeft struct {
            Latitude  float64 `json:"lat"`
            Longitude float64 `json:"lon"`
        }   `json:"top_left"`
        BottomRight struct {
            Latitude  float64 `json:"lat"`
            Longitude float64 `json:"lon"`
        }   `json:"bottom_right"`
    }   `json:"bounds"`

    Meta map[string]interface{} // `json:"meta,omitempty"`
}

AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.

func (*AggregationGeoBoundsMetric) UnmarshalJSON Uses

func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.

type AggregationGeoCentroidMetric Uses

type AggregationGeoCentroidMetric struct {
    Aggregations

    Location struct {
        Latitude  float64 `json:"lat"`
        Longitude float64 `json:"lon"`
    }   `json:"location"`

    Count int // `json:"count,omitempty"`

    Meta map[string]interface{} // `json:"meta,omitempty"`
}

AggregationGeoCentroidMetric is a metric as returned by a GeoCentroid aggregation.

func (*AggregationGeoCentroidMetric) UnmarshalJSON Uses

func (a *AggregationGeoCentroidMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationGeoCentroidMetric structure.

type AggregationMatrixStats Uses

type AggregationMatrixStats struct {
    Aggregations

    Fields []*AggregationMatrixStatsField // `json:"field,omitempty"`
    Meta   map[string]interface{}         // `json:"meta,omitempty"`
}

AggregationMatrixStats is returned by a MatrixStats aggregation.

func (*AggregationMatrixStats) UnmarshalJSON Uses

func (a *AggregationMatrixStats) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationMatrixStats structure.

type AggregationMatrixStatsField Uses

type AggregationMatrixStatsField struct {
    Name        string             `json:"name"`
    Count       int64              `json:"count"`
    Mean        float64            `json:"mean,omitempty"`
    Variance    float64            `json:"variance,omitempty"`
    Skewness    float64            `json:"skewness,omitempty"`
    Kurtosis    float64            `json:"kurtosis,omitempty"`
    Covariance  map[string]float64 `json:"covariance,omitempty"`
    Correlation map[string]float64 `json:"correlation,omitempty"`
}

AggregationMatrixStatsField represents running stats of a single field returned from MatrixStats aggregation.

type AggregationPercentilesMetric Uses

type AggregationPercentilesMetric struct {
    Aggregations

    Values map[string]float64     // `json:"values"`
    Meta   map[string]interface{} // `json:"meta,omitempty"`
}

AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.

func (*AggregationPercentilesMetric) UnmarshalJSON Uses

func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.

type AggregationPipelineBucketMetricValue Uses

type AggregationPipelineBucketMetricValue struct {
    Aggregations

    Keys          []interface{}          // `json:"keys"`
    Value         *float64               // `json:"value"`
    ValueAsString string                 // `json:"value_as_string"`
    Meta          map[string]interface{} // `json:"meta,omitempty"`
}

AggregationPipelineBucketMetricValue is a value returned e.g. by a MaxBucket aggregation.

func (*AggregationPipelineBucketMetricValue) UnmarshalJSON Uses

func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure.

type AggregationPipelineDerivative Uses

type AggregationPipelineDerivative struct {
    Aggregations

    Value                   *float64               // `json:"value"`
    ValueAsString           string                 // `json:"value_as_string"`
    NormalizedValue         *float64               // `json:"normalized_value"`
    NormalizedValueAsString string                 // `json:"normalized_value_as_string"`
    Meta                    map[string]interface{} // `json:"meta,omitempty"`
}

AggregationPipelineDerivative is the value returned by a Derivative aggregation.

func (*AggregationPipelineDerivative) UnmarshalJSON Uses

func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.

type AggregationPipelinePercentilesMetric Uses

type AggregationPipelinePercentilesMetric struct {
    Aggregations

    Values map[string]float64     // `json:"values"`
    Meta   map[string]interface{} // `json:"meta,omitempty"`
}

AggregationPipelinePercentilesMetric is the value returned by a pipeline percentiles Metric aggregation

func (*AggregationPipelinePercentilesMetric) UnmarshalJSON Uses

func (a *AggregationPipelinePercentilesMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationPipelinePercentilesMetric structure.

type AggregationPipelineSimpleValue Uses

type AggregationPipelineSimpleValue struct {
    Aggregations

    Value         *float64               // `json:"value"`
    ValueAsString string                 // `json:"value_as_string"`
    Meta          map[string]interface{} // `json:"meta,omitempty"`
}

AggregationPipelineSimpleValue is a simple value, returned e.g. by a MovAvg aggregation.

func (*AggregationPipelineSimpleValue) UnmarshalJSON Uses

func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure.

type AggregationPipelineStatsMetric Uses

type AggregationPipelineStatsMetric struct {
    Aggregations

    Count         int64    // `json:"count"`
    CountAsString string   // `json:"count_as_string"`
    Min           *float64 // `json:"min"`
    MinAsString   string   // `json:"min_as_string"`
    Max           *float64 // `json:"max"`
    MaxAsString   string   // `json:"max_as_string"`
    Avg           *float64 // `json:"avg"`
    AvgAsString   string   // `json:"avg_as_string"`
    Sum           *float64 // `json:"sum"`
    SumAsString   string   // `json:"sum_as_string"`

    Meta map[string]interface{} // `json:"meta,omitempty"`
}

AggregationPipelineStatsMetric is a multi-value metric (count/min/max/avg/sum), returned e.g. by a StatsBucket pipeline aggregation.

func (*AggregationPipelineStatsMetric) UnmarshalJSON Uses

func (a *AggregationPipelineStatsMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationPipelineStatsMetric structure.

type AggregationScriptedMetric Uses

type AggregationScriptedMetric struct {
    Aggregations

    Value interface{}            //`json:"value"`
    Meta  map[string]interface{} //`json:"meta,omitempty"`
}

AggregationScriptedMetric is the value returned by a scripted metric aggregation. Value may be one of map[string]interface{}, []interface{}, string, bool, or json.Number.

func (*AggregationScriptedMetric) UnmarshalJSON Uses

func (a *AggregationScriptedMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationScriptedMetric structure.

type AggregationSingleBucket Uses

type AggregationSingleBucket struct {
    Aggregations

    DocCount int64                  // `json:"doc_count"`
    Meta     map[string]interface{} // `json:"meta,omitempty"`
}

AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.

func (*AggregationSingleBucket) UnmarshalJSON Uses

func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.

type AggregationStatsMetric Uses

type AggregationStatsMetric struct {
    Aggregations

    Count int64                  // `json:"count"`
    Min   *float64               //`json:"min,omitempty"`
    Max   *float64               //`json:"max,omitempty"`
    Avg   *float64               //`json:"avg,omitempty"`
    Sum   *float64               //`json:"sum,omitempty"`
    Meta  map[string]interface{} // `json:"meta,omitempty"`
}

AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.

func (*AggregationStatsMetric) UnmarshalJSON Uses

func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.

type AggregationTopHitsMetric Uses

type AggregationTopHitsMetric struct {
    Aggregations

    Hits *SearchHits            //`json:"hits"`
    Meta map[string]interface{} // `json:"meta,omitempty"`
}

AggregationTopHitsMetric is a metric returned by a TopHits aggregation.

func (*AggregationTopHitsMetric) UnmarshalJSON Uses

func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.

type AggregationValueMetric Uses

type AggregationValueMetric struct {
    Aggregations

    Value *float64               //`json:"value"`
    Meta  map[string]interface{} // `json:"meta,omitempty"`
}

AggregationValueMetric is a single-value metric, returned e.g. by a Min or Max aggregation.

func (*AggregationValueMetric) UnmarshalJSON Uses

func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error

UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.

type Aggregations Uses

type Aggregations map[string]json.RawMessage

Aggregations is a list of aggregations that are part of a search result.

Code:

// Get a client to the local Elasticsearch instance.
client, err := elastic.NewClient()
if err != nil {
    // Handle error
    panic(err)
}

// Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
timeline = timeline.SubAggregation("history", histogram)

// Search with a term query
searchResult, err := client.Search().
    Index("twitter").                  // search in index "twitter"
    Query(elastic.NewMatchAllQuery()). // return all results, but ...
    SearchType("count").               // ... do not return hits, just the count
    Aggregation("timeline", timeline). // add our aggregation to the query
    Pretty(true).                      // pretty print request and response JSON
    Do(context.Background())           // execute
if err != nil {
    // Handle error
    panic(err)
}

// Access "timeline" aggregate in search result.
agg, found := searchResult.Aggregations.Terms("timeline")
if !found {
    log.Fatalf("we should have a terms aggregation called %q", "timeline")
}
for _, userBucket := range agg.Buckets {
    // Every bucket should have the user field as key.
    user := userBucket.Key

    // The sub-aggregation history should have the number of tweets per year.
    histogram, found := userBucket.DateHistogram("history")
    if found {
        for _, year := range histogram.Buckets {
            var key string
            if s := year.KeyAsString; s != nil {
                key = *s
            }
            fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, key)
        }
    }
}

func (Aggregations) AdjacencyMatrix Uses

func (a Aggregations) AdjacencyMatrix(name string) (*AggregationBucketAdjacencyMatrix, bool)

AdjacencyMatrix returns adjacency matrix aggregation results, a form of adjacency matrix. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-adjacency-matrix-aggregation.html

func (Aggregations) Avg Uses

func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool)

Avg returns average aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-avg-aggregation.html

func (Aggregations) AvgBucket Uses

func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool)

AvgBucket returns average bucket pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-avg-bucket-aggregation.html

func (Aggregations) BucketScript Uses

func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool)

BucketScript returns bucket script pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-script-aggregation.html

func (Aggregations) Cardinality Uses

func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool)

Cardinality returns cardinality aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-cardinality-aggregation.html

func (Aggregations) Children Uses

func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool)

Children returns children results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-children-aggregation.html

func (Aggregations) Composite Uses

func (a Aggregations) Composite(name string) (*AggregationBucketCompositeItems, bool)

Composite returns composite bucket aggregation results.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-composite-aggregation.html for details.

func (Aggregations) CumulativeSum Uses

func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool)

CumulativeSum returns a cumulative sum pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-cumulative-sum-aggregation.html

func (Aggregations) DateHistogram Uses

func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool)

DateHistogram returns date histogram aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html

func (Aggregations) DateRange Uses

func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool)

DateRange returns date range aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-daterange-aggregation.html

func (Aggregations) Derivative Uses

func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool)

Derivative returns derivative pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-derivative-aggregation.html

func (Aggregations) DiversifiedSampler Uses

func (a Aggregations) DiversifiedSampler(name string) (*AggregationSingleBucket, bool)

DiversifiedSampler returns diversified_sampler aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-diversified-sampler-aggregation.html

func (Aggregations) ExtendedStats Uses

func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool)

ExtendedStats returns extended stats aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-extendedstats-aggregation.html

func (Aggregations) Filter Uses

func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool)

Filter returns filter results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-filter-aggregation.html

func (Aggregations) Filters Uses

func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool)

Filters returns filters results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-filters-aggregation.html

func (Aggregations) GeoBounds Uses

func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool)

GeoBounds returns geo-bounds aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-geobounds-aggregation.html

func (Aggregations) GeoCentroid Uses

func (a Aggregations) GeoCentroid(name string) (*AggregationGeoCentroidMetric, bool)

GeoCentroid returns geo-centroid aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-geocentroid-aggregation.html

func (Aggregations) GeoDistance Uses

func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool)

GeoDistance returns geo distance aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-geodistance-aggregation.html

func (Aggregations) GeoHash Uses

func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool)

GeoHash returns geo-hash aggregation results. https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-geohashgrid-aggregation.html

func (Aggregations) Global Uses

func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool)

Global returns global results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-global-aggregation.html

func (Aggregations) Histogram Uses

func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool)

Histogram returns histogram aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-histogram-aggregation.html

func (Aggregations) IPRange Uses

func (a Aggregations) IPRange(name string) (*AggregationBucketRangeItems, bool)

IPRange returns IP range aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-iprange-aggregation.html

func (Aggregations) KeyedDateHistogram Uses

func (a Aggregations) KeyedDateHistogram(name string) (*AggregationBucketKeyedHistogramItems, bool)

KeyedDateHistogram returns date histogram aggregation results for keyed responses.

See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html#_keyed_response_3

func (Aggregations) KeyedRange Uses

func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool)

KeyedRange returns keyed range aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-range-aggregation.html.

func (Aggregations) MatrixStats Uses

func (a Aggregations) MatrixStats(name string) (*AggregationMatrixStats, bool)

MatrixStats returns matrix stats aggregation results. https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-matrix-stats-aggregation.html

func (Aggregations) Max Uses

func (a Aggregations) Max(name string) (*AggregationValueMetric, bool)

Max returns max aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-max-aggregation.html

func (Aggregations) MaxBucket Uses

func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool)

MaxBucket returns maximum bucket pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-max-bucket-aggregation.html

func (Aggregations) Min Uses

func (a Aggregations) Min(name string) (*AggregationValueMetric, bool)

Min returns min aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-min-aggregation.html

func (Aggregations) MinBucket Uses

func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool)

MinBucket returns minimum bucket pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-min-bucket-aggregation.html

func (Aggregations) Missing Uses

func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool)

Missing returns missing results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-missing-aggregation.html

func (Aggregations) MovAvg Uses

func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool)

MovAvg returns moving average pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html

Deprecated: The MovAvgAggregation has been deprecated in 6.4.0. Use the more general MovFnAggregation instead.

func (Aggregations) MovFn Uses

func (a Aggregations) MovFn(name string) (*AggregationPipelineSimpleValue, bool)

MovFn returns moving function pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movfn-aggregation.html

func (Aggregations) Nested Uses

func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool)

Nested returns nested results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-nested-aggregation.html

func (Aggregations) PercentileRanks Uses

func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool)

PercentileRanks returns percentile ranks results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-percentile-rank-aggregation.html

func (Aggregations) Percentiles Uses

func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool)

Percentiles returns percentiles results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-percentile-aggregation.html

func (Aggregations) PercentilesBucket Uses

func (a Aggregations) PercentilesBucket(name string) (*AggregationPipelinePercentilesMetric, bool)

PercentilesBucket returns percentiles bucket pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html

func (Aggregations) Range Uses

func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool)

Range returns range aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-range-aggregation.html

func (Aggregations) ReverseNested Uses

func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool)

ReverseNested returns reverse-nested results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-reverse-nested-aggregation.html

func (Aggregations) Sampler Uses

func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool)

Sampler returns sampler aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-sampler-aggregation.html

func (Aggregations) ScriptedMetric Uses

func (a Aggregations) ScriptedMetric(name string) (*AggregationScriptedMetric, bool)

ScriptedMetric returns scripted metric aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.2/search-aggregations-metrics-scripted-metric-aggregation.html for details.

func (Aggregations) SerialDiff Uses

func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool)

SerialDiff returns serial differencing pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-serialdiff-aggregation.html

func (Aggregations) SignificantTerms Uses

func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool)

SignificantTerms returns significant terms aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html

func (Aggregations) Stats Uses

func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool)

Stats returns stats aggregation results. https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-stats-aggregation.html

func (Aggregations) StatsBucket Uses

func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric, bool)

StatsBucket returns stats bucket pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-stats-bucket-aggregation.html

func (Aggregations) Sum Uses

func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool)

Sum returns sum aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-sum-aggregation.html

func (Aggregations) SumBucket Uses

func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool)

SumBucket returns sum bucket pipeline aggregation results. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-sum-bucket-aggregation.html

func (Aggregations) Terms Uses

func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool)

Terms returns terms aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-terms-aggregation.html

func (Aggregations) TopHits Uses

func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool)

TopHits returns top-hits aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-top-hits-aggregation.html

func (Aggregations) ValueCount Uses

func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool)

ValueCount returns value-count aggregation results. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-valuecount-aggregation.html

func (Aggregations) WeightedAvg Uses

func (a Aggregations) WeightedAvg(name string) (*AggregationValueMetric, bool)

WeightedAvg computes the weighted average of numeric values that are extracted from the aggregated documents. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-weight-avg-aggregation.html

type AliasAction Uses

type AliasAction interface {
    Source() (interface{}, error)
}

AliasAction is an action to apply to an alias, e.g. "add" or "remove".

type AliasAddAction Uses

type AliasAddAction struct {
    // contains filtered or unexported fields
}

AliasAddAction is an action to add to an alias.

func NewAliasAddAction Uses

func NewAliasAddAction(alias string) *AliasAddAction

NewAliasAddAction returns an action to add an alias.

func (*AliasAddAction) Filter Uses

func (a *AliasAddAction) Filter(filter Query) *AliasAddAction

Filter associates a filter to the alias.

func (*AliasAddAction) Index Uses

func (a *AliasAddAction) Index(index ...string) *AliasAddAction

Index associates one or more indices to the alias.

func (*AliasAddAction) IndexRouting Uses

func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction

IndexRouting associates an index routing value to the alias.

func (*AliasAddAction) IsWriteIndex Uses

func (a *AliasAddAction) IsWriteIndex(flag bool) *AliasAddAction

IsWriteIndex associates an is_write_index flag to the alias.

func (*AliasAddAction) Routing Uses

func (a *AliasAddAction) Routing(routing string) *AliasAddAction

Routing associates a routing value to the alias. This basically sets index and search routing to the same value.

func (*AliasAddAction) SearchRouting Uses

func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction

SearchRouting associates a search routing value to the alias.

func (*AliasAddAction) Source Uses

func (a *AliasAddAction) Source() (interface{}, error)

Source returns the JSON-serializable data.

func (*AliasAddAction) Validate Uses

func (a *AliasAddAction) Validate() error

Validate checks if the operation is valid.

type AliasRemoveAction Uses

type AliasRemoveAction struct {
    // contains filtered or unexported fields
}

AliasRemoveAction is an action to remove an alias.

func NewAliasRemoveAction Uses

func NewAliasRemoveAction(alias string) *AliasRemoveAction

NewAliasRemoveAction returns an action to remove an alias.

func (*AliasRemoveAction) Index Uses

func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction

Index associates one or more indices to the alias.

func (*AliasRemoveAction) Source Uses

func (a *AliasRemoveAction) Source() (interface{}, error)

Source returns the JSON-serializable data.

func (*AliasRemoveAction) Validate Uses

func (a *AliasRemoveAction) Validate() error

Validate checks if the operation is valid.

type AliasRemoveIndexAction Uses

type AliasRemoveIndexAction struct {
    // contains filtered or unexported fields
}

AliasRemoveIndexAction is an action to remove an index during an alias operation.

func NewAliasRemoveIndexAction Uses

func NewAliasRemoveIndexAction(index string) *AliasRemoveIndexAction

NewAliasRemoveIndexAction returns an action to remove an index.

func (*AliasRemoveIndexAction) Source Uses

func (a *AliasRemoveIndexAction) Source() (interface{}, error)

Source returns the JSON-serializable data.

func (*AliasRemoveIndexAction) Validate Uses

func (a *AliasRemoveIndexAction) Validate() error

Validate checks if the operation is valid.

type AliasResult Uses

type AliasResult struct {
    Acknowledged       bool   `json:"acknowledged"`
    ShardsAcknowledged bool   `json:"shards_acknowledged"`
    Index              string `json:"index,omitempty"`
}

AliasResult is the outcome of calling Do on AliasService.

type AliasService Uses

type AliasService struct {
    // contains filtered or unexported fields
}

AliasService enables users to add or remove an alias. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-aliases.html for details.

func NewAliasService Uses

func NewAliasService(client *Client) *AliasService

NewAliasService implements a service to manage aliases.

func (*AliasService) Action Uses

func (s *AliasService) Action(action ...AliasAction) *AliasService

Action accepts one or more AliasAction instances which can be of type AliasAddAction or AliasRemoveAction.

func (*AliasService) Add Uses

func (s *AliasService) Add(indexName string, aliasName string) *AliasService

Add adds an alias to an index.

func (*AliasService) AddWithFilter Uses

func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService

AddWithFilter adds an alias to an index and associates a filter with the alias.

func (*AliasService) Do Uses

func (s *AliasService) Do(ctx context.Context) (*AliasResult, error)

Do executes the command.

func (*AliasService) Pretty Uses

func (s *AliasService) Pretty(pretty bool) *AliasService

Pretty asks Elasticsearch to indent the HTTP response.

func (*AliasService) Remove Uses

func (s *AliasService) Remove(indexName string, aliasName string) *AliasService

Remove removes an alias.

type AliasesResult Uses

type AliasesResult struct {
    Indices map[string]indexResult
}

AliasesResult is the outcome of calling AliasesService.Do.

func (AliasesResult) IndicesByAlias Uses

func (ar AliasesResult) IndicesByAlias(aliasName string) []string

IndicesByAlias returns all indices given a specific alias name.

type AliasesService Uses

type AliasesService struct {
    // contains filtered or unexported fields
}

AliasesService returns the aliases associated with one or more indices, or the indices associated with one or more aliases, or a combination of those filters. See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-aliases.html.

func NewAliasesService Uses

func NewAliasesService(client *Client) *AliasesService

NewAliasesService instantiates a new AliasesService.

func (*AliasesService) Alias Uses

func (s *AliasesService) Alias(alias ...string) *AliasesService

Alias adds one or more aliases.

func (*AliasesService) Do Uses

func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error)

func (*AliasesService) Index Uses

func (s *AliasesService) Index(index ...string) *AliasesService

Index adds one or more indices.

func (*AliasesService) Pretty Uses

func (s *AliasesService) Pretty(pretty bool) *AliasesService

Pretty asks Elasticsearch to indent the returned JSON.

type AllocateEmptyPrimaryAllocationCommand Uses

type AllocateEmptyPrimaryAllocationCommand struct {
    // contains filtered or unexported fields
}

AllocateEmptyPrimaryAllocationCommand allocates an unassigned empty primary shard to a specific node. Use with extreme care as this will result in data loss. Allocation deciders are ignored.

func NewAllocateEmptyPrimaryAllocationCommand Uses

func NewAllocateEmptyPrimaryAllocationCommand(index string, shardId int, node string, acceptDataLoss bool) *AllocateEmptyPrimaryAllocationCommand

NewAllocateEmptyPrimaryAllocationCommand creates a new AllocateEmptyPrimaryAllocationCommand.

func (*AllocateEmptyPrimaryAllocationCommand) Name Uses

func (cmd *AllocateEmptyPrimaryAllocationCommand) Name() string

Name of the command in a request to the Cluster Reroute API.

func (*AllocateEmptyPrimaryAllocationCommand) Source Uses

func (cmd *AllocateEmptyPrimaryAllocationCommand) Source() (interface{}, error)

Source generates the (inner) JSON to be used when serializing the command.

type AllocateReplicaAllocationCommand Uses

type AllocateReplicaAllocationCommand struct {
    // contains filtered or unexported fields
}

AllocateReplicaAllocationCommand allocates an unassigned replica shard to a specific node. Checks if allocation deciders allow allocation.

func NewAllocateReplicaAllocationCommand Uses

func NewAllocateReplicaAllocationCommand(index string, shardId int, node string) *AllocateReplicaAllocationCommand

NewAllocateReplicaAllocationCommand creates a new AllocateReplicaAllocationCommand.

func (*AllocateReplicaAllocationCommand) Name Uses

func (cmd *AllocateReplicaAllocationCommand) Name() string

Name of the command in a request to the Cluster Reroute API.

func (*AllocateReplicaAllocationCommand) Source Uses

func (cmd *AllocateReplicaAllocationCommand) Source() (interface{}, error)

Source generates the (inner) JSON to be used when serializing the command.

type AllocateStalePrimaryAllocationCommand Uses

type AllocateStalePrimaryAllocationCommand struct {
    // contains filtered or unexported fields
}

AllocateStalePrimaryAllocationCommand allocates an unassigned stale primary shard to a specific node. Use with extreme care as this will result in data loss. Allocation deciders are ignored.

func NewAllocateStalePrimaryAllocationCommand Uses

func NewAllocateStalePrimaryAllocationCommand(index string, shardId int, node string, acceptDataLoss bool) *AllocateStalePrimaryAllocationCommand

NewAllocateStalePrimaryAllocationCommand creates a new AllocateStalePrimaryAllocationCommand.

func (*AllocateStalePrimaryAllocationCommand) Name Uses

func (cmd *AllocateStalePrimaryAllocationCommand) Name() string

Name of the command in a request to the Cluster Reroute API.

func (*AllocateStalePrimaryAllocationCommand) Source Uses

func (cmd *AllocateStalePrimaryAllocationCommand) Source() (interface{}, error)

Source generates the (inner) JSON to be used when serializing the command.

type AllocationCommand Uses

type AllocationCommand interface {
    Name() string
    Source() (interface{}, error)
}

AllocationCommand is a command to be executed in a call to Cluster Reroute API.

type AllocationId Uses

type AllocationId struct {
    Id           string `json:"id"`
    RelocationId string `json:"relocation_id,omitempty"`
}

type AvgAggregation Uses

type AvgAggregation struct {
    // contains filtered or unexported fields
}

AvgAggregation is a single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. These values can be extracted either from specific numeric fields in the documents, or be generated by a provided script.

See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-avg-aggregation.html

func NewAvgAggregation Uses

func NewAvgAggregation() *AvgAggregation

func (*AvgAggregation) Field Uses

func (a *AvgAggregation) Field(field string) *AvgAggregation

func (*AvgAggregation) Format Uses

func (a *AvgAggregation) Format(format string) *AvgAggregation

func (*AvgAggregation) Meta Uses

func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation

Meta sets the meta data to be included in the aggregation response.

func (*AvgAggregation) Missing Uses

func (a *AvgAggregation) Missing(missing interface{}) *AvgAggregation

func (*AvgAggregation) Script Uses

func (a *AvgAggregation) Script(script *Script) *AvgAggregation

func (*AvgAggregation) Source Uses

func (a *AvgAggregation) Source() (interface{}, error)

func (*AvgAggregation) SubAggregation Uses

func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation

type AvgBucketAggregation Uses

type AvgBucketAggregation struct {
    // contains filtered or unexported fields
}

AvgBucketAggregation is a sibling pipeline aggregation which calculates the (mean) average value of a specified metric in a sibling aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation.

For more details, see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-avg-bucket-aggregation.html

func NewAvgBucketAggregation Uses

func NewAvgBucketAggregation() *AvgBucketAggregation

NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation.

func (*AvgBucketAggregation) BucketsPath Uses

func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation

BucketsPath sets the paths to the buckets to use for this pipeline aggregator.

func (*AvgBucketAggregation) Format Uses

func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation

Format to use on the output of this aggregation.

func (*AvgBucketAggregation) GapInsertZeros Uses

func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation

GapInsertZeros inserts zeros for gaps in the series.

func (*AvgBucketAggregation) GapPolicy Uses

func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation

GapPolicy defines what should be done when a gap in the series is discovered. Valid values include "insert_zeros" or "skip". Default is "insert_zeros".

func (*AvgBucketAggregation) GapSkip Uses

func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation

GapSkip skips gaps in the series.

func (*AvgBucketAggregation) Meta Uses

func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation

Meta sets the meta data to be included in the aggregation response.

func (*AvgBucketAggregation) Source Uses

func (a *AvgBucketAggregation) Source() (interface{}, error)

Source returns a JSON-serializable interface.

type Backoff Uses

type Backoff interface {
    // Next implements a BackoffFunc.
    Next(retry int) (time.Duration, bool)
}

Backoff allows callers to implement their own Backoff strategy.

type BackoffFunc Uses

type BackoffFunc func(retry int) (time.Duration, bool)

BackoffFunc specifies the signature of a function that returns the time to wait before the next call to a resource. To stop retrying return false in the 2nd return value.

type BackoffRetrier Uses

type BackoffRetrier struct {
    // contains filtered or unexported fields
}

BackoffRetrier is a Retrier implementation that uses the given Backoff strategy to compute the wait interval between retries.

func NewBackoffRetrier Uses

func NewBackoffRetrier(backoff Backoff) *BackoffRetrier

NewBackoffRetrier returns a retrier that uses the given backoff strategy.

func (*BackoffRetrier) Retry Uses

func (r *BackoffRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error)

Retry calls into the backoff strategy and its wait interval.

type BoolQuery Uses

type BoolQuery struct {
    Query
    // contains filtered or unexported fields
}

A bool query matches documents matching boolean combinations of other queries. For more details, see: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-bool-query.html

func NewBoolQuery Uses

func NewBoolQuery() *BoolQuery

Creates a new bool query.

func (*BoolQuery) AdjustPureNegative Uses

func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery

func (*BoolQuery) Boost Uses

func (q *BoolQuery) Boost(boost float64) *BoolQuery

func (*BoolQuery) Filter Uses

func (q *BoolQuery) Filter(filters ...Query) *BoolQuery

func (*BoolQuery) MinimumNumberShouldMatch Uses

func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery

func (*BoolQuery) MinimumShouldMatch Uses

func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery

func (*BoolQuery) Must Uses

func (q *BoolQuery) Must(queries ...Query) *BoolQuery

func (*BoolQuery) MustNot Uses

func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery

func (*BoolQuery) QueryName Uses

func (q *BoolQuery) QueryName(queryName string) *BoolQuery

func (*BoolQuery) Should Uses

func (q *BoolQuery) Should(queries ...Query) *BoolQuery

func (*BoolQuery) Source Uses

func (q *BoolQuery) Source() (interface{}, error)

Creates the query source for the bool query.

type BoostingQuery Uses

type BoostingQuery struct {
    Query
    // contains filtered or unexported fields
}

A boosting query can be used to effectively demote results that match a given query. For more details, see: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-boosting-query.html

func NewBoostingQuery Uses

func NewBoostingQuery() *BoostingQuery

Creates a new boosting query.

func (*BoostingQuery) Boost Uses

func (q *BoostingQuery) Boost(boost float64) *BoostingQuery

func (*BoostingQuery) Negative Uses

func (q *BoostingQuery) Negative(negative Query) *BoostingQuery

func (*BoostingQuery) NegativeBoost Uses

func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery

func (*BoostingQuery) Positive Uses

func (q *BoostingQuery) Positive(positive Query) *BoostingQuery

func (*BoostingQuery) Source Uses

func (q *BoostingQuery) Source() (interface{}, error)

Creates the query source for the boosting query.

type BucketCountThresholds Uses

type BucketCountThresholds struct {
    MinDocCount      *int64
    ShardMinDocCount *int64
    RequiredSize     *int
    ShardSize        *int
}

BucketCountThresholds is used in e.g. terms and significant text aggregations.

type BucketScriptAggregation Uses

type BucketScriptAggregation struct {
    // contains filtered or unexported fields
}

BucketScriptAggregation is a parent pipeline aggregation which executes a script which can perform per bucket computations on specified metrics in the parent multi-bucket aggregation. The specified metric must be numeric and the script must return a numeric value.

For more details, see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-script-aggregation.html

func NewBucketScriptAggregation Uses

func NewBucketScriptAggregation() *BucketScriptAggregation

NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation.

func (*BucketScriptAggregation) AddBucketsPath Uses

func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation

AddBucketsPath adds a bucket path to use for this pipeline aggregator.

func (*BucketScriptAggregation) BucketsPathsMap Uses

func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation

BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator.

func (*BucketScriptAggregation) Format Uses

func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation

Format to use on the output of this aggregation.

func (*BucketScriptAggregation) GapInsertZeros Uses

func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation

GapInsertZeros inserts zeros for gaps in the series.

func (*BucketScriptAggregation) GapPolicy Uses

func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation

GapPolicy defines what should be done when a gap in the series is discovered. Valid values include "insert_zeros" or "skip". Default is "insert_zeros".

func (*BucketScriptAggregation) GapSkip Uses

func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation

GapSkip skips gaps in the series.

func (*BucketScriptAggregation) Meta Uses

func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation

Meta sets the meta data to be included in the aggregation response.

func (*BucketScriptAggregation) Script Uses

func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation

Script is the script to run.

func (*BucketScriptAggregation) Source Uses

func (a *BucketScriptAggregation) Source() (interface{}, error)

Source returns a JSON-serializable interface.

type BucketSelectorAggregation Uses

type BucketSelectorAggregation struct {
    // contains filtered or unexported fields
}

BucketSelectorAggregation is a parent pipeline aggregation which determines whether the current bucket will be retained in the parent multi-bucket aggregation. The specified metric must be numeric and the script must return a boolean value. If the script language is expression then a numeric return value is permitted. In this case 0.0 will be evaluated as false and all other values will evaluate to true.

For more details, see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-selector-aggregation.html

func NewBucketSelectorAggregation Uses

func NewBucketSelectorAggregation() *BucketSelectorAggregation

NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation.

func (*BucketSelectorAggregation) AddBucketsPath Uses

func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation

AddBucketsPath adds a bucket path to use for this pipeline aggregator.

func (*BucketSelectorAggregation) BucketsPathsMap Uses

func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation

BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator.

func (*BucketSelectorAggregation) Format Uses

func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation

Format to use on the output of this aggregation.

func (*BucketSelectorAggregation) GapInsertZeros Uses

func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation

GapInsertZeros inserts zeros for gaps in the series.

func (*BucketSelectorAggregation) GapPolicy Uses

func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation

GapPolicy defines what should be done when a gap in the series is discovered. Valid values include "insert_zeros" or "skip". Default is "insert_zeros".

func (*BucketSelectorAggregation) GapSkip Uses

func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation

GapSkip skips gaps in the series.

func (*BucketSelectorAggregation) Meta Uses

func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation

Meta sets the meta data to be included in the aggregation response.

func (*BucketSelectorAggregation) Script Uses

func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation

Script is the script to run.

func (*BucketSelectorAggregation) Source Uses

func (a *BucketSelectorAggregation) Source() (interface{}, error)

Source returns a JSON-serializable interface.

type BucketSortAggregation Uses

type BucketSortAggregation struct {
    // contains filtered or unexported fields
}

BucketSortAggregation parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. Zero or more sort fields may be specified together with the corresponding sort order. Each bucket may be sorted based on its _key, _count or its sub-aggregations. In addition, parameters from and size may be set in order to truncate the result buckets.

For more details, see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-sort-aggregation.html

func NewBucketSortAggregation Uses

func NewBucketSortAggregation() *BucketSortAggregation

NewBucketSortAggregation creates and initializes a new BucketSortAggregation.

func (*BucketSortAggregation) From Uses

func (a *BucketSortAggregation) From(from int) *BucketSortAggregation

From adds the "from" parameter to the aggregation.

func (*BucketSortAggregation) GapInsertZeros Uses

func (a *BucketSortAggregation) GapInsertZeros() *BucketSortAggregation

GapInsertZeros inserts zeros for gaps in the series.

func (*BucketSortAggregation) GapPolicy Uses

func (a *BucketSortAggregation) GapPolicy(gapPolicy string) *BucketSortAggregation

GapPolicy defines what should be done when a gap in the series is discovered. Valid values include "insert_zeros" or "skip". Default is "skip".

func (*BucketSortAggregation) GapSkip Uses

func (a *BucketSortAggregation) GapSkip() *BucketSortAggregation

GapSkip skips gaps in the series.

func (*BucketSortAggregation) Meta Uses

func (a *BucketSortAggregation) Meta(meta map[string]interface{}) *BucketSortAggregation

Meta sets the meta data in the aggregation. Although metadata is supported for this aggregation by Elasticsearch, it's important to note that there's no use to it because this aggregation does not include new data in the response. It merely reorders parent buckets.

func (*BucketSortAggregation) Size Uses

func (a *BucketSortAggregation) Size(size int) *BucketSortAggregation

Size adds the "size" parameter to the aggregation.

func (*BucketSortAggregation) Sort Uses

func (a *BucketSortAggregation) Sort(field string, ascending bool) *BucketSortAggregation

Sort adds a sort order to the list of sorters.

func (*BucketSortAggregation) SortWithInfo Uses

func (a *BucketSortAggregation) SortWithInfo(info SortInfo) *BucketSortAggregation

SortWithInfo adds a SortInfo to the list of sorters.

func (*BucketSortAggregation) Source Uses

func (a *BucketSortAggregation) Source() (interface{}, error)

Source returns a JSON-serializable interface.

type BulkAfterFunc Uses

type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)

BulkAfterFunc defines the signature of callbacks that are executed after a commit to Elasticsearch. The err parameter signals an error.

type BulkBeforeFunc Uses

type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)

BulkBeforeFunc defines the signature of callbacks that are executed before a commit to Elasticsearch.

type BulkDeleteRequest Uses

type BulkDeleteRequest struct {
    BulkableRequest
    // contains filtered or unexported fields
}

BulkDeleteRequest is a request to remove a document from Elasticsearch.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for details.

func NewBulkDeleteRequest Uses

func NewBulkDeleteRequest() *BulkDeleteRequest

NewBulkDeleteRequest returns a new BulkDeleteRequest.

func (*BulkDeleteRequest) Id Uses

func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest

Id specifies the identifier of the document to delete.

func (*BulkDeleteRequest) Index Uses

func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest

Index specifies the Elasticsearch index to use for this delete request. If unspecified, the index set on the BulkService will be used.

func (*BulkDeleteRequest) Parent Uses

func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest

Parent specifies the parent of the request, which is used in parent/child mappings.

func (*BulkDeleteRequest) Routing Uses

func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest

Routing specifies a routing value for the request.

func (*BulkDeleteRequest) Source Uses

func (r *BulkDeleteRequest) Source() ([]string, error)

Source returns the on-wire representation of the delete request, split into an action-and-meta-data line and an (optional) source line. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for details.

func (*BulkDeleteRequest) String Uses

func (r *BulkDeleteRequest) String() string

String returns the on-wire representation of the delete request, concatenated as a single string.

func (*BulkDeleteRequest) Type Uses

func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest

Type specifies the Elasticsearch type to use for this delete request. If unspecified, the type set on the BulkService will be used.

func (*BulkDeleteRequest) UseEasyJSON Uses

func (r *BulkDeleteRequest) UseEasyJSON(enable bool) *BulkDeleteRequest

UseEasyJSON is an experimental setting that enables serialization with github.com/mailru/easyjson, which should result in faster serialization time and fewer allocations, but removes compatibility with encoding/json, uses unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations for details. This setting is disabled by default.

func (*BulkDeleteRequest) Version Uses

func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest

Version indicates the version to be deleted as part of an optimistic concurrency model.

func (*BulkDeleteRequest) VersionType Uses

func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest

VersionType can be "internal" (default), "external", "external_gte", or "external_gt".

type BulkIndexByScrollResponse Uses

type BulkIndexByScrollResponse struct {
    Header           http.Header `json:"-"`
    Took             int64       `json:"took"`
    SliceId          *int64      `json:"slice_id,omitempty"`
    TimedOut         bool        `json:"timed_out"`
    Total            int64       `json:"total"`
    Updated          int64       `json:"updated,omitempty"`
    Created          int64       `json:"created,omitempty"`
    Deleted          int64       `json:"deleted"`
    Batches          int64       `json:"batches"`
    VersionConflicts int64       `json:"version_conflicts"`
    Noops            int64       `json:"noops"`
    Retries          struct {
        Bulk   int64 `json:"bulk"`
        Search int64 `json:"search"`
    }   `json:"retries,omitempty"`
    Throttled            string                             `json:"throttled"`
    ThrottledMillis      int64                              `json:"throttled_millis"`
    RequestsPerSecond    float64                            `json:"requests_per_second"`
    Canceled             string                             `json:"canceled,omitempty"`
    ThrottledUntil       string                             `json:"throttled_until"`
    ThrottledUntilMillis int64                              `json:"throttled_until_millis"`
    Failures             []bulkIndexByScrollResponseFailure `json:"failures"`
}

BulkIndexByScrollResponse is the outcome of executing Do with DeleteByQueryService and UpdateByQueryService.

type BulkIndexRequest Uses

type BulkIndexRequest struct {
    BulkableRequest
    // contains filtered or unexported fields
}

BulkIndexRequest is a request to add a document to Elasticsearch.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for details.

func NewBulkIndexRequest Uses

func NewBulkIndexRequest() *BulkIndexRequest

NewBulkIndexRequest returns a new BulkIndexRequest. The operation type is "index" by default.

func (*BulkIndexRequest) Doc Uses

func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest

Doc specifies the document to index.

func (*BulkIndexRequest) Id Uses

func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest

Id specifies the identifier of the document to index.

func (*BulkIndexRequest) Index Uses

func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest

Index specifies the Elasticsearch index to use for this index request. If unspecified, the index set on the BulkService will be used.

func (*BulkIndexRequest) OpType Uses

func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest

OpType specifies if this request should follow create-only or upsert behavior. This follows the OpType of the standard document index API. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#operation-type for details.

func (*BulkIndexRequest) Parent Uses

func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest

Parent specifies the identifier of the parent document (if available).

func (*BulkIndexRequest) Pipeline Uses

func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest

Pipeline to use while processing the request.

func (*BulkIndexRequest) RetryOnConflict Uses

func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest

RetryOnConflict specifies how often to retry in case of a version conflict.

func (*BulkIndexRequest) Routing Uses

func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest

Routing specifies a routing value for the request.

func (*BulkIndexRequest) Source Uses

func (r *BulkIndexRequest) Source() ([]string, error)

Source returns the on-wire representation of the index request, split into an action-and-meta-data line and an (optional) source line. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for details.

func (*BulkIndexRequest) String Uses

func (r *BulkIndexRequest) String() string

String returns the on-wire representation of the index request, concatenated as a single string.

func (*BulkIndexRequest) Type Uses

func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest

Type specifies the Elasticsearch type to use for this index request. If unspecified, the type set on the BulkService will be used.

func (*BulkIndexRequest) UseEasyJSON Uses

func (r *BulkIndexRequest) UseEasyJSON(enable bool) *BulkIndexRequest

UseEasyJSON is an experimental setting that enables serialization with github.com/mailru/easyjson, which should result in faster serialization time and fewer allocations, but removes compatibility with encoding/json, uses unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations for details. This setting is disabled by default.

func (*BulkIndexRequest) Version Uses

func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest

Version indicates the version of the document as part of an optimistic concurrency model.

func (*BulkIndexRequest) VersionType Uses

func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest

VersionType specifies how versions are created. It can be e.g. internal, external, external_gte, or force.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#index-versioning for details.

type BulkProcessor Uses

type BulkProcessor struct {
    // contains filtered or unexported fields
}

BulkProcessor encapsulates a task that accepts bulk requests and orchestrates committing them to Elasticsearch via one or more workers.

BulkProcessor is returned by setting up a BulkProcessorService and calling the Do method.

func (*BulkProcessor) Add Uses

func (p *BulkProcessor) Add(request BulkableRequest)

Add adds a single request to commit by the BulkProcessorService.

The caller is responsible for setting the index and type on the request.

func (*BulkProcessor) Close Uses

func (p *BulkProcessor) Close() error

Close stops the bulk processor previously started with Do. If it is already stopped, this is a no-op and nil is returned.

By implementing Close, BulkProcessor implements the io.Closer interface.

func (*BulkProcessor) Flush Uses

func (p *BulkProcessor) Flush() error

Flush manually asks all workers to commit their outstanding requests. It returns only when all workers acknowledge completion.

func (*BulkProcessor) Start Uses

func (p *BulkProcessor) Start(ctx context.Context) error

Start starts the bulk processor. If the processor is already started, nil is returned.

func (*BulkProcessor) Stats Uses

func (p *BulkProcessor) Stats() BulkProcessorStats

Stats returns the latest bulk processor statistics. Collecting stats must be enabled first by calling Stats(true) on the service that created this processor.

func (*BulkProcessor) Stop Uses

func (p *BulkProcessor) Stop() error

Stop is an alias for Close.

type BulkProcessorService Uses

type BulkProcessorService struct {
    // contains filtered or unexported fields
}

BulkProcessorService allows to easily process bulk requests. It allows setting policies when to flush new bulk requests, e.g. based on a number of actions, on the size of the actions, and/or to flush periodically. It also allows to control the number of concurrent bulk requests allowed to be executed in parallel.

BulkProcessorService, by default, commits either every 1000 requests or when the (estimated) size of the bulk requests exceeds 5 MB. However, it does not commit periodically. BulkProcessorService also does retry by default, using an exponential backoff algorithm. It also will automatically re-enqueue items returned with a status of 408, 429, 503 or 507. You can change this behavior with RetryItemStatusCodes.

The caller is responsible for setting the index and type on every bulk request added to BulkProcessorService.

BulkProcessorService takes ideas from the BulkProcessor of the Elasticsearch Java API as documented in https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.

func NewBulkProcessorService Uses

func NewBulkProcessorService(client *Client) *BulkProcessorService

NewBulkProcessorService creates a new BulkProcessorService.

func (*BulkProcessorService) After Uses

func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService

After specifies a function to be executed when bulk requests have been committed to Elasticsearch. The After callback executes both when the commit was successful as well as on failures.

func (*BulkProcessorService) Backoff Uses

func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService

Backoff sets the backoff strategy to use for errors.

func (*BulkProcessorService) Before Uses

func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService

Before specifies a function to be executed before bulk requests get committed to Elasticsearch.

func (*BulkProcessorService) BulkActions Uses

func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService

BulkActions specifies when to flush based on the number of actions currently added. Defaults to 1000 and can be set to -1 to be disabled.

func (*BulkProcessorService) BulkSize Uses

func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService

BulkSize specifies when to flush based on the size (in bytes) of the actions currently added. Defaults to 5 MB and can be set to -1 to be disabled.

func (*BulkProcessorService) Do Uses

func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error)

Do creates a new BulkProcessor and starts it. Consider the BulkProcessor as a running instance that accepts bulk requests and commits them to Elasticsearch, spreading the work across one or more workers.

You can interoperate with the BulkProcessor returned by Do, e.g. Start and Stop (or Close) it.

Context is an optional context that is passed into the bulk request service calls. In contrast to other operations, this context is used in a long running process. You could use it to pass e.g. loggers, but you shouldn't use it for cancellation.

Calling Do several times returns new BulkProcessors. You probably don't want to do this. BulkProcessorService implements just a builder pattern.

func (*BulkProcessorService) FlushInterval Uses

func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService

FlushInterval specifies when to flush at the end of the given interval. This is disabled by default. If you want the bulk processor to operate completely asynchronously, set both BulkActions and BulkSize to -1 and set the FlushInterval to a meaningful interval.

func (*BulkProcessorService) Name Uses

func (s *BulkProcessorService) Name(name string) *BulkProcessorService

Name is an optional name to identify this bulk processor.

func (*BulkProcessorService) RetryItemStatusCodes Uses

func (s *BulkProcessorService) RetryItemStatusCodes(retryItemStatusCodes ...int) *BulkProcessorService

RetryItemStatusCodes sets an array of status codes that indicate that a bulk response line item should be retried.

func (*BulkProcessorService) Stats Uses

func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService

Stats tells bulk processor to gather stats while running. Use Stats to return the stats. This is disabled by default.

func (*BulkProcessorService) Workers Uses

func (s *BulkProcessorService) Workers(num int) *BulkProcessorService

Workers is the number of concurrent workers allowed to be executed. Defaults to 1 and must be greater or equal to 1.

type BulkProcessorStats Uses

type BulkProcessorStats struct {
    Flushed   int64 // number of times the flush interval has been invoked
    Committed int64 // # of times workers committed bulk requests
    Indexed   int64 // # of requests indexed
    Created   int64 // # of requests that ES reported as creates (201)
    Updated   int64 // # of requests that ES reported as updates
    Deleted   int64 // # of requests that ES reported as deletes
    Succeeded int64 // # of requests that ES reported as successful
    Failed    int64 // # of requests that ES reported as failed

    Workers []*BulkProcessorWorkerStats // stats for each worker
}

BulkProcessorStats contains various statistics of a bulk processor while it is running. Use the Stats func to return it while running.

type BulkProcessorWorkerStats Uses

type BulkProcessorWorkerStats struct {
    Queued       int64         // # of requests queued in this worker
    LastDuration time.Duration // duration of last commit
}

BulkProcessorWorkerStats represents per-worker statistics.

type BulkResponse Uses

type BulkResponse struct {
    Took   int                            `json:"took,omitempty"`
    Errors bool                           `json:"errors,omitempty"`
    Items  []map[string]*BulkResponseItem `json:"items,omitempty"`
}

BulkResponse is a response to a bulk execution.

Example: {

"took":3,
"errors":false,
"items":[{
  "index":{
    "_index":"index1",
    "_type":"tweet",
    "_id":"1",
    "_version":3,
    "status":201
  }
},{
  "index":{
    "_index":"index2",
    "_type":"tweet",
    "_id":"2",
    "_version":3,
    "status":200
  }
},{
  "delete":{
    "_index":"index1",
    "_type":"tweet",
    "_id":"1",
    "_version":4,
    "status":200,
    "found":true
  }
},{
  "update":{
    "_index":"index2",
    "_type":"tweet",
    "_id":"2",
    "_version":4,
    "status":200
  }
}]

}

func (*BulkResponse) ByAction Uses

func (r *BulkResponse) ByAction(action string) []*BulkResponseItem

ByAction returns all bulk request results of a certain action, e.g. "index" or "delete".

func (*BulkResponse) ById Uses

func (r *BulkResponse) ById(id string) []*BulkResponseItem

ById returns all bulk request results of a given document id, regardless of the action ("index", "delete" etc.).

func (*BulkResponse) Created Uses

func (r *BulkResponse) Created() []*BulkResponseItem

Created returns all bulk request results of "create" actions.

func (*BulkResponse) Deleted Uses

func (r *BulkResponse) Deleted() []*BulkResponseItem

Deleted returns all bulk request results of "delete" actions.

func (*BulkResponse) Failed Uses

func (r *BulkResponse) Failed() []*BulkResponseItem

Failed returns those items of a bulk response that have errors, i.e. those that don't have a status code between 200 and 299.

func (*BulkResponse) Indexed Uses

func (r *BulkResponse) Indexed() []*BulkResponseItem

Indexed returns all bulk request results of "index" actions.

func (*BulkResponse) Succeeded Uses

func (r *BulkResponse) Succeeded() []*BulkResponseItem

Succeeded returns those items of a bulk response that have no errors, i.e. those that have a status code between 200 and 299.

func (*BulkResponse) Updated Uses

func (r *BulkResponse) Updated() []*BulkResponseItem

Updated returns all bulk request results of "update" actions.

type BulkResponseItem Uses

type BulkResponseItem struct {
    Index         string        `json:"_index,omitempty"`
    Type          string        `json:"_type,omitempty"`
    Id            string        `json:"_id,omitempty"`
    Version       int64         `json:"_version,omitempty"`
    Result        string        `json:"result,omitempty"`
    Shards        *ShardsInfo   `json:"_shards,omitempty"`
    SeqNo         int64         `json:"_seq_no,omitempty"`
    PrimaryTerm   int64         `json:"_primary_term,omitempty"`
    Status        int           `json:"status,omitempty"`
    ForcedRefresh bool          `json:"forced_refresh,omitempty"`
    Error         *ErrorDetails `json:"error,omitempty"`
    GetResult     *GetResult    `json:"get,omitempty"`
}

BulkResponseItem is the result of a single bulk request.

type BulkService Uses

type BulkService struct {
    // contains filtered or unexported fields
}

BulkService allows for batching bulk requests and sending them to Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest, BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch, then use Do to send them to Elasticsearch.

BulkService will be reset after each Do call. In other words, you can reuse BulkService to send many batches. You do not have to create a new BulkService for each batch.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for more details.

func NewBulkService Uses

func NewBulkService(client *Client) *BulkService

NewBulkService initializes a new BulkService.

func (*BulkService) Add Uses

func (s *BulkService) Add(requests ...BulkableRequest) *BulkService

Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest, and/or BulkDeleteRequest.

func (*BulkService) Do Uses

func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error)

Do sends the batched requests to Elasticsearch. Note that, when successful, you can reuse the BulkService for the next batch as the list of bulk requests is cleared on success.

func (*BulkService) EstimatedSizeInBytes Uses

func (s *BulkService) EstimatedSizeInBytes() int64

EstimatedSizeInBytes returns the estimated size of all bulkable requests added via Add.

func (*BulkService) Index Uses

func (s *BulkService) Index(index string) *BulkService

Index specifies the index to use for all batches. You may also leave this blank and specify the index in the individual bulk requests.

func (*BulkService) NumberOfActions Uses

func (s *BulkService) NumberOfActions() int

NumberOfActions returns the number of bulkable requests that need to be sent to Elasticsearch on the next batch.

func (*BulkService) Pipeline Uses

func (s *BulkService) Pipeline(pipeline string) *BulkService

Pipeline specifies the pipeline id to preprocess incoming documents with.

func (*BulkService) Pretty Uses

func (s *BulkService) Pretty(pretty bool) *BulkService

Pretty tells Elasticsearch whether to return a formatted JSON response.

func (*BulkService) Refresh Uses

func (s *BulkService) Refresh(refresh string) *BulkService

Refresh controls when changes made by this request are made visible to search. The allowed values are: "true" (refresh the relevant primary and replica shards immediately), "wait_for" (wait for the changes to be made visible by a refresh before replying), or "false" (no refresh related actions). The default value is "false".

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html for details.

func (*BulkService) Reset Uses

func (s *BulkService) Reset()

Reset cleans up the request queue.

func (*BulkService) Retrier Uses

func (s *BulkService) Retrier(retrier Retrier) *BulkService

Retrier allows to set specific retry logic for this BulkService. If not specified, it will use the client's default retrier.

func (*BulkService) Routing Uses

func (s *BulkService) Routing(routing string) *BulkService

Routing specifies the routing value.

func (*BulkService) Timeout Uses

func (s *BulkService) Timeout(timeout string) *BulkService

Timeout is a global timeout for processing bulk requests. This is a server-side timeout, i.e. it tells Elasticsearch the time after which it should stop processing.

func (*BulkService) Type Uses

func (s *BulkService) Type(typ string) *BulkService

Type specifies the type to use for all batches. You may also leave this blank and specify the type in the individual bulk requests.

func (*BulkService) WaitForActiveShards Uses

func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService

WaitForActiveShards sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1).

type BulkUpdateRequest Uses

type BulkUpdateRequest struct {
    BulkableRequest
    // contains filtered or unexported fields
}

BulkUpdateRequest is a request to update a document in Elasticsearch.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for details.

func NewBulkUpdateRequest Uses

func NewBulkUpdateRequest() *BulkUpdateRequest

NewBulkUpdateRequest returns a new BulkUpdateRequest.

func (*BulkUpdateRequest) DetectNoop Uses

func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest

DetectNoop specifies whether changes that don't affect the document should be ignored (true) or unignored (false). This is enabled by default in Elasticsearch.

func (*BulkUpdateRequest) Doc Uses

func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest

Doc specifies the updated document.

func (*BulkUpdateRequest) DocAsUpsert Uses

func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest

DocAsUpsert indicates whether the contents of Doc should be used as the Upsert value.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html#_literal_doc_as_upsert_literal for details.

func (*BulkUpdateRequest) Id Uses

func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest

Id specifies the identifier of the document to update.

func (*BulkUpdateRequest) Index Uses

func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest

Index specifies the Elasticsearch index to use for this update request. If unspecified, the index set on the BulkService will be used.

func (*BulkUpdateRequest) Parent Uses

func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest

Parent specifies the identifier of the parent document (if available).

func (*BulkUpdateRequest) RetryOnConflict Uses

func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest

RetryOnConflict specifies how often to retry in case of a version conflict.

func (*BulkUpdateRequest) ReturnSource Uses

func (r *BulkUpdateRequest) ReturnSource(source bool) *BulkUpdateRequest

ReturnSource specifies whether Elasticsearch should return the source after the update. In the request, this corresponds to the `_source` field. It is false by default.

func (*BulkUpdateRequest) Routing Uses

func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest

Routing specifies a routing value for the request.

func (*BulkUpdateRequest) Script Uses

func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest

Script specifies an update script. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html#bulk-update and https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html for details.

func (*BulkUpdateRequest) ScriptedUpsert Uses

func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest

ScriptedUpsert specifies if your script will run regardless of whether the document exists or not.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html#_literal_scripted_upsert_literal

func (*BulkUpdateRequest) Source Uses

func (r *BulkUpdateRequest) Source() ([]string, error)

Source returns the on-wire representation of the update request, split into an action-and-meta-data line and an (optional) source line. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html for details.

func (*BulkUpdateRequest) String Uses

func (r *BulkUpdateRequest) String() string

String returns the on-wire representation of the update request, concatenated as a single string.

func (*BulkUpdateRequest) Type Uses

func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest

Type specifies the Elasticsearch type to use for this update request. If unspecified, the type set on the BulkService will be used.

func (*BulkUpdateRequest) Upsert Uses

func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest

Upsert specifies the document to use for upserts. It will be used for create if the original document does not exist.

func (*BulkUpdateRequest) UseEasyJSON Uses

func (r *BulkUpdateRequest) UseEasyJSON(enable bool) *BulkUpdateRequest

UseEasyJSON is an experimental setting that enables serialization with github.com/mailru/easyjson, which should result in faster serialization times and fewer allocations, but removes compatibility with encoding/json, uses unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations for details. This setting is disabled by default.

func (*BulkUpdateRequest) Version Uses

func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest

Version indicates the version of the document as part of an optimistic concurrency model.

func (*BulkUpdateRequest) VersionType Uses

func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest

VersionType can be "internal" (default), "external", "external_gte", or "external_gt".

type BulkableRequest Uses

type BulkableRequest interface {
    fmt.Stringer
    Source() ([]string, error)
}

BulkableRequest is a generic interface to bulkable requests.

type CancelAllocationCommand Uses

type CancelAllocationCommand struct {
    // contains filtered or unexported fields
}

CancelAllocationCommand cancels relocation, or recovery of a given shard on a node.

func NewCancelAllocationCommand Uses

func NewCancelAllocationCommand(index string, shardId int, node string, allowPrimary bool) *CancelAllocationCommand

NewCancelAllocationCommand creates a new CancelAllocationCommand.

func (*CancelAllocationCommand) Name Uses

func (cmd *CancelAllocationCommand) Name() string

Name of the command in a request to the Cluster Reroute API.

func (*CancelAllocationCommand) Source Uses

func (cmd *CancelAllocationCommand) Source() (interface{}, error)

Source generates the (inner) JSON to be used when serializing the command.

type CandidateGenerator Uses

type CandidateGenerator interface {
    Type() string
    Source() (interface{}, error)
}

type CardinalityAggregation Uses

type CardinalityAggregation struct {
    // contains filtered or unexported fields
}

CardinalityAggregation is a single-value metrics aggregation that calculates an approximate count of distinct values. Values can be extracted either from specific fields in the document or generated by a script. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-cardinality-aggregation.html

func NewCardinalityAggregation Uses

func NewCardinalityAggregation() *CardinalityAggregation

func (*CardinalityAggregation) Field Uses

func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation

func (*CardinalityAggregation) Format Uses

func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation

func (*CardinalityAggregation) Meta Uses

func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation

Meta sets the meta data to be included in the aggregation response.

func (*CardinalityAggregation) Missing Uses

func (a *CardinalityAggregation) Missing(missing interface{}) *CardinalityAggregation

func (*CardinalityAggregation) PrecisionThreshold Uses

func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation

func (*CardinalityAggregation) Rehash Uses

func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation

func (*CardinalityAggregation) Script Uses

func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation

func (*CardinalityAggregation) Source Uses

func (a *CardinalityAggregation) Source() (interface{}, error)

func (*CardinalityAggregation) SubAggregation Uses

func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation

type CatAliasesResponse Uses

type CatAliasesResponse []CatAliasesResponseRow

CatAliasesResponse is the outcome of CatAliasesService.Do.

type CatAliasesResponseRow Uses

type CatAliasesResponseRow struct {
    // Alias name.
    Alias string `json:"alias"`
    // Index the alias points to.
    Index string `json:"index"`
    // Filter, e.g. "*" or "-".
    Filter string `json:"filter"`
    // RoutingIndex specifies the index routing (or "-").
    RoutingIndex string `json:"routing.index"`
    // RoutingSearch specifies the search routing (or "-").
    RoutingSearch string `json:"routing.search"`
}

CatAliasesResponseRow is a single row in a CatAliasesResponse. Notice that not all of these fields might be filled; that depends on the number of columns chosen in the request (see CatAliasesService.Columns).

type CatAliasesService Uses

type CatAliasesService struct {
    // contains filtered or unexported fields
}

CatAliasesService shows information about currently configured aliases to indices including filter and routing infos.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-aliases.html for details.

func NewCatAliasesService Uses

func NewCatAliasesService(client *Client) *CatAliasesService

NewCatAliasesService creates a new CatAliasesService.

func (*CatAliasesService) Alias Uses

func (s *CatAliasesService) Alias(alias ...string) *CatAliasesService

Alias specifies one or more aliases to which information should be returned.

func (*CatAliasesService) Columns Uses

func (s *CatAliasesService) Columns(columns ...string) *CatAliasesService

Columns to return in the response. To get a list of all possible columns to return, run the following command in your terminal:

Example:

curl 'http://localhost:9200/_cat/aliases?help'

You can use Columns("*") to return all possible columns. That might take a little longer than the default set of columns.

func (*CatAliasesService) Do Uses

func (s *CatAliasesService) Do(ctx context.Context) (CatAliasesResponse, error)

Do executes the operation.

func (*CatAliasesService) Local Uses

func (s *CatAliasesService) Local(local bool) *CatAliasesService

Local indicates to return local information, i.e. do not retrieve the state from master node (default: false).

func (*CatAliasesService) MasterTimeout Uses

func (s *CatAliasesService) MasterTimeout(masterTimeout string) *CatAliasesService

MasterTimeout is the explicit operation timeout for connection to master node.

func (*CatAliasesService) Pretty Uses

func (s *CatAliasesService) Pretty(pretty bool) *CatAliasesService

Pretty indicates that the JSON response be indented and human readable.

func (*CatAliasesService) Sort Uses

func (s *CatAliasesService) Sort(fields ...string) *CatAliasesService

Sort is a list of fields to sort by.

type CatAllocationResponse Uses

type CatAllocationResponse []CatAllocationResponseRow

CatAllocationResponse is the outcome of CatAllocationService.Do.

type CatAllocationResponseRow Uses

type CatAllocationResponseRow struct {
    // Shards represents the number of shards on a node.
    Shards int `json:"shards,string"`
    // DiskIndices represents the disk used by ES indices, e.g. "46.1kb".
    DiskIndices string `json:"disk.indices"`
    // DiskUsed represents the disk used (total, not just ES), e.g. "34.5gb"
    DiskUsed string `json:"disk.used"`
    // DiskAvail represents the disk available, e.g. "53.2gb".
    DiskAvail string `json:"disk.avail"`
    // DiskTotal represents the total capacity of all volumes, e.g. "87.7gb".
    DiskTotal string `json:"disk.total"`
    // DiskPercent represents the percent of disk used, e.g. 39.
    DiskPercent int `json:"disk.percent,string"`
    // Host represents the hostname of the node.
    Host string `json:"host"`
    // IP represents the IP address of the node.
    IP  string `json:"ip"`
    // Node represents the node ID.
    Node string `json:"node"`
}

CatAllocationResponseRow is a single row in a CatAllocationResponse. Notice that not all of these fields might be filled; that depends on the number of columns chosen in the request (see CatAllocationService.Columns).

type CatAllocationService Uses

type CatAllocationService struct {
    // contains filtered or unexported fields
}

CatAllocationService provides a snapshot of how many shards are allocated to each data node and how much disk space they are using.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-allocation.html for details.

func NewCatAllocationService Uses

func NewCatAllocationService(client *Client) *CatAllocationService

NewCatAllocationService creates a new CatAllocationService.

func (*CatAllocationService) Bytes Uses

func (s *CatAllocationService) Bytes(bytes string) *CatAllocationService

Bytes represents the unit in which to display byte values. Valid values are: "b", "k", "m", or "g".

func (*CatAllocationService) Columns Uses

func (s *CatAllocationService) Columns(columns ...string) *CatAllocationService

Columns to return in the response. To get a list of all possible columns to return, run the following command in your terminal:

Example:

curl 'http://localhost:9200/_cat/aliases?help'

You can use Columns("*") to return all possible columns. That might take a little longer than the default set of columns.

func (*CatAllocationService) Do Uses

func (s *CatAllocationService) Do(ctx context.Context) (CatAllocationResponse, error)

Do executes the operation.

func (*CatAllocationService) Local Uses

func (s *CatAllocationService) Local(local bool) *CatAllocationService

Local indicates to return local information, i.e. do not retrieve the state from master node (default: false).

func (*CatAllocationService) MasterTimeout Uses

func (s *CatAllocationService) MasterTimeout(masterTimeout string) *CatAllocationService

MasterTimeout is the explicit operation timeout for connection to master node.

func (*CatAllocationService) NodeID Uses

func (s *CatAllocationService) NodeID(nodes ...string) *CatAllocationService

NodeID specifies one or more node IDs for which information should be returned.

func (*CatAllocationService) Pretty Uses

func (s *CatAllocationService) Pretty(pretty bool) *CatAllocationService

Pretty indicates that the JSON response be indented and human readable.

func (*CatAllocationService) Sort Uses

func (s *CatAllocationService) Sort(fields ...string) *CatAllocationService

Sort is a list of fields to sort by.

type CatCountResponse Uses

type CatCountResponse []CatCountResponseRow

CatCountResponse is the outcome of CatCountService.Do.

type CatCountResponseRow Uses

type CatCountResponseRow struct {
    Epoch     int64  `json:"epoch,string"` // e.g. 1527077996
    Timestamp string `json:"timestamp"`    // e.g. "12:19:56"
    Count     int    `json:"count,string"` // number of documents
}

CatCountResponseRow specifies the data returned for one index of a CatCountResponse. Notice that not all of these fields might be filled; that depends on the number of columns chosen in the request (see CatCountService.Columns).

type CatCountService Uses

type CatCountService struct {
    // contains filtered or unexported fields
}

CatCountService provides quick access to the document count of the entire cluster, or individual indices.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-count.html for details.

func NewCatCountService Uses

func NewCatCountService(client *Client) *CatCountService

NewCatCountService creates a new CatCountService.

func (*CatCountService) Columns Uses

func (s *CatCountService) Columns(columns ...string) *CatCountService

Columns to return in the response. To get a list of all possible columns to return, run the following command in your terminal:

Example:

curl 'http://localhost:9200/_cat/count?help'

You can use Columns("*") to return all possible columns. That might take a little longer than the default set of columns.

func (*CatCountService) Do Uses

func (s *CatCountService) Do(ctx context.Context) (CatCountResponse, error)

Do executes the operation.

func (*CatCountService) Index Uses

func (s *CatCountService) Index(index ...string) *CatCountService

Index specifies zero or more indices for which to return counts (by default counts for all indices are returned).

func (*CatCountService) Local Uses

func (s *CatCountService) Local(local bool) *CatCountService

Local indicates to return local information, i.e. do not retrieve the state from master node (default: false).

func (*CatCountService) MasterTimeout Uses

func (s *CatCountService) MasterTimeout(masterTimeout string) *CatCountService

MasterTimeout is the explicit operation timeout for connection to master node.

func (*CatCountService) Pretty Uses

func (s *CatCountService) Pretty(pretty bool) *CatCountService

Pretty indicates that the JSON response be indented and human readable.

func (*CatCountService) Sort Uses

func (s *CatCountService) Sort(fields ...string) *CatCountService

Sort is a list of fields to sort by.

type CatHealthResponse Uses

type CatHealthResponse []CatHealthResponseRow

CatHealthResponse is the outcome of CatHealthService.Do.

type CatHealthResponseRow Uses

type CatHealthResponseRow struct {
    Epoch               int64  `json:"epoch,string"`          // e.g. 1527077996
    Timestamp           string `json:"timestamp"`             // e.g. "12:19:56"
    Cluster             string `json:"cluster"`               // cluster name, e.g. "elasticsearch"
    Status              string `json:"status"`                // health status, e.g. "green", "yellow", or "red"
    NodeTotal           int    `json:"node.total,string"`     // total number of nodes
    NodeData            int    `json:"node.data,string"`      // number of nodes that can store data
    Shards              int    `json:"shards,string"`         // total number of shards
    Pri                 int    `json:"pri,string"`            // number of primary shards
    Relo                int    `json:"relo,string"`           // number of relocating nodes
    Init                int    `json:"init,string"`           // number of initializing nodes
    Unassign            int    `json:"unassign,string"`       // number of unassigned shards
    PendingTasks        int    `json:"pending_tasks,string"`  // number of pending tasks
    MaxTaskWaitTime     string `json:"max_task_wait_time"`    // wait time of longest task pending, e.g. "-" or time in millis
    ActiveShardsPercent string `json:"active_shards_percent"` // active number of shards in percent, e.g. "100%"
}

CatHealthResponseRow is a single row in a CatHealthResponse. Notice that not all of these fields might be filled; that depends on the number of columns chosen in the request (see CatHealthService.Columns).

type CatHealthService Uses

type CatHealthService struct {
    // contains filtered or unexported fields
}

CatHealthService returns a terse representation of the same information as /_cluster/health.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-health.html for details.

func NewCatHealthService Uses

func NewCatHealthService(client *Client) *CatHealthService

NewCatHealthService creates a new CatHealthService.

func (*CatHealthService) Columns Uses

func (s *CatHealthService) Columns(columns ...string) *CatHealthService

Columns to return in the response. To get a list of all possible columns to return, run the following command in your terminal:

Example:

curl 'http://localhost:9200/_cat/indices?help'

You can use Columns("*") to return all possible columns. That might take a little longer than the default set of columns.

func (*CatHealthService) DisableTimestamping Uses

func (s *CatHealthService) DisableTimestamping(disable bool) *CatHealthService

DisableTimestamping disables timestamping (default: true).

func (*CatHealthService) Do Uses

func (s *CatHealthService) Do(ctx context.Context) (CatHealthResponse, error)

Do executes the operation.

func (*CatHealthService) Local Uses

func (s *CatHealthService) Local(local bool) *CatHealthService

Local indicates to return local information, i.e. do not retrieve the state from master node (default: false).

func (*CatHealthService) MasterTimeout Uses

func (s *CatHealthService) MasterTimeout(masterTimeout string) *CatHealthService

MasterTimeout is the explicit operation timeout for connection to master node.

func (*CatHealthService) Pretty Uses

func (s *CatHealthService) Pretty(pretty bool) *CatHealthService

Pretty indicates that the JSON response be indented and human readable.

func (*CatHealthService) Sort Uses

func (s *CatHealthService) Sort(fields ...string) *CatHealthService

Sort is a list of fields to sort by.

type CatIndicesResponse Uses

type CatIndicesResponse []CatIndicesResponseRow

CatIndicesResponse is the outcome of CatIndicesService.Do.

type CatIndicesResponseRow Uses

type CatIndicesResponseRow struct {
    Health                       string `json:"health"`                              // "green", "yellow", or "red"
    Status                       string `json:"status"`                              // "open" or "closed"
    Index                        string `json:"index"`                               // index name
    UUID                         string `json:"uuid"`                                // index uuid
    Pri                          int    `json:"pri,string"`                          // number of primary shards
    Rep                          int    `json:"rep,string"`                          // number of replica shards
    DocsCount                    int    `json:"docs.count,string"`                   // number of available documents
    DocsDeleted                  int    `json:"docs.deleted,string"`                 // number of deleted documents
    CreationDate                 int64  `json:"creation.date,string"`                // index creation date (millisecond value), e.g. 1527077221644
    CreationDateString           string `json:"creation.date.string"`                // index creation date (as string), e.g. "2018-05-23T12:07:01.644Z"
    StoreSize                    string `json:"store.size"`                          // store size of primaries & replicas, e.g. "4.6kb"
    PriStoreSize                 string `json:"pri.store.size"`                      // store size of primaries, e.g. "230b"
    CompletionSize               string `json:"completion.size"`                     // size of completion on primaries & replicas
    PriCompletionSize            string `json:"pri.completion.size"`                 // size of completion on primaries
    FielddataMemorySize          string `json:"fielddata.memory_size"`               // used fielddata cache on primaries & replicas
    PriFielddataMemorySize       string `json:"pri.fielddata.memory_size"`           // used fielddata cache on primaries
    FielddataEvictions           int    `json:"fielddata.evictions,string"`          // fielddata evictions on primaries & replicas
    PriFielddataEvictions        int    `json:"pri.fielddata.evictions,string"`      // fielddata evictions on primaries
    QueryCacheMemorySize         string `json:"query_cache.memory_size"`             // used query cache on primaries & replicas
    PriQueryCacheMemorySize      string `json:"pri.query_cache.memory_size"`         // used query cache on primaries
    QueryCacheEvictions          int    `json:"query_cache.evictions,string"`        // query cache evictions on primaries & replicas
    PriQueryCacheEvictions       int    `json:"pri.query_cache.evictions,string"`    // query cache evictions on primaries
    RequestCacheMemorySize       string `json:"request_cache.memory_size"`           // used request cache on primaries & replicas
    PriRequestCacheMemorySize    string `json:"pri.request_cache.memory_size"`       // used request cache on primaries
    RequestCacheEvictions        int    `json:"request_cache.evictions,string"`      // request cache evictions on primaries & replicas
    PriRequestCacheEvictions     int    `json:"pri.request_cache.evictions,string"`  // request cache evictions on primaries
    RequestCacheHitCount         int    `json:"request_cache.hit_count,string"`      // request cache hit count on primaries & replicas
    PriRequestCacheHitCount      int    `json:"pri.request_cache.hit_count,string"`  // request cache hit count on primaries
    RequestCacheMissCount        int    `json:"request_cache.miss_count,string"`     // request cache miss count on primaries & replicas
    PriRequestCacheMissCount     int    `json:"pri.request_cache.miss_count,string"` // request cache miss count on primaries
    FlushTotal                   int    `json:"flush.total"`                         // number of flushes on primaries & replicas
    PriFlushTotal                int    `json:"pri.flush.total"`                     // number of flushes on primaries
    FlushTotalTime               string `json:"flush.total_time"`                    // time spent in flush on primaries & replicas
    PriFlushTotalTime            string `json:"pri.flush.total_time"`                // time spent in flush on primaries
    GetCurrent                   int    `json:"get.current,string"`                  // number of current get ops on primaries & replicas
    PriGetCurrent                int    `json:"pri.get.current,string"`              // number of current get ops on primaries
    GetTime                      string `json:"get.time"`                            // time spent in get on primaries & replicas
    PriGetTime                   string `json:"pri.get.time"`                        // time spent in get on primaries
    GetTotal                     int    `json:"get.total,string"`                    // number of get ops on primaries & replicas
    PriGetTotal                  int    `json:"pri.get.total,string"`                // number of get ops on primaries
    GetExistsTime                string `json:"get.exists_time"`                     // time spent in successful gets on primaries & replicas
    PriGetExistsTime             string `json:"pri.get.exists_time"`                 // time spent in successful gets on primaries
    GetExistsTotal               int    `json:"get.exists_total,string"`             // number of successful gets on primaries & replicas
    PriGetExistsTotal            int    `json:"pri.get.exists_total,string"`         // number of successful gets on primaries
    GetMissingTime               string `json:"get.missing_time"`                    // time spent in failed gets on primaries & replicas
    PriGetMissingTime            string `json:"pri.get.missing_time"`                // time spent in failed gets on primaries
    GetMissingTotal              int    `json:"get.missing_total,string"`            // number of failed gets on primaries & replicas
    PriGetMissingTotal           int    `json:"pri.get.missing_total,string"`        // number of failed gets on primaries
    IndexingDeleteCurrent        int    `json:"indexing.delete_current,string"`      // number of current deletions on primaries & replicas
    PriIndexingDeleteCurrent     int    `json:"pri.indexing.delete_current,string"`  // number of current deletions on primaries
    IndexingDeleteTime           string `json:"indexing.delete_time"`                // time spent in deletions on primaries & replicas
    PriIndexingDeleteTime        string `json:"pri.indexing.delete_time"`            // time spent in deletions on primaries
    IndexingDeleteTotal          int    `json:"indexing.delete_total,string"`        // number of delete ops on primaries & replicas
    PriIndexingDeleteTotal       int    `json:"pri.indexing.delete_total,string"`    // number of delete ops on primaries
    IndexingIndexCurrent         int    `json:"indexing.index_current,string"`       // number of current indexing on primaries & replicas
    PriIndexingIndexCurrent      int    `json:"pri.indexing.index_current,string"`   // number of current indexing on primaries
    IndexingIndexTime            string `json:"indexing.index_time"`                 // time spent in indexing on primaries & replicas
    PriIndexingIndexTime         string `json:"pri.indexing.index_time"`             // time spent in indexing on primaries
    IndexingIndexTotal           int    `json:"indexing.index_total,string"`         // number of index ops on primaries & replicas
    PriIndexingIndexTotal        int    `json:"pri.indexing.index_total,string"`     // number of index ops on primaries
    IndexingIndexFailed          int    `json:"indexing.index_failed,string"`        // number of failed indexing ops on primaries & replicas
    PriIndexingIndexFailed       int    `json:"pri.indexing.index_failed,string"`    // number of failed indexing ops on primaries
    MergesCurrent                int    `json:"merges.current,string"`               // number of current merges on primaries & replicas
    PriMergesCurrent             int    `json:"pri.merges.current,string"`           // number of current merges on primaries
    MergesCurrentDocs            int    `json:"merges.current_docs,string"`          // number of current merging docs on primaries & replicas
    PriMergesCurrentDocs         int    `json:"pri.merges.current_docs,string"`      // number of current merging docs on primaries
    MergesCurrentSize            string `json:"merges.current_size"`                 // size of current merges on primaries & replicas
    PriMergesCurrentSize         string `json:"pri.merges.current_size"`             // size of current merges on primaries
    MergesTotal                  int    `json:"merges.total,string"`                 // number of completed merge ops on primaries & replicas
    PriMergesTotal               int    `json:"pri.merges.total,string"`             // number of completed merge ops on primaries
    MergesTotalDocs              int    `json:"merges.total_docs,string"`            // docs merged on primaries & replicas
    PriMergesTotalDocs           int    `json:"pri.merges.total_docs,string"`        // docs merged on primaries
    MergesTotalSize              string `json:"merges.total_size"`                   // size merged on primaries & replicas
    PriMergesTotalSize           string `json:"pri.merges.total_size"`               // size merged on primaries
    MergesTotalTime              string `json:"merges.total_time"`                   // time spent in merges on primaries & replicas
    PriMergesTotalTime           string `json:"pri.merges.total_time"`               // time spent in merges on primaries
    RefreshTotal                 int    `json:"refresh.total,string"`                // total refreshes on primaries & replicas
    PriRefreshTotal              int    `json:"pri.refresh.total,string"`            // total refreshes on primaries
    RefreshTime                  string `json:"refresh.time"`                        // time spent in refreshes on primaries & replicas
    PriRefreshTime               string `json:"pri.refresh.time"`                    // time spent in refreshes on primaries
    RefreshListeners             int    `json:"refresh.listeners,string"`            // number of pending refresh listeners on primaries & replicas
    PriRefreshListeners          int    `json:"pri.refresh.listeners,string"`        // number of pending refresh listeners on primaries
    SearchFetchCurrent           int    `json:"search.fetch_current,string"`         // current fetch phase ops on primaries & replicas
    PriSearchFetchCurrent        int    `json:"pri.search.fetch_current,string"`     // current fetch phase ops on primaries
    SearchFetchTime              string `json:"search.fetch_time"`                   // time spent in fetch phase on primaries & replicas
    PriSearchFetchTime           string `json:"pri.search.fetch_time"`               // time spent in fetch phase on primaries
    SearchFetchTotal             int    `json:"search.fetch_total,string"`           // total fetch ops on primaries & replicas
    PriSearchFetchTotal          int    `json:"pri.search.fetch_total,string"`       // total fetch ops on primaries
    SearchOpenContexts           int    `json:"search.open_contexts,string"`         // open search contexts on primaries & replicas
    PriSearchOpenContexts        int    `json:"pri.search.open_contexts,string"`     // open search contexts on primaries
    SearchQueryCurrent           int    `json:"search.query_current,string"`         // current query phase ops on primaries & replicas
    PriSearchQueryCurrent        int    `json:"pri.search.query_current,string"`     // current query phase ops on primaries
    SearchQueryTime              string `json:"search.query_time"`                   // time spent in query phase on primaries & replicas, e.g. "0s"
    PriSearchQueryTime           string `json:"pri.search.query_time"`               // time spent in query phase on primaries, e.g. "0s"
    SearchQueryTotal             int    `json:"search.query_total,string"`           // total query phase ops on primaries & replicas
    PriSearchQueryTotal          int    `json:"pri.search.query_total,string"`       // total query phase ops on primaries
    SearchScrollCurrent          int    `json:"search.scroll_current,string"`        // open scroll contexts on primaries & replicas
    PriSearchScrollCurrent       int    `json:"pri.search.scroll_current,string"`    // open scroll contexts on primaries
    SearchScrollTime             string `json:"search.scroll_time"`                  // time scroll contexts held open on primaries & replicas, e.g. "0s"
    PriSearchScrollTime          string `json:"pri.search.scroll_time"`              // time scroll contexts held open on primaries, e.g. "0s"
    SearchScrollTotal            int    `json:"search.scroll_total,string"`          // completed scroll contexts on primaries & replicas
    PriSearchScrollTotal         int    `json:"pri.search.scroll_total,string"`      // completed scroll contexts on primaries
    SegmentsCount                int    `json:"segments.count,string"`               // number of segments on primaries & replicas
    PriSegmentsCount             int    `json:"pri.segments.count,string"`           // number of segments on primaries
    SegmentsMemory               string `json:"segments.memory"`                     // memory used by segments on primaries & replicas, e.g. "1.3kb"
    PriSegmentsMemory            string `json:"pri.segments.memory"`                 // memory used by segments on primaries, e.g. "1.3kb"
    SegmentsIndexWriterMemory    string `json:"segments.index_writer_memory"`        // memory used by index writer on primaries & replicas, e.g. "0b"
    PriSegmentsIndexWriterMemory string `json:"pri.segments.index_writer_memory"`    // memory used by index writer on primaries, e.g. "0b"
    SegmentsVersionMapMemory     string `json:"segments.version_map_memory"`         // memory used by version map on primaries & replicas, e.g. "0b"
    PriSegmentsVersionMapMemory  string `json:"pri.segments.version_map_memory"`     // memory used by version map on primaries, e.g. "0b"
    SegmentsFixedBitsetMemory    string `json:"segments.fixed_bitset_memory"`        // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries & replicas, e.g. "0b"
    PriSegmentsFixedBitsetMemory string `json:"pri.segments.fixed_bitset_memory"`    // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries, e.g. "0b"
    WarmerCurrent                int    `json:"warmer.count,string"`                 // current warmer ops on primaries & replicas
    PriWarmerCurrent             int    `json:"pri.warmer.count,string"`             // current warmer ops on primaries
    WarmerTotal                  int    `json:"warmer.total,string"`                 // total warmer ops on primaries & replicas
    PriWarmerTotal               int    `json:"pri.warmer.total,string"`             // total warmer ops on primaries
    WarmerTotalTime              string `json:"warmer.total_time"`                   // time spent in warmers on primaries & replicas, e.g. "47s"
    PriWarmerTotalTime           string `json:"pri.warmer.total_time"`               // time spent in warmers on primaries, e.g. "47s"
    SuggestCurrent               int    `json:"suggest.current,string"`              // number of current suggest ops on primaries & replicas
    PriSuggestCurrent            int    `json:"pri.suggest.current,string"`          // number of current suggest ops on primaries
    SuggestTime                  string `json:"suggest.time"`                        // time spent in suggest on primaries & replicas, e.g. "31s"
    PriSuggestTime               string `json:"pri.suggest.time"`                    // time spent in suggest on primaries, e.g. "31s"
    SuggestTotal                 int    `json:"suggest.total,string"`                // number of suggest ops on primaries & replicas
    PriSuggestTotal              int    `json:"pri.suggest.total,string"`            // number of suggest ops on primaries
    MemoryTotal                  string `json:"memory.total"`                        // total user memory on primaries & replicas, e.g. "1.5kb"
    PriMemoryTotal               string `json:"pri.memory.total"`                    // total user memory on primaries, e.g. "1.5kb"
}

CatIndicesResponseRow specifies the data returned for one index of a CatIndicesResponse. Notice that not all of these fields might be filled; that depends on the number of columns chosen in the request (see CatIndicesService.Columns).

type CatIndicesService Uses

type CatIndicesService struct {
    // contains filtered or unexported fields
}

CatIndicesService returns the list of indices plus some additional information about them.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-indices.html for details.

func NewCatIndicesService Uses

func NewCatIndicesService(client *Client) *CatIndicesService

NewCatIndicesService creates a new CatIndicesService.

func (*CatIndicesService) Bytes Uses

func (s *CatIndicesService) Bytes(bytes string) *CatIndicesService

Bytes represents the unit in which to display byte values. Valid values are: "b", "k", "m", or "g".

func (*CatIndicesService) Columns Uses

func (s *CatIndicesService) Columns(columns ...string) *CatIndicesService

Columns to return in the response. To get a list of all possible columns to return, run the following command in your terminal:

Example:

curl 'http://localhost:9200/_cat/indices?help'

You can use Columns("*") to return all possible columns. That might take a little longer than the default set of columns.

func (*CatIndicesService) Do Uses

func (s *CatIndicesService) Do(ctx context.Context) (CatIndicesResponse, error)

Do executes the operation.

func (*CatIndicesService) Health Uses

func (s *CatIndicesService) Health(healthState string) *CatIndicesService

Health filters indices by their health status. Valid values are: "green", "yellow", or "red".

func (*CatIndicesService) Index Uses

func (s *CatIndicesService) Index(index string) *CatIndicesService

Index is the name of the index to list (by default all indices are returned).

func (*CatIndicesService) Local Uses

func (s *CatIndicesService) Local(local bool) *CatIndicesService

Local indicates to return local information, i.e. do not retrieve the state from master node (default: false).

func (*CatIndicesService) MasterTimeout Uses

func (s *CatIndicesService) MasterTimeout(masterTimeout string) *CatIndicesService

MasterTimeout is the explicit operation timeout for connection to master node.

func (*CatIndicesService) Pretty Uses

func (s *CatIndicesService) Pretty(pretty bool) *CatIndicesService

Pretty indicates that the JSON response be indented and human readable.

func (*CatIndicesService) PrimaryOnly Uses

func (s *CatIndicesService) PrimaryOnly(primaryOnly bool) *CatIndicesService

PrimaryOnly when set to true returns stats only for primary shards (default: false).

func (*CatIndicesService) Sort Uses

func (s *CatIndicesService) Sort(fields ...string) *CatIndicesService

Sort is a list of fields to sort by.

type ChiSquareSignificanceHeuristic Uses

type ChiSquareSignificanceHeuristic struct {
    // contains filtered or unexported fields
}

ChiSquareSignificanceHeuristic implements Chi square as described in "Information Retrieval", Manning et al., Chapter 13.5.2.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_chi_square for details.

func NewChiSquareSignificanceHeuristic Uses

func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic

NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic.

func (*ChiSquareSignificanceHeuristic) BackgroundIsSuperset Uses

func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic

BackgroundIsSuperset indicates whether you defined a custom background filter that represents a difference set of documents that you want to compare to.

func (*ChiSquareSignificanceHeuristic) IncludeNegatives Uses

func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic

IncludeNegatives indicates whether to filter out the terms that appear much less in the subset than in the background without the subset.

func (*ChiSquareSignificanceHeuristic) Name Uses

func (sh *ChiSquareSignificanceHeuristic) Name() string

Name returns the name of the heuristic in the REST interface.

func (*ChiSquareSignificanceHeuristic) Source Uses

func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error)

Source returns the parameters that need to be added to the REST parameters.

type ChildrenAggregation Uses

type ChildrenAggregation struct {
    // contains filtered or unexported fields
}

ChildrenAggregation is a special single bucket aggregation that enables aggregating from buckets on parent document types to buckets on child documents. It is available from 1.4.0.Beta1 upwards. See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-children-aggregation.html

func NewChildrenAggregation Uses

func NewChildrenAggregation() *ChildrenAggregation

func (*ChildrenAggregation) Meta Uses

func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation

Meta sets the meta data to be included in the aggregation response.

func (*ChildrenAggregation) Source Uses

func (a *ChildrenAggregation) Source() (interface{}, error)

func (*ChildrenAggregation) SubAggregation Uses

func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation

func (*ChildrenAggregation) Type Uses

func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation

type ClearScrollResponse Uses

type ClearScrollResponse struct {
}

ClearScrollResponse is the response of ClearScrollService.Do.

type ClearScrollService Uses

type ClearScrollService struct {
    // contains filtered or unexported fields
}

ClearScrollService clears one or more scroll contexts by their ids.

See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#_clear_scroll_api for details.

func NewClearScrollService Uses

func NewClearScrollService(client *Client) *ClearScrollService

NewClearScrollService creates a new ClearScrollService.

func (*ClearScrollService) Do Uses

func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error)

Do executes the operation.

func (*ClearScrollService) Pretty Uses

func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService

Pretty indicates that the JSON response be indented and human readable.

func (*ClearScrollService) ScrollId