for_core_protos_go_proto

package
Version: v0.8.1
Published: Apr 7, 2024 License: Apache-2.0 Imports: 19 Imported by: 0

Documentation

Index

Constants

const Code_DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = for_core_protos_go_proto.Code_DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_

Variables

var (
	OptimizerOptions_Level_name = map[int32]string{
		0:  "L1",
		-1: "L0",
	}
	OptimizerOptions_Level_value = map[string]int32{
		"L1": 0,
		"L0": -1,
	}
)

Enum value maps for OptimizerOptions_Level.
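
These _name/_value maps follow the standard protoc-gen-go convention: they translate between an enum's numeric value and its proto name. A minimal sketch of using them, assuming a placeholder import path for this generated package:

package main

import (
	"fmt"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path; substitute your module's generated package
)

func main() {
	// Numeric value -> proto name.
	fmt.Println(corepb.OptimizerOptions_Level_name[-1]) // "L0"

	// Proto name -> typed enum value.
	lvl := corepb.OptimizerOptions_Level(corepb.OptimizerOptions_Level_value["L1"])
	fmt.Println(lvl)
}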

var (
	OptimizerOptions_GlobalJitLevel_name = map[int32]string{
		0:  "DEFAULT",
		-1: "OFF",
		1:  "ON_1",
		2:  "ON_2",
	}
	OptimizerOptions_GlobalJitLevel_value = map[string]int32{
		"DEFAULT": 0,
		"OFF":     -1,
		"ON_1":    1,
		"ON_2":    2,
	}
)

Enum value maps for OptimizerOptions_GlobalJitLevel.

var (
	ConfigProto_Experimental_MlirBridgeRollout_name = map[int32]string{
		0: "MLIR_BRIDGE_ROLLOUT_UNSPECIFIED",
		1: "MLIR_BRIDGE_ROLLOUT_ENABLED",
		2: "MLIR_BRIDGE_ROLLOUT_DISABLED",
	}
	ConfigProto_Experimental_MlirBridgeRollout_value = map[string]int32{
		"MLIR_BRIDGE_ROLLOUT_UNSPECIFIED": 0,
		"MLIR_BRIDGE_ROLLOUT_ENABLED":     1,
		"MLIR_BRIDGE_ROLLOUT_DISABLED":    2,
	}
)

Enum value maps for ConfigProto_Experimental_MlirBridgeRollout.

var (
	RunOptions_TraceLevel_name = map[int32]string{
		0: "NO_TRACE",
		1: "SOFTWARE_TRACE",
		2: "HARDWARE_TRACE",
		3: "FULL_TRACE",
	}
	RunOptions_TraceLevel_value = map[string]int32{
		"NO_TRACE":       0,
		"SOFTWARE_TRACE": 1,
		"HARDWARE_TRACE": 2,
		"FULL_TRACE":     3,
	}
)

Enum value maps for RunOptions_TraceLevel.

var (
	ErrorSourceProto_ErrorSource_name = map[int32]string{
		0: "UNKNOWN",
		1: "TPU_COMPILE_OP",
		2: "TF_XLA_BRIDGE",
		3: "MLIR_BRIDGE_PHASE_1",
		4: "MLIR_BRIDGE_PHASE_2",
		5: "EAGER_REMOTE_MGR",
	}
	ErrorSourceProto_ErrorSource_value = map[string]int32{
		"UNKNOWN":             0,
		"TPU_COMPILE_OP":      1,
		"TF_XLA_BRIDGE":       2,
		"MLIR_BRIDGE_PHASE_1": 3,
		"MLIR_BRIDGE_PHASE_2": 4,
		"EAGER_REMOTE_MGR":    5,
	}
)

Enum value maps for ErrorSourceProto_ErrorSource.

var (
	DeploymentMode_name = map[int32]string{
		0: "DEPLOYMENT_MODE_UNSPECIFIED",
		1: "DEPLOYMENT_MODE_COLOCATED",
		2: "DEPLOYMENT_MODE_REMOTE",
		3: "DEPLOYMENT_MODE_HYBRID",
	}
	DeploymentMode_value = map[string]int32{
		"DEPLOYMENT_MODE_UNSPECIFIED": 0,
		"DEPLOYMENT_MODE_COLOCATED":   1,
		"DEPLOYMENT_MODE_REMOTE":      2,
		"DEPLOYMENT_MODE_HYBRID":      3,
	}
)

Enum value maps for DeploymentMode.

var (
	ProcessingModeDef_ShardingPolicy_name = map[int32]string{
		0: "OFF",
		1: "DYNAMIC",
		2: "FILE",
		3: "DATA",
		4: "FILE_OR_DATA",
		5: "HINT",
	}
	ProcessingModeDef_ShardingPolicy_value = map[string]int32{
		"OFF":          0,
		"DYNAMIC":      1,
		"FILE":         2,
		"DATA":         3,
		"FILE_OR_DATA": 4,
		"HINT":         5,
	}
)

Enum value maps for ProcessingModeDef_ShardingPolicy.

var (
	DataServiceMetadata_Compression_name = map[int32]string{
		0: "COMPRESSION_UNSPECIFIED",
		1: "COMPRESSION_OFF",
		2: "COMPRESSION_SNAPPY",
	}
	DataServiceMetadata_Compression_value = map[string]int32{
		"COMPRESSION_UNSPECIFIED": 0,
		"COMPRESSION_OFF":         1,
		"COMPRESSION_SNAPPY":      2,
	}
)

Enum value maps for DataServiceMetadata_Compression.

var (
	TensorDebugMode_name = map[int32]string{
		0: "UNSPECIFIED",
		1: "NO_TENSOR",
		2: "CURT_HEALTH",
		3: "CONCISE_HEALTH",
		4: "FULL_HEALTH",
		5: "SHAPE",
		6: "FULL_NUMERICS",
		7: "FULL_TENSOR",
		8: "REDUCE_INF_NAN_THREE_SLOTS",
	}
	TensorDebugMode_value = map[string]int32{
		"UNSPECIFIED":                0,
		"NO_TENSOR":                  1,
		"CURT_HEALTH":                2,
		"CONCISE_HEALTH":             3,
		"FULL_HEALTH":                4,
		"SHAPE":                      5,
		"FULL_NUMERICS":              6,
		"FULL_TENSOR":                7,
		"REDUCE_INF_NAN_THREE_SLOTS": 8,
	}
)

Enum value maps for TensorDebugMode.

var (
	RewriterConfig_Toggle_name = map[int32]string{
		0: "DEFAULT",
		1: "ON",
		2: "OFF",
		3: "AGGRESSIVE",
		4: "EXPERIMENTAL_MLIR",
		5: "EXPERIMENTAL_BOTH",
	}
	RewriterConfig_Toggle_value = map[string]int32{
		"DEFAULT":           0,
		"ON":                1,
		"OFF":               2,
		"AGGRESSIVE":        3,
		"EXPERIMENTAL_MLIR": 4,
		"EXPERIMENTAL_BOTH": 5,
	}
)

Enum value maps for RewriterConfig_Toggle.

var (
	RewriterConfig_CpuLayout_name = map[int32]string{
		0: "NO_CONVERSION_ON_CPU",
		1: "NCHW_TO_NHWC",
		2: "NHWC_TO_NCHW",
	}
	RewriterConfig_CpuLayout_value = map[string]int32{
		"NO_CONVERSION_ON_CPU": 0,
		"NCHW_TO_NHWC":         1,
		"NHWC_TO_NCHW":         2,
	}
)

Enum value maps for RewriterConfig_CpuLayout.

var (
	RewriterConfig_NumIterationsType_name = map[int32]string{
		0: "DEFAULT_NUM_ITERS",
		1: "ONE",
		2: "TWO",
	}
	RewriterConfig_NumIterationsType_value = map[string]int32{
		"DEFAULT_NUM_ITERS": 0,
		"ONE":               1,
		"TWO":               2,
	}
)

Enum value maps for RewriterConfig_NumIterationsType.

var (
	RewriterConfig_MemOptType_name = map[int32]string{
		0: "DEFAULT_MEM_OPT",
		1: "NO_MEM_OPT",
		2: "MANUAL",
		4: "SWAPPING_HEURISTICS",
		5: "RECOMPUTATION_HEURISTICS",
		6: "SCHEDULING_HEURISTICS",
		3: "HEURISTICS",
	}
	RewriterConfig_MemOptType_value = map[string]int32{
		"DEFAULT_MEM_OPT":          0,
		"NO_MEM_OPT":               1,
		"MANUAL":                   2,
		"SWAPPING_HEURISTICS":      4,
		"RECOMPUTATION_HEURISTICS": 5,
		"SCHEDULING_HEURISTICS":    6,
		"HEURISTICS":               3,
	}
)

Enum value maps for RewriterConfig_MemOptType.

var (
	FunctionSpec_JitCompile_name = map[int32]string{
		0: "DEFAULT",
		1: "ON",
		2: "OFF",
	}
	FunctionSpec_JitCompile_value = map[string]int32{
		"DEFAULT": 0,
		"ON":      1,
		"OFF":     2,
	}
)

Enum value maps for FunctionSpec_JitCompile.

var (
	SaverDef_CheckpointFormatVersion_name = map[int32]string{
		0: "LEGACY",
		1: "V1",
		2: "V2",
	}
	SaverDef_CheckpointFormatVersion_value = map[string]int32{
		"LEGACY": 0,
		"V1":     1,
		"V2":     2,
	}
)

Enum value maps for SaverDef_CheckpointFormatVersion.

var (
	TypeSpecProto_TypeSpecClass_name = map[int32]string{
		0:  "UNKNOWN",
		1:  "SPARSE_TENSOR_SPEC",
		2:  "INDEXED_SLICES_SPEC",
		3:  "RAGGED_TENSOR_SPEC",
		4:  "TENSOR_ARRAY_SPEC",
		5:  "DATA_DATASET_SPEC",
		6:  "DATA_ITERATOR_SPEC",
		7:  "OPTIONAL_SPEC",
		8:  "PER_REPLICA_SPEC",
		9:  "VARIABLE_SPEC",
		10: "ROW_PARTITION_SPEC",
		12: "REGISTERED_TYPE_SPEC",
		13: "EXTENSION_TYPE_SPEC",
	}
	TypeSpecProto_TypeSpecClass_value = map[string]int32{
		"UNKNOWN":              0,
		"SPARSE_TENSOR_SPEC":   1,
		"INDEXED_SLICES_SPEC":  2,
		"RAGGED_TENSOR_SPEC":   3,
		"TENSOR_ARRAY_SPEC":    4,
		"DATA_DATASET_SPEC":    5,
		"DATA_ITERATOR_SPEC":   6,
		"OPTIONAL_SPEC":        7,
		"PER_REPLICA_SPEC":     8,
		"VARIABLE_SPEC":        9,
		"ROW_PARTITION_SPEC":   10,
		"REGISTERED_TYPE_SPEC": 12,
		"EXTENSION_TYPE_SPEC":  13,
	}
)

Enum value maps for TypeSpecProto_TypeSpecClass.

var (
	BundleHeaderProto_Endianness_name = map[int32]string{
		0: "LITTLE",
		1: "BIG",
	}
	BundleHeaderProto_Endianness_value = map[string]int32{
		"LITTLE": 0,
		"BIG":    1,
	}
)

Enum value maps for BundleHeaderProto_Endianness.

var (
	VerifierConfig_Toggle_name = map[int32]string{
		0: "DEFAULT",
		1: "ON",
		2: "OFF",
	}
	VerifierConfig_Toggle_value = map[string]int32{
		"DEFAULT": 0,
		"ON":      1,
		"OFF":     2,
	}
)

Enum value maps for VerifierConfig_Toggle.

var File_tensorflow_core_protobuf_bfc_memory_map_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_cluster_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_composite_tensor_variant_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_config_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_control_flow_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_core_platform_payloads_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_data_service_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_debug_event_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_debug_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_device_filters_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_device_properties_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_error_codes_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_fingerprint_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_meta_graph_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_named_tensor_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_queue_runner_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_remote_tensor_handle_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_rewriter_config_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_rpc_options_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_saved_model_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_saved_object_graph_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_saver_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_service_config_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_snapshot_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_status_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_struct_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_tensor_bundle_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_tensorflow_server_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_trackable_object_graph_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_transport_options_proto protoreflect.FileDescriptor
var File_tensorflow_core_protobuf_verifier_config_proto protoreflect.FileDescriptor
var File_tensorflow_core_util_quantization_uniform_quant_ops_attr_proto protoreflect.FileDescriptor
var File_tsl_protobuf_coordination_config_proto protoreflect.FileDescriptor

Functions

This section is empty.

Types

type AssetFileDef

type AssetFileDef struct {

	// The tensor to bind the asset filename to.
	TensorInfo *TensorInfo `protobuf:"bytes,1,opt,name=tensor_info,json=tensorInfo,proto3" json:"tensor_info,omitempty"`
	// The filename within an assets directory. Note: does not include the path
	// prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename
	// would be "vocab.txt".
	Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"`
	// contains filtered or unexported fields
}

An asset file def for a single file or a set of sharded files with the same name.
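
As a rough illustration (placeholder import path), only the bare filename is stored; the directory prefix is dropped:

package main

import (
	"fmt"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	asset := &corepb.AssetFileDef{
		// For an asset at /tmp/path/vocab.txt, only "vocab.txt" is recorded.
		Filename: "vocab.txt",
		// TensorInfo would bind the filename to a graph tensor; omitted here.
	}
	fmt.Println(asset.GetFilename())
}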

func (*AssetFileDef) Descriptor deprecated

func (*AssetFileDef) Descriptor() ([]byte, []int)

Deprecated: Use AssetFileDef.ProtoReflect.Descriptor instead.

func (*AssetFileDef) GetFilename

func (x *AssetFileDef) GetFilename() string

func (*AssetFileDef) GetTensorInfo

func (x *AssetFileDef) GetTensorInfo() *TensorInfo

func (*AssetFileDef) ProtoMessage

func (*AssetFileDef) ProtoMessage()

func (*AssetFileDef) ProtoReflect

func (x *AssetFileDef) ProtoReflect() protoreflect.Message

func (*AssetFileDef) Reset

func (x *AssetFileDef) Reset()

func (*AssetFileDef) String

func (x *AssetFileDef) String() string

type AutoParallelOptions

type AutoParallelOptions struct {
	Enable      bool  `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"`
	NumReplicas int32 `protobuf:"varint,2,opt,name=num_replicas,json=numReplicas,proto3" json:"num_replicas,omitempty"`
	// contains filtered or unexported fields
}

func (*AutoParallelOptions) Descriptor deprecated

func (*AutoParallelOptions) Descriptor() ([]byte, []int)

Deprecated: Use AutoParallelOptions.ProtoReflect.Descriptor instead.

func (*AutoParallelOptions) GetEnable

func (x *AutoParallelOptions) GetEnable() bool

func (*AutoParallelOptions) GetNumReplicas

func (x *AutoParallelOptions) GetNumReplicas() int32

func (*AutoParallelOptions) ProtoMessage

func (*AutoParallelOptions) ProtoMessage()

func (*AutoParallelOptions) ProtoReflect

func (x *AutoParallelOptions) ProtoReflect() protoreflect.Message

func (*AutoParallelOptions) Reset

func (x *AutoParallelOptions) Reset()

func (*AutoParallelOptions) String

func (x *AutoParallelOptions) String() string

type BoundedTensorSpecProto

type BoundedTensorSpecProto struct {
	Name    string                                  `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Shape   *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Dtype   types_go_proto.DataType                 `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Minimum *tensor_go_proto.TensorProto            `protobuf:"bytes,4,opt,name=minimum,proto3" json:"minimum,omitempty"`
	Maximum *tensor_go_proto.TensorProto            `protobuf:"bytes,5,opt,name=maximum,proto3" json:"maximum,omitempty"`
	// contains filtered or unexported fields
}

A protobuf to represent tf.BoundedTensorSpec.

func (*BoundedTensorSpecProto) Descriptor deprecated

func (*BoundedTensorSpecProto) Descriptor() ([]byte, []int)

Deprecated: Use BoundedTensorSpecProto.ProtoReflect.Descriptor instead.

func (*BoundedTensorSpecProto) GetDtype

func (*BoundedTensorSpecProto) GetMaximum

func (*BoundedTensorSpecProto) GetMinimum

func (*BoundedTensorSpecProto) GetName

func (x *BoundedTensorSpecProto) GetName() string

func (*BoundedTensorSpecProto) GetShape

func (*BoundedTensorSpecProto) ProtoMessage

func (*BoundedTensorSpecProto) ProtoMessage()

func (*BoundedTensorSpecProto) ProtoReflect

func (x *BoundedTensorSpecProto) ProtoReflect() protoreflect.Message

func (*BoundedTensorSpecProto) Reset

func (x *BoundedTensorSpecProto) Reset()

func (*BoundedTensorSpecProto) String

func (x *BoundedTensorSpecProto) String() string

type BundleEntryProto

type BundleEntryProto struct {

	// The tensor dtype and shape.
	Dtype types_go_proto.DataType                 `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Shape *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	// The binary content of the tensor lies in:
	//
	//	File "shard_id": bytes [offset, offset + size).
	ShardId int32 `protobuf:"varint,3,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"`
	Offset  int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
	Size    int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"`
	// The CRC32C checksum of the tensor bytes.
	Crc32C uint32 `protobuf:"fixed32,6,opt,name=crc32c,proto3" json:"crc32c,omitempty"`
	// Iff present, this entry represents a partitioned tensor.  The previous
	// fields are interpreted as follows:
	//
	//	"dtype", "shape": describe the full tensor.
	//	"shard_id", "offset", "size", "crc32c": all IGNORED.
	//	   This information for each slice can be looked up in its own
	//	   BundleEntryProto, keyed by each "slice_name".
	Slices []*tensor_slice_go_proto.TensorSliceProto `protobuf:"bytes,7,rep,name=slices,proto3" json:"slices,omitempty"`
	// contains filtered or unexported fields
}

Describes the metadata related to a checkpointed tensor.
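
A sketch of how shard_id/offset/size could be used to locate a tensor's bytes. The "<prefix>.data-XXXXX-of-YYYYY" file-name pattern and the import path are assumptions, not taken from this package:

package main

import (
	"fmt"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

// dataFileFor returns the shard file assumed to hold the tensor's bytes.
func dataFileFor(prefix string, entry *corepb.BundleEntryProto, numShards int32) string {
	return fmt.Sprintf("%s.data-%05d-of-%05d", prefix, entry.GetShardId(), numShards)
}

func main() {
	entry := &corepb.BundleEntryProto{ShardId: 0, Offset: 1024, Size: 4096}
	// The tensor content lies in bytes [Offset, Offset+Size) of the shard file.
	fmt.Printf("read %s bytes [%d, %d)\n",
		dataFileFor("model.ckpt", entry, 1), entry.GetOffset(), entry.GetOffset()+entry.GetSize())
}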

func (*BundleEntryProto) Descriptor deprecated

func (*BundleEntryProto) Descriptor() ([]byte, []int)

Deprecated: Use BundleEntryProto.ProtoReflect.Descriptor instead.

func (*BundleEntryProto) GetCrc32C

func (x *BundleEntryProto) GetCrc32C() uint32

func (*BundleEntryProto) GetDtype

func (*BundleEntryProto) GetOffset

func (x *BundleEntryProto) GetOffset() int64

func (*BundleEntryProto) GetShape

func (*BundleEntryProto) GetShardId

func (x *BundleEntryProto) GetShardId() int32

func (*BundleEntryProto) GetSize

func (x *BundleEntryProto) GetSize() int64

func (*BundleEntryProto) GetSlices

func (*BundleEntryProto) ProtoMessage

func (*BundleEntryProto) ProtoMessage()

func (*BundleEntryProto) ProtoReflect

func (x *BundleEntryProto) ProtoReflect() protoreflect.Message

func (*BundleEntryProto) Reset

func (x *BundleEntryProto) Reset()

func (*BundleEntryProto) String

func (x *BundleEntryProto) String() string

type BundleHeaderProto

type BundleHeaderProto struct {

	// Number of data files in the bundle.
	NumShards  int32                        `protobuf:"varint,1,opt,name=num_shards,json=numShards,proto3" json:"num_shards,omitempty"`
	Endianness BundleHeaderProto_Endianness `protobuf:"varint,2,opt,name=endianness,proto3,enum=tensorflow.BundleHeaderProto_Endianness" json:"endianness,omitempty"`
	// Versioning of the tensor bundle format.
	Version *versions_go_proto.VersionDef `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
	// contains filtered or unexported fields
}

Special header that is associated with a bundle.

TODO(zongheng,zhifengc): maybe in the future, we can add information about which binary produced this checkpoint, a timestamp, etc. Sometimes this can be valuable debugging information. And, if needed, it can be used as defensive information ensuring that the reader (binary version) of the checkpoint and the writer (binary version) match within a certain range, etc.

func (*BundleHeaderProto) Descriptor deprecated

func (*BundleHeaderProto) Descriptor() ([]byte, []int)

Deprecated: Use BundleHeaderProto.ProtoReflect.Descriptor instead.

func (*BundleHeaderProto) GetEndianness

func (*BundleHeaderProto) GetNumShards

func (x *BundleHeaderProto) GetNumShards() int32

func (*BundleHeaderProto) GetVersion

func (*BundleHeaderProto) ProtoMessage

func (*BundleHeaderProto) ProtoMessage()

func (*BundleHeaderProto) ProtoReflect

func (x *BundleHeaderProto) ProtoReflect() protoreflect.Message

func (*BundleHeaderProto) Reset

func (x *BundleHeaderProto) Reset()

func (*BundleHeaderProto) String

func (x *BundleHeaderProto) String() string

type BundleHeaderProto_Endianness

type BundleHeaderProto_Endianness int32

An enum indicating the endianness of the platform that produced this bundle. A bundle can only be read by a platform with matching endianness. Defaults to LITTLE, as most modern platforms are little-endian.

Affects the binary tensor data bytes only, not the metadata in protobufs.

const (
	BundleHeaderProto_LITTLE BundleHeaderProto_Endianness = 0
	BundleHeaderProto_BIG    BundleHeaderProto_Endianness = 1
)
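
For example, a reader might branch on the header's endianness before interpreting tensor bytes; this is only a sketch with a placeholder import path:

package main

import (
	"fmt"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	header := &corepb.BundleHeaderProto{
		NumShards:  1,
		Endianness: corepb.BundleHeaderProto_LITTLE,
	}

	switch header.GetEndianness() {
	case corepb.BundleHeaderProto_LITTLE:
		fmt.Println("bundle written on a little-endian platform")
	case corepb.BundleHeaderProto_BIG:
		fmt.Println("bundle written on a big-endian platform; cannot be read on little-endian hosts")
	}
}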

func (BundleHeaderProto_Endianness) Descriptor

func (BundleHeaderProto_Endianness) Enum

func (BundleHeaderProto_Endianness) EnumDescriptor deprecated

func (BundleHeaderProto_Endianness) EnumDescriptor() ([]byte, []int)

Deprecated: Use BundleHeaderProto_Endianness.Descriptor instead.

func (BundleHeaderProto_Endianness) Number

func (BundleHeaderProto_Endianness) String

func (BundleHeaderProto_Endianness) Type

type CallableOptions

type CallableOptions struct {

	// Tensors to be fed in the callable. Each feed is the name of a tensor.
	Feed []string `protobuf:"bytes,1,rep,name=feed,proto3" json:"feed,omitempty"`
	// Fetches. A list of tensor names. The caller of the callable expects a
	// tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
	// order of specified fetches does not change the execution order.
	Fetch []string `protobuf:"bytes,2,rep,name=fetch,proto3" json:"fetch,omitempty"`
	// Target Nodes. A list of node names. The named nodes will be run by the
	// callable but their outputs will not be returned.
	Target []string `protobuf:"bytes,3,rep,name=target,proto3" json:"target,omitempty"`
	// Options that will be applied to each run.
	RunOptions *RunOptions `protobuf:"bytes,4,opt,name=run_options,json=runOptions,proto3" json:"run_options,omitempty"`
	// Tensors to be connected in the callable. Each TensorConnection denotes
	// a pair of tensors in the graph, between which an edge will be created
	// in the callable.
	TensorConnection []*TensorConnection `protobuf:"bytes,5,rep,name=tensor_connection,json=tensorConnection,proto3" json:"tensor_connection,omitempty"`
	// The Tensor objects fed in the callable and fetched from the callable
	// are expected to be backed by host (CPU) memory by default.
	//
	// The options below allow changing that - feeding tensors backed by
	// device memory, or returning tensors that are backed by device memory.
	//
	// The maps below map the name of a feed/fetch tensor (which appears in
	// 'feed' or 'fetch' fields above), to the fully qualified name of the device
	// owning the memory backing the contents of the tensor.
	//
	// For example, creating a callable with the following options:
	//
	//	CallableOptions {
	//	  feed: "a:0"
	//	  feed: "b:0"
	//
	//	  fetch: "x:0"
	//	  fetch: "y:0"
	//
	//	  feed_devices: {
	//	    "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//	  }
	//
	//	  fetch_devices: {
	//	    "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
	//	 }
	//	}
	//
	// means that the Callable expects:
	// - The first argument ("a:0") is a Tensor backed by GPU memory.
	// - The second argument ("b:0") is a Tensor backed by host memory.
	// and of its return values:
	// - The first output ("x:0") will be backed by host memory.
	// - The second output ("y:0") will be backed by GPU memory.
	//
	// FEEDS:
	// It is the responsibility of the caller to ensure that the memory of the fed
	// tensors will be correctly initialized and synchronized before it is
	// accessed by operations executed during the call to Session::RunCallable().
	//
	// This is typically ensured by using the TensorFlow memory allocators
	// (Device::GetAllocator()) to create the Tensor to be fed.
	//
	// Alternatively, for CUDA-enabled GPU devices, this typically means that the
	// operation that produced the contents of the tensor has completed, i.e., the
	// CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
	// cuStreamSynchronize()).
	FeedDevices  map[string]string `` /* 182-byte string literal not displayed */
	FetchDevices map[string]string `` /* 185-byte string literal not displayed */
	// By default, RunCallable() will synchronize the GPU stream before returning
	// fetched tensors on a GPU device, to ensure that the values in those tensors
	// have been produced. This simplifies interacting with the tensors, but
	// potentially incurs a performance hit.
	//
	// If this options is set to true, the caller is responsible for ensuring
	// that the values in the fetched tensors have been produced before they are
	// used. The caller can do this by invoking `Device::Sync()` on the underlying
	// device(s), or by feeding the tensors back to the same Session using
	// `feed_devices` with the same corresponding device name.
	FetchSkipSync bool `protobuf:"varint,8,opt,name=fetch_skip_sync,json=fetchSkipSync,proto3" json:"fetch_skip_sync,omitempty"`
	// contains filtered or unexported fields
}

Defines a subgraph in another `GraphDef` as a set of feed points and nodes to be fetched or executed.

Compare with the arguments to `Session::Run()`.
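
The feed_devices example from the field comments can be written in Go roughly as follows (placeholder import path; prototext is used only to print the result):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	opts := &corepb.CallableOptions{
		Feed:  []string{"a:0", "b:0"},
		Fetch: []string{"x:0", "y:0"},
		// "a:0" is fed from GPU memory; "b:0" defaults to host memory.
		FeedDevices: map[string]string{
			"a:0": "/job:localhost/replica:0/task:0/device:GPU:0",
		},
		// "y:0" is returned in GPU memory; "x:0" defaults to host memory.
		FetchDevices: map[string]string{
			"y:0": "/job:localhost/replica:0/task:0/device:GPU:0",
		},
	}
	fmt.Println(prototext.Format(opts))
}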

func (*CallableOptions) Descriptor deprecated

func (*CallableOptions) Descriptor() ([]byte, []int)

Deprecated: Use CallableOptions.ProtoReflect.Descriptor instead.

func (*CallableOptions) GetFeed

func (x *CallableOptions) GetFeed() []string

func (*CallableOptions) GetFeedDevices

func (x *CallableOptions) GetFeedDevices() map[string]string

func (*CallableOptions) GetFetch

func (x *CallableOptions) GetFetch() []string

func (*CallableOptions) GetFetchDevices

func (x *CallableOptions) GetFetchDevices() map[string]string

func (*CallableOptions) GetFetchSkipSync

func (x *CallableOptions) GetFetchSkipSync() bool

func (*CallableOptions) GetRunOptions

func (x *CallableOptions) GetRunOptions() *RunOptions

func (*CallableOptions) GetTarget

func (x *CallableOptions) GetTarget() []string

func (*CallableOptions) GetTensorConnection

func (x *CallableOptions) GetTensorConnection() []*TensorConnection

func (*CallableOptions) ProtoMessage

func (*CallableOptions) ProtoMessage()

func (*CallableOptions) ProtoReflect

func (x *CallableOptions) ProtoReflect() protoreflect.Message

func (*CallableOptions) Reset

func (x *CallableOptions) Reset()

func (*CallableOptions) String

func (x *CallableOptions) String() string

type CapturedTensor

type CapturedTensor struct {

	// Name of captured tensor
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Name of concrete function which contains the computed graph tensor.
	ConcreteFunction string `protobuf:"bytes,2,opt,name=concrete_function,json=concreteFunction,proto3" json:"concrete_function,omitempty"`
	// contains filtered or unexported fields
}

func (*CapturedTensor) Descriptor deprecated

func (*CapturedTensor) Descriptor() ([]byte, []int)

Deprecated: Use CapturedTensor.ProtoReflect.Descriptor instead.

func (*CapturedTensor) GetConcreteFunction

func (x *CapturedTensor) GetConcreteFunction() string

func (*CapturedTensor) GetName

func (x *CapturedTensor) GetName() string

func (*CapturedTensor) ProtoMessage

func (*CapturedTensor) ProtoMessage()

func (*CapturedTensor) ProtoReflect

func (x *CapturedTensor) ProtoReflect() protoreflect.Message

func (*CapturedTensor) Reset

func (x *CapturedTensor) Reset()

func (*CapturedTensor) String

func (x *CapturedTensor) String() string

type ClusterDef

type ClusterDef struct {

	// The jobs that comprise the cluster.
	Job []*JobDef `protobuf:"bytes,1,rep,name=job,proto3" json:"job,omitempty"`
	// contains filtered or unexported fields
}

Defines a TensorFlow cluster as a set of jobs.
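
A sketch of a two-job cluster. The JobDef field names (Name, Tasks) are assumed from tensorflow/core/protobuf/cluster.proto, since JobDef is not shown in this listing, and the import path is a placeholder:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	cluster := &corepb.ClusterDef{
		Job: []*corepb.JobDef{
			// JobDef fields assumed: Name string, Tasks map[int32]string.
			{Name: "ps", Tasks: map[int32]string{0: "ps0.example.com:2222"}},
			{Name: "worker", Tasks: map[int32]string{
				0: "worker0.example.com:2222",
				1: "worker1.example.com:2222",
			}},
		},
	}
	fmt.Println(prototext.Format(cluster))
}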

func (*ClusterDef) Descriptor deprecated

func (*ClusterDef) Descriptor() ([]byte, []int)

Deprecated: Use ClusterDef.ProtoReflect.Descriptor instead.

func (*ClusterDef) GetJob

func (x *ClusterDef) GetJob() []*JobDef

func (*ClusterDef) ProtoMessage

func (*ClusterDef) ProtoMessage()

func (*ClusterDef) ProtoReflect

func (x *ClusterDef) ProtoReflect() protoreflect.Message

func (*ClusterDef) Reset

func (x *ClusterDef) Reset()

func (*ClusterDef) String

func (x *ClusterDef) String() string

type ClusterDeviceFilters

type ClusterDeviceFilters struct {
	Jobs []*JobDeviceFilters `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
	// contains filtered or unexported fields
}

Defines the device filters for jobs in a cluster.

func (*ClusterDeviceFilters) Descriptor deprecated

func (*ClusterDeviceFilters) Descriptor() ([]byte, []int)

Deprecated: Use ClusterDeviceFilters.ProtoReflect.Descriptor instead.

func (*ClusterDeviceFilters) GetJobs

func (x *ClusterDeviceFilters) GetJobs() []*JobDeviceFilters

func (*ClusterDeviceFilters) ProtoMessage

func (*ClusterDeviceFilters) ProtoMessage()

func (*ClusterDeviceFilters) ProtoReflect

func (x *ClusterDeviceFilters) ProtoReflect() protoreflect.Message

func (*ClusterDeviceFilters) Reset

func (x *ClusterDeviceFilters) Reset()

func (*ClusterDeviceFilters) String

func (x *ClusterDeviceFilters) String() string

type CodeLocation

type CodeLocation struct {

	// Host name on which the source files are located.
	HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
	// IDs of stack frames, each of which is pointed to
	// by a unique ID. The ordering of the frames is consistent with Python's
	// `traceback.extract_tb()`.
	StackFrameIds []string `protobuf:"bytes,2,rep,name=stack_frame_ids,json=stackFrameIds,proto3" json:"stack_frame_ids,omitempty"`
	// contains filtered or unexported fields
}

Code location information: A stack trace with host-name information. Instead of encoding the detailed stack trace, this proto refers to IDs of stack frames stored as `StackFrameWithId` protos.

func (*CodeLocation) Descriptor deprecated

func (*CodeLocation) Descriptor() ([]byte, []int)

Deprecated: Use CodeLocation.ProtoReflect.Descriptor instead.

func (*CodeLocation) GetHostName

func (x *CodeLocation) GetHostName() string

func (*CodeLocation) GetStackFrameIds

func (x *CodeLocation) GetStackFrameIds() []string

func (*CodeLocation) ProtoMessage

func (*CodeLocation) ProtoMessage()

func (*CodeLocation) ProtoReflect

func (x *CodeLocation) ProtoReflect() protoreflect.Message

func (*CodeLocation) Reset

func (x *CodeLocation) Reset()

func (*CodeLocation) String

func (x *CodeLocation) String() string

type CollectionDef

type CollectionDef struct {

	// Types that are assignable to Kind:
	//
	//	*CollectionDef_NodeList_
	//	*CollectionDef_BytesList_
	//	*CollectionDef_Int64List_
	//	*CollectionDef_FloatList_
	//	*CollectionDef_AnyList_
	Kind isCollectionDef_Kind `protobuf_oneof:"kind"`
	// contains filtered or unexported fields
}

CollectionDef should cover most collections. To add a user-defined collection, do one of the following:

  1. For simple data types, such as string, int, float:

     tf.add_to_collection("your_collection_name", your_simple_value)

     Strings will be stored as bytes_list.

  2. For Protobuf types, there are three ways to add them:

     a. tf.add_to_collection("your_collection_name", your_proto.SerializeToString())

        collection_def { key: "user_defined_bytes_collection" value { bytes_list { value: "queue_name: \"test_queue\"\n" } } }

     b. tf.add_to_collection("your_collection_name", str(your_proto))

        collection_def { key: "user_defined_string_collection" value { bytes_list { value: "\n\ntest_queue" } } }

     c. any_buf = any_pb2.Any()
        tf.add_to_collection("your_collection_name", any_buf.Pack(your_proto))

        collection_def { key: "user_defined_any_collection" value { any_list { value { type_url: "type.googleapis.com/tensorflow.QueueRunnerDef" value: "\n\ntest_queue" } } } }

  3. For Python objects, implement to_proto() and from_proto(), and register them in the following manner:

     ops.register_proto_function("your_collection_name", proto_type, to_proto=YourPythonObject.to_proto, from_proto=YourPythonObject.from_proto)

     These functions will be invoked to serialize and deserialize the collection. For example:

     ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=Variable.to_proto, from_proto=Variable.from_proto)
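
On the Go side, the Kind oneof is read through the generated getters; a nil result means that variant is not set. A minimal sketch of dispatching on the kind (placeholder import path):

package main

import (
	"fmt"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

// describe reports which variant of the Kind oneof a CollectionDef carries.
func describe(cd *corepb.CollectionDef) string {
	switch {
	case cd.GetNodeList() != nil:
		return fmt.Sprintf("node_list with %d nodes", len(cd.GetNodeList().GetValue()))
	case cd.GetBytesList() != nil:
		return fmt.Sprintf("bytes_list with %d entries", len(cd.GetBytesList().GetValue()))
	case cd.GetInt64List() != nil:
		return fmt.Sprintf("int64_list with %d values", len(cd.GetInt64List().GetValue()))
	case cd.GetFloatList() != nil:
		return fmt.Sprintf("float_list with %d values", len(cd.GetFloatList().GetValue()))
	case cd.GetAnyList() != nil:
		return fmt.Sprintf("any_list with %d messages", len(cd.GetAnyList().GetValue()))
	default:
		return "empty collection"
	}
}

func main() {
	cd := &corepb.CollectionDef{
		Kind: &corepb.CollectionDef_NodeList_{
			NodeList: &corepb.CollectionDef_NodeList{Value: []string{"ImageSummary:0"}},
		},
	}
	fmt.Println(describe(cd))
}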

func (*CollectionDef) Descriptor deprecated

func (*CollectionDef) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef.ProtoReflect.Descriptor instead.

func (*CollectionDef) GetAnyList

func (x *CollectionDef) GetAnyList() *CollectionDef_AnyList

func (*CollectionDef) GetBytesList

func (x *CollectionDef) GetBytesList() *CollectionDef_BytesList

func (*CollectionDef) GetFloatList

func (x *CollectionDef) GetFloatList() *CollectionDef_FloatList

func (*CollectionDef) GetInt64List

func (x *CollectionDef) GetInt64List() *CollectionDef_Int64List

func (*CollectionDef) GetKind

func (m *CollectionDef) GetKind() isCollectionDef_Kind

func (*CollectionDef) GetNodeList

func (x *CollectionDef) GetNodeList() *CollectionDef_NodeList

func (*CollectionDef) ProtoMessage

func (*CollectionDef) ProtoMessage()

func (*CollectionDef) ProtoReflect

func (x *CollectionDef) ProtoReflect() protoreflect.Message

func (*CollectionDef) Reset

func (x *CollectionDef) Reset()

func (*CollectionDef) String

func (x *CollectionDef) String() string

type CollectionDef_AnyList

type CollectionDef_AnyList struct {
	Value []*anypb.Any `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

AnyList is used for collecting Any protos.

func (*CollectionDef_AnyList) Descriptor deprecated

func (*CollectionDef_AnyList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_AnyList.ProtoReflect.Descriptor instead.

func (*CollectionDef_AnyList) GetValue

func (x *CollectionDef_AnyList) GetValue() []*anypb.Any

func (*CollectionDef_AnyList) ProtoMessage

func (*CollectionDef_AnyList) ProtoMessage()

func (*CollectionDef_AnyList) ProtoReflect

func (x *CollectionDef_AnyList) ProtoReflect() protoreflect.Message

func (*CollectionDef_AnyList) Reset

func (x *CollectionDef_AnyList) Reset()

func (*CollectionDef_AnyList) String

func (x *CollectionDef_AnyList) String() string

type CollectionDef_AnyList_

type CollectionDef_AnyList_ struct {
	AnyList *CollectionDef_AnyList `protobuf:"bytes,5,opt,name=any_list,json=anyList,proto3,oneof"`
}

type CollectionDef_BytesList

type CollectionDef_BytesList struct {
	Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

BytesList is used for collecting strings and serialized protobufs. For example:

collection_def {
  key: "trainable_variables"
  value {
    bytes_list {
      value: "\n\017conv1/weights:0\022\024conv1/weights/Assign
             \032\024conv1/weights/read:0"
      value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032
             \023conv1/biases/read:0"
    }
  }
}

func (*CollectionDef_BytesList) Descriptor deprecated

func (*CollectionDef_BytesList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_BytesList.ProtoReflect.Descriptor instead.

func (*CollectionDef_BytesList) GetValue

func (x *CollectionDef_BytesList) GetValue() [][]byte

func (*CollectionDef_BytesList) ProtoMessage

func (*CollectionDef_BytesList) ProtoMessage()

func (*CollectionDef_BytesList) ProtoReflect

func (x *CollectionDef_BytesList) ProtoReflect() protoreflect.Message

func (*CollectionDef_BytesList) Reset

func (x *CollectionDef_BytesList) Reset()

func (*CollectionDef_BytesList) String

func (x *CollectionDef_BytesList) String() string

type CollectionDef_BytesList_

type CollectionDef_BytesList_ struct {
	BytesList *CollectionDef_BytesList `protobuf:"bytes,2,opt,name=bytes_list,json=bytesList,proto3,oneof"`
}

type CollectionDef_FloatList

type CollectionDef_FloatList struct {
	Value []float32 `protobuf:"fixed32,1,rep,packed,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

FloatList is used for collecting float values.

func (*CollectionDef_FloatList) Descriptor deprecated

func (*CollectionDef_FloatList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_FloatList.ProtoReflect.Descriptor instead.

func (*CollectionDef_FloatList) GetValue

func (x *CollectionDef_FloatList) GetValue() []float32

func (*CollectionDef_FloatList) ProtoMessage

func (*CollectionDef_FloatList) ProtoMessage()

func (*CollectionDef_FloatList) ProtoReflect

func (x *CollectionDef_FloatList) ProtoReflect() protoreflect.Message

func (*CollectionDef_FloatList) Reset

func (x *CollectionDef_FloatList) Reset()

func (*CollectionDef_FloatList) String

func (x *CollectionDef_FloatList) String() string

type CollectionDef_FloatList_

type CollectionDef_FloatList_ struct {
	FloatList *CollectionDef_FloatList `protobuf:"bytes,4,opt,name=float_list,json=floatList,proto3,oneof"`
}

type CollectionDef_Int64List

type CollectionDef_Int64List struct {
	Value []int64 `protobuf:"varint,1,rep,packed,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

Int64List is used for collecting int, int64 and long values.

func (*CollectionDef_Int64List) Descriptor deprecated

func (*CollectionDef_Int64List) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_Int64List.ProtoReflect.Descriptor instead.

func (*CollectionDef_Int64List) GetValue

func (x *CollectionDef_Int64List) GetValue() []int64

func (*CollectionDef_Int64List) ProtoMessage

func (*CollectionDef_Int64List) ProtoMessage()

func (*CollectionDef_Int64List) ProtoReflect

func (x *CollectionDef_Int64List) ProtoReflect() protoreflect.Message

func (*CollectionDef_Int64List) Reset

func (x *CollectionDef_Int64List) Reset()

func (*CollectionDef_Int64List) String

func (x *CollectionDef_Int64List) String() string

type CollectionDef_Int64List_

type CollectionDef_Int64List_ struct {
	Int64List *CollectionDef_Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,proto3,oneof"`
}

type CollectionDef_NodeList

type CollectionDef_NodeList struct {
	Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

NodeList is used for collecting nodes in graph. For example

collection_def {
  key: "summaries"
  value {
    node_list {
      value: "input_producer/ScalarSummary:0"
      value: "shuffle_batch/ScalarSummary:0"
      value: "ImageSummary:0"
    }
  }
}

func (*CollectionDef_NodeList) Descriptor deprecated

func (*CollectionDef_NodeList) Descriptor() ([]byte, []int)

Deprecated: Use CollectionDef_NodeList.ProtoReflect.Descriptor instead.

func (*CollectionDef_NodeList) GetValue

func (x *CollectionDef_NodeList) GetValue() []string

func (*CollectionDef_NodeList) ProtoMessage

func (*CollectionDef_NodeList) ProtoMessage()

func (*CollectionDef_NodeList) ProtoReflect

func (x *CollectionDef_NodeList) ProtoReflect() protoreflect.Message

func (*CollectionDef_NodeList) Reset

func (x *CollectionDef_NodeList) Reset()

func (*CollectionDef_NodeList) String

func (x *CollectionDef_NodeList) String() string

type CollectionDef_NodeList_

type CollectionDef_NodeList_ struct {
	NodeList *CollectionDef_NodeList `protobuf:"bytes,1,opt,name=node_list,json=nodeList,proto3,oneof"`
}

type CompositeTensorVariantMetadata

type CompositeTensorVariantMetadata struct {
	TypeSpecProto *TypeSpecProto `protobuf:"bytes,1,opt,name=type_spec_proto,json=typeSpecProto,proto3" json:"type_spec_proto,omitempty"`
	// contains filtered or unexported fields
}

Metadata for CompositeTensorVariant, used when serializing as Variant.

We define a new message here (rather than directly using TypeSpecProto for the metadata string) to retain flexibility to change the metadata encoding to support additional features.

func (*CompositeTensorVariantMetadata) Descriptor deprecated

func (*CompositeTensorVariantMetadata) Descriptor() ([]byte, []int)

Deprecated: Use CompositeTensorVariantMetadata.ProtoReflect.Descriptor instead.

func (*CompositeTensorVariantMetadata) GetTypeSpecProto

func (x *CompositeTensorVariantMetadata) GetTypeSpecProto() *TypeSpecProto

func (*CompositeTensorVariantMetadata) ProtoMessage

func (*CompositeTensorVariantMetadata) ProtoMessage()

func (*CompositeTensorVariantMetadata) ProtoReflect

func (*CompositeTensorVariantMetadata) Reset

func (x *CompositeTensorVariantMetadata) Reset()

func (*CompositeTensorVariantMetadata) String

type CondContextDef

type CondContextDef struct {

	// Name of the context.
	ContextName string `protobuf:"bytes,1,opt,name=context_name,json=contextName,proto3" json:"context_name,omitempty"`
	// Name of the pred tensor.
	PredName string `protobuf:"bytes,2,opt,name=pred_name,json=predName,proto3" json:"pred_name,omitempty"`
	// Name of the pivot tensor.
	PivotName string `protobuf:"bytes,3,opt,name=pivot_name,json=pivotName,proto3" json:"pivot_name,omitempty"`
	// Branch prediction. 0 or 1.
	Branch int32 `protobuf:"varint,4,opt,name=branch,proto3" json:"branch,omitempty"`
	// Values and external values in control flow context.
	ValuesDef *ValuesDef `protobuf:"bytes,5,opt,name=values_def,json=valuesDef,proto3" json:"values_def,omitempty"`
	// Contexts contained inside this context (e.g. nested conds).
	NestedContexts []*ControlFlowContextDef `protobuf:"bytes,6,rep,name=nested_contexts,json=nestedContexts,proto3" json:"nested_contexts,omitempty"`
	// contains filtered or unexported fields
}

Protocol buffer representing a CondContext object.

func (*CondContextDef) Descriptor deprecated

func (*CondContextDef) Descriptor() ([]byte, []int)

Deprecated: Use CondContextDef.ProtoReflect.Descriptor instead.

func (*CondContextDef) GetBranch

func (x *CondContextDef) GetBranch() int32

func (*CondContextDef) GetContextName

func (x *CondContextDef) GetContextName() string

func (*CondContextDef) GetNestedContexts

func (x *CondContextDef) GetNestedContexts() []*ControlFlowContextDef

func (*CondContextDef) GetPivotName

func (x *CondContextDef) GetPivotName() string

func (*CondContextDef) GetPredName

func (x *CondContextDef) GetPredName() string

func (*CondContextDef) GetValuesDef

func (x *CondContextDef) GetValuesDef() *ValuesDef

func (*CondContextDef) ProtoMessage

func (*CondContextDef) ProtoMessage()

func (*CondContextDef) ProtoReflect

func (x *CondContextDef) ProtoReflect() protoreflect.Message

func (*CondContextDef) Reset

func (x *CondContextDef) Reset()

func (*CondContextDef) String

func (x *CondContextDef) String() string

type ConfigProto

type ConfigProto struct {

	// Map from device type name (e.g., "CPU" or "GPU" ) to maximum
	// number of devices of that type to use.  If a particular device
	// type is not found in the map, the system picks an appropriate
	// number.
	DeviceCount map[string]int32 `` /* 183-byte string literal not displayed */
	// The execution of an individual op (for some op types) can be
	// parallelized on a pool of intra_op_parallelism_threads.
	// 0 means the system picks an appropriate number.
	//
	// If you create an ordinary session, e.g., from Python or C++,
	// then there is exactly one intra op thread pool per process.
	// The first session created determines the number of threads in this pool.
	// All subsequent sessions reuse/share this one global pool.
	//
	// There are notable exceptions to the default behavior described above:
	//  1. There is an environment variable  for overriding this thread pool,
	//     named TF_OVERRIDE_GLOBAL_THREADPOOL.
	//  2. When connecting to a server, such as a remote `tf.train.Server`
	//     instance, then this option will be ignored altogether.
	IntraOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// Nodes that perform blocking operations are enqueued on a pool of
	// inter_op_parallelism_threads available in each process.
	//
	// 0 means the system picks an appropriate number.
	// Negative means all operations are performed in caller's thread.
	//
	// Note that the first Session created in the process sets the
	// number of threads for all future sessions unless use_per_session_threads is
	// true or session_inter_op_thread_pool is configured.
	InterOpParallelismThreads int32 `` /* 141-byte string literal not displayed */
	// If true, use a new set of threads for this session rather than the global
	// pool of threads. Only supported by direct sessions.
	//
	// If false, use the global threads created by the first session, or the
	// per-session thread pools configured by session_inter_op_thread_pool.
	//
	// This option is deprecated. The same effect can be achieved by setting
	// session_inter_op_thread_pool to have one element, whose num_threads equals
	// inter_op_parallelism_threads.
	UsePerSessionThreads bool `` /* 126-byte string literal not displayed */
	// This option is experimental - it may be replaced with a different mechanism
	// in the future.
	//
	// Configures session thread pools. If this is configured, then RunOptions for
	// a Run call can select the thread pool to use.
	//
	// The intended use is for when some session invocations need to run in a
	// background pool limited to a small number of threads:
	// - For example, a session may be configured to have one large pool (for
	// regular compute) and one small pool (for periodic, low priority work);
	// using the small pool is currently the mechanism for limiting the inter-op
	// parallelism of the low priority work.  Note that it does not limit the
	// parallelism of work spawned by a single op kernel implementation.
	// - Using this setting is normally not needed in training, but may help some
	// serving use cases.
	// - It is also generally recommended to set the global_name field of this
	// proto, to avoid creating multiple large pools. It is typically better to
	// run the non-low-priority work, even across sessions, in a single large
	// pool.
	SessionInterOpThreadPool []*ThreadPoolOptionProto `` /* 140-byte string literal not displayed */
	// Assignment of Nodes to Devices is recomputed every placement_period
	// steps until the system warms up (at which point the recomputation
	// typically slows down automatically).
	PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod,proto3" json:"placement_period,omitempty"`
	// When any filters are present sessions will ignore all devices which do not
	// match the filters. Each filter can be partially specified, e.g. "/job:ps"
	// "/job:worker/replica:3", etc.
	DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"`
	// Options that apply to all GPUs.
	GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions,proto3" json:"gpu_options,omitempty"`
	// Whether soft placement is allowed. If allow_soft_placement is true,
	// an op will be placed on CPU if
	//  1. there's no GPU implementation for the OP
	//
	// or
	//  2. no GPU devices are known or registered
	//
	// or
	//  3. need to co-locate with reftype input(s) which are from CPU.
	AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement,proto3" json:"allow_soft_placement,omitempty"`
	// Whether device placements should be logged.
	LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement,proto3" json:"log_device_placement,omitempty"`
	// Options that apply to all graphs.
	GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"`
	// Global timeout for all blocking operations in this session.  If non-zero,
	// and not overridden on a per-operation basis, this value will be used as the
	// deadline for all blocking operations.
	OperationTimeoutInMs int64 `` /* 127-byte string literal not displayed */
	// Options that apply when this session uses the distributed runtime.
	RpcOptions *for_core_protos_go_proto.RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions,proto3" json:"rpc_options,omitempty"`
	// Optional list of all workers to use in this session.
	ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef,proto3" json:"cluster_def,omitempty"`
	// If true, any resources such as Variables used in the session will not be
	// shared with other sessions. However, when clusterspec propagation is
	// enabled, this field is ignored and sessions are always isolated.
	IsolateSessionState bool `protobuf:"varint,15,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"`
	// When true, WorkerSessions are created with device attributes from the
	// full cluster.
	// This is helpful when a worker wants to partition a graph
	// (for example during a PartitionedCallOp).
	ShareClusterDevicesInSession bool                      `` /* 153-byte string literal not displayed */
	Experimental                 *ConfigProto_Experimental `protobuf:"bytes,16,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// contains filtered or unexported fields
}

Session configuration parameters. The system picks appropriate values for fields that are not set.
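
A sketch of a typical configuration using only fields shown above (placeholder import path; prototext is used only to print the result):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	corepb "example.com/internal/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	cfg := &corepb.ConfigProto{
		// Cap the number of CPU and GPU devices the session may use.
		DeviceCount: map[string]int32{"CPU": 4, "GPU": 1},
		// 0 lets the system pick an appropriate thread count.
		IntraOpParallelismThreads: 0,
		InterOpParallelismThreads: 2,
		// Fall back to CPU when an op has no GPU kernel.
		AllowSoftPlacement: true,
		LogDevicePlacement: false,
		// Fail blocking operations after 60s unless overridden per operation.
		OperationTimeoutInMs: 60000,
	}
	fmt.Println(prototext.Format(cfg))
}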

func (*ConfigProto) Descriptor deprecated

func (*ConfigProto) Descriptor() ([]byte, []int)

Deprecated: Use ConfigProto.ProtoReflect.Descriptor instead.

func (*ConfigProto) GetAllowSoftPlacement

func (x *ConfigProto) GetAllowSoftPlacement() bool

func (*ConfigProto) GetClusterDef

func (x *ConfigProto) GetClusterDef() *ClusterDef

func (*ConfigProto) GetDeviceCount

func (x *ConfigProto) GetDeviceCount() map[string]int32

func (*ConfigProto) GetDeviceFilters

func (x *ConfigProto) GetDeviceFilters() []string

func (*ConfigProto) GetExperimental

func (x *ConfigProto) GetExperimental() *ConfigProto_Experimental

func (*ConfigProto) GetGpuOptions

func (x *ConfigProto) GetGpuOptions() *GPUOptions

func (*ConfigProto) GetGraphOptions

func (x *ConfigProto) GetGraphOptions() *GraphOptions

func (*ConfigProto) GetInterOpParallelismThreads

func (x *ConfigProto) GetInterOpParallelismThreads() int32

func (*ConfigProto) GetIntraOpParallelismThreads

func (x *ConfigProto) GetIntraOpParallelismThreads() int32

func (*ConfigProto) GetIsolateSessionState

func (x *ConfigProto) GetIsolateSessionState() bool

func (*ConfigProto) GetLogDevicePlacement

func (x *ConfigProto) GetLogDevicePlacement() bool

func (*ConfigProto) GetOperationTimeoutInMs

func (x *ConfigProto) GetOperationTimeoutInMs() int64

func (*ConfigProto) GetPlacementPeriod

func (x *ConfigProto) GetPlacementPeriod() int32

func (*ConfigProto) GetRpcOptions

func (x *ConfigProto) GetRpcOptions() *for_core_protos_go_proto.RPCOptions

func (*ConfigProto) GetSessionInterOpThreadPool

func (x *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto

func (*ConfigProto) GetShareClusterDevicesInSession

func (x *ConfigProto) GetShareClusterDevicesInSession() bool

func (*ConfigProto) GetUsePerSessionThreads

func (x *ConfigProto) GetUsePerSessionThreads() bool

func (*ConfigProto) ProtoMessage

func (*ConfigProto) ProtoMessage()

func (*ConfigProto) ProtoReflect

func (x *ConfigProto) ProtoReflect() protoreflect.Message

func (*ConfigProto) Reset

func (x *ConfigProto) Reset()

func (*ConfigProto) String

func (x *ConfigProto) String() string

type ConfigProto_Experimental

type ConfigProto_Experimental struct {

	// Task name for group resolution.
	CollectiveGroupLeader string `` /* 126-byte string literal not displayed */
	// Which executor to use, the default executor will be used
	// if it is an empty string or "DEFAULT"
	ExecutorType string `protobuf:"bytes,3,opt,name=executor_type,json=executorType,proto3" json:"executor_type,omitempty"`
	// Guidance to formatting of large RecvBuf fields for transfer.
	// Any positive value sets the max chunk size.  0 defaults to 4096.
	// Any negative value indicates no max, i.e. one chunk only.
	RecvBufMaxChunk int32 `protobuf:"varint,4,opt,name=recv_buf_max_chunk,json=recvBufMaxChunk,proto3" json:"recv_buf_max_chunk,omitempty"`
	// If true, and supported by the platform, the runtime will attempt to
	// use NUMA affinity where applicable.  One consequence will be the
	// existence of as many CPU devices as there are available NUMA nodes.
	UseNumaAffinity bool `protobuf:"varint,5,opt,name=use_numa_affinity,json=useNumaAffinity,proto3" json:"use_numa_affinity,omitempty"`
	// If true, make collective op execution order sequential and deterministic
	// for potentially concurrent collective instances.
	CollectiveDeterministicSequentialExecution bool `` /* 192-byte string literal not displayed */
	// If true, use NCCL for CollectiveOps.  This feature is highly
	// experimental.
	CollectiveNccl bool `protobuf:"varint,7,opt,name=collective_nccl,json=collectiveNccl,proto3" json:"collective_nccl,omitempty"`
	// In the following, session state means the value of a variable, elements
	// in a hash table, or any other resource, accessible by worker sessions
	// held by a TF server.
	//
	// When ClusterSpec propagation is enabled, the value of
	// isolate_session_state is ignored when deciding whether to share session
	// states in a TF server (for backwards compatibility reasons).
	// - If share_session_state_in_clusterspec_propagation is true, the session
	// states are shared.
	// - If share_session_state_in_clusterspec_propagation is false, session
	// states are isolated.
	//
	// When clusterspec propagation is not used, the value of
	// share_session_state_in_clusterspec_propagation is ignored when deciding
	// whether to share session states in a TF server.
	// - If isolate_session_state is true, session states are isolated.
	// - If isolate_session_state is false, session states are shared.
	//
	// TODO(b/129330037): Add a single API that consistently treats
	// isolate_session_state and ClusterSpec propagation.
	ShareSessionStateInClusterspecPropagation bool `` /* 193-byte string literal not displayed */
	// If using a direct session, disable spinning while waiting for work in
	// the thread pool. This may result in higher latency for completing ops,
	// but in the case where there is a lot of spinning may result in lower
	// CPU usage.
	DisableThreadSpinning bool `` /* 127-byte string literal not displayed */
	// This was promoted to a non-experimental API. Please use
	// ConfigProto.share_cluster_devices_in_session instead.
	ShareClusterDevicesInSession bool `` /* 153-byte string literal not displayed */
	// Metadata about the session.
	//
	// If set, this can be used by the runtime and the Ops for debugging,
	// monitoring, etc.
	//
	// NOTE: This is currently used and propagated only by the direct session
	// and EagerContext.
	SessionMetadata *SessionMetadata `protobuf:"bytes,11,opt,name=session_metadata,json=sessionMetadata,proto3" json:"session_metadata,omitempty"`
	// If true, the session may treat the graph as being static for optimization
	// purposes.
	//
	// If this option is set to true when a session is created, the full
	// GraphDef must be passed in a single call to Session::Create(), and
	// Session::Extend() may not be supported.
	OptimizeForStaticGraph bool `` /* 133-byte string literal not displayed */
	// Whether to enable the MLIR-based TF->XLA bridge. This is only honored
	// when set to true; the default value (false) is ignored. Use
	// mlir_bridge_rollout for finer control.
	//
	// If this option is set to true when a session is created, MLIR is used to
	// perform the set of graph transformations to put the graph in a form that
	// can be executed with delegation of some computations to an accelerator.
	// This builds on the model of XLA where a subset of the graph is
	// encapsulated and attached to a "compile" operation, whose result is fed
	// to an "execute" operation. The kernel for these operations is responsible
	// to lower the encapsulated graph to a particular device.
	EnableMlirBridge bool `protobuf:"varint,13,opt,name=enable_mlir_bridge,json=enableMlirBridge,proto3" json:"enable_mlir_bridge,omitempty"`
	// Whether to enable the MLIR-based TF->XLA bridge.
	MlirBridgeRollout ConfigProto_Experimental_MlirBridgeRollout `` /* 175-byte string literal not displayed */
	// Whether to enable the MLIR-based Graph optimizations.
	//
	// This will become a part of standard Tensorflow graph optimization
	// pipeline, currently this is only used for gradual migration and testing
	// new passes that are replacing existing optimizations in Grappler.
	EnableMlirGraphOptimization bool `` /* 148-byte string literal not displayed */
	// If true, the session will not store an additional copy of the graph for
	// each subgraph.
	//
	// If this option is set to true when a session is created, the
	// `RunOptions.output_partition_graphs` options must not be set.
	DisableOutputPartitionGraphs bool `` /* 151-byte string literal not displayed */
	// Minimum number of batches run through the XLA graph before XLA fusion
	// autotuner is enabled. Default value of zero disables the autotuner.
	//
	// The XLA fusion autotuner can improve performance by executing a heuristic
	// search on the compiler parameters.
	XlaFusionAutotunerThresh int64 `` /* 139-byte string literal not displayed */
	// Whether runtime execution uses TFRT.
	UseTfrt bool `protobuf:"varint,18,opt,name=use_tfrt,json=useTfrt,proto3" json:"use_tfrt,omitempty"`
	// If true, use Pathways with TFRT API for multi host support.
	EnableMultiHost bool `protobuf:"varint,27,opt,name=enable_multi_host,json=enableMultiHost,proto3" json:"enable_multi_host,omitempty"`
	// Port for the Pathways server. Ignored if enable_multi_host=false.
	BackendServerPort int32 `protobuf:"varint,28,opt,name=backend_server_port,json=backendServerPort,proto3" json:"backend_server_port,omitempty"`
	// If true, TFRT will use TPU specific compiler passes and perform TPU
	// specific initialization.
	TargetTpu bool `protobuf:"varint,29,opt,name=target_tpu,json=targetTpu,proto3" json:"target_tpu,omitempty"`
	// If true, TFRT will use GPU specific compiler passes and perform GPU
	// specific initialization.
	TargetGpu bool `protobuf:"varint,30,opt,name=target_gpu,json=targetGpu,proto3" json:"target_gpu,omitempty"`
	// The threshold for merging small streams in TFRT. Streams with cost
	// smaller than the threshold will be merged. Setting it to 1 disables all
	// merging.
	StreamMergeThreshold int32 `protobuf:"varint,31,opt,name=stream_merge_threshold,json=streamMergeThreshold,proto3" json:"stream_merge_threshold,omitempty"`
	// Whether functional control flow op lowering should be disabled. This is
	// useful when executing within a portable runtime where control flow op
	// kernels may not be loaded due to selective registration.
	DisableFunctionalOpsLowering bool `` /* 151-byte string literal not displayed */
	// Provides a hint to XLA auto clustering to prefer forming a single large
	// cluster that encompasses most of the graph.
	XlaPreferSingleGraphCluster bool `` /* 150-byte string literal not displayed */
	// Distributed coordination service configurations.
	CoordinationConfig *CoordinationServiceConfig `protobuf:"bytes,23,opt,name=coordination_config,json=coordinationConfig,proto3" json:"coordination_config,omitempty"`
	// If true, the session will treat the graph as being non-static for
	// optimization purposes.
	//
	// If this option is set to true when a session is created, the full
	// GraphDef will be retained to enable calls to Session::Extend().
	// Calling Extend() without setting this flag will result in errors.
	//
	// This option is meant to replace `optimize_for_static_graph`, with the
	// opposite meaning.
	DisableOptimizeForStaticGraph bool `` /* 156-byte string literal not displayed */
	// Whether eager remote execution will stream all the function calls or
	// allow them to happen in parallel. When true, streaming execution is
	// disabled, and parallel execution is allowed.
	DisableEagerExecutorStreamingEnqueue bool `` /* 177-byte string literal not displayed */
	// contains filtered or unexported fields
}

Everything inside Experimental is subject to change and is not covered by the API stability guarantees in https://www.tensorflow.org/guide/version_compat.
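
The sketch below is illustrative only and is not part of the generated documentation: it populates a few of the experimental fields documented above, assuming this generated package is imported under the alias pb (use the import path shown at the top of this page).

// Illustrative sketch, not an official example: populate a few experimental
// session options. pb is assumed to alias this generated package.
func experimentalOptions() *pb.ConfigProto_Experimental {
	return &pb.ConfigProto_Experimental{
		// Enable the MLIR bridge for all graphs in this session.
		MlirBridgeRollout: pb.ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_ENABLED,
		// Execute on the TFRT runtime.
		UseTfrt: true,
		// Valid only if RunOptions.output_partition_graphs is never set.
		DisableOutputPartitionGraphs: true,
	}
}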

func (*ConfigProto_Experimental) Descriptor deprecated

func (*ConfigProto_Experimental) Descriptor() ([]byte, []int)

Deprecated: Use ConfigProto_Experimental.ProtoReflect.Descriptor instead.

func (*ConfigProto_Experimental) GetBackendServerPort added in v0.8.0

func (x *ConfigProto_Experimental) GetBackendServerPort() int32

func (*ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution

func (x *ConfigProto_Experimental) GetCollectiveDeterministicSequentialExecution() bool

func (*ConfigProto_Experimental) GetCollectiveGroupLeader

func (x *ConfigProto_Experimental) GetCollectiveGroupLeader() string

func (*ConfigProto_Experimental) GetCollectiveNccl

func (x *ConfigProto_Experimental) GetCollectiveNccl() bool

func (*ConfigProto_Experimental) GetCoordinationConfig

func (x *ConfigProto_Experimental) GetCoordinationConfig() *CoordinationServiceConfig

func (*ConfigProto_Experimental) GetDisableEagerExecutorStreamingEnqueue added in v0.7.0

func (x *ConfigProto_Experimental) GetDisableEagerExecutorStreamingEnqueue() bool

func (*ConfigProto_Experimental) GetDisableFunctionalOpsLowering

func (x *ConfigProto_Experimental) GetDisableFunctionalOpsLowering() bool

func (*ConfigProto_Experimental) GetDisableOptimizeForStaticGraph added in v0.4.0

func (x *ConfigProto_Experimental) GetDisableOptimizeForStaticGraph() bool

func (*ConfigProto_Experimental) GetDisableOutputPartitionGraphs

func (x *ConfigProto_Experimental) GetDisableOutputPartitionGraphs() bool

func (*ConfigProto_Experimental) GetDisableThreadSpinning

func (x *ConfigProto_Experimental) GetDisableThreadSpinning() bool

func (*ConfigProto_Experimental) GetEnableMlirBridge

func (x *ConfigProto_Experimental) GetEnableMlirBridge() bool

func (*ConfigProto_Experimental) GetEnableMlirGraphOptimization

func (x *ConfigProto_Experimental) GetEnableMlirGraphOptimization() bool

func (*ConfigProto_Experimental) GetEnableMultiHost added in v0.8.0

func (x *ConfigProto_Experimental) GetEnableMultiHost() bool

func (*ConfigProto_Experimental) GetExecutorType

func (x *ConfigProto_Experimental) GetExecutorType() string

func (*ConfigProto_Experimental) GetMlirBridgeRollout

func (*ConfigProto_Experimental) GetOptimizeForStaticGraph

func (x *ConfigProto_Experimental) GetOptimizeForStaticGraph() bool

func (*ConfigProto_Experimental) GetRecvBufMaxChunk

func (x *ConfigProto_Experimental) GetRecvBufMaxChunk() int32

func (*ConfigProto_Experimental) GetSessionMetadata

func (x *ConfigProto_Experimental) GetSessionMetadata() *SessionMetadata

func (*ConfigProto_Experimental) GetShareClusterDevicesInSession

func (x *ConfigProto_Experimental) GetShareClusterDevicesInSession() bool

func (*ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation

func (x *ConfigProto_Experimental) GetShareSessionStateInClusterspecPropagation() bool

func (*ConfigProto_Experimental) GetStreamMergeThreshold added in v0.8.0

func (x *ConfigProto_Experimental) GetStreamMergeThreshold() int32

func (*ConfigProto_Experimental) GetTargetGpu added in v0.8.0

func (x *ConfigProto_Experimental) GetTargetGpu() bool

func (*ConfigProto_Experimental) GetTargetTpu added in v0.8.0

func (x *ConfigProto_Experimental) GetTargetTpu() bool

func (*ConfigProto_Experimental) GetUseNumaAffinity

func (x *ConfigProto_Experimental) GetUseNumaAffinity() bool

func (*ConfigProto_Experimental) GetUseTfrt

func (x *ConfigProto_Experimental) GetUseTfrt() bool

func (*ConfigProto_Experimental) GetXlaFusionAutotunerThresh

func (x *ConfigProto_Experimental) GetXlaFusionAutotunerThresh() int64

func (*ConfigProto_Experimental) GetXlaPreferSingleGraphCluster

func (x *ConfigProto_Experimental) GetXlaPreferSingleGraphCluster() bool

func (*ConfigProto_Experimental) ProtoMessage

func (*ConfigProto_Experimental) ProtoMessage()

func (*ConfigProto_Experimental) ProtoReflect

func (x *ConfigProto_Experimental) ProtoReflect() protoreflect.Message

func (*ConfigProto_Experimental) Reset

func (x *ConfigProto_Experimental) Reset()

func (*ConfigProto_Experimental) String

func (x *ConfigProto_Experimental) String() string

type ConfigProto_Experimental_MlirBridgeRollout

type ConfigProto_Experimental_MlirBridgeRollout int32

An enum that describes the state of the MLIR bridge rollout.

const (
	// If this field is left unspecified, the MLIR bridge may be selectively
	// enabled on a per graph basis.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_UNSPECIFIED ConfigProto_Experimental_MlirBridgeRollout = 0
	// Enabling the MLIR bridge enables it for all graphs in this session.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_ENABLED ConfigProto_Experimental_MlirBridgeRollout = 1
	// Disabling the MLIR bridge disables it for all graphs in this session.
	ConfigProto_Experimental_MLIR_BRIDGE_ROLLOUT_DISABLED ConfigProto_Experimental_MlirBridgeRollout = 2
)

func (ConfigProto_Experimental_MlirBridgeRollout) Descriptor

func (ConfigProto_Experimental_MlirBridgeRollout) Enum

func (ConfigProto_Experimental_MlirBridgeRollout) EnumDescriptor deprecated

func (ConfigProto_Experimental_MlirBridgeRollout) EnumDescriptor() ([]byte, []int)

Deprecated: Use ConfigProto_Experimental_MlirBridgeRollout.Descriptor instead.

func (ConfigProto_Experimental_MlirBridgeRollout) Number

func (ConfigProto_Experimental_MlirBridgeRollout) String

func (ConfigProto_Experimental_MlirBridgeRollout) Type

type ControlFlowContextDef

type ControlFlowContextDef struct {

	// Types that are assignable to Ctxt:
	//
	//	*ControlFlowContextDef_CondCtxt
	//	*ControlFlowContextDef_WhileCtxt
	Ctxt isControlFlowContextDef_Ctxt `protobuf_oneof:"ctxt"`
	// contains filtered or unexported fields
}

Container for any kind of control flow context. Any other control flow contexts that are added below should also be added here.
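
As a hedged sketch (not from the generated docs), the snippet below shows one way to set and inspect the ctxt oneof, again assuming the package is imported as pb.

// Illustrative sketch: wrap a CondContextDef in the oneof, then dispatch on the
// concrete wrapper type when reading it back.
func newCondCtxt(cond *pb.CondContextDef) *pb.ControlFlowContextDef {
	return &pb.ControlFlowContextDef{
		Ctxt: &pb.ControlFlowContextDef_CondCtxt{CondCtxt: cond},
	}
}

func describeCtxt(def *pb.ControlFlowContextDef) string {
	switch c := def.GetCtxt().(type) {
	case *pb.ControlFlowContextDef_CondCtxt:
		return "cond context: " + c.CondCtxt.String()
	case *pb.ControlFlowContextDef_WhileCtxt:
		return "while context: " + c.WhileCtxt.String()
	default:
		return "no control flow context set"
	}
}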

func (*ControlFlowContextDef) Descriptor deprecated

func (*ControlFlowContextDef) Descriptor() ([]byte, []int)

Deprecated: Use ControlFlowContextDef.ProtoReflect.Descriptor instead.

func (*ControlFlowContextDef) GetCondCtxt

func (x *ControlFlowContextDef) GetCondCtxt() *CondContextDef

func (*ControlFlowContextDef) GetCtxt

func (m *ControlFlowContextDef) GetCtxt() isControlFlowContextDef_Ctxt

func (*ControlFlowContextDef) GetWhileCtxt

func (x *ControlFlowContextDef) GetWhileCtxt() *WhileContextDef

func (*ControlFlowContextDef) ProtoMessage

func (*ControlFlowContextDef) ProtoMessage()

func (*ControlFlowContextDef) ProtoReflect

func (x *ControlFlowContextDef) ProtoReflect() protoreflect.Message

func (*ControlFlowContextDef) Reset

func (x *ControlFlowContextDef) Reset()

func (*ControlFlowContextDef) String

func (x *ControlFlowContextDef) String() string

type ControlFlowContextDef_CondCtxt

type ControlFlowContextDef_CondCtxt struct {
	CondCtxt *CondContextDef `protobuf:"bytes,1,opt,name=cond_ctxt,json=condCtxt,proto3,oneof"`
}

type ControlFlowContextDef_WhileCtxt

type ControlFlowContextDef_WhileCtxt struct {
	WhileCtxt *WhileContextDef `protobuf:"bytes,2,opt,name=while_ctxt,json=whileCtxt,proto3,oneof"`
}

type CoordinatedJob added in v0.3.0

type CoordinatedJob struct {
	Name     string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	NumTasks int32  `protobuf:"varint,2,opt,name=num_tasks,json=numTasks,proto3" json:"num_tasks,omitempty"`
	// contains filtered or unexported fields
}

Represents a job type and the number of tasks under this job. For example, ("worker", 20) implies that there will be 20 worker tasks.
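
A minimal sketch of that example, assuming the package is imported as pb:

// Illustrative only: the ("worker", 20) job described above.
func workerJob() *pb.CoordinatedJob {
	return &pb.CoordinatedJob{Name: "worker", NumTasks: 20}
}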

func (*CoordinatedJob) Descriptor deprecated added in v0.3.0

func (*CoordinatedJob) Descriptor() ([]byte, []int)

Deprecated: Use CoordinatedJob.ProtoReflect.Descriptor instead.

func (*CoordinatedJob) GetName added in v0.3.0

func (x *CoordinatedJob) GetName() string

func (*CoordinatedJob) GetNumTasks added in v0.3.0

func (x *CoordinatedJob) GetNumTasks() int32

func (*CoordinatedJob) ProtoMessage added in v0.3.0

func (*CoordinatedJob) ProtoMessage()

func (*CoordinatedJob) ProtoReflect added in v0.3.0

func (x *CoordinatedJob) ProtoReflect() protoreflect.Message

func (*CoordinatedJob) Reset added in v0.3.0

func (x *CoordinatedJob) Reset()

func (*CoordinatedJob) String added in v0.3.0

func (x *CoordinatedJob) String() string

type CoordinationServiceConfig

type CoordinationServiceConfig struct {

	// Type of coordination service implementation to enable.
	// For example, setting the service type to "standalone" starts a service
	// instance on the leader task to provide coordination services such as
	// heartbeats and a consistent key-value store.
	ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
	// Address where the coordination service instance is hosted.
	ServiceLeader string `protobuf:"bytes,2,opt,name=service_leader,json=serviceLeader,proto3" json:"service_leader,omitempty"`
	// Whether to enable the health check mechanism.
	EnableHealthCheck bool `protobuf:"varint,3,opt,name=enable_health_check,json=enableHealthCheck,proto3" json:"enable_health_check,omitempty"`
	// Maximum wait time for all members in the cluster to be registered.
	ClusterRegisterTimeoutInMs int64 `` /* 146-byte string literal not displayed */
	// Heartbeat timeout: if a task does not record a heartbeat within this time
	// window, it will be considered disconnected.
	// Note: This is also used as a grace period to accept any heartbeats after
	// the agent has disconnected, to account for the lag time between the service
	// recording the state change and the agent stopping heartbeats.
	HeartbeatTimeoutInMs int64             `` /* 126-byte string literal not displayed */
	CoordinatedJobList   []*CoordinatedJob `protobuf:"bytes,10,rep,name=coordinated_job_list,json=coordinatedJobList,proto3" json:"coordinated_job_list,omitempty"`
	// Denotes how long to wait for all coordination agents to reach the barriers
	// (after the first shutdown request) before disconnecting together. If
	// set to 0, no barrier is imposed upon shutdown and each worker can
	// disconnect individually.
	ShutdownBarrierTimeoutInMs int64 `` /* 146-byte string literal not displayed */
	// If set, agents do not make an explicit Shutdown() call. The service will
	// only find out about the disconnected agent via stale heartbeats. Used for
	// testing.
	AgentDestructionWithoutShutdown bool `` /* 159-byte string literal not displayed */
	// The list of jobs which are recoverable. If a task in one of these jobs
	// fails, it will not propagate errors to other tasks.
	// If empty, no jobs will be recoverable and every task failure will cause
	// error propagation to other tasks.
	RecoverableJobs []string `protobuf:"bytes,9,rep,name=recoverable_jobs,json=recoverableJobs,proto3" json:"recoverable_jobs,omitempty"`
	// If a task restarts with a new incarnation, we may allow it to reconnect
	// silently. This is useful when we know that a task can immediately resume
	// work upon re-connecting to the service.
	AllowNewIncarnationToReconnect bool `` /* 159-byte string literal not displayed */
	// Disables the coordination service.
	// Some libraries enable the coordination service by default even if the user
	// did not specify any config. This field allows users to explicitly disable
	// the coordination service in all situations.
	ForceDisable bool `protobuf:"varint,12,opt,name=force_disable,json=forceDisable,proto3" json:"force_disable,omitempty"`
	// contains filtered or unexported fields
}

Coordination service configuration parameters. The system picks appropriate values for fields that are not set.
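
The following hedged sketch fills in a few of the fields documented above with illustrative values (pb aliases this package); anything left unset is chosen by the system, as noted.

// Illustrative sketch of a coordination service configuration. The leader
// address format and timeout values below are assumptions, not requirements.
func coordinationConfig() *pb.CoordinationServiceConfig {
	return &pb.CoordinationServiceConfig{
		ServiceType:          "standalone",
		ServiceLeader:        "/job:worker/replica:0/task:0",
		EnableHealthCheck:    true,
		HeartbeatTimeoutInMs: 10000,
		CoordinatedJobList: []*pb.CoordinatedJob{
			{Name: "worker", NumTasks: 20},
		},
	}
}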

func (*CoordinationServiceConfig) Descriptor deprecated

func (*CoordinationServiceConfig) Descriptor() ([]byte, []int)

Deprecated: Use CoordinationServiceConfig.ProtoReflect.Descriptor instead.

func (*CoordinationServiceConfig) GetAgentDestructionWithoutShutdown

func (x *CoordinationServiceConfig) GetAgentDestructionWithoutShutdown() bool

func (*CoordinationServiceConfig) GetAllowNewIncarnationToReconnect added in v0.5.0

func (x *CoordinationServiceConfig) GetAllowNewIncarnationToReconnect() bool

func (*CoordinationServiceConfig) GetClusterRegisterTimeoutInMs

func (x *CoordinationServiceConfig) GetClusterRegisterTimeoutInMs() int64

func (*CoordinationServiceConfig) GetCoordinatedJobList added in v0.3.0

func (x *CoordinationServiceConfig) GetCoordinatedJobList() []*CoordinatedJob

func (*CoordinationServiceConfig) GetEnableHealthCheck

func (x *CoordinationServiceConfig) GetEnableHealthCheck() bool

func (*CoordinationServiceConfig) GetForceDisable added in v0.7.0

func (x *CoordinationServiceConfig) GetForceDisable() bool

func (*CoordinationServiceConfig) GetHeartbeatTimeoutInMs

func (x *CoordinationServiceConfig) GetHeartbeatTimeoutInMs() int64

func (*CoordinationServiceConfig) GetRecoverableJobs added in v0.3.0

func (x *CoordinationServiceConfig) GetRecoverableJobs() []string

func (*CoordinationServiceConfig) GetServiceLeader

func (x *CoordinationServiceConfig) GetServiceLeader() string

func (*CoordinationServiceConfig) GetServiceType

func (x *CoordinationServiceConfig) GetServiceType() string

func (*CoordinationServiceConfig) GetShutdownBarrierTimeoutInMs

func (x *CoordinationServiceConfig) GetShutdownBarrierTimeoutInMs() int64

func (*CoordinationServiceConfig) ProtoMessage

func (*CoordinationServiceConfig) ProtoMessage()

func (*CoordinationServiceConfig) ProtoReflect

func (*CoordinationServiceConfig) Reset

func (x *CoordinationServiceConfig) Reset()

func (*CoordinationServiceConfig) String

func (x *CoordinationServiceConfig) String() string

type CrossTrainerCacheOptions added in v0.2.0

type CrossTrainerCacheOptions struct {
	TrainerId string `protobuf:"bytes,1,opt,name=trainer_id,json=trainerId,proto3" json:"trainer_id,omitempty"`
	// contains filtered or unexported fields
}

func (*CrossTrainerCacheOptions) Descriptor deprecated added in v0.2.0

func (*CrossTrainerCacheOptions) Descriptor() ([]byte, []int)

Deprecated: Use CrossTrainerCacheOptions.ProtoReflect.Descriptor instead.

func (*CrossTrainerCacheOptions) GetTrainerId added in v0.2.0

func (x *CrossTrainerCacheOptions) GetTrainerId() string

func (*CrossTrainerCacheOptions) ProtoMessage added in v0.2.0

func (*CrossTrainerCacheOptions) ProtoMessage()

func (*CrossTrainerCacheOptions) ProtoReflect added in v0.2.0

func (x *CrossTrainerCacheOptions) ProtoReflect() protoreflect.Message

func (*CrossTrainerCacheOptions) Reset added in v0.2.0

func (x *CrossTrainerCacheOptions) Reset()

func (*CrossTrainerCacheOptions) String added in v0.2.0

func (x *CrossTrainerCacheOptions) String() string

type DataServiceConfig

type DataServiceConfig struct {
	DeploymentMode DeploymentMode `` /* 140-byte string literal not displayed */
	// contains filtered or unexported fields
}

Data service config available to the client through GetDataServiceConfig RPC. Next tag: 2

func (*DataServiceConfig) Descriptor deprecated

func (*DataServiceConfig) Descriptor() ([]byte, []int)

Deprecated: Use DataServiceConfig.ProtoReflect.Descriptor instead.

func (*DataServiceConfig) GetDeploymentMode

func (x *DataServiceConfig) GetDeploymentMode() DeploymentMode

func (*DataServiceConfig) ProtoMessage

func (*DataServiceConfig) ProtoMessage()

func (*DataServiceConfig) ProtoReflect

func (x *DataServiceConfig) ProtoReflect() protoreflect.Message

func (*DataServiceConfig) Reset

func (x *DataServiceConfig) Reset()

func (*DataServiceConfig) String

func (x *DataServiceConfig) String() string

type DataServiceMetadata

type DataServiceMetadata struct {

	// Types that are assignable to OptionalElementSpec:
	//
	//	*DataServiceMetadata_ElementSpec
	OptionalElementSpec isDataServiceMetadata_OptionalElementSpec `protobuf_oneof:"optional_element_spec"`
	Compression         DataServiceMetadata_Compression           `` /* 129-byte string literal not displayed */
	// Cardinality of the dataset.
	Cardinality int64 `protobuf:"varint,3,opt,name=cardinality,proto3" json:"cardinality,omitempty"`
	// contains filtered or unexported fields
}

Metadata related to tf.data service datasets. Next tag: 4
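
A hedged sketch of constructing this metadata (pb aliases this package); the concrete values are illustrative.

// Illustrative sketch: metadata carrying a serialized element spec in the
// optional_element_spec oneof, with Snappy compression and a known cardinality.
func serviceMetadata(serializedSpec []byte) *pb.DataServiceMetadata {
	return &pb.DataServiceMetadata{
		OptionalElementSpec: &pb.DataServiceMetadata_ElementSpec{ElementSpec: serializedSpec},
		Compression:         pb.DataServiceMetadata_COMPRESSION_SNAPPY,
		Cardinality:         1000, // number of elements, if known
	}
}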

func (*DataServiceMetadata) Descriptor deprecated

func (*DataServiceMetadata) Descriptor() ([]byte, []int)

Deprecated: Use DataServiceMetadata.ProtoReflect.Descriptor instead.

func (*DataServiceMetadata) GetCardinality

func (x *DataServiceMetadata) GetCardinality() int64

func (*DataServiceMetadata) GetCompression

func (*DataServiceMetadata) GetElementSpec

func (x *DataServiceMetadata) GetElementSpec() []byte

func (*DataServiceMetadata) GetOptionalElementSpec

func (m *DataServiceMetadata) GetOptionalElementSpec() isDataServiceMetadata_OptionalElementSpec

func (*DataServiceMetadata) ProtoMessage

func (*DataServiceMetadata) ProtoMessage()

func (*DataServiceMetadata) ProtoReflect

func (x *DataServiceMetadata) ProtoReflect() protoreflect.Message

func (*DataServiceMetadata) Reset

func (x *DataServiceMetadata) Reset()

func (*DataServiceMetadata) String

func (x *DataServiceMetadata) String() string

type DataServiceMetadata_Compression

type DataServiceMetadata_Compression int32
const (
	DataServiceMetadata_COMPRESSION_UNSPECIFIED DataServiceMetadata_Compression = 0
	// No compression.
	DataServiceMetadata_COMPRESSION_OFF DataServiceMetadata_Compression = 1
	// Snappy compression as defined in tensorflow/core/platform/snappy.h.
	DataServiceMetadata_COMPRESSION_SNAPPY DataServiceMetadata_Compression = 2
)

func (DataServiceMetadata_Compression) Descriptor

func (DataServiceMetadata_Compression) Enum

func (DataServiceMetadata_Compression) EnumDescriptor deprecated

func (DataServiceMetadata_Compression) EnumDescriptor() ([]byte, []int)

Deprecated: Use DataServiceMetadata_Compression.Descriptor instead.

func (DataServiceMetadata_Compression) Number

func (DataServiceMetadata_Compression) String

func (DataServiceMetadata_Compression) Type

type DataServiceMetadata_ElementSpec

type DataServiceMetadata_ElementSpec struct {
	// Serialized element spec.
	ElementSpec []byte `protobuf:"bytes,1,opt,name=element_spec,json=elementSpec,proto3,oneof"`
}

type DebugEvent

type DebugEvent struct {

	// Timestamp in seconds (with microsecond precision).
	WallTime float64 `protobuf:"fixed64,1,opt,name=wall_time,json=wallTime,proto3" json:"wall_time,omitempty"`
	// Step of training (if available).
	Step int64 `protobuf:"varint,2,opt,name=step,proto3" json:"step,omitempty"`
	// Types that are assignable to What:
	//
	//	*DebugEvent_DebugMetadata
	//	*DebugEvent_SourceFile
	//	*DebugEvent_StackFrameWithId
	//	*DebugEvent_GraphOpCreation
	//	*DebugEvent_DebuggedGraph
	//	*DebugEvent_Execution
	//	*DebugEvent_GraphExecutionTrace
	//	*DebugEvent_GraphId
	//	*DebugEvent_DebuggedDevice
	What isDebugEvent_What `protobuf_oneof:"what"`
	// contains filtered or unexported fields
}

An Event related to the debugging of a TensorFlow program.
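
As a hedged sketch (pb aliases this package), the snippet below wraps an Execution record in the "what" oneof and dispatches on the wrapper type when reading an event back.

// Illustrative only: build a DebugEvent around an Execution and classify it.
func newExecutionEvent(wallTime float64, step int64, exec *pb.Execution) *pb.DebugEvent {
	return &pb.DebugEvent{
		WallTime: wallTime,
		Step:     step,
		What:     &pb.DebugEvent_Execution{Execution: exec},
	}
}

func describeEvent(ev *pb.DebugEvent) string {
	switch ev.GetWhat().(type) {
	case *pb.DebugEvent_Execution:
		return "execution of " + ev.GetExecution().GetOpType()
	case *pb.DebugEvent_GraphExecutionTrace:
		return "graph execution trace"
	default:
		return "other debug event"
	}
}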

func (*DebugEvent) Descriptor deprecated

func (*DebugEvent) Descriptor() ([]byte, []int)

Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead.

func (*DebugEvent) GetDebugMetadata

func (x *DebugEvent) GetDebugMetadata() *DebugMetadata

func (*DebugEvent) GetDebuggedDevice

func (x *DebugEvent) GetDebuggedDevice() *DebuggedDevice

func (*DebugEvent) GetDebuggedGraph

func (x *DebugEvent) GetDebuggedGraph() *DebuggedGraph

func (*DebugEvent) GetExecution

func (x *DebugEvent) GetExecution() *Execution

func (*DebugEvent) GetGraphExecutionTrace

func (x *DebugEvent) GetGraphExecutionTrace() *GraphExecutionTrace

func (*DebugEvent) GetGraphId

func (x *DebugEvent) GetGraphId() string

func (*DebugEvent) GetGraphOpCreation

func (x *DebugEvent) GetGraphOpCreation() *GraphOpCreation

func (*DebugEvent) GetSourceFile

func (x *DebugEvent) GetSourceFile() *SourceFile

func (*DebugEvent) GetStackFrameWithId

func (x *DebugEvent) GetStackFrameWithId() *StackFrameWithId

func (*DebugEvent) GetStep

func (x *DebugEvent) GetStep() int64

func (*DebugEvent) GetWallTime

func (x *DebugEvent) GetWallTime() float64

func (*DebugEvent) GetWhat

func (m *DebugEvent) GetWhat() isDebugEvent_What

func (*DebugEvent) ProtoMessage

func (*DebugEvent) ProtoMessage()

func (*DebugEvent) ProtoReflect

func (x *DebugEvent) ProtoReflect() protoreflect.Message

func (*DebugEvent) Reset

func (x *DebugEvent) Reset()

func (*DebugEvent) String

func (x *DebugEvent) String() string

type DebugEvent_DebugMetadata

type DebugEvent_DebugMetadata struct {
	// Metadata related to this debugging data.
	DebugMetadata *DebugMetadata `protobuf:"bytes,3,opt,name=debug_metadata,json=debugMetadata,proto3,oneof"`
}

type DebugEvent_DebuggedDevice

type DebugEvent_DebuggedDevice struct {
	// A device on which debugger-instrumented ops and/or tensors reside.
	DebuggedDevice *DebuggedDevice `protobuf:"bytes,12,opt,name=debugged_device,json=debuggedDevice,proto3,oneof"`
}

type DebugEvent_DebuggedGraph

type DebugEvent_DebuggedGraph struct {
	// Information about a debugged graph.
	DebuggedGraph *DebuggedGraph `protobuf:"bytes,8,opt,name=debugged_graph,json=debuggedGraph,proto3,oneof"`
}

type DebugEvent_Execution

type DebugEvent_Execution struct {
	// Execution of an op or a Graph (e.g., a tf.function).
	Execution *Execution `protobuf:"bytes,9,opt,name=execution,proto3,oneof"`
}

type DebugEvent_GraphExecutionTrace

type DebugEvent_GraphExecutionTrace struct {
	// A graph execution trace: Contains information about the intermediate
	// tensors computed during the graph execution.
	GraphExecutionTrace *GraphExecutionTrace `protobuf:"bytes,10,opt,name=graph_execution_trace,json=graphExecutionTrace,proto3,oneof"`
}

type DebugEvent_GraphId

type DebugEvent_GraphId struct {
	// The ID of the graph (i.e., FuncGraph) executed here: applicable only
	// to the execution of a FuncGraph.
	GraphId string `protobuf:"bytes,11,opt,name=graph_id,json=graphId,proto3,oneof"`
}

type DebugEvent_GraphOpCreation

type DebugEvent_GraphOpCreation struct {
	// The creation of an op within a graph (e.g., a FuncGraph compiled from
	// a Python function).
	GraphOpCreation *GraphOpCreation `protobuf:"bytes,7,opt,name=graph_op_creation,json=graphOpCreation,proto3,oneof"`
}

type DebugEvent_SourceFile

type DebugEvent_SourceFile struct {
	// The content of a source file.
	SourceFile *SourceFile `protobuf:"bytes,4,opt,name=source_file,json=sourceFile,proto3,oneof"`
}

type DebugEvent_StackFrameWithId

type DebugEvent_StackFrameWithId struct {
	// A stack frame (filename, line number and column number, function name and
	// code string) with ID.
	StackFrameWithId *StackFrameWithId `protobuf:"bytes,6,opt,name=stack_frame_with_id,json=stackFrameWithId,proto3,oneof"`
}

type DebugMetadata

type DebugMetadata struct {

	// Version of TensorFlow.
	TensorflowVersion string `protobuf:"bytes,1,opt,name=tensorflow_version,json=tensorflowVersion,proto3" json:"tensorflow_version,omitempty"`
	// Version of the DebugEvent file format.
	// Has a format of "debug.Event:<number>", e.g., "debug.Event:1".
	FileVersion string `protobuf:"bytes,2,opt,name=file_version,json=fileVersion,proto3" json:"file_version,omitempty"`
	// A unique ID for the current run of tfdbg.
	// A run of tfdbg is defined as a TensorFlow job instrumented by tfdbg.
	// Multiple hosts in a distributed TensorFlow job instrumented by tfdbg
	// have the same ID.
	TfdbgRunId string `protobuf:"bytes,3,opt,name=tfdbg_run_id,json=tfdbgRunId,proto3" json:"tfdbg_run_id,omitempty"`
	// contains filtered or unexported fields
}

Metadata about the debugger and the debugged TensorFlow program.
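
A small hedged sketch (pb aliases this package); the version strings and run ID are placeholders, apart from the documented "debug.Event:<number>" format.

// Illustrative only: metadata for a tfdbg run with placeholder values.
func debugMetadata() *pb.DebugMetadata {
	return &pb.DebugMetadata{
		TensorflowVersion: "2.x.y",
		FileVersion:       "debug.Event:1", // format documented above
		TfdbgRunId:        "example-run-id",
	}
}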

func (*DebugMetadata) Descriptor deprecated

func (*DebugMetadata) Descriptor() ([]byte, []int)

Deprecated: Use DebugMetadata.ProtoReflect.Descriptor instead.

func (*DebugMetadata) GetFileVersion

func (x *DebugMetadata) GetFileVersion() string

func (*DebugMetadata) GetTensorflowVersion

func (x *DebugMetadata) GetTensorflowVersion() string

func (*DebugMetadata) GetTfdbgRunId

func (x *DebugMetadata) GetTfdbgRunId() string

func (*DebugMetadata) ProtoMessage

func (*DebugMetadata) ProtoMessage()

func (*DebugMetadata) ProtoReflect

func (x *DebugMetadata) ProtoReflect() protoreflect.Message

func (*DebugMetadata) Reset

func (x *DebugMetadata) Reset()

func (*DebugMetadata) String

func (x *DebugMetadata) String() string

type DebugOptions

type DebugOptions struct {

	// Debugging options
	DebugTensorWatchOpts []*DebugTensorWatch `protobuf:"bytes,4,rep,name=debug_tensor_watch_opts,json=debugTensorWatchOpts,proto3" json:"debug_tensor_watch_opts,omitempty"`
	// Caller-specified global step count.
	// Note that this is distinct from the session run count and the executor
	// step count.
	GlobalStep int64 `protobuf:"varint,10,opt,name=global_step,json=globalStep,proto3" json:"global_step,omitempty"`
	// Whether the total disk usage of tfdbg is to be reset to zero
	// in this Session.run call. This is used by wrappers and hooks
	// such as the local CLI ones to indicate that the dumped tensors
	// are cleaned up from the disk after each Session.run.
	ResetDiskByteUsage bool `protobuf:"varint,11,opt,name=reset_disk_byte_usage,json=resetDiskByteUsage,proto3" json:"reset_disk_byte_usage,omitempty"`
	// contains filtered or unexported fields
}

Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).

func (*DebugOptions) Descriptor deprecated

func (*DebugOptions) Descriptor() ([]byte, []int)

Deprecated: Use DebugOptions.ProtoReflect.Descriptor instead.

func (*DebugOptions) GetDebugTensorWatchOpts

func (x *DebugOptions) GetDebugTensorWatchOpts() []*DebugTensorWatch

func (*DebugOptions) GetGlobalStep

func (x *DebugOptions) GetGlobalStep() int64

func (*DebugOptions) GetResetDiskByteUsage

func (x *DebugOptions) GetResetDiskByteUsage() bool

func (*DebugOptions) ProtoMessage

func (*DebugOptions) ProtoMessage()

func (*DebugOptions) ProtoReflect

func (x *DebugOptions) ProtoReflect() protoreflect.Message

func (*DebugOptions) Reset

func (x *DebugOptions) Reset()

func (*DebugOptions) String

func (x *DebugOptions) String() string

type DebugTensorWatch

type DebugTensorWatch struct {

	// Name of the node to watch.
	// Use "*" as a wildcard. Note that general regular expressions are
	// currently not supported.
	NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"`
	// Output slot to watch.
	// The semantics of output_slot == -1 is that all outputs of the node
	// will be watched (i.e., a wildcard).
	// Other negative values of output_slot are invalid and will lead to
	// errors currently.
	OutputSlot int32 `protobuf:"varint,2,opt,name=output_slot,json=outputSlot,proto3" json:"output_slot,omitempty"`
	// Name(s) of the debugging op(s).
	// One or more probes on a tensor.
	// e.g., {"DebugIdentity", "DebugNanCount"}
	DebugOps []string `protobuf:"bytes,3,rep,name=debug_ops,json=debugOps,proto3" json:"debug_ops,omitempty"`
	// URL(s) for debug target(s).
	//
	// Supported URL formats are:
	//   - file:///foo/tfdbg_dump: Writes out Event content to file
	//     /foo/tfdbg_dump.  Assumes all directories can be created if they don't
	//     already exist.
	//   - grpc://localhost:11011: Sends an RPC request to an EventListener
	//     service running at localhost:11011 with the event.
	//   - memcbk:///event_key: Routes tensors to clients using the
	//     callback registered with the DebugCallbackRegistry for event_key.
	//
	// Each debug op listed in debug_ops will publish its output tensor (debug
	// signal) to all URLs in debug_urls.
	//
	// N.B. Session::Run() supports concurrent invocations of the same inputs
	// (feed keys), outputs and target nodes. If such concurrent invocations
	// are to be debugged, the callers of Session::Run() must use distinct
	// debug_urls to make sure that the streamed or dumped events do not overlap
	// among the invocations.
	// TODO(cais): More visible documentation of this in g3docs.
	DebugUrls []string `protobuf:"bytes,4,rep,name=debug_urls,json=debugUrls,proto3" json:"debug_urls,omitempty"`
	// Do not error out if debug op creation fails (e.g., due to dtype
	// incompatibility). Instead, just log the failure.
	TolerateDebugOpCreationFailures bool `` /* 161-byte string literal not displayed */
	// contains filtered or unexported fields
}

Option for watching a node in TensorFlow Debugger (tfdbg).
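
The hedged sketch below watches every output of a hypothetical node with two debug ops, dumping events to a local file URL (pb aliases this package; the node name and path are made up).

// Illustrative sketch: watch all outputs of a node named "dense/MatMul".
func matMulWatch() *pb.DebugTensorWatch {
	return &pb.DebugTensorWatch{
		NodeName:                        "dense/MatMul",
		OutputSlot:                      -1, // -1 is the documented wildcard for all output slots
		DebugOps:                        []string{"DebugIdentity", "DebugNanCount"},
		DebugUrls:                       []string{"file:///tmp/tfdbg_dump"},
		TolerateDebugOpCreationFailures: true,
	}
}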

func (*DebugTensorWatch) Descriptor deprecated

func (*DebugTensorWatch) Descriptor() ([]byte, []int)

Deprecated: Use DebugTensorWatch.ProtoReflect.Descriptor instead.

func (*DebugTensorWatch) GetDebugOps

func (x *DebugTensorWatch) GetDebugOps() []string

func (*DebugTensorWatch) GetDebugUrls

func (x *DebugTensorWatch) GetDebugUrls() []string

func (*DebugTensorWatch) GetNodeName

func (x *DebugTensorWatch) GetNodeName() string

func (*DebugTensorWatch) GetOutputSlot

func (x *DebugTensorWatch) GetOutputSlot() int32

func (*DebugTensorWatch) GetTolerateDebugOpCreationFailures

func (x *DebugTensorWatch) GetTolerateDebugOpCreationFailures() bool

func (*DebugTensorWatch) ProtoMessage

func (*DebugTensorWatch) ProtoMessage()

func (*DebugTensorWatch) ProtoReflect

func (x *DebugTensorWatch) ProtoReflect() protoreflect.Message

func (*DebugTensorWatch) Reset

func (x *DebugTensorWatch) Reset()

func (*DebugTensorWatch) String

func (x *DebugTensorWatch) String() string

type DebuggedDevice

type DebuggedDevice struct {

	// Name of the device.
	DeviceName string `protobuf:"bytes,1,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
	// A debugger-generated ID for the device. Guaranteed to be unique within
	// the scope of the debugged TensorFlow program, including single-host and
	// multi-host settings.
	// TODO(cais): Test the uniqueness guarantee in multi-host settings.
	DeviceId int32 `protobuf:"varint,2,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
	// contains filtered or unexported fields
}

A device on which ops and/or tensors are instrumented by the debugger.

func (*DebuggedDevice) Descriptor deprecated

func (*DebuggedDevice) Descriptor() ([]byte, []int)

Deprecated: Use DebuggedDevice.ProtoReflect.Descriptor instead.

func (*DebuggedDevice) GetDeviceId

func (x *DebuggedDevice) GetDeviceId() int32

func (*DebuggedDevice) GetDeviceName

func (x *DebuggedDevice) GetDeviceName() string

func (*DebuggedDevice) ProtoMessage

func (*DebuggedDevice) ProtoMessage()

func (*DebuggedDevice) ProtoReflect

func (x *DebuggedDevice) ProtoReflect() protoreflect.Message

func (*DebuggedDevice) Reset

func (x *DebuggedDevice) Reset()

func (*DebuggedDevice) String

func (x *DebuggedDevice) String() string

type DebuggedGraph

type DebuggedGraph struct {

	// An ID for the graph.
	// This can be used to look up graph names. Generated by the debugger.
	GraphId string `protobuf:"bytes,1,opt,name=graph_id,json=graphId,proto3" json:"graph_id,omitempty"`
	// Name of the graph (if available).
	GraphName string `protobuf:"bytes,2,opt,name=graph_name,json=graphName,proto3" json:"graph_name,omitempty"`
	// Names of the instrumented ops. This can be used to look up op name
	// based on the numeric-summary tensors (2nd column).
	InstrumentedOps []string `protobuf:"bytes,3,rep,name=instrumented_ops,json=instrumentedOps,proto3" json:"instrumented_ops,omitempty"`
	// Original (uninstrumented) GraphDef (if available).
	OriginalGraphDef []byte `protobuf:"bytes,4,opt,name=original_graph_def,json=originalGraphDef,proto3" json:"original_graph_def,omitempty"`
	// An encoded version of a GraphDef.
	// This graph may include the debugger-inserted ops.
	InstrumentedGraphDef []byte `protobuf:"bytes,5,opt,name=instrumented_graph_def,json=instrumentedGraphDef,proto3" json:"instrumented_graph_def,omitempty"`
	// ID of the immediate enclosing context (graph), if any.
	OuterContextId string `protobuf:"bytes,6,opt,name=outer_context_id,json=outerContextId,proto3" json:"outer_context_id,omitempty"`
	// contains filtered or unexported fields
}

A debugger-instrumented graph.

func (*DebuggedGraph) Descriptor deprecated

func (*DebuggedGraph) Descriptor() ([]byte, []int)

Deprecated: Use DebuggedGraph.ProtoReflect.Descriptor instead.

func (*DebuggedGraph) GetGraphId

func (x *DebuggedGraph) GetGraphId() string

func (*DebuggedGraph) GetGraphName

func (x *DebuggedGraph) GetGraphName() string

func (*DebuggedGraph) GetInstrumentedGraphDef

func (x *DebuggedGraph) GetInstrumentedGraphDef() []byte

func (*DebuggedGraph) GetInstrumentedOps

func (x *DebuggedGraph) GetInstrumentedOps() []string

func (*DebuggedGraph) GetOriginalGraphDef

func (x *DebuggedGraph) GetOriginalGraphDef() []byte

func (*DebuggedGraph) GetOuterContextId

func (x *DebuggedGraph) GetOuterContextId() string

func (*DebuggedGraph) ProtoMessage

func (*DebuggedGraph) ProtoMessage()

func (*DebuggedGraph) ProtoReflect

func (x *DebuggedGraph) ProtoReflect() protoreflect.Message

func (*DebuggedGraph) Reset

func (x *DebuggedGraph) Reset()

func (*DebuggedGraph) String

func (x *DebuggedGraph) String() string

type DebuggedSourceFile

type DebuggedSourceFile struct {

	// The host name on which a source code file is located.
	Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
	// Path to the source code file.
	FilePath string `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
	// The timestamp at which the source code file was last modified.
	LastModified int64 `protobuf:"varint,3,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"`
	// Byte size of the file.
	Bytes int64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"`
	// Line-by-line content of the source code file.
	Lines []string `protobuf:"bytes,5,rep,name=lines,proto3" json:"lines,omitempty"`
	// contains filtered or unexported fields
}

func (*DebuggedSourceFile) Descriptor deprecated

func (*DebuggedSourceFile) Descriptor() ([]byte, []int)

Deprecated: Use DebuggedSourceFile.ProtoReflect.Descriptor instead.

func (*DebuggedSourceFile) GetBytes

func (x *DebuggedSourceFile) GetBytes() int64

func (*DebuggedSourceFile) GetFilePath

func (x *DebuggedSourceFile) GetFilePath() string

func (*DebuggedSourceFile) GetHost

func (x *DebuggedSourceFile) GetHost() string

func (*DebuggedSourceFile) GetLastModified

func (x *DebuggedSourceFile) GetLastModified() int64

func (*DebuggedSourceFile) GetLines

func (x *DebuggedSourceFile) GetLines() []string

func (*DebuggedSourceFile) ProtoMessage

func (*DebuggedSourceFile) ProtoMessage()

func (*DebuggedSourceFile) ProtoReflect

func (x *DebuggedSourceFile) ProtoReflect() protoreflect.Message

func (*DebuggedSourceFile) Reset

func (x *DebuggedSourceFile) Reset()

func (*DebuggedSourceFile) String

func (x *DebuggedSourceFile) String() string

type DebuggedSourceFiles

type DebuggedSourceFiles struct {

	// A collection of source code files.
	SourceFiles []*DebuggedSourceFile `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles,proto3" json:"source_files,omitempty"`
	// contains filtered or unexported fields
}

func (*DebuggedSourceFiles) Descriptor deprecated

func (*DebuggedSourceFiles) Descriptor() ([]byte, []int)

Deprecated: Use DebuggedSourceFiles.ProtoReflect.Descriptor instead.

func (*DebuggedSourceFiles) GetSourceFiles

func (x *DebuggedSourceFiles) GetSourceFiles() []*DebuggedSourceFile

func (*DebuggedSourceFiles) ProtoMessage

func (*DebuggedSourceFiles) ProtoMessage()

func (*DebuggedSourceFiles) ProtoReflect

func (x *DebuggedSourceFiles) ProtoReflect() protoreflect.Message

func (*DebuggedSourceFiles) Reset

func (x *DebuggedSourceFiles) Reset()

func (*DebuggedSourceFiles) String

func (x *DebuggedSourceFiles) String() string

type DeploymentMode

type DeploymentMode int32

tf.data service deployment mode.

const (
	DeploymentMode_DEPLOYMENT_MODE_UNSPECIFIED DeploymentMode = 0
	// tf.data service workers colocate with TF workers.
	DeploymentMode_DEPLOYMENT_MODE_COLOCATED DeploymentMode = 1
	// tf.data service workers run in dedicated tf.data hosts.
	DeploymentMode_DEPLOYMENT_MODE_REMOTE DeploymentMode = 2
	// tf.data service workers run in colocated TF hosts and dedicated tf.data
	// hosts.
	DeploymentMode_DEPLOYMENT_MODE_HYBRID DeploymentMode = 3
)

func (DeploymentMode) Descriptor

func (DeploymentMode) Enum

func (x DeploymentMode) Enum() *DeploymentMode

func (DeploymentMode) EnumDescriptor deprecated

func (DeploymentMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use DeploymentMode.Descriptor instead.

func (DeploymentMode) Number

func (DeploymentMode) String

func (x DeploymentMode) String() string

func (DeploymentMode) Type

type DeviceProperties

type DeviceProperties struct {

	// Device type (CPU, GPU, ...)
	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	// Vendor (Intel, nvidia, ...)
	Vendor string `protobuf:"bytes,2,opt,name=vendor,proto3" json:"vendor,omitempty"`
	// Model (Haswell, K40, ...)
	Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
	// Core frequency in MHz
	Frequency int64 `protobuf:"varint,4,opt,name=frequency,proto3" json:"frequency,omitempty"`
	// Number of cores
	NumCores int64 `protobuf:"varint,5,opt,name=num_cores,json=numCores,proto3" json:"num_cores,omitempty"`
	// Version of the tools and libraries used with this device (e.g. gcc 4.9,
	// cudnn 5.1)
	Environment map[string]string `` /* 163-byte string literal not displayed */
	// Number of registers per core.
	NumRegisters int64 `protobuf:"varint,7,opt,name=num_registers,json=numRegisters,proto3" json:"num_registers,omitempty"`
	// L1 cache size in bytes
	L1CacheSize int64 `protobuf:"varint,8,opt,name=l1_cache_size,json=l1CacheSize,proto3" json:"l1_cache_size,omitempty"`
	// L2 cache size in bytes
	L2CacheSize int64 `protobuf:"varint,9,opt,name=l2_cache_size,json=l2CacheSize,proto3" json:"l2_cache_size,omitempty"`
	// L3 cache size in bytes
	L3CacheSize int64 `protobuf:"varint,10,opt,name=l3_cache_size,json=l3CacheSize,proto3" json:"l3_cache_size,omitempty"`
	// Shared memory size per multiprocessor in bytes. This field is
	// applicable to GPUs only.
	SharedMemorySizePerMultiprocessor int64 `` /* 168-byte string literal not displayed */
	// Memory size in bytes
	MemorySize int64 `protobuf:"varint,12,opt,name=memory_size,json=memorySize,proto3" json:"memory_size,omitempty"`
	// Memory bandwidth in KB/s
	Bandwidth int64 `protobuf:"varint,13,opt,name=bandwidth,proto3" json:"bandwidth,omitempty"`
	// contains filtered or unexported fields
}

func (*DeviceProperties) Descriptor deprecated

func (*DeviceProperties) Descriptor() ([]byte, []int)

Deprecated: Use DeviceProperties.ProtoReflect.Descriptor instead.

func (*DeviceProperties) GetBandwidth

func (x *DeviceProperties) GetBandwidth() int64

func (*DeviceProperties) GetEnvironment

func (x *DeviceProperties) GetEnvironment() map[string]string

func (*DeviceProperties) GetFrequency

func (x *DeviceProperties) GetFrequency() int64

func (*DeviceProperties) GetL1CacheSize

func (x *DeviceProperties) GetL1CacheSize() int64

func (*DeviceProperties) GetL2CacheSize

func (x *DeviceProperties) GetL2CacheSize() int64

func (*DeviceProperties) GetL3CacheSize

func (x *DeviceProperties) GetL3CacheSize() int64

func (*DeviceProperties) GetMemorySize

func (x *DeviceProperties) GetMemorySize() int64

func (*DeviceProperties) GetModel

func (x *DeviceProperties) GetModel() string

func (*DeviceProperties) GetNumCores

func (x *DeviceProperties) GetNumCores() int64

func (*DeviceProperties) GetNumRegisters

func (x *DeviceProperties) GetNumRegisters() int64

func (*DeviceProperties) GetSharedMemorySizePerMultiprocessor

func (x *DeviceProperties) GetSharedMemorySizePerMultiprocessor() int64

func (*DeviceProperties) GetType

func (x *DeviceProperties) GetType() string

func (*DeviceProperties) GetVendor

func (x *DeviceProperties) GetVendor() string

func (*DeviceProperties) ProtoMessage

func (*DeviceProperties) ProtoMessage()

func (*DeviceProperties) ProtoReflect

func (x *DeviceProperties) ProtoReflect() protoreflect.Message

func (*DeviceProperties) Reset

func (x *DeviceProperties) Reset()

func (*DeviceProperties) String

func (x *DeviceProperties) String() string

type DictValue

type DictValue struct {
	Fields map[string]*StructuredValue `` /* 153-byte string literal not displayed */
	// contains filtered or unexported fields
}

Represents a Python dict keyed by `str`. The comment on Unicode from Value.string_value applies analogously.

func (*DictValue) Descriptor deprecated

func (*DictValue) Descriptor() ([]byte, []int)

Deprecated: Use DictValue.ProtoReflect.Descriptor instead.

func (*DictValue) GetFields

func (x *DictValue) GetFields() map[string]*StructuredValue

func (*DictValue) ProtoMessage

func (*DictValue) ProtoMessage()

func (*DictValue) ProtoReflect

func (x *DictValue) ProtoReflect() protoreflect.Message

func (*DictValue) Reset

func (x *DictValue) Reset()

func (*DictValue) String

func (x *DictValue) String() string

type DispatcherConfig

type DispatcherConfig struct {

	// The port for the dispatcher to bind to. A value of 0 indicates that the
	// dispatcher may bind to any available port.
	Port int64 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
	// The protocol for the dispatcher to use when connecting to workers.
	Protocol string `protobuf:"bytes,2,opt,name=protocol,proto3" json:"protocol,omitempty"`
	// A work directory to use for storing dispatcher state, and for recovering
	// during restarts. The empty string indicates not to use any work directory.
	WorkDir string `protobuf:"bytes,3,opt,name=work_dir,json=workDir,proto3" json:"work_dir,omitempty"`
	// Whether to run in fault tolerant mode, where dispatcher state is saved
	// across restarts. Requires that `work_dir` is nonempty.
	FaultTolerantMode bool `protobuf:"varint,4,opt,name=fault_tolerant_mode,json=faultTolerantMode,proto3" json:"fault_tolerant_mode,omitempty"`
	// (Optional.) If the job uses auto-sharding, it needs to specify a fixed list
	// of worker addresses that will register with the dispatcher. The worker
	// addresses should be in the format "host" or "host:port", where "port" is an
	// integer, named port, or %port% to match any port.
	WorkerAddresses []string `protobuf:"bytes,7,rep,name=worker_addresses,json=workerAddresses,proto3" json:"worker_addresses,omitempty"`
	// (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
	// "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
	DeploymentMode DeploymentMode `` /* 140-byte string literal not displayed */
	// How often the dispatcher should scan through to delete old and unused
	// jobs. A value of 0 indicates that the decision should be left up to the
	// runtime.
	JobGcCheckIntervalMs int64 `` /* 128-byte string literal not displayed */
	// How long a job needs to be unused before it becomes a candidate for garbage
	// collection. A value of -1 indicates that jobs should never be garbage
	// collected. A value of 0 indicates that the decision should be left up to
	// the runtime. Note: This does not apply to dynamic sharding unless users
	// explicitly opt in by enabling `gc_dynamic_sharding_jobs` below.
	JobGcTimeoutMs int64 `protobuf:"varint,6,opt,name=job_gc_timeout_ms,json=jobGcTimeoutMs,proto3" json:"job_gc_timeout_ms,omitempty"`
	// Whether dynamically sharded jobs should be eligible for garbage collection.
	// These jobs are not garbage collected by default, since if a job is garbage
	// collected and then re-created, it will revisit all data from the start. If
	// revisiting data is acceptable and you want automatic reclamation of
	// iterator memory, set `gc_dynamic_sharding_jobs` to `true`.
	GcDynamicShardingJobs bool `` /* 130-byte string literal not displayed */
	// How long to wait before garbage-collecting a client that hasn't
	// heartbeated to the dispatcher. A value of 0 indicates that the timeout
	// should be left to the runtime.
	ClientTimeoutMs int64 `protobuf:"varint,8,opt,name=client_timeout_ms,json=clientTimeoutMs,proto3" json:"client_timeout_ms,omitempty"`
	// How long to wait for a worker to heartbeat before considering it missing.
	// A value of 0 indicates that the timeout should be left to the runtime.
	WorkerTimeoutMs int64 `protobuf:"varint,10,opt,name=worker_timeout_ms,json=workerTimeoutMs,proto3" json:"worker_timeout_ms,omitempty"`
	// The maximum number of snapshots that a worker can concurrently process at a
	// given point in time. This is a tradeoff between worker resource usage and
	// snapshot wall time. A value of 0 indicates that the decision should be left
	// up to the runtime.
	WorkerMaxConcurrentSnapshots int64 `` /* 151-byte string literal not displayed */
	// contains filtered or unexported fields
}

Configuration for a tf.data service DispatchServer. Next id: 13
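
A hedged configuration sketch (pb aliases this package); the work directory, protocol, and timeout values are illustrative choices, not recommended defaults.

// Illustrative sketch: a fault-tolerant dispatcher. fault_tolerant_mode
// requires a nonempty work_dir, as documented above.
func dispatcherConfig() *pb.DispatcherConfig {
	return &pb.DispatcherConfig{
		Port:              0, // 0 lets the dispatcher bind to any available port
		Protocol:          "grpc",
		WorkDir:           "/tmp/tfdata_dispatcher",
		FaultTolerantMode: true,
		DeploymentMode:    pb.DeploymentMode_DEPLOYMENT_MODE_REMOTE,
		JobGcTimeoutMs:    -1, // never garbage-collect unused jobs
	}
}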

func (*DispatcherConfig) Descriptor deprecated

func (*DispatcherConfig) Descriptor() ([]byte, []int)

Deprecated: Use DispatcherConfig.ProtoReflect.Descriptor instead.

func (*DispatcherConfig) GetClientTimeoutMs

func (x *DispatcherConfig) GetClientTimeoutMs() int64

func (*DispatcherConfig) GetDeploymentMode

func (x *DispatcherConfig) GetDeploymentMode() DeploymentMode

func (*DispatcherConfig) GetFaultTolerantMode

func (x *DispatcherConfig) GetFaultTolerantMode() bool

func (*DispatcherConfig) GetGcDynamicShardingJobs added in v0.5.0

func (x *DispatcherConfig) GetGcDynamicShardingJobs() bool

func (*DispatcherConfig) GetJobGcCheckIntervalMs

func (x *DispatcherConfig) GetJobGcCheckIntervalMs() int64

func (*DispatcherConfig) GetJobGcTimeoutMs

func (x *DispatcherConfig) GetJobGcTimeoutMs() int64

func (*DispatcherConfig) GetPort

func (x *DispatcherConfig) GetPort() int64

func (*DispatcherConfig) GetProtocol

func (x *DispatcherConfig) GetProtocol() string

func (*DispatcherConfig) GetWorkDir

func (x *DispatcherConfig) GetWorkDir() string

func (*DispatcherConfig) GetWorkerAddresses

func (x *DispatcherConfig) GetWorkerAddresses() []string

func (*DispatcherConfig) GetWorkerMaxConcurrentSnapshots added in v0.6.0

func (x *DispatcherConfig) GetWorkerMaxConcurrentSnapshots() int64

func (*DispatcherConfig) GetWorkerTimeoutMs added in v0.4.0

func (x *DispatcherConfig) GetWorkerTimeoutMs() int64

func (*DispatcherConfig) ProtoMessage

func (*DispatcherConfig) ProtoMessage()

func (*DispatcherConfig) ProtoReflect

func (x *DispatcherConfig) ProtoReflect() protoreflect.Message

func (*DispatcherConfig) Reset

func (x *DispatcherConfig) Reset()

func (*DispatcherConfig) String

func (x *DispatcherConfig) String() string

type DistributedSnapshotMetadata added in v0.4.0

type DistributedSnapshotMetadata struct {

	// The element spec of the snapshotted dataset.
	ElementSpec []byte `protobuf:"bytes,1,opt,name=element_spec,json=elementSpec,proto3" json:"element_spec,omitempty"`
	// Whether and how to compress the snapshot.  Supported values are defined in
	// `tsl::io::compression`.  In particular, an empty string specifies not to
	// compress.
	Compression string `protobuf:"bytes,2,opt,name=compression,proto3" json:"compression,omitempty"`
	// contains filtered or unexported fields
}

Metadata for a `tf.data.Dataset` distributed snapshot.

func (*DistributedSnapshotMetadata) Descriptor deprecated added in v0.4.0

func (*DistributedSnapshotMetadata) Descriptor() ([]byte, []int)

Deprecated: Use DistributedSnapshotMetadata.ProtoReflect.Descriptor instead.

func (*DistributedSnapshotMetadata) GetCompression added in v0.4.0

func (x *DistributedSnapshotMetadata) GetCompression() string

func (*DistributedSnapshotMetadata) GetElementSpec added in v0.4.0

func (x *DistributedSnapshotMetadata) GetElementSpec() []byte

func (*DistributedSnapshotMetadata) ProtoMessage added in v0.4.0

func (*DistributedSnapshotMetadata) ProtoMessage()

func (*DistributedSnapshotMetadata) ProtoReflect added in v0.4.0

func (*DistributedSnapshotMetadata) Reset added in v0.4.0

func (x *DistributedSnapshotMetadata) Reset()

func (*DistributedSnapshotMetadata) String added in v0.4.0

func (x *DistributedSnapshotMetadata) String() string

type ErrorSourceProto added in v0.2.0

type ErrorSourceProto struct {
	ErrorSource ErrorSourceProto_ErrorSource `` /* 154-byte string literal not displayed */
	// contains filtered or unexported fields
}

If included as a payload, this message contains information about the source where the error was raised. URI: "type.googleapis.com/tensorflow.core.platform.ErrorSourceProto"
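
A hedged sketch of consuming this payload once it has been decoded into an ErrorSourceProto (pb aliases this package):

// Illustrative only: map the error source enum to a human-readable label.
func sourceLabel(p *pb.ErrorSourceProto) string {
	switch p.GetErrorSource() {
	case pb.ErrorSourceProto_TPU_COMPILE_OP:
		return "TPU compile op"
	case pb.ErrorSourceProto_MLIR_BRIDGE_PHASE_1, pb.ErrorSourceProto_MLIR_BRIDGE_PHASE_2:
		return "MLIR bridge"
	case pb.ErrorSourceProto_EAGER_REMOTE_MGR:
		return "eager remote manager"
	default:
		return "unknown error source"
	}
}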

func (*ErrorSourceProto) Descriptor deprecated added in v0.2.0

func (*ErrorSourceProto) Descriptor() ([]byte, []int)

Deprecated: Use ErrorSourceProto.ProtoReflect.Descriptor instead.

func (*ErrorSourceProto) GetErrorSource added in v0.2.0

func (x *ErrorSourceProto) GetErrorSource() ErrorSourceProto_ErrorSource

func (*ErrorSourceProto) ProtoMessage added in v0.2.0

func (*ErrorSourceProto) ProtoMessage()

func (*ErrorSourceProto) ProtoReflect added in v0.2.0

func (x *ErrorSourceProto) ProtoReflect() protoreflect.Message

func (*ErrorSourceProto) Reset added in v0.2.0

func (x *ErrorSourceProto) Reset()

func (*ErrorSourceProto) String added in v0.2.0

func (x *ErrorSourceProto) String() string

type ErrorSourceProto_ErrorSource added in v0.2.0

type ErrorSourceProto_ErrorSource int32
const (
	ErrorSourceProto_UNKNOWN        ErrorSourceProto_ErrorSource = 0
	ErrorSourceProto_TPU_COMPILE_OP ErrorSourceProto_ErrorSource = 1
	// Old bridge.
	ErrorSourceProto_TF_XLA_BRIDGE ErrorSourceProto_ErrorSource = 2
	// TPUBridge.
	ErrorSourceProto_MLIR_BRIDGE_PHASE_1 ErrorSourceProto_ErrorSource = 3
	// LegalizeToHlo.
	ErrorSourceProto_MLIR_BRIDGE_PHASE_2 ErrorSourceProto_ErrorSource = 4
	// eager::RemoteMgr.
	ErrorSourceProto_EAGER_REMOTE_MGR ErrorSourceProto_ErrorSource = 5
)

func (ErrorSourceProto_ErrorSource) Descriptor added in v0.2.0

func (ErrorSourceProto_ErrorSource) Enum added in v0.2.0

func (ErrorSourceProto_ErrorSource) EnumDescriptor deprecated added in v0.2.0

func (ErrorSourceProto_ErrorSource) EnumDescriptor() ([]byte, []int)

Deprecated: Use ErrorSourceProto_ErrorSource.Descriptor instead.

func (ErrorSourceProto_ErrorSource) Number added in v0.2.0

func (ErrorSourceProto_ErrorSource) String added in v0.2.0

func (ErrorSourceProto_ErrorSource) Type added in v0.2.0

type Execution

type Execution struct {

	// Op type (e.g., "MatMul").
	// In the case of a Graph, this is the name of the Graph.
	OpType string `protobuf:"bytes,1,opt,name=op_type,json=opType,proto3" json:"op_type,omitempty"`
	// Number of output tensors.
	NumOutputs int32 `protobuf:"varint,2,opt,name=num_outputs,json=numOutputs,proto3" json:"num_outputs,omitempty"`
	// The graph that's executed: applicable only to the eager
	// execution of a FuncGraph.
	GraphId string `protobuf:"bytes,3,opt,name=graph_id,json=graphId,proto3" json:"graph_id,omitempty"`
	// IDs of the input tensors (if available).
	InputTensorIds []int64 `protobuf:"varint,4,rep,packed,name=input_tensor_ids,json=inputTensorIds,proto3" json:"input_tensor_ids,omitempty"`
	// IDs of the output tensors (if available).
	// If specified, must have the same length as tensor_protos.
	OutputTensorIds []int64 `protobuf:"varint,5,rep,packed,name=output_tensor_ids,json=outputTensorIds,proto3" json:"output_tensor_ids,omitempty"`
	// Type of the tensor value encapsulated in this proto.
	TensorDebugMode TensorDebugMode `` /* 141-byte string literal not displayed */
	// Output Tensor values in the type described by `tensor_value_type`.
	// The length of this should match `num_outputs`.
	TensorProtos []*tensor_go_proto.TensorProto `protobuf:"bytes,7,rep,name=tensor_protos,json=tensorProtos,proto3" json:"tensor_protos,omitempty"`
	// Stack trace of the eager execution.
	CodeLocation *CodeLocation `protobuf:"bytes,8,opt,name=code_location,json=codeLocation,proto3" json:"code_location,omitempty"`
	// Debugger-generated IDs of the devices on which the output tensors reside.
	// To look up details about the device (e.g., name), cross-reference this
	// field with the DebuggedDevice messages.
	OutputTensorDeviceIds []int32 `` /* 136-byte string literal not displayed */
	// contains filtered or unexported fields
}

Data relating to the eager execution of an op or a Graph. For an op that generates N output tensors (N >= 0), only one Execution proto will be used to describe the execution event.
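
A minimal hedged sketch (pb aliases this package); the tensor IDs are made-up debugger-assigned values.

// Illustrative only: an eager MatMul with two inputs and a single output.
func matMulExecution() *pb.Execution {
	return &pb.Execution{
		OpType:          "MatMul",
		NumOutputs:      1,
		InputTensorIds:  []int64{101, 102},
		OutputTensorIds: []int64{103},
	}
}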

func (*Execution) Descriptor deprecated

func (*Execution) Descriptor() ([]byte, []int)

Deprecated: Use Execution.ProtoReflect.Descriptor instead.

func (*Execution) GetCodeLocation

func (x *Execution) GetCodeLocation() *CodeLocation

func (*Execution) GetGraphId

func (x *Execution) GetGraphId() string

func (*Execution) GetInputTensorIds

func (x *Execution) GetInputTensorIds() []int64

func (*Execution) GetNumOutputs

func (x *Execution) GetNumOutputs() int32

func (*Execution) GetOpType

func (x *Execution) GetOpType() string

func (*Execution) GetOutputTensorDeviceIds

func (x *Execution) GetOutputTensorDeviceIds() []int32

func (*Execution) GetOutputTensorIds

func (x *Execution) GetOutputTensorIds() []int64

func (*Execution) GetTensorDebugMode

func (x *Execution) GetTensorDebugMode() TensorDebugMode

func (*Execution) GetTensorProtos

func (x *Execution) GetTensorProtos() []*tensor_go_proto.TensorProto

func (*Execution) ProtoMessage

func (*Execution) ProtoMessage()

func (*Execution) ProtoReflect

func (x *Execution) ProtoReflect() protoreflect.Message

func (*Execution) Reset

func (x *Execution) Reset()

func (*Execution) String

func (x *Execution) String() string

type FingerprintDef added in v0.2.0

type FingerprintDef struct {

	// Hash of the saved_model.pb, referred to as a "checksum".
	SavedModelChecksum uint64 `protobuf:"varint,1,opt,name=saved_model_checksum,json=savedModelChecksum,proto3" json:"saved_model_checksum,omitempty"`
	// Hash of regularized graph_def.
	GraphDefProgramHash uint64 `protobuf:"varint,2,opt,name=graph_def_program_hash,json=graphDefProgramHash,proto3" json:"graph_def_program_hash,omitempty"`
	// Hash of the regularized (sorted) SignatureDefs.
	SignatureDefHash uint64 `protobuf:"varint,3,opt,name=signature_def_hash,json=signatureDefHash,proto3" json:"signature_def_hash,omitempty"`
	// Hash of the regularized SavedObjectGraph.
	SavedObjectGraphHash uint64 `` /* 126-byte string literal not displayed */
	// Hash of the checkpoint.
	CheckpointHash uint64 `protobuf:"varint,5,opt,name=checkpoint_hash,json=checkpointHash,proto3" json:"checkpoint_hash,omitempty"`
	// Version specification of the fingerprint.
	Version *versions_go_proto.VersionDef `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` // TODO(b/290068219): add USM version when GA
	// contains filtered or unexported fields
}

Protocol buffer representing a SavedModel Fingerprint.

If there are multiple MetaGraphDefs in the SavedModel, the FingerprintDef corresponds to the first one.

func (*FingerprintDef) Descriptor deprecated added in v0.2.0

func (*FingerprintDef) Descriptor() ([]byte, []int)

Deprecated: Use FingerprintDef.ProtoReflect.Descriptor instead.

func (*FingerprintDef) GetCheckpointHash added in v0.3.0

func (x *FingerprintDef) GetCheckpointHash() uint64

func (*FingerprintDef) GetGraphDefProgramHash added in v0.3.0

func (x *FingerprintDef) GetGraphDefProgramHash() uint64

func (*FingerprintDef) GetSavedModelChecksum added in v0.4.0

func (x *FingerprintDef) GetSavedModelChecksum() uint64

func (*FingerprintDef) GetSavedObjectGraphHash added in v0.3.0

func (x *FingerprintDef) GetSavedObjectGraphHash() uint64

func (*FingerprintDef) GetSignatureDefHash added in v0.3.0

func (x *FingerprintDef) GetSignatureDefHash() uint64

func (*FingerprintDef) GetVersion added in v0.3.0

func (x *FingerprintDef) GetVersion() *versions_go_proto.VersionDef

func (*FingerprintDef) ProtoMessage added in v0.2.0

func (*FingerprintDef) ProtoMessage()

func (*FingerprintDef) ProtoReflect added in v0.2.0

func (x *FingerprintDef) ProtoReflect() protoreflect.Message

func (*FingerprintDef) Reset added in v0.2.0

func (x *FingerprintDef) Reset()

func (*FingerprintDef) String added in v0.2.0

func (x *FingerprintDef) String() string

type FunctionSpec

type FunctionSpec struct {

	// Full arg spec from inspect.getfullargspec().
	Fullargspec *StructuredValue `protobuf:"bytes,1,opt,name=fullargspec,proto3" json:"fullargspec,omitempty"`
	// Whether this represents a class method.
	IsMethod bool `protobuf:"varint,2,opt,name=is_method,json=isMethod,proto3" json:"is_method,omitempty"`
	// The input signature, if specified.
	InputSignature *StructuredValue        `protobuf:"bytes,5,opt,name=input_signature,json=inputSignature,proto3" json:"input_signature,omitempty"`
	JitCompile     FunctionSpec_JitCompile `` /* 132-byte string literal not displayed */
	// contains filtered or unexported fields
}

Represents `FunctionSpec` used in `Function`. This represents a function that has been wrapped as a TensorFlow `Function`.

func (*FunctionSpec) Descriptor deprecated

func (*FunctionSpec) Descriptor() ([]byte, []int)

Deprecated: Use FunctionSpec.ProtoReflect.Descriptor instead.

func (*FunctionSpec) GetFullargspec

func (x *FunctionSpec) GetFullargspec() *StructuredValue

func (*FunctionSpec) GetInputSignature

func (x *FunctionSpec) GetInputSignature() *StructuredValue

func (*FunctionSpec) GetIsMethod

func (x *FunctionSpec) GetIsMethod() bool

func (*FunctionSpec) GetJitCompile

func (x *FunctionSpec) GetJitCompile() FunctionSpec_JitCompile

func (*FunctionSpec) ProtoMessage

func (*FunctionSpec) ProtoMessage()

func (*FunctionSpec) ProtoReflect

func (x *FunctionSpec) ProtoReflect() protoreflect.Message

func (*FunctionSpec) Reset

func (x *FunctionSpec) Reset()

func (*FunctionSpec) String

func (x *FunctionSpec) String() string

type FunctionSpec_JitCompile

type FunctionSpec_JitCompile int32

Whether the function should be compiled by XLA.

The public interface to `tf.function` uses an optional boolean to represent three distinct states for this field. Unfortunately, proto3 removes the ability to explicitly check for the presence or absence of a field, so we instead map to an enum.

See `tf.function` for details.

const (
	FunctionSpec_DEFAULT FunctionSpec_JitCompile = 0
	FunctionSpec_ON      FunctionSpec_JitCompile = 1
	FunctionSpec_OFF     FunctionSpec_JitCompile = 2
)
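
A small helper sketch that maps this tri-state enum back to the optional boolean exposed by the Python API, making the DEFAULT/ON/OFF semantics above explicit. The import path is hypothetical.

package functionspec

import pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path

// jitCompileSetting returns nil for DEFAULT (no explicit setting), and a
// pointer to true/false for ON/OFF respectively.
func jitCompileSetting(v pb.FunctionSpec_JitCompile) *bool {
	switch v {
	case pb.FunctionSpec_ON:
		t := true
		return &t
	case pb.FunctionSpec_OFF:
		f := false
		return &f
	default: // FunctionSpec_DEFAULT
		return nil
	}
}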

func (FunctionSpec_JitCompile) Descriptor

func (FunctionSpec_JitCompile) Enum

func (FunctionSpec_JitCompile) EnumDescriptor deprecated

func (FunctionSpec_JitCompile) EnumDescriptor() ([]byte, []int)

Deprecated: Use FunctionSpec_JitCompile.Descriptor instead.

func (FunctionSpec_JitCompile) Number

func (FunctionSpec_JitCompile) String

func (x FunctionSpec_JitCompile) String() string

func (FunctionSpec_JitCompile) Type

type GPUOptions

type GPUOptions struct {

	// Fraction of the total GPU memory to allocate for each process.
	// 1 means to allocate all of the GPU memory, 0.5 means the process
	// allocates up to ~50% of the total GPU memory.
	//
	// GPU memory is pre-allocated unless the allow_growth option is enabled.
	//
	// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
	// the amount of memory available on the GPU device by using host memory as a
	// swap space. Accessing memory not available on the device will be
	// significantly slower as that would require memory transfer between the host
	// and the device. Options to reduce the memory requirement should be
	// considered before enabling this option as this may come with a negative
	// performance impact. Oversubscription using the unified memory requires
	// Pascal class or newer GPUs and it is currently only supported on the Linux
	// operating system. See
	// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
	// for the detailed requirements.
	PerProcessGpuMemoryFraction float64 `` /* 150-byte string literal not displayed */
	// If true, the allocator does not pre-allocate the entire specified
	// GPU memory region, instead starting small and growing as needed.
	AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"`
	// The type of GPU allocation strategy to use.
	//
	// Allowed values:
	// "": The empty string (default) uses a system-chosen default
	//
	//	which may change over time.
	//
	// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
	//
	//	version of dlmalloc.
	AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"`
	// Delay deletion of up to this many bytes to reduce the number of
	// interactions with gpu driver code.  If 0, the system chooses
	// a reasonable default (several MBs).
	DeferredDeletionBytes int64 `` /* 127-byte string literal not displayed */
	// A comma-separated list of GPU ids that determines the 'visible'
	// to 'virtual' mapping of GPU devices.  For example, if TensorFlow
	// can see 8 GPU devices in the process, and one wanted to map
	// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
	// then one would specify this field as "5,3".  This field is similar in
	// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
	// it applies to the visible GPU devices in the process.
	//
	// NOTE:
	//  1. The GPU driver provides the process with the visible GPUs
	//     in an order which is not guaranteed to have any correlation to
	//     the *physical* GPU id in the machine.  This field is used for
	//     remapping "visible" to "virtual", which means this operates only
	//     after the process starts.  Users are required to use vendor
	//     specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
	//     physical to visible device mapping prior to invoking TensorFlow.
	//  2. In the code, the ids in this list are also called "platform GPU id"s,
	//     and the 'virtual' ids of GPU devices (i.e. the ids in the device
	//     name "/device:GPU:<id>") are also called "TF GPU id"s. Please
	//     refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
	//     for more information.
	VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"`
	// In the event polling loop, sleep this many microseconds between
	// PollEvents calls when the queue is not empty.  If the value is not
	// set, or is set to 0, it is replaced with a non-zero default.
	PollingActiveDelayUsecs int32 `` /* 135-byte string literal not displayed */
	// This field is deprecated and ignored.
	PollingInactiveDelayMsecs int32 `` /* 141-byte string literal not displayed */
	// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
	// enabling this option forces all CPU tensors to be allocated with CUDA
	// pinned memory. Normally, TensorFlow infers which tensors should be
	// allocated as pinned memory. But in cases where the inference is
	// incomplete, this option can significantly speed up cross-device memory
	// copy performance, as long as the data fits in memory.
	// Note that this option should not be enabled by default for unknown or
	// very large models: all CUDA pinned memory is unpageable, so having too
	// much pinned memory might negatively impact overall host system
	// performance.
	ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"`
	// Everything inside experimental is subject to change and is not subject
	// to API stability guarantees in
	// https://www.tensorflow.org/guide/version_compat.
	Experimental *GPUOptions_Experimental `protobuf:"bytes,9,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// contains filtered or unexported fields
}
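
A minimal sketch of populating GPUOptions for a session configuration. The import path is hypothetical, and embedding it in ConfigProto under a field named GpuOptions is an assumption based on the usual protoc-gen-go naming of `gpu_options`.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	cfg := &pb.ConfigProto{
		GpuOptions: &pb.GPUOptions{ // field name assumed from protoc-gen-go conventions
			PerProcessGpuMemoryFraction: 0.5, // cap each process at ~50% of GPU memory
			AllowGrowth:                 true,
			VisibleDeviceList:           "0", // expose only the first visible GPU
		},
	}
	fmt.Println(prototext.Format(cfg))
}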

func (*GPUOptions) Descriptor deprecated

func (*GPUOptions) Descriptor() ([]byte, []int)

Deprecated: Use GPUOptions.ProtoReflect.Descriptor instead.

func (*GPUOptions) GetAllocatorType

func (x *GPUOptions) GetAllocatorType() string

func (*GPUOptions) GetAllowGrowth

func (x *GPUOptions) GetAllowGrowth() bool

func (*GPUOptions) GetDeferredDeletionBytes

func (x *GPUOptions) GetDeferredDeletionBytes() int64

func (*GPUOptions) GetExperimental

func (x *GPUOptions) GetExperimental() *GPUOptions_Experimental

func (*GPUOptions) GetForceGpuCompatible

func (x *GPUOptions) GetForceGpuCompatible() bool

func (*GPUOptions) GetPerProcessGpuMemoryFraction

func (x *GPUOptions) GetPerProcessGpuMemoryFraction() float64

func (*GPUOptions) GetPollingActiveDelayUsecs

func (x *GPUOptions) GetPollingActiveDelayUsecs() int32

func (*GPUOptions) GetPollingInactiveDelayMsecs

func (x *GPUOptions) GetPollingInactiveDelayMsecs() int32

func (*GPUOptions) GetVisibleDeviceList

func (x *GPUOptions) GetVisibleDeviceList() string

func (*GPUOptions) ProtoMessage

func (*GPUOptions) ProtoMessage()

func (*GPUOptions) ProtoReflect

func (x *GPUOptions) ProtoReflect() protoreflect.Message

func (*GPUOptions) Reset

func (x *GPUOptions) Reset()

func (*GPUOptions) String

func (x *GPUOptions) String() string

type GPUOptions_Experimental

type GPUOptions_Experimental struct {

	// The multi-virtual-device settings. If empty (not set), a single virtual
	// device is created on each visible GPU, according to the settings
	// in "visible_device_list" above. Otherwise, the number of elements in the
	// list must be the same as the number of visible GPUs (after
	// "visible_device_list" filtering if it is set), and the string-represented
	// device names (e.g. /device:GPU:<id>) will refer to the virtual
	// devices and have the <id> field assigned sequentially starting from 0,
	// according to the order of the virtual devices determined by
	// device_ordinal and the location in the virtual device list.
	//
	// For example,
	//
	//	visible_device_list = "1,0"
	//	virtual_devices { memory_limit: 1GB memory_limit: 2GB }
	//	virtual_devices { memory_limit: 3GB memory_limit: 4GB }
	//
	// will create 4 virtual devices as:
	//
	//	/device:GPU:0 -> visible GPU 1 with 1GB memory
	//	/device:GPU:1 -> visible GPU 1 with 2GB memory
	//	/device:GPU:2 -> visible GPU 0 with 3GB memory
	//	/device:GPU:3 -> visible GPU 0 with 4GB memory
	//
	// but
	//
	//	visible_device_list = "1,0"
	//	virtual_devices { memory_limit: 1GB memory_limit: 2GB
	//	                  device_ordinal: 10 device_ordinal: 20}
	//	virtual_devices { memory_limit: 3GB memory_limit: 4GB
	//	                  device_ordinal: 10 device_ordinal: 20}
	//
	// will create 4 virtual devices as:
	//
	//	/device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
	//	/device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
	//	/device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
	//	/device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
	//
	// NOTE:
	//  1. It's invalid to set both this and "per_process_gpu_memory_fraction"
	//     at the same time.
	//  2. Currently this setting is per-process, not per-session. Using
	//     different settings in different sessions within same process will
	//     result in undefined behavior.
	VirtualDevices []*GPUOptions_Experimental_VirtualDevices `protobuf:"bytes,1,rep,name=virtual_devices,json=virtualDevices,proto3" json:"virtual_devices,omitempty"`
	// The number of virtual devices to create on each visible GPU. The
	// available memory will be split equally among all virtual devices. If the
	// field `memory_limit_mb` in `VirtualDevices` is not empty, this field will
	// be ignored.
	NumVirtualDevicesPerGpu int32 `` /* 138-byte string literal not displayed */
	// If true, uses CUDA unified memory for memory allocations. If
	// per_process_gpu_memory_fraction option is greater than 1.0, then unified
	// memory is used regardless of the value for this field. See comments for
	// per_process_gpu_memory_fraction field for more details and requirements
	// of the unified memory. This option is useful to oversubscribe memory if
	// multiple processes are sharing a single GPU while individually using less
	// than 1.0 per process memory fraction.
	UseUnifiedMemory bool `protobuf:"varint,2,opt,name=use_unified_memory,json=useUnifiedMemory,proto3" json:"use_unified_memory,omitempty"`
	// If > 1, the number of device-to-device copy streams to create
	// for each GPUDevice.  Default value is 0, which is automatically
	// converted to 1.
	NumDevToDevCopyStreams int32 `` /* 136-byte string literal not displayed */
	// If non-empty, defines a good GPU ring order on a single worker based on
	// device interconnect.  This assumes that all workers have the same GPU
	// topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
	// This ring order is used by the RingReducer implementation of
	// CollectiveReduce, and serves as an override to automatic ring order
	// generation in OrderTaskDeviceMap() during CollectiveParam resolution.
	CollectiveRingOrder string `protobuf:"bytes,4,opt,name=collective_ring_order,json=collectiveRingOrder,proto3" json:"collective_ring_order,omitempty"`
	// If true then extra work is done by GPUDevice and GPUBFCAllocator to
	// keep track of when GPU memory is freed and when kernels actually
	// complete so that we can know when a nominally free memory chunk
	// is really not subject to pending use.
	TimestampedAllocator bool `protobuf:"varint,5,opt,name=timestamped_allocator,json=timestampedAllocator,proto3" json:"timestamped_allocator,omitempty"`
	// Parameters for GPUKernelTracker.  By default no kernel tracking is done.
	// Note that timestamped_allocator is only effective if some tracking is
	// specified.
	//
	// If kernel_tracker_max_interval = n > 0, then a tracking event
	// is inserted after every n kernels without an event.
	KernelTrackerMaxInterval int32 `` /* 138-byte string literal not displayed */
	// If kernel_tracker_max_bytes = n > 0, then a tracking event is
	// inserted after every series of kernels allocating a sum of
	// memory >= n.  If one kernel allocates b * n bytes, then one
	// event will be inserted after it, but it will count as b against
	// the pending limit.
	KernelTrackerMaxBytes int32 `` /* 129-byte string literal not displayed */
	// If kernel_tracker_max_pending > 0 then no more than this many
	// tracking events can be outstanding at a time.  An attempt to
	// launch an additional kernel will stall until an event
	// completes.
	KernelTrackerMaxPending int32 `` /* 135-byte string literal not displayed */
	// The BFC allocator can return an allocated chunk of memory up to 2x the
	// requested size. For virtual devices with tight memory constraints and
	// proportionately large allocation requests, this can lead to a significant
	// reduction in available memory. The threshold below controls when a chunk
	// should be split if its size exceeds the requested memory size. It is
	// expressed as a fraction of the total available memory for the TF device.
	// For example, setting it to 0.05 means a chunk needs to be split if its
	// size exceeds the requested memory by 5% of the total virtual device/GPU
	// memory size.
	InternalFragmentationFraction float64 `` /* 153-byte string literal not displayed */
	// When true, use CUDA cudaMallocAsync API instead of TF gpu allocator.
	UseCudaMallocAsync bool `protobuf:"varint,11,opt,name=use_cuda_malloc_async,json=useCudaMallocAsync,proto3" json:"use_cuda_malloc_async,omitempty"`
	// By default, BFCAllocator may sleep when it runs out of memory, in the
	// hopes that another thread will free up memory in the meantime.  Setting
	// this to true disables the sleep; instead we'll OOM immediately.
	DisallowRetryOnAllocationFailure bool `` /* 165-byte string literal not displayed */
	// Memory limit for "GPU host allocator", aka pinned memory allocator.  This
	// can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
	GpuHostMemLimitInMb float32 `` /* 129-byte string literal not displayed */
	// If true, then the host allocator allocates its max memory all upfront and
	// never grows.  This can be useful for latency-sensitive systems, because
	// growing the GPU host memory pool can be expensive.
	//
	// You probably only want to use this in combination with
	// gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
	// quite high.
	GpuHostMemDisallowGrowth bool `` /* 141-byte string literal not displayed */
	// Memory limit for the GPU system. This can also be set by
	// TF_DEVICE_MIN_SYS_MEMORY_IN_MB, which takes precedence over
	// gpu_system_memory_size_in_mb. With this, the user can configure the GPU
	// system memory size for better resource estimation in multi-tenancy (one
	// GPU with multiple models) use cases.
	GpuSystemMemorySizeInMb int32 `` /* 140-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*GPUOptions_Experimental) Descriptor deprecated

func (*GPUOptions_Experimental) Descriptor() ([]byte, []int)

Deprecated: Use GPUOptions_Experimental.ProtoReflect.Descriptor instead.

func (*GPUOptions_Experimental) GetCollectiveRingOrder

func (x *GPUOptions_Experimental) GetCollectiveRingOrder() string

func (*GPUOptions_Experimental) GetDisallowRetryOnAllocationFailure

func (x *GPUOptions_Experimental) GetDisallowRetryOnAllocationFailure() bool

func (*GPUOptions_Experimental) GetGpuHostMemDisallowGrowth added in v0.4.0

func (x *GPUOptions_Experimental) GetGpuHostMemDisallowGrowth() bool

func (*GPUOptions_Experimental) GetGpuHostMemLimitInMb added in v0.4.0

func (x *GPUOptions_Experimental) GetGpuHostMemLimitInMb() float32

func (*GPUOptions_Experimental) GetGpuSystemMemorySizeInMb added in v0.7.0

func (x *GPUOptions_Experimental) GetGpuSystemMemorySizeInMb() int32

func (*GPUOptions_Experimental) GetInternalFragmentationFraction

func (x *GPUOptions_Experimental) GetInternalFragmentationFraction() float64

func (*GPUOptions_Experimental) GetKernelTrackerMaxBytes

func (x *GPUOptions_Experimental) GetKernelTrackerMaxBytes() int32

func (*GPUOptions_Experimental) GetKernelTrackerMaxInterval

func (x *GPUOptions_Experimental) GetKernelTrackerMaxInterval() int32

func (*GPUOptions_Experimental) GetKernelTrackerMaxPending

func (x *GPUOptions_Experimental) GetKernelTrackerMaxPending() int32

func (*GPUOptions_Experimental) GetNumDevToDevCopyStreams

func (x *GPUOptions_Experimental) GetNumDevToDevCopyStreams() int32

func (*GPUOptions_Experimental) GetNumVirtualDevicesPerGpu added in v0.7.0

func (x *GPUOptions_Experimental) GetNumVirtualDevicesPerGpu() int32

func (*GPUOptions_Experimental) GetTimestampedAllocator

func (x *GPUOptions_Experimental) GetTimestampedAllocator() bool

func (*GPUOptions_Experimental) GetUseCudaMallocAsync

func (x *GPUOptions_Experimental) GetUseCudaMallocAsync() bool

func (*GPUOptions_Experimental) GetUseUnifiedMemory

func (x *GPUOptions_Experimental) GetUseUnifiedMemory() bool

func (*GPUOptions_Experimental) GetVirtualDevices

func (*GPUOptions_Experimental) ProtoMessage

func (*GPUOptions_Experimental) ProtoMessage()

func (*GPUOptions_Experimental) ProtoReflect

func (x *GPUOptions_Experimental) ProtoReflect() protoreflect.Message

func (*GPUOptions_Experimental) Reset

func (x *GPUOptions_Experimental) Reset()

func (*GPUOptions_Experimental) String

func (x *GPUOptions_Experimental) String() string

type GPUOptions_Experimental_VirtualDevices

type GPUOptions_Experimental_VirtualDevices struct {

	// Per "virtual" device memory limit, in MB. The number of elements in
	// the list is the number of virtual devices to create on the
	// corresponding visible GPU (see "virtual_devices" below).
	// If empty and `num_virtual_devices_per_gpu` is not set, a single virtual
	// device taking all available memory from the device will be created.
	//
	// For the concept of "visible" and "virtual" GPU, see the comments for
	// "visible_device_list" above for more information.
	MemoryLimitMb []float32 `protobuf:"fixed32,1,rep,packed,name=memory_limit_mb,json=memoryLimitMb,proto3" json:"memory_limit_mb,omitempty"`
	// Priority values to use with the virtual devices. Use the cuda function
	// cudaDeviceGetStreamPriorityRange to query for valid range of values for
	// priority.
	//
	// On a P4000 GPU with cuda 10.1, the priority range reported was 0 for
	// least priority and -1 for greatest priority.
	//
	// If this field is not specified, then the virtual devices will be
	// created with the default priority. If this field has values set, then
	// its length must match that of memory_limit_mb above.
	Priority []int32 `protobuf:"varint,2,rep,packed,name=priority,proto3" json:"priority,omitempty"`
	// The virtual device ordinal number determines the device ID of the device.
	// A virtual device with a lower ordinal number always receives a
	// smaller device ID. The physical device ID and location in the
	// virtual device list are used to break ties.
	DeviceOrdinal []int32 `protobuf:"varint,3,rep,packed,name=device_ordinal,json=deviceOrdinal,proto3" json:"device_ordinal,omitempty"`
	// contains filtered or unexported fields
}

Configuration for breaking down a visible GPU into multiple "virtual" devices.
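
A minimal sketch that mirrors the first virtual-device example in the GPUOptions_Experimental comment above: visible GPUs "1,0", each split into two virtual devices by memory limit. The import path is hypothetical.

package gpuconfig

import pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path

// splitGPUs builds GPUOptions that carve four virtual devices out of two visible GPUs.
func splitGPUs() *pb.GPUOptions {
	return &pb.GPUOptions{
		VisibleDeviceList: "1,0",
		Experimental: &pb.GPUOptions_Experimental{
			VirtualDevices: []*pb.GPUOptions_Experimental_VirtualDevices{
				{MemoryLimitMb: []float32{1024, 2048}}, // visible GPU 1 -> /device:GPU:0 and :1
				{MemoryLimitMb: []float32{3072, 4096}}, // visible GPU 0 -> /device:GPU:2 and :3
			},
		},
	}
}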

func (*GPUOptions_Experimental_VirtualDevices) Descriptor deprecated

func (*GPUOptions_Experimental_VirtualDevices) Descriptor() ([]byte, []int)

Deprecated: Use GPUOptions_Experimental_VirtualDevices.ProtoReflect.Descriptor instead.

func (*GPUOptions_Experimental_VirtualDevices) GetDeviceOrdinal added in v0.2.0

func (x *GPUOptions_Experimental_VirtualDevices) GetDeviceOrdinal() []int32

func (*GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb

func (x *GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb() []float32

func (*GPUOptions_Experimental_VirtualDevices) GetPriority

func (x *GPUOptions_Experimental_VirtualDevices) GetPriority() []int32

func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage

func (*GPUOptions_Experimental_VirtualDevices) ProtoReflect

func (*GPUOptions_Experimental_VirtualDevices) Reset

func (*GPUOptions_Experimental_VirtualDevices) String

type GraphExecutionTrace

type GraphExecutionTrace struct {

	// Unique ID of the context that the executed op(s) belong to (e.g., a
	// compiled concrete tf.function).
	TfdbgContextId string `protobuf:"bytes,1,opt,name=tfdbg_context_id,json=tfdbgContextId,proto3" json:"tfdbg_context_id,omitempty"`
	// Name of the op (applicable only in the case of the `FULL_TENSOR` trace
	// level).
	OpName string `protobuf:"bytes,2,opt,name=op_name,json=opName,proto3" json:"op_name,omitempty"`
	// Output slot of the tensor (applicable only in the case of the `FULL_TENSOR`
	// trace level).
	OutputSlot int32 `protobuf:"varint,3,opt,name=output_slot,json=outputSlot,proto3" json:"output_slot,omitempty"`
	// Type of the tensor value encapsulated in this proto.
	TensorDebugMode TensorDebugMode `` /* 141-byte string literal not displayed */
	// Tensor value in the type described by `tensor_debug_mode`.
	// This tensor may summarize the value of a single intermediate op of the
	// graph, or those of multiple intermediate tensors.
	TensorProto *tensor_go_proto.TensorProto `protobuf:"bytes,5,opt,name=tensor_proto,json=tensorProto,proto3" json:"tensor_proto,omitempty"`
	// Name of the device that the op belongs to.
	DeviceName string `protobuf:"bytes,6,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
	// contains filtered or unexported fields
}

Data relating to an execution of a Graph (e.g., an eager execution of a FuncGraph). The values of the intermediate tensors computed in the graph are recorded in this proto. A graph execution may correspond to one or more pieces of `GraphExecutionTrace`, depending on whether the instrumented tensor values are summarized in an aggregated or separate fashion.
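
A minimal sketch of summarizing one GraphExecutionTrace record, keeping in mind that op_name and output_slot are only meaningful at the FULL_TENSOR trace level (see the field comments above). The import path is hypothetical.

package tfdbg

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path
)

// printTrace prints the context, op, output slot, device, and debug mode of a trace.
func printTrace(t *pb.GraphExecutionTrace) {
	fmt.Printf("context=%s op=%s slot=%d device=%s mode=%v\n",
		t.GetTfdbgContextId(), t.GetOpName(), t.GetOutputSlot(),
		t.GetDeviceName(), t.GetTensorDebugMode())
}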

func (*GraphExecutionTrace) Descriptor deprecated

func (*GraphExecutionTrace) Descriptor() ([]byte, []int)

Deprecated: Use GraphExecutionTrace.ProtoReflect.Descriptor instead.

func (*GraphExecutionTrace) GetDeviceName

func (x *GraphExecutionTrace) GetDeviceName() string

func (*GraphExecutionTrace) GetOpName

func (x *GraphExecutionTrace) GetOpName() string

func (*GraphExecutionTrace) GetOutputSlot

func (x *GraphExecutionTrace) GetOutputSlot() int32

func (*GraphExecutionTrace) GetTensorDebugMode

func (x *GraphExecutionTrace) GetTensorDebugMode() TensorDebugMode

func (*GraphExecutionTrace) GetTensorProto

func (x *GraphExecutionTrace) GetTensorProto() *tensor_go_proto.TensorProto

func (*GraphExecutionTrace) GetTfdbgContextId

func (x *GraphExecutionTrace) GetTfdbgContextId() string

func (*GraphExecutionTrace) ProtoMessage

func (*GraphExecutionTrace) ProtoMessage()

func (*GraphExecutionTrace) ProtoReflect

func (x *GraphExecutionTrace) ProtoReflect() protoreflect.Message

func (*GraphExecutionTrace) Reset

func (x *GraphExecutionTrace) Reset()

func (*GraphExecutionTrace) String

func (x *GraphExecutionTrace) String() string

type GraphOpCreation

type GraphOpCreation struct {

	// Type of the op (e.g., "MatMul").
	OpType string `protobuf:"bytes,1,opt,name=op_type,json=opType,proto3" json:"op_type,omitempty"`
	// Name of the op (e.g., "Dense/MatMul_1").
	OpName string `protobuf:"bytes,2,opt,name=op_name,json=opName,proto3" json:"op_name,omitempty"`
	// Name of the graph that the op is a part of (if available).
	GraphName string `protobuf:"bytes,3,opt,name=graph_name,json=graphName,proto3" json:"graph_name,omitempty"`
	// Unique ID of the graph (generated by debugger).
	// This is the ID of the immediately-enclosing graph.
	GraphId string `protobuf:"bytes,4,opt,name=graph_id,json=graphId,proto3" json:"graph_id,omitempty"`
	// Name of the device that the op is assigned to (if available).
	DeviceName string `protobuf:"bytes,5,opt,name=device_name,json=deviceName,proto3" json:"device_name,omitempty"`
	// Names of the input tensors to the op.
	InputNames []string `protobuf:"bytes,6,rep,name=input_names,json=inputNames,proto3" json:"input_names,omitempty"`
	// Number of output tensors emitted by the op.
	NumOutputs int32 `protobuf:"varint,7,opt,name=num_outputs,json=numOutputs,proto3" json:"num_outputs,omitempty"`
	// The unique ID for code location (stack trace) of the op's creation.
	CodeLocation *CodeLocation `protobuf:"bytes,8,opt,name=code_location,json=codeLocation,proto3" json:"code_location,omitempty"`
	// Unique IDs for the output tensors of this op.
	OutputTensorIds []int32 `protobuf:"varint,9,rep,packed,name=output_tensor_ids,json=outputTensorIds,proto3" json:"output_tensor_ids,omitempty"`
	// contains filtered or unexported fields
}

The creation of an op in a TensorFlow Graph (e.g., FuncGraph in TF2).
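
A minimal sketch of grouping GraphOpCreation events by their immediately-enclosing graph ID, a common first step when reconstructing a debugged graph. The import path is hypothetical.

package tfdbg

import pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path

// opsByGraph indexes op names by the unique graph ID that encloses them.
func opsByGraph(events []*pb.GraphOpCreation) map[string][]string {
	out := make(map[string][]string)
	for _, ev := range events {
		out[ev.GetGraphId()] = append(out[ev.GetGraphId()], ev.GetOpName())
	}
	return out
}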

func (*GraphOpCreation) Descriptor deprecated

func (*GraphOpCreation) Descriptor() ([]byte, []int)

Deprecated: Use GraphOpCreation.ProtoReflect.Descriptor instead.

func (*GraphOpCreation) GetCodeLocation

func (x *GraphOpCreation) GetCodeLocation() *CodeLocation

func (*GraphOpCreation) GetDeviceName

func (x *GraphOpCreation) GetDeviceName() string

func (*GraphOpCreation) GetGraphId

func (x *GraphOpCreation) GetGraphId() string

func (*GraphOpCreation) GetGraphName

func (x *GraphOpCreation) GetGraphName() string

func (*GraphOpCreation) GetInputNames

func (x *GraphOpCreation) GetInputNames() []string

func (*GraphOpCreation) GetNumOutputs

func (x *GraphOpCreation) GetNumOutputs() int32

func (*GraphOpCreation) GetOpName

func (x *GraphOpCreation) GetOpName() string

func (*GraphOpCreation) GetOpType

func (x *GraphOpCreation) GetOpType() string

func (*GraphOpCreation) GetOutputTensorIds

func (x *GraphOpCreation) GetOutputTensorIds() []int32

func (*GraphOpCreation) ProtoMessage

func (*GraphOpCreation) ProtoMessage()

func (*GraphOpCreation) ProtoReflect

func (x *GraphOpCreation) ProtoReflect() protoreflect.Message

func (*GraphOpCreation) Reset

func (x *GraphOpCreation) Reset()

func (*GraphOpCreation) String

func (x *GraphOpCreation) String() string

type GraphOptions

type GraphOptions struct {

	// If true, use control flow to schedule the activation of Recv nodes.
	// (Currently ignored.)
	EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling,proto3" json:"enable_recv_scheduling,omitempty"`
	// Options controlling how graph is optimized.
	OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions,proto3" json:"optimizer_options,omitempty"`
	// The number of steps to run before returning a cost model detailing
	// the memory usage and performance of each node of the graph. 0 means
	// no cost model.
	BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel,proto3" json:"build_cost_model,omitempty"`
	// The number of steps to skip before collecting statistics for the
	// cost model.
	BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter,proto3" json:"build_cost_model_after,omitempty"`
	// Annotate each Node with Op output shape data, to the extent it can
	// be statically inferred.
	InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes,proto3" json:"infer_shapes,omitempty"`
	// Only place the subgraphs that are run, rather than the entire graph.
	//
	// This is useful for interactive graph building, where one might
	// produce graphs that cannot be placed during the debugging
	// process.  In particular, it allows the client to continue work in
	// a session after adding a node to a graph whose placement
	// constraints are unsatisfiable.
	PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph,proto3" json:"place_pruned_graph,omitempty"`
	// If true, transfer float values between processes as bfloat16.
	EnableBfloat16Sendrecv bool `` /* 130-byte string literal not displayed */
	// If > 0, record a timeline every this many steps.
	// EXPERIMENTAL: This currently has no effect in MasterSession.
	TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep,proto3" json:"timeline_step,omitempty"`
	// Options that control the type and amount of graph rewriting.
	// Not currently configurable via the public Python API (i.e. there is no API
	// stability guarantee if you import RewriterConfig explicitly).
	RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions,proto3" json:"rewrite_options,omitempty"`
	// contains filtered or unexported fields
}

func (*GraphOptions) Descriptor deprecated

func (*GraphOptions) Descriptor() ([]byte, []int)

Deprecated: Use GraphOptions.ProtoReflect.Descriptor instead.

func (*GraphOptions) GetBuildCostModel

func (x *GraphOptions) GetBuildCostModel() int64

func (*GraphOptions) GetBuildCostModelAfter

func (x *GraphOptions) GetBuildCostModelAfter() int64

func (*GraphOptions) GetEnableBfloat16Sendrecv

func (x *GraphOptions) GetEnableBfloat16Sendrecv() bool

func (*GraphOptions) GetEnableRecvScheduling

func (x *GraphOptions) GetEnableRecvScheduling() bool

func (*GraphOptions) GetInferShapes

func (x *GraphOptions) GetInferShapes() bool

func (*GraphOptions) GetOptimizerOptions

func (x *GraphOptions) GetOptimizerOptions() *OptimizerOptions

func (*GraphOptions) GetPlacePrunedGraph

func (x *GraphOptions) GetPlacePrunedGraph() bool

func (*GraphOptions) GetRewriteOptions

func (x *GraphOptions) GetRewriteOptions() *RewriterConfig

func (*GraphOptions) GetTimelineStep

func (x *GraphOptions) GetTimelineStep() int32

func (*GraphOptions) ProtoMessage

func (*GraphOptions) ProtoMessage()

func (*GraphOptions) ProtoReflect

func (x *GraphOptions) ProtoReflect() protoreflect.Message

func (*GraphOptions) Reset

func (x *GraphOptions) Reset()

func (*GraphOptions) String

func (x *GraphOptions) String() string

type JobDef

type JobDef struct {

	// The name of this job.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Mapping from task ID to "hostname:port" string.
	//
	// If the `name` field contains "worker", and the `tasks` map contains a
	// mapping from 7 to "example.org:2222", then the device prefix
	// "/job:worker/task:7" will be assigned to "example.org:2222".
	//
	// If a job has multiple replicas, host-ports will be comma-delimited, with
	// one entry for each replica.
	Tasks map[int32]string `` /* 152-byte string literal not displayed */
	// contains filtered or unexported fields
}

Defines a single job in a TensorFlow cluster.
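
A minimal sketch that reproduces the mapping described in the field comment above: each task ID in a job yields a device prefix such as "/job:worker/task:7". The import path is hypothetical.

package cluster

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path
)

// devicePrefixes maps "/job:<name>/task:<id>" prefixes to their "hostname:port" targets.
func devicePrefixes(job *pb.JobDef) map[string]string {
	out := make(map[string]string, len(job.GetTasks()))
	for id, hostport := range job.GetTasks() {
		out[fmt.Sprintf("/job:%s/task:%d", job.GetName(), id)] = hostport
	}
	return out
}

// Example: devicePrefixes(&pb.JobDef{Name: "worker", Tasks: map[int32]string{7: "example.org:2222"}})
// returns {"/job:worker/task:7": "example.org:2222"}.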

func (*JobDef) Descriptor deprecated

func (*JobDef) Descriptor() ([]byte, []int)

Deprecated: Use JobDef.ProtoReflect.Descriptor instead.

func (*JobDef) GetName

func (x *JobDef) GetName() string

func (*JobDef) GetTasks

func (x *JobDef) GetTasks() map[int32]string

func (*JobDef) ProtoMessage

func (*JobDef) ProtoMessage()

func (*JobDef) ProtoReflect

func (x *JobDef) ProtoReflect() protoreflect.Message

func (*JobDef) Reset

func (x *JobDef) Reset()

func (*JobDef) String

func (x *JobDef) String() string

type JobDeviceFilters

type JobDeviceFilters struct {

	// The name of this job.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Mapping from task ID to task device filters.
	Tasks map[int32]*TaskDeviceFilters `` /* 152-byte string literal not displayed */
	// contains filtered or unexported fields
}

Defines the device filters for tasks in a job.

func (*JobDeviceFilters) Descriptor deprecated

func (*JobDeviceFilters) Descriptor() ([]byte, []int)

Deprecated: Use JobDeviceFilters.ProtoReflect.Descriptor instead.

func (*JobDeviceFilters) GetName

func (x *JobDeviceFilters) GetName() string

func (*JobDeviceFilters) GetTasks

func (x *JobDeviceFilters) GetTasks() map[int32]*TaskDeviceFilters

func (*JobDeviceFilters) ProtoMessage

func (*JobDeviceFilters) ProtoMessage()

func (*JobDeviceFilters) ProtoReflect

func (x *JobDeviceFilters) ProtoReflect() protoreflect.Message

func (*JobDeviceFilters) Reset

func (x *JobDeviceFilters) Reset()

func (*JobDeviceFilters) String

func (x *JobDeviceFilters) String() string

type ListValue

type ListValue struct {
	Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Represents a Python list.

func (*ListValue) Descriptor deprecated

func (*ListValue) Descriptor() ([]byte, []int)

Deprecated: Use ListValue.ProtoReflect.Descriptor instead.

func (*ListValue) GetValues

func (x *ListValue) GetValues() []*StructuredValue

func (*ListValue) ProtoMessage

func (*ListValue) ProtoMessage()

func (*ListValue) ProtoReflect

func (x *ListValue) ProtoReflect() protoreflect.Message

func (*ListValue) Reset

func (x *ListValue) Reset()

func (*ListValue) String

func (x *ListValue) String() string

type MetaGraphDef

type MetaGraphDef struct {
	MetaInfoDef *MetaGraphDef_MetaInfoDef `protobuf:"bytes,1,opt,name=meta_info_def,json=metaInfoDef,proto3" json:"meta_info_def,omitempty"`
	// GraphDef.
	GraphDef *graph_go_proto.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef,proto3" json:"graph_def,omitempty"`
	// SaverDef.
	SaverDef *SaverDef `protobuf:"bytes,3,opt,name=saver_def,json=saverDef,proto3" json:"saver_def,omitempty"`
	// collection_def: Map from collection name to collections.
	// See CollectionDef section for details.
	CollectionDef map[string]*CollectionDef `` /* 188-byte string literal not displayed */
	// signature_def: Map from user supplied key for a signature to a single
	// SignatureDef.
	SignatureDef map[string]*SignatureDef `` /* 185-byte string literal not displayed */
	// Asset file def to be used with the defined graph.
	AssetFileDef []*AssetFileDef `protobuf:"bytes,6,rep,name=asset_file_def,json=assetFileDef,proto3" json:"asset_file_def,omitempty"`
	// Extra information about the structure of functions and stateful objects.
	ObjectGraphDef *SavedObjectGraph `protobuf:"bytes,7,opt,name=object_graph_def,json=objectGraphDef,proto3" json:"object_graph_def,omitempty"`
	// contains filtered or unexported fields
}

Protocol buffer containing the following components, which are necessary to restart training or run inference. It can be used to serialize/de-serialize the in-memory objects needed for running computation in a graph when crossing the process boundary, and for long-term storage of graphs, cross-language execution of graphs, etc. A minimal lookup sketch follows the component list below.

MetaInfoDef
GraphDef
SaverDef
CollectionDef
TensorInfo
SignatureDef
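
A minimal sketch of deserializing a MetaGraphDef and looking up one of its SignatureDefs. The import path, the file name, and the "serving_default" signature key are illustrative assumptions.

package main

import (
	"fmt"
	"log"
	"os"

	"google.golang.org/protobuf/proto"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path
)

func main() {
	raw, err := os.ReadFile("model.meta") // illustrative: a binary-serialized MetaGraphDef
	if err != nil {
		log.Fatal(err)
	}
	mg := &pb.MetaGraphDef{}
	if err := proto.Unmarshal(raw, mg); err != nil {
		log.Fatal(err)
	}
	fmt.Println("tags:", mg.GetMetaInfoDef().GetTags())
	if sig, ok := mg.GetSignatureDef()["serving_default"]; ok { // key is illustrative
		fmt.Println("found signature:", sig)
	}
}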

func (*MetaGraphDef) Descriptor deprecated

func (*MetaGraphDef) Descriptor() ([]byte, []int)

Deprecated: Use MetaGraphDef.ProtoReflect.Descriptor instead.

func (*MetaGraphDef) GetAssetFileDef

func (x *MetaGraphDef) GetAssetFileDef() []*AssetFileDef

func (*MetaGraphDef) GetCollectionDef

func (x *MetaGraphDef) GetCollectionDef() map[string]*CollectionDef

func (*MetaGraphDef) GetGraphDef

func (x *MetaGraphDef) GetGraphDef() *graph_go_proto.GraphDef

func (*MetaGraphDef) GetMetaInfoDef

func (x *MetaGraphDef) GetMetaInfoDef() *MetaGraphDef_MetaInfoDef

func (*MetaGraphDef) GetObjectGraphDef

func (x *MetaGraphDef) GetObjectGraphDef() *SavedObjectGraph

func (*MetaGraphDef) GetSaverDef

func (x *MetaGraphDef) GetSaverDef() *SaverDef

func (*MetaGraphDef) GetSignatureDef

func (x *MetaGraphDef) GetSignatureDef() map[string]*SignatureDef

func (*MetaGraphDef) ProtoMessage

func (*MetaGraphDef) ProtoMessage()

func (*MetaGraphDef) ProtoReflect

func (x *MetaGraphDef) ProtoReflect() protoreflect.Message

func (*MetaGraphDef) Reset

func (x *MetaGraphDef) Reset()

func (*MetaGraphDef) String

func (x *MetaGraphDef) String() string

type MetaGraphDef_MetaInfoDef

type MetaGraphDef_MetaInfoDef struct {

	// User specified Version string. Can be the name of the model and revision,
	// steps this model has been trained to, etc.
	MetaGraphVersion string `protobuf:"bytes,1,opt,name=meta_graph_version,json=metaGraphVersion,proto3" json:"meta_graph_version,omitempty"`
	// A copy of the OpDefs used by the producer of this graph_def.
	// Descriptions and Ops not used in graph_def are stripped out.
	StrippedOpList *op_def_go_proto.OpList `protobuf:"bytes,2,opt,name=stripped_op_list,json=strippedOpList,proto3" json:"stripped_op_list,omitempty"`
	// A serialized protobuf. Can be the time this meta graph is created, or
	// modified, or name of the model.
	AnyInfo *anypb.Any `protobuf:"bytes,3,opt,name=any_info,json=anyInfo,proto3" json:"any_info,omitempty"`
	// User supplied tag(s) on the meta_graph and included graph_def.
	//
	// MetaGraphDefs should be tagged with their capabilities or use-cases.
	// Examples: "train", "serve", "gpu", "tpu", etc.
	// These tags enable loaders to access the MetaGraph(s) appropriate for a
	// specific use-case or runtime environment.
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The __version__ string of the tensorflow build used to write this graph.
	// This will be populated by the framework, which will overwrite any user
	// supplied value.
	TensorflowVersion string `protobuf:"bytes,5,opt,name=tensorflow_version,json=tensorflowVersion,proto3" json:"tensorflow_version,omitempty"`
	// The __git_version__ string of the tensorflow build used to write this
	// graph. This will be populated by the framework, which will overwrite any
	// user supplied value.
	TensorflowGitVersion string `protobuf:"bytes,6,opt,name=tensorflow_git_version,json=tensorflowGitVersion,proto3" json:"tensorflow_git_version,omitempty"`
	// A flag to denote whether default-valued attrs have been stripped from
	// the nodes in this graph_def.
	StrippedDefaultAttrs bool `protobuf:"varint,7,opt,name=stripped_default_attrs,json=strippedDefaultAttrs,proto3" json:"stripped_default_attrs,omitempty"`
	// FunctionDef name to aliases mapping.
	FunctionAliases map[string]string `` /* 194-byte string literal not displayed */
	// contains filtered or unexported fields
}

Meta information regarding the graph to be exported. To be used by users of this protocol buffer to encode information regarding their meta graph.

func (*MetaGraphDef_MetaInfoDef) Descriptor deprecated

func (*MetaGraphDef_MetaInfoDef) Descriptor() ([]byte, []int)

Deprecated: Use MetaGraphDef_MetaInfoDef.ProtoReflect.Descriptor instead.

func (*MetaGraphDef_MetaInfoDef) GetAnyInfo

func (x *MetaGraphDef_MetaInfoDef) GetAnyInfo() *anypb.Any

func (*MetaGraphDef_MetaInfoDef) GetFunctionAliases

func (x *MetaGraphDef_MetaInfoDef) GetFunctionAliases() map[string]string

func (*MetaGraphDef_MetaInfoDef) GetMetaGraphVersion

func (x *MetaGraphDef_MetaInfoDef) GetMetaGraphVersion() string

func (*MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs

func (x *MetaGraphDef_MetaInfoDef) GetStrippedDefaultAttrs() bool

func (*MetaGraphDef_MetaInfoDef) GetStrippedOpList

func (x *MetaGraphDef_MetaInfoDef) GetStrippedOpList() *op_def_go_proto.OpList

func (*MetaGraphDef_MetaInfoDef) GetTags

func (x *MetaGraphDef_MetaInfoDef) GetTags() []string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion

func (x *MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion() string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowVersion

func (x *MetaGraphDef_MetaInfoDef) GetTensorflowVersion() string

func (*MetaGraphDef_MetaInfoDef) ProtoMessage

func (*MetaGraphDef_MetaInfoDef) ProtoMessage()

func (*MetaGraphDef_MetaInfoDef) ProtoReflect

func (x *MetaGraphDef_MetaInfoDef) ProtoReflect() protoreflect.Message

func (*MetaGraphDef_MetaInfoDef) Reset

func (x *MetaGraphDef_MetaInfoDef) Reset()

func (*MetaGraphDef_MetaInfoDef) String

func (x *MetaGraphDef_MetaInfoDef) String() string

type NamedDevice

type NamedDevice struct {
	Name       string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Properties *DeviceProperties `protobuf:"bytes,2,opt,name=properties,proto3" json:"properties,omitempty"`
	// contains filtered or unexported fields
}

func (*NamedDevice) Descriptor deprecated

func (*NamedDevice) Descriptor() ([]byte, []int)

Deprecated: Use NamedDevice.ProtoReflect.Descriptor instead.

func (*NamedDevice) GetName

func (x *NamedDevice) GetName() string

func (*NamedDevice) GetProperties

func (x *NamedDevice) GetProperties() *DeviceProperties

func (*NamedDevice) ProtoMessage

func (*NamedDevice) ProtoMessage()

func (*NamedDevice) ProtoReflect

func (x *NamedDevice) ProtoReflect() protoreflect.Message

func (*NamedDevice) Reset

func (x *NamedDevice) Reset()

func (*NamedDevice) String

func (x *NamedDevice) String() string

type NamedTensorProto

type NamedTensorProto struct {

	// Name of the tensor.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The client can populate a TensorProto using a `tensorflow::Tensor`, or
	// directly using the protobuf field accessors.
	//
	// The client specifies whether the returned tensor values should be
	// filled tensor fields (float_val, int_val, etc.) or encoded in a
	// compact form in tensor.tensor_content.
	Tensor *tensor_go_proto.TensorProto `protobuf:"bytes,2,opt,name=tensor,proto3" json:"tensor,omitempty"`
	// contains filtered or unexported fields
}

A pair of tensor name and tensor values.

func (*NamedTensorProto) Descriptor deprecated

func (*NamedTensorProto) Descriptor() ([]byte, []int)

Deprecated: Use NamedTensorProto.ProtoReflect.Descriptor instead.

func (*NamedTensorProto) GetName

func (x *NamedTensorProto) GetName() string

func (*NamedTensorProto) GetTensor

func (*NamedTensorProto) ProtoMessage

func (*NamedTensorProto) ProtoMessage()

func (*NamedTensorProto) ProtoReflect

func (x *NamedTensorProto) ProtoReflect() protoreflect.Message

func (*NamedTensorProto) Reset

func (x *NamedTensorProto) Reset()

func (*NamedTensorProto) String

func (x *NamedTensorProto) String() string

type NamedTupleValue

type NamedTupleValue struct {
	Name   string       `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Values []*PairValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Represents Python's namedtuple.

func (*NamedTupleValue) Descriptor deprecated

func (*NamedTupleValue) Descriptor() ([]byte, []int)

Deprecated: Use NamedTupleValue.ProtoReflect.Descriptor instead.

func (*NamedTupleValue) GetName

func (x *NamedTupleValue) GetName() string

func (*NamedTupleValue) GetValues

func (x *NamedTupleValue) GetValues() []*PairValue

func (*NamedTupleValue) ProtoMessage

func (*NamedTupleValue) ProtoMessage()

func (*NamedTupleValue) ProtoReflect

func (x *NamedTupleValue) ProtoReflect() protoreflect.Message

func (*NamedTupleValue) Reset

func (x *NamedTupleValue) Reset()

func (*NamedTupleValue) String

func (x *NamedTupleValue) String() string

type NoneValue

type NoneValue struct {
	// contains filtered or unexported fields
}

Represents None.

func (*NoneValue) Descriptor deprecated

func (*NoneValue) Descriptor() ([]byte, []int)

Deprecated: Use NoneValue.ProtoReflect.Descriptor instead.

func (*NoneValue) ProtoMessage

func (*NoneValue) ProtoMessage()

func (*NoneValue) ProtoReflect

func (x *NoneValue) ProtoReflect() protoreflect.Message

func (*NoneValue) Reset

func (x *NoneValue) Reset()

func (*NoneValue) String

func (x *NoneValue) String() string

type OptimizerOptions

type OptimizerOptions struct {

	// If true, optimize the graph using common subexpression elimination.
	// Note: the optimization Level L1 will override this setting to true. So in
	// order to disable common subexpression elimination the opt_level has to be
	// set to L0.
	DoCommonSubexpressionElimination bool `` /* 162-byte string literal not displayed */
	// If true, perform constant folding optimization on the graph.
	// Note: the optimization Level L1 will override this setting to true. So in
	// order to disable constant folding the opt_level has to be set to L0.
	DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding,proto3" json:"do_constant_folding,omitempty"`
	// Constant folding optimization replaces tensors whose values can be
	// predetermined, with constant nodes. To avoid inserting too large constants,
	// the size of each constant created can be limited. If this value is zero, a
	// default limit of 10 MiB will be applied. If constant folding optimization
	// is disabled, this value is ignored.
	MaxFoldedConstantInBytes int64 `` /* 140-byte string literal not displayed */
	// If true, perform function inlining on the graph.
	DoFunctionInlining bool `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining,proto3" json:"do_function_inlining,omitempty"`
	// Overall optimization level. The actual optimizations applied will be the
	// logical OR of the flags that this level implies and any flags already set.
	OptLevel       OptimizerOptions_Level          `protobuf:"varint,3,opt,name=opt_level,json=optLevel,proto3,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"`
	GlobalJitLevel OptimizerOptions_GlobalJitLevel `` /* 154-byte string literal not displayed */
	// CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
	//   - this flag is true, or
	//   - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
	CpuGlobalJit bool `protobuf:"varint,7,opt,name=cpu_global_jit,json=cpuGlobalJit,proto3" json:"cpu_global_jit,omitempty"`
	// contains filtered or unexported fields
}

Options passed to the graph optimizer
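
A minimal sketch of GraphOptions that disable the default L1 optimizations and the JIT, following the notes above that opt_level must be set to L0 to turn off common subexpression elimination and constant folding. The import path is hypothetical.

package config

import pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path

// noOptimizeGraphOptions disables graph optimization and global JIT compilation.
func noOptimizeGraphOptions() *pb.GraphOptions {
	return &pb.GraphOptions{
		OptimizerOptions: &pb.OptimizerOptions{
			OptLevel:       pb.OptimizerOptions_L0,  // no optimizations
			GlobalJitLevel: pb.OptimizerOptions_OFF, // keep XLA autoclustering off
		},
	}
}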

func (*OptimizerOptions) Descriptor deprecated

func (*OptimizerOptions) Descriptor() ([]byte, []int)

Deprecated: Use OptimizerOptions.ProtoReflect.Descriptor instead.

func (*OptimizerOptions) GetCpuGlobalJit

func (x *OptimizerOptions) GetCpuGlobalJit() bool

func (*OptimizerOptions) GetDoCommonSubexpressionElimination

func (x *OptimizerOptions) GetDoCommonSubexpressionElimination() bool

func (*OptimizerOptions) GetDoConstantFolding

func (x *OptimizerOptions) GetDoConstantFolding() bool

func (*OptimizerOptions) GetDoFunctionInlining

func (x *OptimizerOptions) GetDoFunctionInlining() bool

func (*OptimizerOptions) GetGlobalJitLevel

func (x *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel

func (*OptimizerOptions) GetMaxFoldedConstantInBytes

func (x *OptimizerOptions) GetMaxFoldedConstantInBytes() int64

func (*OptimizerOptions) GetOptLevel

func (x *OptimizerOptions) GetOptLevel() OptimizerOptions_Level

func (*OptimizerOptions) ProtoMessage

func (*OptimizerOptions) ProtoMessage()

func (*OptimizerOptions) ProtoReflect

func (x *OptimizerOptions) ProtoReflect() protoreflect.Message

func (*OptimizerOptions) Reset

func (x *OptimizerOptions) Reset()

func (*OptimizerOptions) String

func (x *OptimizerOptions) String() string

type OptimizerOptions_GlobalJitLevel

type OptimizerOptions_GlobalJitLevel int32

Control the use of the compiler/jit. Experimental.

const (
	OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0 // Default setting ("off" now, but later expected to be "on")
	OptimizerOptions_OFF     OptimizerOptions_GlobalJitLevel = -1
	// The following settings turn on compilation, with higher values being
	// more aggressive.  Higher values may reduce opportunities for parallelism
	// and may use more memory.  (At present, there is no distinction, but this
	// is expected to change.)
	OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1
	OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2
)

func (OptimizerOptions_GlobalJitLevel) Descriptor

func (OptimizerOptions_GlobalJitLevel) Enum

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor deprecated

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int)

Deprecated: Use OptimizerOptions_GlobalJitLevel.Descriptor instead.

func (OptimizerOptions_GlobalJitLevel) Number

func (OptimizerOptions_GlobalJitLevel) String

func (OptimizerOptions_GlobalJitLevel) Type

type OptimizerOptions_Level

type OptimizerOptions_Level int32

Optimization level

const (
	// L1 is the default level.
	// Optimization performed at L1 :
	// 1. Common subexpression elimination
	// 2. Constant folding
	OptimizerOptions_L1 OptimizerOptions_Level = 0
	// No optimizations
	OptimizerOptions_L0 OptimizerOptions_Level = -1
)

func (OptimizerOptions_Level) Descriptor

func (OptimizerOptions_Level) Enum

func (OptimizerOptions_Level) EnumDescriptor deprecated

func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int)

Deprecated: Use OptimizerOptions_Level.Descriptor instead.

func (OptimizerOptions_Level) Number

func (OptimizerOptions_Level) String

func (x OptimizerOptions_Level) String() string

func (OptimizerOptions_Level) Type

type PairValue

type PairValue struct {
	Key   string           `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value *StructuredValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	// contains filtered or unexported fields
}

Represents a (key, value) pair.

func (*PairValue) Descriptor deprecated

func (*PairValue) Descriptor() ([]byte, []int)

Deprecated: Use PairValue.ProtoReflect.Descriptor instead.

func (*PairValue) GetKey

func (x *PairValue) GetKey() string

func (*PairValue) GetValue

func (x *PairValue) GetValue() *StructuredValue

func (*PairValue) ProtoMessage

func (*PairValue) ProtoMessage()

func (*PairValue) ProtoReflect

func (x *PairValue) ProtoReflect() protoreflect.Message

func (*PairValue) Reset

func (x *PairValue) Reset()

func (*PairValue) String

func (x *PairValue) String() string

type ProcessingModeDef

type ProcessingModeDef struct {
	ShardingPolicy ProcessingModeDef_ShardingPolicy `` /* 158-byte string literal not displayed */
	// contains filtered or unexported fields
}

Next tag: 2

func (*ProcessingModeDef) Descriptor deprecated

func (*ProcessingModeDef) Descriptor() ([]byte, []int)

Deprecated: Use ProcessingModeDef.ProtoReflect.Descriptor instead.

func (*ProcessingModeDef) GetShardingPolicy

func (x *ProcessingModeDef) GetShardingPolicy() ProcessingModeDef_ShardingPolicy

func (*ProcessingModeDef) ProtoMessage

func (*ProcessingModeDef) ProtoMessage()

func (*ProcessingModeDef) ProtoReflect

func (x *ProcessingModeDef) ProtoReflect() protoreflect.Message

func (*ProcessingModeDef) Reset

func (x *ProcessingModeDef) Reset()

func (*ProcessingModeDef) String

func (x *ProcessingModeDef) String() string

type ProcessingModeDef_ShardingPolicy

type ProcessingModeDef_ShardingPolicy int32

Specifies how data is sharded among tf.data service workers.

const (
	// No sharding will be performed. Each worker produces the entire dataset
	// without any sharding. With this mode, the best practice is to shuffle the
	// dataset nondeterministically so that workers process the dataset in
	// different orders.
	ProcessingModeDef_OFF ProcessingModeDef_ShardingPolicy = 0
	// The input dataset is dynamically split among workers at runtime. Each
	// worker gets the next split when it reads data from the dispatcher. There
	// is no fixed sharding with this mode.
	ProcessingModeDef_DYNAMIC ProcessingModeDef_ShardingPolicy = 1
	// The following are static sharding policies. The semantics are similar to
	// `tf.data.experimental.AutoShardPolicy`. These policies require:
	//   - The tf.data service cluster has a fixed size, and you need to specify
	//     the workers in DispatcherConfig.
	//   - Each client only reads from the local tf.data service worker.
	//
	// Shards by input files (each worker will get a set of files to process).
	// When this option is selected, make sure that there are at least as many
	// files as workers. If there are fewer input files than workers, a runtime
	// error will be raised.
	ProcessingModeDef_FILE ProcessingModeDef_ShardingPolicy = 2
	// Shards by elements produced by the dataset. Each worker will process the
	// whole dataset and discard the portion that is not for itself. Note that
	// for this mode to correctly partition the dataset elements, the dataset
	// needs to produce elements in a deterministic order.
	ProcessingModeDef_DATA ProcessingModeDef_ShardingPolicy = 3
	// Attempts FILE-based sharding, falling back to DATA-based sharding on
	// failures.
	ProcessingModeDef_FILE_OR_DATA ProcessingModeDef_ShardingPolicy = 4
	// Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
	// placeholder to replace with `shard(num_workers, worker_index)`.
	ProcessingModeDef_HINT ProcessingModeDef_ShardingPolicy = 5
)
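
A small sketch of picking a static sharding policy based on the constraint described for FILE above (at least as many input files as workers). The import path is hypothetical.

package dataservice

import pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path

// shardingFor prefers FILE sharding when every worker can get at least one file,
// and otherwise falls back to FILE_OR_DATA.
func shardingFor(numFiles, numWorkers int) *pb.ProcessingModeDef {
	policy := pb.ProcessingModeDef_FILE_OR_DATA
	if numFiles >= numWorkers {
		policy = pb.ProcessingModeDef_FILE
	}
	return &pb.ProcessingModeDef{ShardingPolicy: policy}
}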

func (ProcessingModeDef_ShardingPolicy) Descriptor

func (ProcessingModeDef_ShardingPolicy) Enum

func (ProcessingModeDef_ShardingPolicy) EnumDescriptor deprecated

func (ProcessingModeDef_ShardingPolicy) EnumDescriptor() ([]byte, []int)

Deprecated: Use ProcessingModeDef_ShardingPolicy.Descriptor instead.

func (ProcessingModeDef_ShardingPolicy) Number

func (ProcessingModeDef_ShardingPolicy) String

func (ProcessingModeDef_ShardingPolicy) Type

type QueueRunnerDef

type QueueRunnerDef struct {

	// Queue name.
	QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"`
	// A list of enqueue operations.
	EnqueueOpName []string `protobuf:"bytes,2,rep,name=enqueue_op_name,json=enqueueOpName,proto3" json:"enqueue_op_name,omitempty"`
	// The operation to run to close the queue.
	CloseOpName string `protobuf:"bytes,3,opt,name=close_op_name,json=closeOpName,proto3" json:"close_op_name,omitempty"`
	// The operation to run to cancel the queue.
	CancelOpName string `protobuf:"bytes,4,opt,name=cancel_op_name,json=cancelOpName,proto3" json:"cancel_op_name,omitempty"`
	// A list of exception types considered to signal a safely closed queue
	// if raised during enqueue operations.
	QueueClosedExceptionTypes []for_core_protos_go_proto.Code `` /* 175-byte string literal not displayed */
	// contains filtered or unexported fields
}

Protocol buffer representing a QueueRunner.
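
A small helper sketch that checks whether an error code is one the QueueRunnerDef treats as a safely closed queue. Both import paths are hypothetical; the second stands in for the error-codes package referenced in this documentation as for_core_protos_go_proto.

package queuerunner

import (
	codes "example.com/tensorflow/core/protobuf/error_codes_go_proto" // hypothetical import path
	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // hypothetical import path
)

// isQueueClosedCode reports whether code is listed in queue_closed_exception_types.
func isQueueClosedCode(def *pb.QueueRunnerDef, code codes.Code) bool {
	for _, c := range def.GetQueueClosedExceptionTypes() {
		if c == code {
			return true
		}
	}
	return false
}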

func (*QueueRunnerDef) Descriptor deprecated

func (*QueueRunnerDef) Descriptor() ([]byte, []int)

Deprecated: Use QueueRunnerDef.ProtoReflect.Descriptor instead.

func (*QueueRunnerDef) GetCancelOpName

func (x *QueueRunnerDef) GetCancelOpName() string

func (*QueueRunnerDef) GetCloseOpName

func (x *QueueRunnerDef) GetCloseOpName() string

func (*QueueRunnerDef) GetEnqueueOpName

func (x *QueueRunnerDef) GetEnqueueOpName() []string

func (*QueueRunnerDef) GetQueueClosedExceptionTypes

func (x *QueueRunnerDef) GetQueueClosedExceptionTypes() []for_core_protos_go_proto.Code

func (*QueueRunnerDef) GetQueueName

func (x *QueueRunnerDef) GetQueueName() string

func (*QueueRunnerDef) ProtoMessage

func (*QueueRunnerDef) ProtoMessage()

func (*QueueRunnerDef) ProtoReflect

func (x *QueueRunnerDef) ProtoReflect() protoreflect.Message

func (*QueueRunnerDef) Reset

func (x *QueueRunnerDef) Reset()

func (*QueueRunnerDef) String

func (x *QueueRunnerDef) String() string

type RecvBufRespExtra

type RecvBufRespExtra struct {
	TensorContent [][]byte `protobuf:"bytes,1,rep,name=tensor_content,json=tensorContent,proto3" json:"tensor_content,omitempty"`
	// contains filtered or unexported fields
}

Extra data needed on a non-RDMA RecvBufResponse.

func (*RecvBufRespExtra) Descriptor deprecated

func (*RecvBufRespExtra) Descriptor() ([]byte, []int)

Deprecated: Use RecvBufRespExtra.ProtoReflect.Descriptor instead.

func (*RecvBufRespExtra) GetTensorContent

func (x *RecvBufRespExtra) GetTensorContent() [][]byte

func (*RecvBufRespExtra) ProtoMessage

func (*RecvBufRespExtra) ProtoMessage()

func (*RecvBufRespExtra) ProtoReflect

func (x *RecvBufRespExtra) ProtoReflect() protoreflect.Message

func (*RecvBufRespExtra) Reset

func (x *RecvBufRespExtra) Reset()

func (*RecvBufRespExtra) String

func (x *RecvBufRespExtra) String() string

type RegisteredSaver

type RegisteredSaver struct {

	// The name of the registered saver/restore function.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Unique auto-generated name of the object.
	ObjectName string `protobuf:"bytes,2,opt,name=object_name,json=objectName,proto3" json:"object_name,omitempty"`
	// contains filtered or unexported fields
}

func (*RegisteredSaver) Descriptor deprecated

func (*RegisteredSaver) Descriptor() ([]byte, []int)

Deprecated: Use RegisteredSaver.ProtoReflect.Descriptor instead.

func (*RegisteredSaver) GetName

func (x *RegisteredSaver) GetName() string

func (*RegisteredSaver) GetObjectName

func (x *RegisteredSaver) GetObjectName() string

func (*RegisteredSaver) ProtoMessage

func (*RegisteredSaver) ProtoMessage()

func (*RegisteredSaver) ProtoReflect

func (x *RegisteredSaver) ProtoReflect() protoreflect.Message

func (*RegisteredSaver) Reset

func (x *RegisteredSaver) Reset()

func (*RegisteredSaver) String

func (x *RegisteredSaver) String() string

type RemoteTensorHandle

type RemoteTensorHandle struct {

	// The ID of the operation that produced this tensor.
	OpId int64 `protobuf:"varint,1,opt,name=op_id,json=opId,proto3" json:"op_id,omitempty"`
	// The index into the outputs of the operation that produced this tensor.
	OutputNum int32 `protobuf:"varint,2,opt,name=output_num,json=outputNum,proto3" json:"output_num,omitempty"`
	// Device where the tensor is located. Cannot be empty.
	// For multi-device functions, it's the default device passed to placer.
	Device string `protobuf:"bytes,3,opt,name=device,proto3" json:"device,omitempty"`
	// Device of the operation producing this tensor. Can be empty if the
	// operation producing this tensor is a multi-device function.
	OpDevice string `protobuf:"bytes,4,opt,name=op_device,json=opDevice,proto3" json:"op_device,omitempty"`
	// Tensor type.
	Dtype types_go_proto.DataType `protobuf:"varint,5,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// Optional data types and shapes of a remote resource variable.
	ResourceDtypesAndShapes []*ResourceDtypeAndShape `` /* 134-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*RemoteTensorHandle) Descriptor deprecated

func (*RemoteTensorHandle) Descriptor() ([]byte, []int)

Deprecated: Use RemoteTensorHandle.ProtoReflect.Descriptor instead.

func (*RemoteTensorHandle) GetDevice

func (x *RemoteTensorHandle) GetDevice() string

func (*RemoteTensorHandle) GetDtype

func (*RemoteTensorHandle) GetOpDevice

func (x *RemoteTensorHandle) GetOpDevice() string

func (*RemoteTensorHandle) GetOpId

func (x *RemoteTensorHandle) GetOpId() int64

func (*RemoteTensorHandle) GetOutputNum

func (x *RemoteTensorHandle) GetOutputNum() int32

func (*RemoteTensorHandle) GetResourceDtypesAndShapes

func (x *RemoteTensorHandle) GetResourceDtypesAndShapes() []*ResourceDtypeAndShape

func (*RemoteTensorHandle) ProtoMessage

func (*RemoteTensorHandle) ProtoMessage()

func (*RemoteTensorHandle) ProtoReflect

func (x *RemoteTensorHandle) ProtoReflect() protoreflect.Message

func (*RemoteTensorHandle) Reset

func (x *RemoteTensorHandle) Reset()

func (*RemoteTensorHandle) String

func (x *RemoteTensorHandle) String() string

type ResourceDtypeAndShape

type ResourceDtypeAndShape struct {
	Dtype types_go_proto.DataType                 `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Shape *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	// contains filtered or unexported fields
}

func (*ResourceDtypeAndShape) Descriptor deprecated

func (*ResourceDtypeAndShape) Descriptor() ([]byte, []int)

Deprecated: Use ResourceDtypeAndShape.ProtoReflect.Descriptor instead.

func (*ResourceDtypeAndShape) GetDtype

func (*ResourceDtypeAndShape) GetShape

func (*ResourceDtypeAndShape) ProtoMessage

func (*ResourceDtypeAndShape) ProtoMessage()

func (*ResourceDtypeAndShape) ProtoReflect

func (x *ResourceDtypeAndShape) ProtoReflect() protoreflect.Message

func (*ResourceDtypeAndShape) Reset

func (x *ResourceDtypeAndShape) Reset()

func (*ResourceDtypeAndShape) String

func (x *ResourceDtypeAndShape) String() string

type RewriterConfig

type RewriterConfig struct {

	// CPU layout conversion settings between NHWC and NCHW.
	CpuLayoutConversion RewriterConfig_CpuLayout `` /* 163-byte string literal not displayed */
	// Optimize tensor layouts (default is ON)
	// e.g. This will try to use NCHW layout on GPU which is faster.
	LayoutOptimizer RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Fold constants (default is ON)
	// Statically infer the value of tensors when possible, and materialize the
	// result using constants.
	ConstantFolding RewriterConfig_Toggle `` /* 145-byte string literal not displayed */
	// Shape optimizations (default is ON)
	// Simplify computations made on shapes.
	ShapeOptimization RewriterConfig_Toggle `` /* 152-byte string literal not displayed */
	// Remapping (default is ON)
	// Remap subgraphs onto more efficient implementations.
	Remapping RewriterConfig_Toggle `protobuf:"varint,14,opt,name=remapping,proto3,enum=tensorflow.RewriterConfig_Toggle" json:"remapping,omitempty"`
	// Common subgraph elimination (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	CommonSubgraphElimination RewriterConfig_Toggle `` /* 178-byte string literal not displayed */
	// Arithmetic optimizations (default is ON)
	// e.g. Simplify arithmetic ops; merge ops with same value (like constants).
	ArithmeticOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Control dependency optimizations (default is ON).
	// Remove redundant control dependencies, which may enable other optimizations.
	DependencyOptimization RewriterConfig_Toggle `` /* 166-byte string literal not displayed */
	// Loop optimizations (default is ON).
	LoopOptimization RewriterConfig_Toggle `` /* 148-byte string literal not displayed */
	// Function optimizations (default is ON).
	FunctionOptimization RewriterConfig_Toggle `` /* 161-byte string literal not displayed */
	// Strips debug-related nodes from the graph (off by default).
	DebugStripper RewriterConfig_Toggle `` /* 140-byte string literal not displayed */
	// If true, don't remove unnecessary ops from the graph
	DisableModelPruning bool `protobuf:"varint,2,opt,name=disable_model_pruning,json=disableModelPruning,proto3" json:"disable_model_pruning,omitempty"`
	// Try to allocate some independent Op outputs contiguously in order to
	// merge or eliminate downstream Ops (off by default).
	ScopedAllocatorOptimization RewriterConfig_Toggle `` /* 184-byte string literal not displayed */
	// Force small ops onto the CPU (default is OFF).
	PinToHostOptimization RewriterConfig_Toggle `` /* 168-byte string literal not displayed */
	// Enable the swap of kernel implementations based on the device placement
	// (default is ON).
	ImplementationSelector RewriterConfig_Toggle `` /* 167-byte string literal not displayed */
	// Optimize data types for CUDA (default is OFF).
	// This will try to use float16 on GPU which is faster.
	// Note that this can change the numerical stability of the graph and may
	// require the use of loss scaling to maintain model convergence.
	AutoMixedPrecision RewriterConfig_Toggle `` /* 157-byte string literal not displayed */
	// Optimize data types for oneDNN (default is OFF).
	// This will try to use bfloat16 on CPUs, which is faster.
	// Note that this can change the numerical stability of the graph.
	// Note: this is deprecated.
	// It is replaced by auto_mixed_precision_onednn_bfloat16
	AutoMixedPrecisionMkl RewriterConfig_Toggle `` /* 168-byte string literal not displayed */
	// Optimize data types for oneDNN (default is OFF).
	// This will try to use bfloat16 on CPUs, which is faster.
	// Note that this can change the numerical stability of the graph.
	// Note: this is equivalent to the deprecated option auto_mixed_precision_mkl
	AutoMixedPrecisionOnednnBfloat16 RewriterConfig_Toggle `` /* 203-byte string literal not displayed */
	// Emulate a model using data type float16 on CPU (default is OFF).
	// This will try to emulate the float16 inputs and outputs of an operator
	// on CPU to have better correlation with float16 on GPU; however the
	// computation in the operator is based on float32.
	// Note that this can change the numerical stability of the graph.
	AutoMixedPrecisionCpu RewriterConfig_Toggle `` /* 168-byte string literal not displayed */
	// Disable the entire meta optimizer (off by default).
	DisableMetaOptimizer bool `protobuf:"varint,19,opt,name=disable_meta_optimizer,json=disableMetaOptimizer,proto3" json:"disable_meta_optimizer,omitempty"`
	// Disable the TFG optimizer (off by default).
	DisableTfgOptimizer bool `protobuf:"varint,32,opt,name=disable_tfg_optimizer,json=disableTfgOptimizer,proto3" json:"disable_tfg_optimizer,omitempty"`
	// Optimizers registered by plugin (default is ON)
	UsePluginOptimizers RewriterConfig_Toggle `` /* 160-byte string literal not displayed */
	// Conditional code motion (default is ON).
	ExperimentalConditionalCodeMotion RewriterConfig_Toggle `` /* 204-byte string literal not displayed */
	// Controls how many times we run the optimizers in meta optimizer (default
	// is once).
	MetaOptimizerIterations RewriterConfig_NumIterationsType `` /* 183-byte string literal not displayed */
	// The minimum number of nodes in a graph for the optimizer to run. For
	// smaller graphs, optimization is skipped.
	// 0 means the system picks an appropriate number.
	// < 0 means do not skip optimization.
	MinGraphNodes int32 `protobuf:"varint,17,opt,name=min_graph_nodes,json=minGraphNodes,proto3" json:"min_graph_nodes,omitempty"`
	// Disable optimizations that assume compressed tensors. Note that this flag
	// is experimental and may be removed in the future.
	ExperimentalDisableCompressedTensorOptimization bool `` /* 210-byte string literal not displayed */
	// Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
	// QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
	// have to extract quantization configs (e.g. min/max range, number of bits,
	// and per-channel) from the quantization emulation ops. Note that this flag
	// is experimental and may be removed in the future. See b/174138564 for more
	// details.
	ExperimentalDisableFoldingQuantizationEmulation bool `` /* 210-byte string literal not displayed */
	// Configures memory optimization passes through the meta-optimizer. Has no
	// effect on manually requested memory optimization passes in the optimizers
	// field.
	MemoryOptimization RewriterConfig_MemOptType `` /* 158-byte string literal not displayed */
	// A node name scope for node names which are valid outputs of recomputations.
	// Inputs to nodes that match this scope may be recomputed (subject either to
	// manual annotation of those input nodes or to manual annotation and
	// heuristics depending on memory_optimization), but the nodes themselves will
	// not be recomputed. This matches any sub-scopes as well, meaning the scope
	// can appear not just as a top-level scope. For example, if the value is
	// "gradients/", the default, it will match node name "gradients/foo",
	// "foo/gradients/bar", but not "foo_gradients/"
	MemoryOptimizerTargetNodeNameScope string `` /* 171-byte string literal not displayed */
	// Maximum number of milliseconds to spend optimizing a single graph before
	// timing out. If less than or equal to 0 (default value) the optimizer will
	// never time out.
	MetaOptimizerTimeoutMs int64 `` /* 133-byte string literal not displayed */
	// Configures AutoParallel optimization passes either through the
	// meta-optimizer or when manually specified through the optimizers field.
	AutoParallel *AutoParallelOptions `protobuf:"bytes,5,opt,name=auto_parallel,json=autoParallel,proto3" json:"auto_parallel,omitempty"`
	// If true, any optimization pass failing will cause the MetaOptimizer to
	// stop with an error. By default (or when set to false), failing passes are
	// skipped silently.
	FailOnOptimizerErrors bool                    `` /* 130-byte string literal not displayed */
	ScopedAllocatorOpts   *ScopedAllocatorOptions `protobuf:"bytes,16,opt,name=scoped_allocator_opts,json=scopedAllocatorOpts,proto3" json:"scoped_allocator_opts,omitempty"`
	// If non-empty, will use this as an alternative way to specify a list of
	// optimizations to turn on and the order of the optimizations (replacing the
	// meta-optimizer).
	//
	// Of the RewriterConfig options, only the AutoParallel configuration options
	// (the auto_parallel field) apply to manually requested optimization passes
	// ("autoparallel"). Memory optimization passes ("memory") invoked here are
	// not configurable (in contrast to memory optimization passes through the
	// meta-optimizer) and act only on manual op annotations.
	//
	// Custom optimizers (see custom_optimizers) that are not part of this
	// schedule will be run afterwards, in the order that they were specified.
	Optimizers []string `protobuf:"bytes,100,rep,name=optimizers,proto3" json:"optimizers,omitempty"`
	// list of CustomGraphOptimizers to apply.
	CustomOptimizers []*RewriterConfig_CustomGraphOptimizer `protobuf:"bytes,200,rep,name=custom_optimizers,json=customOptimizers,proto3" json:"custom_optimizers,omitempty"`
	// VerifierConfig specifying the verifiers to be run after every optimizer.
	InterOptimizerVerifierConfig *VerifierConfig `` /* 151-byte string literal not displayed */
	// VerifierConfig specifying the verifiers to be run at the end, after all
	// optimizers have run.
	PostOptimizationVerifierConfig *VerifierConfig `` /* 157-byte string literal not displayed */
	// contains filtered or unexported fields
}
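
The field comments above describe the individual Grappler passes; the sketch below shows one hedged way to assemble a RewriterConfig that disables a few passes explicitly. pb is an assumed alias for this generated package, and the chosen toggles are illustrative, not recommendations.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

func main() {
	cfg := &pb.RewriterConfig{
		// DEFAULT (0) leaves a pass to the meta-optimizer's discretion;
		// here constant folding and the layout optimizer are turned off.
		ConstantFolding: pb.RewriterConfig_OFF,
		LayoutOptimizer: pb.RewriterConfig_OFF,
		// Keep only manually annotated memory optimizations.
		MemoryOptimization: pb.RewriterConfig_MANUAL,
		// Run the meta-optimizer passes twice.
		MetaOptimizerIterations: pb.RewriterConfig_TWO,
		DisableModelPruning:     true,
	}
	fmt.Println(cfg.GetConstantFolding(), cfg.GetMetaOptimizerIterations())
}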

func (*RewriterConfig) Descriptor deprecated

func (*RewriterConfig) Descriptor() ([]byte, []int)

Deprecated: Use RewriterConfig.ProtoReflect.Descriptor instead.

func (*RewriterConfig) GetArithmeticOptimization

func (x *RewriterConfig) GetArithmeticOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecision

func (x *RewriterConfig) GetAutoMixedPrecision() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecisionCpu

func (x *RewriterConfig) GetAutoMixedPrecisionCpu() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecisionMkl

func (x *RewriterConfig) GetAutoMixedPrecisionMkl() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoMixedPrecisionOnednnBfloat16 added in v0.2.0

func (x *RewriterConfig) GetAutoMixedPrecisionOnednnBfloat16() RewriterConfig_Toggle

func (*RewriterConfig) GetAutoParallel

func (x *RewriterConfig) GetAutoParallel() *AutoParallelOptions

func (*RewriterConfig) GetCommonSubgraphElimination

func (x *RewriterConfig) GetCommonSubgraphElimination() RewriterConfig_Toggle

func (*RewriterConfig) GetConstantFolding

func (x *RewriterConfig) GetConstantFolding() RewriterConfig_Toggle

func (*RewriterConfig) GetCpuLayoutConversion

func (x *RewriterConfig) GetCpuLayoutConversion() RewriterConfig_CpuLayout

func (*RewriterConfig) GetCustomOptimizers

func (x *RewriterConfig) GetCustomOptimizers() []*RewriterConfig_CustomGraphOptimizer

func (*RewriterConfig) GetDebugStripper

func (x *RewriterConfig) GetDebugStripper() RewriterConfig_Toggle

func (*RewriterConfig) GetDependencyOptimization

func (x *RewriterConfig) GetDependencyOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetDisableMetaOptimizer

func (x *RewriterConfig) GetDisableMetaOptimizer() bool

func (*RewriterConfig) GetDisableModelPruning

func (x *RewriterConfig) GetDisableModelPruning() bool

func (*RewriterConfig) GetDisableTfgOptimizer added in v0.6.0

func (x *RewriterConfig) GetDisableTfgOptimizer() bool

func (*RewriterConfig) GetExperimentalConditionalCodeMotion added in v0.2.0

func (x *RewriterConfig) GetExperimentalConditionalCodeMotion() RewriterConfig_Toggle

func (*RewriterConfig) GetExperimentalDisableCompressedTensorOptimization

func (x *RewriterConfig) GetExperimentalDisableCompressedTensorOptimization() bool

func (*RewriterConfig) GetExperimentalDisableFoldingQuantizationEmulation

func (x *RewriterConfig) GetExperimentalDisableFoldingQuantizationEmulation() bool

func (*RewriterConfig) GetFailOnOptimizerErrors

func (x *RewriterConfig) GetFailOnOptimizerErrors() bool

func (*RewriterConfig) GetFunctionOptimization

func (x *RewriterConfig) GetFunctionOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetImplementationSelector

func (x *RewriterConfig) GetImplementationSelector() RewriterConfig_Toggle

func (*RewriterConfig) GetInterOptimizerVerifierConfig

func (x *RewriterConfig) GetInterOptimizerVerifierConfig() *VerifierConfig

func (*RewriterConfig) GetLayoutOptimizer

func (x *RewriterConfig) GetLayoutOptimizer() RewriterConfig_Toggle

func (*RewriterConfig) GetLoopOptimization

func (x *RewriterConfig) GetLoopOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetMemoryOptimization

func (x *RewriterConfig) GetMemoryOptimization() RewriterConfig_MemOptType

func (*RewriterConfig) GetMemoryOptimizerTargetNodeNameScope

func (x *RewriterConfig) GetMemoryOptimizerTargetNodeNameScope() string

func (*RewriterConfig) GetMetaOptimizerIterations

func (x *RewriterConfig) GetMetaOptimizerIterations() RewriterConfig_NumIterationsType

func (*RewriterConfig) GetMetaOptimizerTimeoutMs

func (x *RewriterConfig) GetMetaOptimizerTimeoutMs() int64

func (*RewriterConfig) GetMinGraphNodes

func (x *RewriterConfig) GetMinGraphNodes() int32

func (*RewriterConfig) GetOptimizers

func (x *RewriterConfig) GetOptimizers() []string

func (*RewriterConfig) GetPinToHostOptimization

func (x *RewriterConfig) GetPinToHostOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetPostOptimizationVerifierConfig

func (x *RewriterConfig) GetPostOptimizationVerifierConfig() *VerifierConfig

func (*RewriterConfig) GetRemapping

func (x *RewriterConfig) GetRemapping() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOptimization

func (x *RewriterConfig) GetScopedAllocatorOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetScopedAllocatorOpts

func (x *RewriterConfig) GetScopedAllocatorOpts() *ScopedAllocatorOptions

func (*RewriterConfig) GetShapeOptimization

func (x *RewriterConfig) GetShapeOptimization() RewriterConfig_Toggle

func (*RewriterConfig) GetUsePluginOptimizers

func (x *RewriterConfig) GetUsePluginOptimizers() RewriterConfig_Toggle

func (*RewriterConfig) ProtoMessage

func (*RewriterConfig) ProtoMessage()

func (*RewriterConfig) ProtoReflect

func (x *RewriterConfig) ProtoReflect() protoreflect.Message

func (*RewriterConfig) Reset

func (x *RewriterConfig) Reset()

func (*RewriterConfig) String

func (x *RewriterConfig) String() string

type RewriterConfig_CpuLayout

type RewriterConfig_CpuLayout int32

Enum for layout conversion between NCHW and NHWC on CPU. Default is OFF.

const (
	RewriterConfig_NO_CONVERSION_ON_CPU RewriterConfig_CpuLayout = 0
	RewriterConfig_NCHW_TO_NHWC         RewriterConfig_CpuLayout = 1
	RewriterConfig_NHWC_TO_NCHW         RewriterConfig_CpuLayout = 2
)

func (RewriterConfig_CpuLayout) Descriptor

func (RewriterConfig_CpuLayout) Enum

func (RewriterConfig_CpuLayout) EnumDescriptor deprecated

func (RewriterConfig_CpuLayout) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_CpuLayout.Descriptor instead.

func (RewriterConfig_CpuLayout) Number

func (RewriterConfig_CpuLayout) String

func (x RewriterConfig_CpuLayout) String() string

func (RewriterConfig_CpuLayout) Type

type RewriterConfig_CustomGraphOptimizer

type RewriterConfig_CustomGraphOptimizer struct {
	Name         string                                    `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	ParameterMap map[string]*attr_value_go_proto.AttrValue `` /* 185-byte string literal not displayed */
	// contains filtered or unexported fields
}

Message to describe custom graph optimizer and its parameters

func (*RewriterConfig_CustomGraphOptimizer) Descriptor deprecated

func (*RewriterConfig_CustomGraphOptimizer) Descriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_CustomGraphOptimizer.ProtoReflect.Descriptor instead.

func (*RewriterConfig_CustomGraphOptimizer) GetName

func (*RewriterConfig_CustomGraphOptimizer) GetParameterMap

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage

func (*RewriterConfig_CustomGraphOptimizer) ProtoMessage()

func (*RewriterConfig_CustomGraphOptimizer) ProtoReflect

func (*RewriterConfig_CustomGraphOptimizer) Reset

func (*RewriterConfig_CustomGraphOptimizer) String

type RewriterConfig_MemOptType

type RewriterConfig_MemOptType int32
const (
	// The default setting (SCHEDULING and SWAPPING HEURISTICS only)
	RewriterConfig_DEFAULT_MEM_OPT RewriterConfig_MemOptType = 0
	// Disabled in the meta-optimizer.
	RewriterConfig_NO_MEM_OPT RewriterConfig_MemOptType = 1
	// Driven by manual op-level annotations.
	RewriterConfig_MANUAL RewriterConfig_MemOptType = 2
	// Swapping heuristic will move a tensor from the GPU to the CPU and move
	// it back when needed to reduce peak memory usage.
	RewriterConfig_SWAPPING_HEURISTICS RewriterConfig_MemOptType = 4
	// Recomputation heuristics will recompute ops (such as Relu activation)
	// during backprop instead of storing them, reducing peak memory usage.
	RewriterConfig_RECOMPUTATION_HEURISTICS RewriterConfig_MemOptType = 5
	// Scheduling will split big ops such as AddN and try to enforce a schedule
	// of the new computations that decreases peak memory usage.
	RewriterConfig_SCHEDULING_HEURISTICS RewriterConfig_MemOptType = 6
	// Use any combination of swapping and recomputation heuristics.
	RewriterConfig_HEURISTICS RewriterConfig_MemOptType = 3
)
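
As a hedged example of using these values, the sketch below enables the recomputation heuristic together with the recomputation scope field described in RewriterConfig above (pb is an assumed alias for this generated package).

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

func main() {
	cfg := &pb.RewriterConfig{
		// Recompute cheap activations during backprop instead of storing them.
		MemoryOptimization: pb.RewriterConfig_RECOMPUTATION_HEURISTICS,
		// Nodes under this (default) scope are candidates for recomputation.
		MemoryOptimizerTargetNodeNameScope: "gradients/",
	}
	fmt.Println(cfg.GetMemoryOptimization())
}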

func (RewriterConfig_MemOptType) Descriptor

func (RewriterConfig_MemOptType) Enum

func (RewriterConfig_MemOptType) EnumDescriptor deprecated

func (RewriterConfig_MemOptType) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_MemOptType.Descriptor instead.

func (RewriterConfig_MemOptType) Number

func (RewriterConfig_MemOptType) String

func (x RewriterConfig_MemOptType) String() string

func (RewriterConfig_MemOptType) Type

type RewriterConfig_NumIterationsType

type RewriterConfig_NumIterationsType int32

Enum controlling the number of times to run optimizers. The default is to run them twice.

const (
	RewriterConfig_DEFAULT_NUM_ITERS RewriterConfig_NumIterationsType = 0
	RewriterConfig_ONE               RewriterConfig_NumIterationsType = 1
	RewriterConfig_TWO               RewriterConfig_NumIterationsType = 2
)

func (RewriterConfig_NumIterationsType) Descriptor

func (RewriterConfig_NumIterationsType) Enum

func (RewriterConfig_NumIterationsType) EnumDescriptor deprecated

func (RewriterConfig_NumIterationsType) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_NumIterationsType.Descriptor instead.

func (RewriterConfig_NumIterationsType) Number

func (RewriterConfig_NumIterationsType) String

func (RewriterConfig_NumIterationsType) Type

type RewriterConfig_Toggle

type RewriterConfig_Toggle int32
const (
	RewriterConfig_DEFAULT RewriterConfig_Toggle = 0
	RewriterConfig_ON      RewriterConfig_Toggle = 1
	RewriterConfig_OFF     RewriterConfig_Toggle = 2
	// Enable some aggressive optimizations that use assumptions that TF graphs
	// may break. For example, assume the shape of a placeholder matches its
	// actual feed.
	RewriterConfig_AGGRESSIVE RewriterConfig_Toggle = 3
	// Run MLIR pass if there's one implemented in TFG, do nothing otherwise.
	// That is, if there's no corresponding TFG pass, it behaves as OFF. This is
	// intended to map to `ON`; there is currently no `AGGRESSIVE` MLIR pass.
	RewriterConfig_EXPERIMENTAL_MLIR RewriterConfig_Toggle = 4
	// Run both MLIR and Grappler passes consecutively; the MLIR pass runs
	// first.
	RewriterConfig_EXPERIMENTAL_BOTH RewriterConfig_Toggle = 5
)

func (RewriterConfig_Toggle) Descriptor

func (RewriterConfig_Toggle) Enum

func (RewriterConfig_Toggle) EnumDescriptor deprecated

func (RewriterConfig_Toggle) EnumDescriptor() ([]byte, []int)

Deprecated: Use RewriterConfig_Toggle.Descriptor instead.

func (RewriterConfig_Toggle) Number

func (RewriterConfig_Toggle) String

func (x RewriterConfig_Toggle) String() string

func (RewriterConfig_Toggle) Type

type RunMetadata

type RunMetadata struct {

	// Statistics traced for this step. Populated if tracing is turned on via the
	// "RunOptions" proto.
	// EXPERIMENTAL: The format and set of events may change in future versions.
	StepStats *step_stats_go_proto.StepStats `protobuf:"bytes,1,opt,name=step_stats,json=stepStats,proto3" json:"step_stats,omitempty"`
	// The cost graph for the computation defined by the run call.
	CostGraph *cost_graph_go_proto.CostGraphDef `protobuf:"bytes,2,opt,name=cost_graph,json=costGraph,proto3" json:"cost_graph,omitempty"`
	// Graphs of the partitions executed by executors.
	PartitionGraphs []*graph_go_proto.GraphDef `protobuf:"bytes,3,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"`
	// This is only populated for graphs that are run as functions in TensorFlow
	// V2. There will be an entry below for each function that is traced.
	// The main use cases of the post_optimization_graph and the partition_graphs
	// is to give the caller insight into the graphs that were actually run by the
	// runtime. Additional information (such as those in step_stats) will match
	// these graphs.
	// We also include the pre_optimization_graph since it is usually easier to
	// read, and is helpful in situations where the caller wants to get a high
	// level idea of what the built graph looks like (since the various graph
	// optimization passes might change the structure of the graph significantly).
	FunctionGraphs []*RunMetadata_FunctionGraphs `protobuf:"bytes,4,rep,name=function_graphs,json=functionGraphs,proto3" json:"function_graphs,omitempty"`
	// Metadata about the session.
	SessionMetadata *SessionMetadata `protobuf:"bytes,5,opt,name=session_metadata,json=sessionMetadata,proto3" json:"session_metadata,omitempty"`
	// contains filtered or unexported fields
}

Metadata output (i.e., non-Tensor) for a single Run() call.
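
A short, hedged sketch of inspecting a RunMetadata after a traced run; how the message is obtained is outside this package, so the example just summarizes an already-populated value (pb is an assumed alias for this generated package).

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

// summarize prints a few high-level facts about a RunMetadata.
func summarize(md *pb.RunMetadata) {
	fmt.Printf("partition graphs: %d\n", len(md.GetPartitionGraphs()))
	fmt.Printf("function graphs:  %d\n", len(md.GetFunctionGraphs()))
	if md.GetStepStats() != nil {
		fmt.Println("step stats were collected")
	}
}

func main() {
	summarize(&pb.RunMetadata{}) // empty metadata, purely for illustration
}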

func (*RunMetadata) Descriptor deprecated

func (*RunMetadata) Descriptor() ([]byte, []int)

Deprecated: Use RunMetadata.ProtoReflect.Descriptor instead.

func (*RunMetadata) GetCostGraph

func (x *RunMetadata) GetCostGraph() *cost_graph_go_proto.CostGraphDef

func (*RunMetadata) GetFunctionGraphs

func (x *RunMetadata) GetFunctionGraphs() []*RunMetadata_FunctionGraphs

func (*RunMetadata) GetPartitionGraphs

func (x *RunMetadata) GetPartitionGraphs() []*graph_go_proto.GraphDef

func (*RunMetadata) GetSessionMetadata added in v0.2.0

func (x *RunMetadata) GetSessionMetadata() *SessionMetadata

func (*RunMetadata) GetStepStats

func (x *RunMetadata) GetStepStats() *step_stats_go_proto.StepStats

func (*RunMetadata) ProtoMessage

func (*RunMetadata) ProtoMessage()

func (*RunMetadata) ProtoReflect

func (x *RunMetadata) ProtoReflect() protoreflect.Message

func (*RunMetadata) Reset

func (x *RunMetadata) Reset()

func (*RunMetadata) String

func (x *RunMetadata) String() string

type RunMetadata_FunctionGraphs

type RunMetadata_FunctionGraphs struct {

	// TODO(nareshmodi): Include some sort of function/cache-key identifier?
	PartitionGraphs       []*graph_go_proto.GraphDef `protobuf:"bytes,1,rep,name=partition_graphs,json=partitionGraphs,proto3" json:"partition_graphs,omitempty"`
	PreOptimizationGraph  *graph_go_proto.GraphDef   `protobuf:"bytes,2,opt,name=pre_optimization_graph,json=preOptimizationGraph,proto3" json:"pre_optimization_graph,omitempty"`
	PostOptimizationGraph *graph_go_proto.GraphDef   `` /* 126-byte string literal not displayed */
	// contains filtered or unexported fields
}

func (*RunMetadata_FunctionGraphs) Descriptor deprecated

func (*RunMetadata_FunctionGraphs) Descriptor() ([]byte, []int)

Deprecated: Use RunMetadata_FunctionGraphs.ProtoReflect.Descriptor instead.

func (*RunMetadata_FunctionGraphs) GetPartitionGraphs

func (x *RunMetadata_FunctionGraphs) GetPartitionGraphs() []*graph_go_proto.GraphDef

func (*RunMetadata_FunctionGraphs) GetPostOptimizationGraph

func (x *RunMetadata_FunctionGraphs) GetPostOptimizationGraph() *graph_go_proto.GraphDef

func (*RunMetadata_FunctionGraphs) GetPreOptimizationGraph

func (x *RunMetadata_FunctionGraphs) GetPreOptimizationGraph() *graph_go_proto.GraphDef

func (*RunMetadata_FunctionGraphs) ProtoMessage

func (*RunMetadata_FunctionGraphs) ProtoMessage()

func (*RunMetadata_FunctionGraphs) ProtoReflect

func (*RunMetadata_FunctionGraphs) Reset

func (x *RunMetadata_FunctionGraphs) Reset()

func (*RunMetadata_FunctionGraphs) String

func (x *RunMetadata_FunctionGraphs) String() string

type RunOptions

type RunOptions struct {
	TraceLevel RunOptions_TraceLevel `` /* 130-byte string literal not displayed */
	// Time to wait for operation to complete in milliseconds.
	TimeoutInMs int64 `protobuf:"varint,2,opt,name=timeout_in_ms,json=timeoutInMs,proto3" json:"timeout_in_ms,omitempty"`
	// The thread pool to use, if session_inter_op_thread_pool is configured.
	// To use the caller thread, set this to -1; this executes Session::Run()
	// on the caller thread and avoids a context switch. Using the
	// caller thread to execute Session::Run() should be done ONLY for simple
	// graphs, where the overhead of an additional context switch is
	// comparable with the overhead of Session::Run().
	InterOpThreadPool int32 `protobuf:"varint,3,opt,name=inter_op_thread_pool,json=interOpThreadPool,proto3" json:"inter_op_thread_pool,omitempty"`
	// Whether the partition graph(s) executed by the executor(s) should be
	// output via RunMetadata.
	OutputPartitionGraphs bool `` /* 127-byte string literal not displayed */
	// EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
	DebugOptions *DebugOptions `protobuf:"bytes,6,opt,name=debug_options,json=debugOptions,proto3" json:"debug_options,omitempty"`
	// When enabled, causes tensor allocation information to be included in
	// the error message when the Run() call fails because the allocator ran
	// out of memory (OOM).
	//
	// Enabling this option can slow down the Run() call.
	ReportTensorAllocationsUponOom bool                     `` /* 158-byte string literal not displayed */
	Experimental                   *RunOptions_Experimental `protobuf:"bytes,8,opt,name=experimental,proto3" json:"experimental,omitempty"`
	// contains filtered or unexported fields
}

Options for a single Run() call.
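
A minimal sketch of filling in RunOptions for a traced, time-limited run; the values are illustrative and pb is an assumed alias for this generated package.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

func main() {
	opts := &pb.RunOptions{
		TraceLevel:            pb.RunOptions_FULL_TRACE, // collect full tracing information
		TimeoutInMs:           60000,                    // give up after one minute
		OutputPartitionGraphs: true,                     // return partition graphs in RunMetadata
	}
	fmt.Println(opts.GetTraceLevel(), opts.GetTimeoutInMs())
}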

func (*RunOptions) Descriptor deprecated

func (*RunOptions) Descriptor() ([]byte, []int)

Deprecated: Use RunOptions.ProtoReflect.Descriptor instead.

func (*RunOptions) GetDebugOptions

func (x *RunOptions) GetDebugOptions() *DebugOptions

func (*RunOptions) GetExperimental

func (x *RunOptions) GetExperimental() *RunOptions_Experimental

func (*RunOptions) GetInterOpThreadPool

func (x *RunOptions) GetInterOpThreadPool() int32

func (*RunOptions) GetOutputPartitionGraphs

func (x *RunOptions) GetOutputPartitionGraphs() bool

func (*RunOptions) GetReportTensorAllocationsUponOom

func (x *RunOptions) GetReportTensorAllocationsUponOom() bool

func (*RunOptions) GetTimeoutInMs

func (x *RunOptions) GetTimeoutInMs() int64

func (*RunOptions) GetTraceLevel

func (x *RunOptions) GetTraceLevel() RunOptions_TraceLevel

func (*RunOptions) ProtoMessage

func (*RunOptions) ProtoMessage()

func (*RunOptions) ProtoReflect

func (x *RunOptions) ProtoReflect() protoreflect.Message

func (*RunOptions) Reset

func (x *RunOptions) Reset()

func (*RunOptions) String

func (x *RunOptions) String() string

type RunOptions_Experimental

type RunOptions_Experimental struct {

	// If non-zero, declares that this graph is going to use collective
	// ops and must synchronize step_ids with any other graph with this
	// same group_key value (in a distributed computation where tasks
	// run disjoint graphs).
	CollectiveGraphKey int64 `protobuf:"varint,1,opt,name=collective_graph_key,json=collectiveGraphKey,proto3" json:"collective_graph_key,omitempty"`
	// If true, then operations (using the inter-op pool) across all
	// session::run() calls will be centrally scheduled, optimizing for (median
	// and tail) latency.
	// Consider using this option for CPU-bound workloads like inference.
	UseRunHandlerPool     bool                                           `protobuf:"varint,2,opt,name=use_run_handler_pool,json=useRunHandlerPool,proto3" json:"use_run_handler_pool,omitempty"`
	RunHandlerPoolOptions *RunOptions_Experimental_RunHandlerPoolOptions `` /* 128-byte string literal not displayed */
	// contains filtered or unexported fields
}

Everything inside Experimental is subject to change and is not subject to API stability guarantees in https://www.tensorflow.org/guide/version_compat.

func (*RunOptions_Experimental) Descriptor deprecated

func (*RunOptions_Experimental) Descriptor() ([]byte, []int)

Deprecated: Use RunOptions_Experimental.ProtoReflect.Descriptor instead.

func (*RunOptions_Experimental) GetCollectiveGraphKey

func (x *RunOptions_Experimental) GetCollectiveGraphKey() int64

func (*RunOptions_Experimental) GetRunHandlerPoolOptions

func (*RunOptions_Experimental) GetUseRunHandlerPool

func (x *RunOptions_Experimental) GetUseRunHandlerPool() bool

func (*RunOptions_Experimental) ProtoMessage

func (*RunOptions_Experimental) ProtoMessage()

func (*RunOptions_Experimental) ProtoReflect

func (x *RunOptions_Experimental) ProtoReflect() protoreflect.Message

func (*RunOptions_Experimental) Reset

func (x *RunOptions_Experimental) Reset()

func (*RunOptions_Experimental) String

func (x *RunOptions_Experimental) String() string

type RunOptions_Experimental_RunHandlerPoolOptions

type RunOptions_Experimental_RunHandlerPoolOptions struct {

	// Priority of the request. The run handler thread pool will schedule ops
	// based on the priority number. A larger number means higher priority.
	Priority int64 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"`
	// contains filtered or unexported fields
}

Options for run handler thread pool.
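
A hedged sketch of opting into the run handler pool with an explicit priority; pb is an assumed alias for this generated package, and the priority value is arbitrary.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

func main() {
	opts := &pb.RunOptions{
		Experimental: &pb.RunOptions_Experimental{
			UseRunHandlerPool: true,
			RunHandlerPoolOptions: &pb.RunOptions_Experimental_RunHandlerPoolOptions{
				Priority: 2, // larger numbers are scheduled with higher priority
			},
		},
	}
	fmt.Println(opts.GetExperimental().GetRunHandlerPoolOptions().GetPriority())
}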

func (*RunOptions_Experimental_RunHandlerPoolOptions) Descriptor deprecated

Deprecated: Use RunOptions_Experimental_RunHandlerPoolOptions.ProtoReflect.Descriptor instead.

func (*RunOptions_Experimental_RunHandlerPoolOptions) GetPriority

func (*RunOptions_Experimental_RunHandlerPoolOptions) ProtoMessage

func (*RunOptions_Experimental_RunHandlerPoolOptions) ProtoReflect

func (*RunOptions_Experimental_RunHandlerPoolOptions) Reset

func (*RunOptions_Experimental_RunHandlerPoolOptions) String

type RunOptions_TraceLevel

type RunOptions_TraceLevel int32

TODO(pbar) Turn this into a TraceOptions proto which allows tracing to be controlled in a more orthogonal manner?

const (
	RunOptions_NO_TRACE       RunOptions_TraceLevel = 0
	RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1
	RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2
	RunOptions_FULL_TRACE     RunOptions_TraceLevel = 3
)

func (RunOptions_TraceLevel) Descriptor

func (RunOptions_TraceLevel) Enum

func (RunOptions_TraceLevel) EnumDescriptor deprecated

func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int)

Deprecated: Use RunOptions_TraceLevel.Descriptor instead.

func (RunOptions_TraceLevel) Number

func (RunOptions_TraceLevel) String

func (x RunOptions_TraceLevel) String() string

func (RunOptions_TraceLevel) Type

type SaveableObject

type SaveableObject struct {

	// Node ids of concrete functions for saving and loading from a checkpoint.
	// These functions save and restore directly from tensors.
	SaveFunction    int32 `protobuf:"varint,2,opt,name=save_function,json=saveFunction,proto3" json:"save_function,omitempty"`
	RestoreFunction int32 `protobuf:"varint,3,opt,name=restore_function,json=restoreFunction,proto3" json:"restore_function,omitempty"`
	// contains filtered or unexported fields
}

func (*SaveableObject) Descriptor deprecated

func (*SaveableObject) Descriptor() ([]byte, []int)

Deprecated: Use SaveableObject.ProtoReflect.Descriptor instead.

func (*SaveableObject) GetRestoreFunction

func (x *SaveableObject) GetRestoreFunction() int32

func (*SaveableObject) GetSaveFunction

func (x *SaveableObject) GetSaveFunction() int32

func (*SaveableObject) ProtoMessage

func (*SaveableObject) ProtoMessage()

func (*SaveableObject) ProtoReflect

func (x *SaveableObject) ProtoReflect() protoreflect.Message

func (*SaveableObject) Reset

func (x *SaveableObject) Reset()

func (*SaveableObject) String

func (x *SaveableObject) String() string

type SavedAsset

type SavedAsset struct {

	// Index into `MetaGraphDef.asset_file_def[]` that describes the Asset.
	//
	// Only the field `AssetFileDef.filename` is used. Other fields, such as
	// `AssetFileDef.tensor_info`, MUST be ignored.
	AssetFileDefIndex int32 `protobuf:"varint,1,opt,name=asset_file_def_index,json=assetFileDefIndex,proto3" json:"asset_file_def_index,omitempty"`
	// contains filtered or unexported fields
}

A SavedAsset points to an asset in the MetaGraph.

When bound to a function this object evaluates to a tensor with the absolute filename. Users should not depend on a particular part of the filename to remain stable (e.g. basename could be changed).

func (*SavedAsset) Descriptor deprecated

func (*SavedAsset) Descriptor() ([]byte, []int)

Deprecated: Use SavedAsset.ProtoReflect.Descriptor instead.

func (*SavedAsset) GetAssetFileDefIndex

func (x *SavedAsset) GetAssetFileDefIndex() int32

func (*SavedAsset) ProtoMessage

func (*SavedAsset) ProtoMessage()

func (*SavedAsset) ProtoReflect

func (x *SavedAsset) ProtoReflect() protoreflect.Message

func (*SavedAsset) Reset

func (x *SavedAsset) Reset()

func (*SavedAsset) String

func (x *SavedAsset) String() string

type SavedBareConcreteFunction

type SavedBareConcreteFunction struct {

	// Identifies a SavedConcreteFunction.
	ConcreteFunctionName string `protobuf:"bytes,1,opt,name=concrete_function_name,json=concreteFunctionName,proto3" json:"concrete_function_name,omitempty"`
	// A sequence of unique strings, one per Tensor argument.
	ArgumentKeywords []string `protobuf:"bytes,2,rep,name=argument_keywords,json=argumentKeywords,proto3" json:"argument_keywords,omitempty"`
	// The prefix of `argument_keywords` which may be identified by position.
	AllowedPositionalArguments int64 `` /* 142-byte string literal not displayed */
	// The spec of the function that this ConcreteFunction is traced from. This
	// allows the ConcreteFunction to be called with nested structure inputs. This
	// field may not be populated. If this field is absent, the concrete function
	// can only be called with flat inputs.
	// TODO(b/169361281): support calling saved ConcreteFunction with structured
	// inputs in C++ SavedModel API.
	FunctionSpec *FunctionSpec `protobuf:"bytes,4,opt,name=function_spec,json=functionSpec,proto3" json:"function_spec,omitempty"`
	// contains filtered or unexported fields
}

func (*SavedBareConcreteFunction) Descriptor deprecated

func (*SavedBareConcreteFunction) Descriptor() ([]byte, []int)

Deprecated: Use SavedBareConcreteFunction.ProtoReflect.Descriptor instead.

func (*SavedBareConcreteFunction) GetAllowedPositionalArguments

func (x *SavedBareConcreteFunction) GetAllowedPositionalArguments() int64

func (*SavedBareConcreteFunction) GetArgumentKeywords

func (x *SavedBareConcreteFunction) GetArgumentKeywords() []string

func (*SavedBareConcreteFunction) GetConcreteFunctionName

func (x *SavedBareConcreteFunction) GetConcreteFunctionName() string

func (*SavedBareConcreteFunction) GetFunctionSpec

func (x *SavedBareConcreteFunction) GetFunctionSpec() *FunctionSpec

func (*SavedBareConcreteFunction) ProtoMessage

func (*SavedBareConcreteFunction) ProtoMessage()

func (*SavedBareConcreteFunction) ProtoReflect

func (*SavedBareConcreteFunction) Reset

func (x *SavedBareConcreteFunction) Reset()

func (*SavedBareConcreteFunction) String

func (x *SavedBareConcreteFunction) String() string

type SavedConcreteFunction

type SavedConcreteFunction struct {
	BoundInputs []int32 `protobuf:"varint,2,rep,packed,name=bound_inputs,json=boundInputs,proto3" json:"bound_inputs,omitempty"`
	// Input in canonicalized form that was received to create this concrete
	// function.
	CanonicalizedInputSignature *StructuredValue `` /* 144-byte string literal not displayed */
	// Output that was the return value of this function after replacing all
	// Tensors with TensorSpecs. This can be an arbitrary nested structure and will
	// be used to reconstruct the full structure from pure tensors.
	OutputSignature *StructuredValue `protobuf:"bytes,4,opt,name=output_signature,json=outputSignature,proto3" json:"output_signature,omitempty"`
	// contains filtered or unexported fields
}

Stores low-level information about a concrete function. Referenced in either a SavedFunction or a SavedBareConcreteFunction.

func (*SavedConcreteFunction) Descriptor deprecated

func (*SavedConcreteFunction) Descriptor() ([]byte, []int)

Deprecated: Use SavedConcreteFunction.ProtoReflect.Descriptor instead.

func (*SavedConcreteFunction) GetBoundInputs

func (x *SavedConcreteFunction) GetBoundInputs() []int32

func (*SavedConcreteFunction) GetCanonicalizedInputSignature

func (x *SavedConcreteFunction) GetCanonicalizedInputSignature() *StructuredValue

func (*SavedConcreteFunction) GetOutputSignature

func (x *SavedConcreteFunction) GetOutputSignature() *StructuredValue

func (*SavedConcreteFunction) ProtoMessage

func (*SavedConcreteFunction) ProtoMessage()

func (*SavedConcreteFunction) ProtoReflect

func (x *SavedConcreteFunction) ProtoReflect() protoreflect.Message

func (*SavedConcreteFunction) Reset

func (x *SavedConcreteFunction) Reset()

func (*SavedConcreteFunction) String

func (x *SavedConcreteFunction) String() string

type SavedConstant

type SavedConstant struct {

	// An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph.
	Operation string `protobuf:"bytes,1,opt,name=operation,proto3" json:"operation,omitempty"`
	// contains filtered or unexported fields
}

func (*SavedConstant) Descriptor deprecated

func (*SavedConstant) Descriptor() ([]byte, []int)

Deprecated: Use SavedConstant.ProtoReflect.Descriptor instead.

func (*SavedConstant) GetOperation

func (x *SavedConstant) GetOperation() string

func (*SavedConstant) ProtoMessage

func (*SavedConstant) ProtoMessage()

func (*SavedConstant) ProtoReflect

func (x *SavedConstant) ProtoReflect() protoreflect.Message

func (*SavedConstant) Reset

func (x *SavedConstant) Reset()

func (*SavedConstant) String

func (x *SavedConstant) String() string

type SavedFunction

type SavedFunction struct {
	ConcreteFunctions []string      `protobuf:"bytes,1,rep,name=concrete_functions,json=concreteFunctions,proto3" json:"concrete_functions,omitempty"`
	FunctionSpec      *FunctionSpec `protobuf:"bytes,2,opt,name=function_spec,json=functionSpec,proto3" json:"function_spec,omitempty"`
	// contains filtered or unexported fields
}

A function with multiple signatures, possibly with non-Tensor arguments.

func (*SavedFunction) Descriptor deprecated

func (*SavedFunction) Descriptor() ([]byte, []int)

Deprecated: Use SavedFunction.ProtoReflect.Descriptor instead.

func (*SavedFunction) GetConcreteFunctions

func (x *SavedFunction) GetConcreteFunctions() []string

func (*SavedFunction) GetFunctionSpec

func (x *SavedFunction) GetFunctionSpec() *FunctionSpec

func (*SavedFunction) ProtoMessage

func (*SavedFunction) ProtoMessage()

func (*SavedFunction) ProtoReflect

func (x *SavedFunction) ProtoReflect() protoreflect.Message

func (*SavedFunction) Reset

func (x *SavedFunction) Reset()

func (*SavedFunction) String

func (x *SavedFunction) String() string

type SavedModel

type SavedModel struct {

	// The schema version of the SavedModel instance. Used for versioning when
	// making future changes to the specification/implementation. Initial value
	// at release will be 1.
	SavedModelSchemaVersion int64 `` /* 135-byte string literal not displayed */
	// One or more MetaGraphs.
	MetaGraphs []*MetaGraphDef `protobuf:"bytes,2,rep,name=meta_graphs,json=metaGraphs,proto3" json:"meta_graphs,omitempty"`
	// contains filtered or unexported fields
}

SavedModel is the high level serialization format for TensorFlow Models. See [todo: doc links, similar to session_bundle] for more information.
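
A hedged sketch of decoding a serialized SavedModel with the protobuf runtime; the file path is a placeholder and pb is an assumed alias for this generated package.

package main

import (
	"fmt"
	"log"
	"os"

	"google.golang.org/protobuf/proto"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

func main() {
	// "saved_model.pb" is the conventional file name inside a SavedModel
	// directory; the directory used here is a placeholder.
	data, err := os.ReadFile("my_model/saved_model.pb")
	if err != nil {
		log.Fatal(err)
	}

	var sm pb.SavedModel
	if err := proto.Unmarshal(data, &sm); err != nil {
		log.Fatal(err)
	}

	fmt.Println("schema version:", sm.GetSavedModelSchemaVersion())
	fmt.Println("meta graphs:   ", len(sm.GetMetaGraphs()))
}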

func (*SavedModel) Descriptor deprecated

func (*SavedModel) Descriptor() ([]byte, []int)

Deprecated: Use SavedModel.ProtoReflect.Descriptor instead.

func (*SavedModel) GetMetaGraphs

func (x *SavedModel) GetMetaGraphs() []*MetaGraphDef

func (*SavedModel) GetSavedModelSchemaVersion

func (x *SavedModel) GetSavedModelSchemaVersion() int64

func (*SavedModel) ProtoMessage

func (*SavedModel) ProtoMessage()

func (*SavedModel) ProtoReflect

func (x *SavedModel) ProtoReflect() protoreflect.Message

func (*SavedModel) Reset

func (x *SavedModel) Reset()

func (*SavedModel) String

func (x *SavedModel) String() string

type SavedObject

type SavedObject struct {

	// Objects which this object depends on: named edges in the dependency
	// graph.
	//
	// Note: All kinds of SavedObject may have children, except
	// "constant" and "captured_tensor".
	Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"`
	// Ordered list of dependencies that must be loaded before this object.
	// SavedModel loading uses a bottom-up approach: first all objects are created
	// (in the order defined by the dependencies), then the edges are connected.
	Dependencies []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,15,rep,name=dependencies,proto3" json:"dependencies,omitempty"`
	// Slot variables owned by this object. This describes the three-way
	// (optimizer, variable, slot variable) relationship; none of the three
	// depend on the others directly.
	//
	// Note: currently only valid if kind == "user_object".
	SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"`
	// Types that are assignable to Kind:
	//
	//	*SavedObject_UserObject
	//	*SavedObject_Asset
	//	*SavedObject_Function
	//	*SavedObject_Variable
	//	*SavedObject_BareConcreteFunction
	//	*SavedObject_Constant
	//	*SavedObject_Resource
	//	*SavedObject_CapturedTensor
	Kind isSavedObject_Kind `protobuf_oneof:"kind"`
	// Stores the functions used to save and restore this object. At most one of
	// `saveable_objects` or `registered_saver` is defined for each SavedObject.
	// See the comment below for the difference between SaveableObject and
	// registered savers.
	SaveableObjects map[string]*SaveableObject `` /* 195-byte string literal not displayed */
	// The name of the registered class of the form "{package}.{class_name}".
	// This field is used to search for the registered class at loading time.
	RegisteredName string `protobuf:"bytes,13,opt,name=registered_name,json=registeredName,proto3" json:"registered_name,omitempty"`
	// The user-generated proto storing metadata for this object, to be passed to
	// the registered class's _deserialize_from_proto method when this object is
	// loaded from the SavedModel.
	SerializedUserProto *anypb.Any `protobuf:"bytes,14,opt,name=serialized_user_proto,json=serializedUserProto,proto3" json:"serialized_user_proto,omitempty"`
	// String name of the registered saver. At most one of `saveable_objects` or
	// `registered_saver` is defined for each SavedObject.
	RegisteredSaver string `protobuf:"bytes,16,opt,name=registered_saver,json=registeredSaver,proto3" json:"registered_saver,omitempty"`
	// contains filtered or unexported fields
}

func (*SavedObject) Descriptor deprecated

func (*SavedObject) Descriptor() ([]byte, []int)

Deprecated: Use SavedObject.ProtoReflect.Descriptor instead.

func (*SavedObject) GetAsset

func (x *SavedObject) GetAsset() *SavedAsset

func (*SavedObject) GetBareConcreteFunction

func (x *SavedObject) GetBareConcreteFunction() *SavedBareConcreteFunction

func (*SavedObject) GetCapturedTensor

func (x *SavedObject) GetCapturedTensor() *CapturedTensor

func (*SavedObject) GetChildren

func (*SavedObject) GetConstant

func (x *SavedObject) GetConstant() *SavedConstant

func (*SavedObject) GetDependencies

func (*SavedObject) GetFunction

func (x *SavedObject) GetFunction() *SavedFunction

func (*SavedObject) GetKind

func (m *SavedObject) GetKind() isSavedObject_Kind

func (*SavedObject) GetRegisteredName

func (x *SavedObject) GetRegisteredName() string

func (*SavedObject) GetRegisteredSaver

func (x *SavedObject) GetRegisteredSaver() string

func (*SavedObject) GetResource

func (x *SavedObject) GetResource() *SavedResource

func (*SavedObject) GetSaveableObjects

func (x *SavedObject) GetSaveableObjects() map[string]*SaveableObject

func (*SavedObject) GetSerializedUserProto

func (x *SavedObject) GetSerializedUserProto() *anypb.Any

func (*SavedObject) GetUserObject

func (x *SavedObject) GetUserObject() *SavedUserObject

func (*SavedObject) GetVariable

func (x *SavedObject) GetVariable() *SavedVariable

func (*SavedObject) ProtoMessage

func (*SavedObject) ProtoMessage()

func (*SavedObject) ProtoReflect

func (x *SavedObject) ProtoReflect() protoreflect.Message

func (*SavedObject) Reset

func (x *SavedObject) Reset()

func (*SavedObject) String

func (x *SavedObject) String() string

type SavedObjectGraph

type SavedObjectGraph struct {

	// Flattened list of objects in the object graph.
	//
	// The position of the object in this list indicates its id.
	// Nodes[0] is considered the root node.
	Nodes []*SavedObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	// Information about captures and output structures in concrete functions.
	// Referenced from SavedBareConcreteFunction and SavedFunction.
	ConcreteFunctions map[string]*SavedConcreteFunction `` /* 200-byte string literal not displayed */
	// contains filtered or unexported fields
}
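
Since Nodes[0] is the root, a simple walk can follow the Children edges from there. The sketch below assumes the node_id and local_name getters generated for the ObjectReference message, and uses pb as an assumed alias for this generated package.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // assumed import path
)

// printRootChildren lists the direct children of the root object.
func printRootChildren(g *pb.SavedObjectGraph) {
	nodes := g.GetNodes()
	if len(nodes) == 0 {
		return
	}
	for _, ref := range nodes[0].GetChildren() {
		child := nodes[int(ref.GetNodeId())]
		fmt.Printf("%s -> node %d (%T)\n", ref.GetLocalName(), ref.GetNodeId(), child.GetKind())
	}
}

func main() {
	printRootChildren(&pb.SavedObjectGraph{}) // empty graph, purely for illustration
}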

func (*SavedObjectGraph) Descriptor deprecated

func (*SavedObjectGraph) Descriptor() ([]byte, []int)

Deprecated: Use SavedObjectGraph.ProtoReflect.Descriptor instead.

func (*SavedObjectGraph) GetConcreteFunctions

func (x *SavedObjectGraph) GetConcreteFunctions() map[string]*SavedConcreteFunction

func (*SavedObjectGraph) GetNodes

func (x *SavedObjectGraph) GetNodes() []*SavedObject

func (*SavedObjectGraph) ProtoMessage

func (*SavedObjectGraph) ProtoMessage()

func (*SavedObjectGraph) ProtoReflect

func (x *SavedObjectGraph) ProtoReflect() protoreflect.Message

func (*SavedObjectGraph) Reset

func (x *SavedObjectGraph) Reset()

func (*SavedObjectGraph) String

func (x *SavedObjectGraph) String() string

type SavedObject_Asset

type SavedObject_Asset struct {
	Asset *SavedAsset `protobuf:"bytes,5,opt,name=asset,proto3,oneof"`
}

type SavedObject_BareConcreteFunction

type SavedObject_BareConcreteFunction struct {
	BareConcreteFunction *SavedBareConcreteFunction `protobuf:"bytes,8,opt,name=bare_concrete_function,json=bareConcreteFunction,proto3,oneof"`
}

type SavedObject_CapturedTensor

type SavedObject_CapturedTensor struct {
	CapturedTensor *CapturedTensor `protobuf:"bytes,12,opt,name=captured_tensor,json=capturedTensor,proto3,oneof"`
}

type SavedObject_Constant

type SavedObject_Constant struct {
	Constant *SavedConstant `protobuf:"bytes,9,opt,name=constant,proto3,oneof"`
}

type SavedObject_Function

type SavedObject_Function struct {
	Function *SavedFunction `protobuf:"bytes,6,opt,name=function,proto3,oneof"`
}

type SavedObject_Resource

type SavedObject_Resource struct {
	Resource *SavedResource `protobuf:"bytes,10,opt,name=resource,proto3,oneof"`
}

type SavedObject_UserObject

type SavedObject_UserObject struct {
	UserObject *SavedUserObject `protobuf:"bytes,4,opt,name=user_object,json=userObject,proto3,oneof"`
}

type SavedObject_Variable

type SavedObject_Variable struct {
	Variable *SavedVariable `protobuf:"bytes,7,opt,name=variable,proto3,oneof"`
}

type SavedResource

type SavedResource struct {

	// A device specification indicating a required placement for the resource
	// creation function, e.g. "CPU". An empty string allows the user to select a
	// device.
	Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
	// contains filtered or unexported fields
}

A SavedResource represents a TF object that holds state during its lifetime. An object of this type can have a reference to a create_resource() and an initialize() function.

func (*SavedResource) Descriptor deprecated

func (*SavedResource) Descriptor() ([]byte, []int)

Deprecated: Use SavedResource.ProtoReflect.Descriptor instead.

func (*SavedResource) GetDevice

func (x *SavedResource) GetDevice() string

func (*SavedResource) ProtoMessage

func (*SavedResource) ProtoMessage()

func (*SavedResource) ProtoReflect

func (x *SavedResource) ProtoReflect() protoreflect.Message

func (*SavedResource) Reset

func (x *SavedResource) Reset()

func (*SavedResource) String

func (x *SavedResource) String() string

type SavedUserObject

type SavedUserObject struct {

	// Corresponds to a registration of the type to use in the loading program.
	Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
	// Version information from the producer of this SavedUserObject.
	Version *versions_go_proto.VersionDef `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// Metadata for deserializing this object.
	//
	// Deprecated! At the time of deprecation, Keras was the only user of this
	// field, and its saving and loading code will be updated shortly.
	// Please save your application-specific metadata to a separate file.
	//
	// Deprecated: Marked as deprecated in tensorflow/core/protobuf/saved_object_graph.proto.
	Metadata string `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// contains filtered or unexported fields
}

A SavedUserObject is an object (in the object-oriented language of the TensorFlow program) of some user- or framework-defined class other than those handled specifically by the other kinds of SavedObjects.

This object cannot be evaluated as a tensor, and therefore cannot be bound to an input of a function.

func (*SavedUserObject) Descriptor deprecated

func (*SavedUserObject) Descriptor() ([]byte, []int)

Deprecated: Use SavedUserObject.ProtoReflect.Descriptor instead.

func (*SavedUserObject) GetIdentifier

func (x *SavedUserObject) GetIdentifier() string

func (*SavedUserObject) GetMetadata deprecated

func (x *SavedUserObject) GetMetadata() string

Deprecated: Marked as deprecated in tensorflow/core/protobuf/saved_object_graph.proto.

func (*SavedUserObject) GetVersion

func (x *SavedUserObject) GetVersion() *versions_go_proto.VersionDef

func (*SavedUserObject) ProtoMessage

func (*SavedUserObject) ProtoMessage()

func (*SavedUserObject) ProtoReflect

func (x *SavedUserObject) ProtoReflect() protoreflect.Message

func (*SavedUserObject) Reset

func (x *SavedUserObject) Reset()

func (*SavedUserObject) String

func (x *SavedUserObject) String() string

type SavedVariable

type SavedVariable struct {
	Dtype           types_go_proto.DataType                   `protobuf:"varint,1,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Shape           *tensor_shape_go_proto.TensorShapeProto   `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Trainable       bool                                      `protobuf:"varint,3,opt,name=trainable,proto3" json:"trainable,omitempty"`
	Synchronization variable_go_proto.VariableSynchronization `protobuf:"varint,4,opt,name=synchronization,proto3,enum=tensorflow.VariableSynchronization" json:"synchronization,omitempty"`
	Aggregation     variable_go_proto.VariableAggregation     `protobuf:"varint,5,opt,name=aggregation,proto3,enum=tensorflow.VariableAggregation" json:"aggregation,omitempty"`
	Name            string                                    `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
	Device          string                                    `protobuf:"bytes,7,opt,name=device,proto3" json:"device,omitempty"`
	// List of component variables for a distributed variable.
	//
	// When this field is non-empty, the SavedVariable will be assumed
	// to be a distributed variable defined by the components listed here.
	//
	// This is only supported by experimental loaders at the moment.
	ExperimentalDistributedVariableComponents []*SavedVariable `` /* 188-byte string literal not displayed */
	// contains filtered or unexported fields
}

Represents a Variable that is initialized by loading the contents from the checkpoint.
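
A minimal sketch of populating a SavedVariable with this package's generated types. The variable name, dtype, and shape are illustrative, and the sibling package aliases types_go_proto and tensor_shape_go_proto follow the field declarations above; actual import paths depend on your module.

// Illustrative only: a trainable float32 [784, 10] variable.
// Synchronization and Aggregation are left at their zero values.
v := &SavedVariable{
	Dtype: types_go_proto.DataType_DT_FLOAT,
	Shape: &tensor_shape_go_proto.TensorShapeProto{
		Dim: []*tensor_shape_go_proto.TensorShapeProto_Dim{
			{Size: 784},
			{Size: 10},
		},
	},
	Trainable: true,
	Name:      "dense/kernel",
}
_ = v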

func (*SavedVariable) Descriptor deprecated

func (*SavedVariable) Descriptor() ([]byte, []int)

Deprecated: Use SavedVariable.ProtoReflect.Descriptor instead.

func (*SavedVariable) GetAggregation

func (*SavedVariable) GetDevice

func (x *SavedVariable) GetDevice() string

func (*SavedVariable) GetDtype

func (x *SavedVariable) GetDtype() types_go_proto.DataType

func (*SavedVariable) GetExperimentalDistributedVariableComponents

func (x *SavedVariable) GetExperimentalDistributedVariableComponents() []*SavedVariable

func (*SavedVariable) GetName

func (x *SavedVariable) GetName() string

func (*SavedVariable) GetShape

func (*SavedVariable) GetSynchronization

func (*SavedVariable) GetTrainable

func (x *SavedVariable) GetTrainable() bool

func (*SavedVariable) ProtoMessage

func (*SavedVariable) ProtoMessage()

func (*SavedVariable) ProtoReflect

func (x *SavedVariable) ProtoReflect() protoreflect.Message

func (*SavedVariable) Reset

func (x *SavedVariable) Reset()

func (*SavedVariable) String

func (x *SavedVariable) String() string

type SaverDef

type SaverDef struct {

	// The name of the tensor in which to specify the filename when saving or
	// restoring a model checkpoint.
	FilenameTensorName string `protobuf:"bytes,1,opt,name=filename_tensor_name,json=filenameTensorName,proto3" json:"filename_tensor_name,omitempty"`
	// The operation to run when saving a model checkpoint.
	SaveTensorName string `protobuf:"bytes,2,opt,name=save_tensor_name,json=saveTensorName,proto3" json:"save_tensor_name,omitempty"`
	// The operation to run when restoring a model checkpoint.
	RestoreOpName string `protobuf:"bytes,3,opt,name=restore_op_name,json=restoreOpName,proto3" json:"restore_op_name,omitempty"`
	// Maximum number of checkpoints to keep.  If 0, no checkpoints are deleted.
	MaxToKeep int32 `protobuf:"varint,4,opt,name=max_to_keep,json=maxToKeep,proto3" json:"max_to_keep,omitempty"`
	// Shard the save files, one per device that has Variable nodes.
	Sharded bool `protobuf:"varint,5,opt,name=sharded,proto3" json:"sharded,omitempty"`
	// How often to keep an additional checkpoint. If not specified, only the last
	// "max_to_keep" checkpoints are kept; if specified, in addition to keeping
	// the last "max_to_keep" checkpoints, an additional checkpoint will be kept
	// for every n hours of training.
	KeepCheckpointEveryNHours float32                          `` /* 144-byte string literal not displayed */
	Version                   SaverDef_CheckpointFormatVersion `protobuf:"varint,7,opt,name=version,proto3,enum=tensorflow.SaverDef_CheckpointFormatVersion" json:"version,omitempty"`
	// contains filtered or unexported fields
}

Protocol buffer representing the configuration of a Saver.
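
A hedged sketch of a SaverDef configured for the V2 checkpoint format. The tensor and op names are placeholders in the style produced by tf.train.Saver, not values taken from this documentation.

// Illustrative only: keep at most 5 checkpoints, sharded by device,
// plus one extra checkpoint every 10000 hours of training.
saver := &SaverDef{
	FilenameTensorName:        "save/Const:0",
	SaveTensorName:            "save/Identity:0",
	RestoreOpName:             "save/restore_all",
	MaxToKeep:                 5,
	Sharded:                   true,
	KeepCheckpointEveryNHours: 10000,
	Version:                   SaverDef_V2,
}
_ = saver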

func (*SaverDef) Descriptor deprecated

func (*SaverDef) Descriptor() ([]byte, []int)

Deprecated: Use SaverDef.ProtoReflect.Descriptor instead.

func (*SaverDef) GetFilenameTensorName

func (x *SaverDef) GetFilenameTensorName() string

func (*SaverDef) GetKeepCheckpointEveryNHours

func (x *SaverDef) GetKeepCheckpointEveryNHours() float32

func (*SaverDef) GetMaxToKeep

func (x *SaverDef) GetMaxToKeep() int32

func (*SaverDef) GetRestoreOpName

func (x *SaverDef) GetRestoreOpName() string

func (*SaverDef) GetSaveTensorName

func (x *SaverDef) GetSaveTensorName() string

func (*SaverDef) GetSharded

func (x *SaverDef) GetSharded() bool

func (*SaverDef) GetVersion

func (*SaverDef) ProtoMessage

func (*SaverDef) ProtoMessage()

func (*SaverDef) ProtoReflect

func (x *SaverDef) ProtoReflect() protoreflect.Message

func (*SaverDef) Reset

func (x *SaverDef) Reset()

func (*SaverDef) String

func (x *SaverDef) String() string

type SaverDef_CheckpointFormatVersion

type SaverDef_CheckpointFormatVersion int32

A version number that identifies a different on-disk checkpoint format. Usually, each subclass of BaseSaverBuilder works with a particular version/format. However, it is possible that the same builder may be upgraded to support a newer checkpoint format in the future.

const (
	// Internal legacy format.
	SaverDef_LEGACY SaverDef_CheckpointFormatVersion = 0
	// Deprecated format: tf.Saver() which works with tensorflow::table::Table.
	SaverDef_V1 SaverDef_CheckpointFormatVersion = 1
	// Current format: more efficient.
	SaverDef_V2 SaverDef_CheckpointFormatVersion = 2
)

func (SaverDef_CheckpointFormatVersion) Descriptor

func (SaverDef_CheckpointFormatVersion) Enum

func (SaverDef_CheckpointFormatVersion) EnumDescriptor deprecated

func (SaverDef_CheckpointFormatVersion) EnumDescriptor() ([]byte, []int)

Deprecated: Use SaverDef_CheckpointFormatVersion.Descriptor instead.

func (SaverDef_CheckpointFormatVersion) Number

func (SaverDef_CheckpointFormatVersion) String

func (SaverDef_CheckpointFormatVersion) Type

type ScopedAllocatorOptions

type ScopedAllocatorOptions struct {

	// If present, only perform optimization for these ops.
	EnableOp []string `protobuf:"bytes,1,rep,name=enable_op,json=enableOp,proto3" json:"enable_op,omitempty"`
	// contains filtered or unexported fields
}

func (*ScopedAllocatorOptions) Descriptor deprecated

func (*ScopedAllocatorOptions) Descriptor() ([]byte, []int)

Deprecated: Use ScopedAllocatorOptions.ProtoReflect.Descriptor instead.

func (*ScopedAllocatorOptions) GetEnableOp

func (x *ScopedAllocatorOptions) GetEnableOp() []string

func (*ScopedAllocatorOptions) ProtoMessage

func (*ScopedAllocatorOptions) ProtoMessage()

func (*ScopedAllocatorOptions) ProtoReflect

func (x *ScopedAllocatorOptions) ProtoReflect() protoreflect.Message

func (*ScopedAllocatorOptions) Reset

func (x *ScopedAllocatorOptions) Reset()

func (*ScopedAllocatorOptions) String

func (x *ScopedAllocatorOptions) String() string

type ServerDef

type ServerDef struct {

	// The cluster of which this server is a member.
	Cluster *ClusterDef `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// The name of the job of which this server is a member.
	//
	// NOTE(mrry): The `cluster` field must contain a `JobDef` with a `name` field
	// that matches this name.
	JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"`
	// Replica this server manages.
	Replica int32 `protobuf:"varint,8,opt,name=replica,proto3" json:"replica,omitempty"`
	// The task index of this server in its job.
	//
	// NOTE: The `cluster` field must contain a `JobDef` with a matching `name`
	// and a mapping in its `tasks` field for this index.
	TaskIndex int32 `protobuf:"varint,3,opt,name=task_index,json=taskIndex,proto3" json:"task_index,omitempty"`
	// The default configuration for sessions that run on this server.
	DefaultSessionConfig *ConfigProto `protobuf:"bytes,4,opt,name=default_session_config,json=defaultSessionConfig,proto3" json:"default_session_config,omitempty"`
	// The protocol to be used by this server.
	//
	// Acceptable values include: "grpc", "grpc+verbs".
	Protocol string `protobuf:"bytes,5,opt,name=protocol,proto3" json:"protocol,omitempty"`
	// The server port. If not set, then we identify the port from the job_name.
	Port int32 `protobuf:"varint,6,opt,name=port,proto3" json:"port,omitempty"`
	// Device filters for remote tasks in the cluster.
	// NOTE: This is an experimental feature and only effective in TensorFlow 2.x.
	ClusterDeviceFilters *ClusterDeviceFilters `protobuf:"bytes,7,opt,name=cluster_device_filters,json=clusterDeviceFilters,proto3" json:"cluster_device_filters,omitempty"`
	// contains filtered or unexported fields
}

Defines the configuration of a single TensorFlow server.
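
A sketch of a single-task ServerDef. ClusterDef and JobDef are documented elsewhere in this package; the job layout, address, and protocol below are assumptions for illustration.

// Illustrative only: one "worker" job with a single task at index 0.
server := &ServerDef{
	Cluster: &ClusterDef{
		Job: []*JobDef{
			{
				Name:  "worker",
				Tasks: map[int32]string{0: "localhost:2222"},
			},
		},
	},
	JobName:   "worker",
	TaskIndex: 0,
	Protocol:  "grpc",
}
_ = server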

func (*ServerDef) Descriptor deprecated

func (*ServerDef) Descriptor() ([]byte, []int)

Deprecated: Use ServerDef.ProtoReflect.Descriptor instead.

func (*ServerDef) GetCluster

func (x *ServerDef) GetCluster() *ClusterDef

func (*ServerDef) GetClusterDeviceFilters

func (x *ServerDef) GetClusterDeviceFilters() *ClusterDeviceFilters

func (*ServerDef) GetDefaultSessionConfig

func (x *ServerDef) GetDefaultSessionConfig() *ConfigProto

func (*ServerDef) GetJobName

func (x *ServerDef) GetJobName() string

func (*ServerDef) GetPort

func (x *ServerDef) GetPort() int32

func (*ServerDef) GetProtocol

func (x *ServerDef) GetProtocol() string

func (*ServerDef) GetReplica added in v0.6.0

func (x *ServerDef) GetReplica() int32

func (*ServerDef) GetTaskIndex

func (x *ServerDef) GetTaskIndex() int32

func (*ServerDef) ProtoMessage

func (*ServerDef) ProtoMessage()

func (*ServerDef) ProtoReflect

func (x *ServerDef) ProtoReflect() protoreflect.Message

func (*ServerDef) Reset

func (x *ServerDef) Reset()

func (*ServerDef) String

func (x *ServerDef) String() string

type SessionMetadata

type SessionMetadata struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The version is optional. If set, needs to be >= 0.
	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// contains filtered or unexported fields
}

Metadata about the session.

This can be used by the runtime and the Ops for debugging, monitoring, etc.

The (name, version) tuple is expected to be a unique identifier for sessions within the same process.

NOTE: This is currently used and propagated only by the direct session.

func (*SessionMetadata) Descriptor deprecated

func (*SessionMetadata) Descriptor() ([]byte, []int)

Deprecated: Use SessionMetadata.ProtoReflect.Descriptor instead.

func (*SessionMetadata) GetName

func (x *SessionMetadata) GetName() string

func (*SessionMetadata) GetVersion

func (x *SessionMetadata) GetVersion() int64

func (*SessionMetadata) ProtoMessage

func (*SessionMetadata) ProtoMessage()

func (*SessionMetadata) ProtoReflect

func (x *SessionMetadata) ProtoReflect() protoreflect.Message

func (*SessionMetadata) Reset

func (x *SessionMetadata) Reset()

func (*SessionMetadata) String

func (x *SessionMetadata) String() string

type SignatureDef

type SignatureDef struct {

	// Named input parameters.
	Inputs map[string]*TensorInfo `` /* 153-byte string literal not displayed */
	// Named output parameters.
	Outputs map[string]*TensorInfo `` /* 155-byte string literal not displayed */
	// Extensible method_name information enabling third-party users to mark a
	// SignatureDef as supporting a particular method. This enables producers and
	// consumers of SignatureDefs, e.g. a model definition library and a serving
	// library to have a clear hand-off regarding the semantics of a computation.
	//
	// Note that multiple SignatureDefs in a single MetaGraphDef may have the same
	// method_name. This is commonly used to support multi-headed computation,
	// where a single graph computation may return multiple results.
	MethodName string `protobuf:"bytes,3,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
	// Named input to corresponding default values if any.
	Defaults map[string]*tensor_go_proto.TensorProto `` /* 157-byte string literal not displayed */
	// contains filtered or unexported fields
}

SignatureDef defines the signature of a computation supported by a TensorFlow graph.

For example, a model with two loss computations, sharing a single input, might have the following signature_def map, in a MetaGraphDef message.

Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, output key, and method_name are identical, and will be used by system(s) that implement or rely upon this particular loss method. The output tensor names differ, demonstrating how different outputs can exist for the same method.

signature_def {
  key: "loss_A"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_A:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
    method_name: "some/package/compute_loss"
  }
  ...
}

signature_def {
  key: "loss_B"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_B:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
    method_name: "some/package/compute_loss"
  }
  ...
}
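
The same "loss_A" entry expressed with this package's generated Go types; a sketch only, with the tensor names carried over from the textproto above and an unknown-rank shape standing in for the elided tensor_shape.

lossA := &SignatureDef{
	Inputs: map[string]*TensorInfo{
		"input": {
			Encoding:    &TensorInfo_Name{Name: "input:0"},
			Dtype:       types_go_proto.DataType_DT_STRING,
			TensorShape: &tensor_shape_go_proto.TensorShapeProto{UnknownRank: true},
		},
	},
	Outputs: map[string]*TensorInfo{
		"loss_output": {
			Encoding:    &TensorInfo_Name{Name: "loss_output_A:0"},
			Dtype:       types_go_proto.DataType_DT_FLOAT,
			TensorShape: &tensor_shape_go_proto.TensorShapeProto{UnknownRank: true},
		},
	},
	MethodName: "some/package/compute_loss",
}
_ = lossA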

func (*SignatureDef) Descriptor deprecated

func (*SignatureDef) Descriptor() ([]byte, []int)

Deprecated: Use SignatureDef.ProtoReflect.Descriptor instead.

func (*SignatureDef) GetDefaults added in v0.6.0

func (x *SignatureDef) GetDefaults() map[string]*tensor_go_proto.TensorProto

func (*SignatureDef) GetInputs

func (x *SignatureDef) GetInputs() map[string]*TensorInfo

func (*SignatureDef) GetMethodName

func (x *SignatureDef) GetMethodName() string

func (*SignatureDef) GetOutputs

func (x *SignatureDef) GetOutputs() map[string]*TensorInfo

func (*SignatureDef) ProtoMessage

func (*SignatureDef) ProtoMessage()

func (*SignatureDef) ProtoReflect

func (x *SignatureDef) ProtoReflect() protoreflect.Message

func (*SignatureDef) Reset

func (x *SignatureDef) Reset()

func (*SignatureDef) String

func (x *SignatureDef) String() string

type SnapshotMetadataRecord

type SnapshotMetadataRecord struct {

	// Stores the fingerprint of the graph that describes the dataset that is
	// snapshotted.
	GraphHash string `protobuf:"bytes,1,opt,name=graph_hash,json=graphHash,proto3" json:"graph_hash,omitempty"`
	// Run ID that this snapshot corresponds to.
	RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"`
	// Time when we started creating this snapshot.
	CreationTimestamp int64 `protobuf:"varint,3,opt,name=creation_timestamp,json=creationTimestamp,proto3" json:"creation_timestamp,omitempty"`
	// Version of the snapshot data file format.
	Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
	// A list of tensor dtype corresponding to each element of the snapshot.
	Dtype []types_go_proto.DataType `protobuf:"varint,5,rep,packed,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// The number of elements in the snapshot.
	NumElements int64 `protobuf:"varint,6,opt,name=num_elements,json=numElements,proto3" json:"num_elements,omitempty"`
	Finalized   bool  `protobuf:"varint,1000,opt,name=finalized,proto3" json:"finalized,omitempty"`
	// contains filtered or unexported fields
}

This stores the metadata information present in each snapshot record.

func (*SnapshotMetadataRecord) Descriptor deprecated

func (*SnapshotMetadataRecord) Descriptor() ([]byte, []int)

Deprecated: Use SnapshotMetadataRecord.ProtoReflect.Descriptor instead.

func (*SnapshotMetadataRecord) GetCreationTimestamp

func (x *SnapshotMetadataRecord) GetCreationTimestamp() int64

func (*SnapshotMetadataRecord) GetDtype

func (*SnapshotMetadataRecord) GetFinalized

func (x *SnapshotMetadataRecord) GetFinalized() bool

func (*SnapshotMetadataRecord) GetGraphHash

func (x *SnapshotMetadataRecord) GetGraphHash() string

func (*SnapshotMetadataRecord) GetNumElements

func (x *SnapshotMetadataRecord) GetNumElements() int64

func (*SnapshotMetadataRecord) GetRunId

func (x *SnapshotMetadataRecord) GetRunId() string

func (*SnapshotMetadataRecord) GetVersion

func (x *SnapshotMetadataRecord) GetVersion() int64

func (*SnapshotMetadataRecord) ProtoMessage

func (*SnapshotMetadataRecord) ProtoMessage()

func (*SnapshotMetadataRecord) ProtoReflect

func (x *SnapshotMetadataRecord) ProtoReflect() protoreflect.Message

func (*SnapshotMetadataRecord) Reset

func (x *SnapshotMetadataRecord) Reset()

func (*SnapshotMetadataRecord) String

func (x *SnapshotMetadataRecord) String() string

type SnapshotRecord

type SnapshotRecord struct {
	Tensor []*tensor_go_proto.TensorProto `protobuf:"bytes,1,rep,name=tensor,proto3" json:"tensor,omitempty"`
	// contains filtered or unexported fields
}

Each SnapshotRecord represents one batch of pre-processed input data. A batch consists of a list of tensors that we encode as TensorProtos. This message doesn't store the structure of the batch.

func (*SnapshotRecord) Descriptor deprecated

func (*SnapshotRecord) Descriptor() ([]byte, []int)

Deprecated: Use SnapshotRecord.ProtoReflect.Descriptor instead.

func (*SnapshotRecord) GetTensor

func (x *SnapshotRecord) GetTensor() []*tensor_go_proto.TensorProto

func (*SnapshotRecord) ProtoMessage

func (*SnapshotRecord) ProtoMessage()

func (*SnapshotRecord) ProtoReflect

func (x *SnapshotRecord) ProtoReflect() protoreflect.Message

func (*SnapshotRecord) Reset

func (x *SnapshotRecord) Reset()

func (*SnapshotRecord) String

func (x *SnapshotRecord) String() string

type SnapshotTensorMetadata

type SnapshotTensorMetadata struct {
	TensorMetadata []*TensorMetadata `protobuf:"bytes,1,rep,name=tensor_metadata,json=tensorMetadata,proto3" json:"tensor_metadata,omitempty"`
	// contains filtered or unexported fields
}

Metadata for all the tensors in a Snapshot Record.

func (*SnapshotTensorMetadata) Descriptor deprecated

func (*SnapshotTensorMetadata) Descriptor() ([]byte, []int)

Deprecated: Use SnapshotTensorMetadata.ProtoReflect.Descriptor instead.

func (*SnapshotTensorMetadata) GetTensorMetadata

func (x *SnapshotTensorMetadata) GetTensorMetadata() []*TensorMetadata

func (*SnapshotTensorMetadata) ProtoMessage

func (*SnapshotTensorMetadata) ProtoMessage()

func (*SnapshotTensorMetadata) ProtoReflect

func (x *SnapshotTensorMetadata) ProtoReflect() protoreflect.Message

func (*SnapshotTensorMetadata) Reset

func (x *SnapshotTensorMetadata) Reset()

func (*SnapshotTensorMetadata) String

func (x *SnapshotTensorMetadata) String() string

type SourceFile

type SourceFile struct {

	// Path to the file.
	FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
	// Name of the host on which the file is located.
	HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
	// Line-by-line content of the file.
	Lines []string `protobuf:"bytes,3,rep,name=lines,proto3" json:"lines,omitempty"`
	// contains filtered or unexported fields
}

Content of a source file involved in the execution of the debugged TensorFlow program.

func (*SourceFile) Descriptor deprecated

func (*SourceFile) Descriptor() ([]byte, []int)

Deprecated: Use SourceFile.ProtoReflect.Descriptor instead.

func (*SourceFile) GetFilePath

func (x *SourceFile) GetFilePath() string

func (*SourceFile) GetHostName

func (x *SourceFile) GetHostName() string

func (*SourceFile) GetLines

func (x *SourceFile) GetLines() []string

func (*SourceFile) ProtoMessage

func (*SourceFile) ProtoMessage()

func (*SourceFile) ProtoReflect

func (x *SourceFile) ProtoReflect() protoreflect.Message

func (*SourceFile) Reset

func (x *SourceFile) Reset()

func (*SourceFile) String

func (x *SourceFile) String() string

type StackFrameWithId

type StackFrameWithId struct {

	// A unique ID for the stack frame: A UUID-like string.
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	// Stack frame, i.e., a frame of a stack trace, containing information
	// regarding the file name, line number, function name, code content
	// of the line, and column number (if available).
	FileLineCol *graph_debug_info_proto.GraphDebugInfo_FileLineCol `protobuf:"bytes,2,opt,name=file_line_col,json=fileLineCol,proto3" json:"file_line_col,omitempty"`
	// contains filtered or unexported fields
}

A stack frame with ID.

func (*StackFrameWithId) Descriptor deprecated

func (*StackFrameWithId) Descriptor() ([]byte, []int)

Deprecated: Use StackFrameWithId.ProtoReflect.Descriptor instead.

func (*StackFrameWithId) GetFileLineCol

func (*StackFrameWithId) GetId

func (x *StackFrameWithId) GetId() string

func (*StackFrameWithId) ProtoMessage

func (*StackFrameWithId) ProtoMessage()

func (*StackFrameWithId) ProtoReflect

func (x *StackFrameWithId) ProtoReflect() protoreflect.Message

func (*StackFrameWithId) Reset

func (x *StackFrameWithId) Reset()

func (*StackFrameWithId) String

func (x *StackFrameWithId) String() string

type StatusProto added in v0.5.0

type StructuredValue

type StructuredValue struct {

	// The kind of value.
	//
	// Types that are assignable to Kind:
	//
	//	*StructuredValue_NoneValue
	//	*StructuredValue_Float64Value
	//	*StructuredValue_Int64Value
	//	*StructuredValue_StringValue
	//	*StructuredValue_BoolValue
	//	*StructuredValue_TensorShapeValue
	//	*StructuredValue_TensorDtypeValue
	//	*StructuredValue_TensorSpecValue
	//	*StructuredValue_TypeSpecValue
	//	*StructuredValue_BoundedTensorSpecValue
	//	*StructuredValue_ListValue
	//	*StructuredValue_TupleValue
	//	*StructuredValue_DictValue
	//	*StructuredValue_NamedTupleValue
	//	*StructuredValue_TensorValue
	//	*StructuredValue_NumpyValue
	Kind isStructuredValue_Kind `protobuf_oneof:"kind"`
	// contains filtered or unexported fields
}

`StructuredValue` represents a dynamically typed value representing various data structures that are inspired by Python data structures typically used in TensorFlow functions as inputs and outputs.

For example when saving a Layer there may be a `training` argument. If the user passes a boolean True/False, that switches between two concrete TensorFlow functions. In order to switch between them in the same way after loading the SavedModel, we need to represent "True" and "False".

A more advanced example might be a function which takes a list of dictionaries mapping from strings to Tensors. In order to map from user-specified arguments `[{"a": tf.constant(1.)}, {"q": tf.constant(3.)}]` after load to the right saved TensorFlow function, we need to represent the nested structure and the strings, recording that we have a trace for anything matching `[{"a": tf.TensorSpec(None, tf.float32)}, {"q": tf.TensorSpec([], tf.float64)}]` as an example.

Likewise functions may return nested structures of Tensors, for example returning a dictionary mapping from strings to Tensors. In order for the loaded function to return the same structure we need to serialize it.

This is an ergonomic aid for working with loaded SavedModels, not a promise to serialize all possible function signatures. For example we do not expect to pickle generic Python objects, and ideally we'd stay language-agnostic.
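
A sketch of how the Kind oneof is populated in Go, using the wrapper types listed below. The nested values are illustrative, and ListValue (documented elsewhere in this package) is assumed to expose a Values slice of *StructuredValue.

// Illustrative only: the structure [3, "ok"] as a StructuredValue list.
sv := &StructuredValue{
	Kind: &StructuredValue_ListValue{
		ListValue: &ListValue{
			Values: []*StructuredValue{
				{Kind: &StructuredValue_Int64Value{Int64Value: 3}},
				{Kind: &StructuredValue_StringValue{StringValue: "ok"}},
			},
		},
	},
}
_ = sv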

func (*StructuredValue) Descriptor deprecated

func (*StructuredValue) Descriptor() ([]byte, []int)

Deprecated: Use StructuredValue.ProtoReflect.Descriptor instead.

func (*StructuredValue) GetBoolValue

func (x *StructuredValue) GetBoolValue() bool

func (*StructuredValue) GetBoundedTensorSpecValue

func (x *StructuredValue) GetBoundedTensorSpecValue() *BoundedTensorSpecProto

func (*StructuredValue) GetDictValue

func (x *StructuredValue) GetDictValue() *DictValue

func (*StructuredValue) GetFloat64Value

func (x *StructuredValue) GetFloat64Value() float64

func (*StructuredValue) GetInt64Value

func (x *StructuredValue) GetInt64Value() int64

func (*StructuredValue) GetKind

func (m *StructuredValue) GetKind() isStructuredValue_Kind

func (*StructuredValue) GetListValue

func (x *StructuredValue) GetListValue() *ListValue

func (*StructuredValue) GetNamedTupleValue

func (x *StructuredValue) GetNamedTupleValue() *NamedTupleValue

func (*StructuredValue) GetNoneValue

func (x *StructuredValue) GetNoneValue() *NoneValue

func (*StructuredValue) GetNumpyValue added in v0.6.0

func (x *StructuredValue) GetNumpyValue() *tensor_go_proto.TensorProto

func (*StructuredValue) GetStringValue

func (x *StructuredValue) GetStringValue() string

func (*StructuredValue) GetTensorDtypeValue

func (x *StructuredValue) GetTensorDtypeValue() types_go_proto.DataType

func (*StructuredValue) GetTensorShapeValue

func (x *StructuredValue) GetTensorShapeValue() *tensor_shape_go_proto.TensorShapeProto

func (*StructuredValue) GetTensorSpecValue

func (x *StructuredValue) GetTensorSpecValue() *TensorSpecProto

func (*StructuredValue) GetTensorValue added in v0.5.0

func (x *StructuredValue) GetTensorValue() *tensor_go_proto.TensorProto

func (*StructuredValue) GetTupleValue

func (x *StructuredValue) GetTupleValue() *TupleValue

func (*StructuredValue) GetTypeSpecValue

func (x *StructuredValue) GetTypeSpecValue() *TypeSpecProto

func (*StructuredValue) ProtoMessage

func (*StructuredValue) ProtoMessage()

func (*StructuredValue) ProtoReflect

func (x *StructuredValue) ProtoReflect() protoreflect.Message

func (*StructuredValue) Reset

func (x *StructuredValue) Reset()

func (*StructuredValue) String

func (x *StructuredValue) String() string

type StructuredValue_BoolValue

type StructuredValue_BoolValue struct {
	// Represents a boolean value.
	BoolValue bool `protobuf:"varint,14,opt,name=bool_value,json=boolValue,proto3,oneof"`
}

type StructuredValue_BoundedTensorSpecValue

type StructuredValue_BoundedTensorSpecValue struct {
	// Represents a value for tf.BoundedTensorSpec.
	BoundedTensorSpecValue *BoundedTensorSpecProto `protobuf:"bytes,35,opt,name=bounded_tensor_spec_value,json=boundedTensorSpecValue,proto3,oneof"`
}

type StructuredValue_DictValue

type StructuredValue_DictValue struct {
	// Represents a dict `Value`.
	DictValue *DictValue `protobuf:"bytes,53,opt,name=dict_value,json=dictValue,proto3,oneof"`
}

type StructuredValue_Float64Value

type StructuredValue_Float64Value struct {
	// Represents a double-precision floating-point value (a Python `float`).
	Float64Value float64 `protobuf:"fixed64,11,opt,name=float64_value,json=float64Value,proto3,oneof"`
}

type StructuredValue_Int64Value

type StructuredValue_Int64Value struct {
	// Represents a signed integer value, limited to 64 bits.
	// Larger values from Python's arbitrary-precision integers are unsupported.
	Int64Value int64 `protobuf:"zigzag64,12,opt,name=int64_value,json=int64Value,proto3,oneof"`
}

type StructuredValue_ListValue

type StructuredValue_ListValue struct {
	// Represents a list of `Value`.
	ListValue *ListValue `protobuf:"bytes,51,opt,name=list_value,json=listValue,proto3,oneof"`
}

type StructuredValue_NamedTupleValue

type StructuredValue_NamedTupleValue struct {
	// Represents Python's namedtuple.
	NamedTupleValue *NamedTupleValue `protobuf:"bytes,54,opt,name=named_tuple_value,json=namedTupleValue,proto3,oneof"`
}

type StructuredValue_NoneValue

type StructuredValue_NoneValue struct {
	// Represents None.
	NoneValue *NoneValue `protobuf:"bytes,1,opt,name=none_value,json=noneValue,proto3,oneof"`
}

type StructuredValue_NumpyValue added in v0.6.0

type StructuredValue_NumpyValue struct {
	// Represents a value for np.ndarray.
	NumpyValue *tensor_go_proto.TensorProto `protobuf:"bytes,56,opt,name=numpy_value,json=numpyValue,proto3,oneof"`
}

type StructuredValue_StringValue

type StructuredValue_StringValue struct {
	// Represents a string of Unicode characters stored in a Python `str`.
	// In Python 3, this is exactly what type `str` is.
	// In Python 2, this is the UTF-8 encoding of the characters.
	// For strings with ASCII characters only (as often used in TensorFlow code)
	// there is effectively no difference between the language versions.
	// The obsolescent `unicode` type of Python 2 is not supported here.
	StringValue string `protobuf:"bytes,13,opt,name=string_value,json=stringValue,proto3,oneof"`
}

type StructuredValue_TensorDtypeValue

type StructuredValue_TensorDtypeValue struct {
	// Represents an enum value for dtype.
	TensorDtypeValue types_go_proto.DataType `protobuf:"varint,32,opt,name=tensor_dtype_value,json=tensorDtypeValue,proto3,enum=tensorflow.DataType,oneof"`
}

type StructuredValue_TensorShapeValue

type StructuredValue_TensorShapeValue struct {
	// Represents a TensorShape.
	TensorShapeValue *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,31,opt,name=tensor_shape_value,json=tensorShapeValue,proto3,oneof"`
}

type StructuredValue_TensorSpecValue

type StructuredValue_TensorSpecValue struct {
	// Represents a value for tf.TensorSpec.
	TensorSpecValue *TensorSpecProto `protobuf:"bytes,33,opt,name=tensor_spec_value,json=tensorSpecValue,proto3,oneof"`
}

type StructuredValue_TensorValue added in v0.5.0

type StructuredValue_TensorValue struct {
	// Represents a value for tf.Tensor.
	TensorValue *tensor_go_proto.TensorProto `protobuf:"bytes,55,opt,name=tensor_value,json=tensorValue,proto3,oneof"`
}

type StructuredValue_TupleValue

type StructuredValue_TupleValue struct {
	// Represents a tuple of `Value`.
	TupleValue *TupleValue `protobuf:"bytes,52,opt,name=tuple_value,json=tupleValue,proto3,oneof"`
}

type StructuredValue_TypeSpecValue

type StructuredValue_TypeSpecValue struct {
	// Represents a value for tf.TypeSpec.
	TypeSpecValue *TypeSpecProto `protobuf:"bytes,34,opt,name=type_spec_value,json=typeSpecValue,proto3,oneof"`
}

type TaskDeviceFilters

type TaskDeviceFilters struct {
	DeviceFilters []string `protobuf:"bytes,1,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"`
	// contains filtered or unexported fields
}

Defines the device filters for a remote task.

func (*TaskDeviceFilters) Descriptor deprecated

func (*TaskDeviceFilters) Descriptor() ([]byte, []int)

Deprecated: Use TaskDeviceFilters.ProtoReflect.Descriptor instead.

func (*TaskDeviceFilters) GetDeviceFilters

func (x *TaskDeviceFilters) GetDeviceFilters() []string

func (*TaskDeviceFilters) ProtoMessage

func (*TaskDeviceFilters) ProtoMessage()

func (*TaskDeviceFilters) ProtoReflect

func (x *TaskDeviceFilters) ProtoReflect() protoreflect.Message

func (*TaskDeviceFilters) Reset

func (x *TaskDeviceFilters) Reset()

func (*TaskDeviceFilters) String

func (x *TaskDeviceFilters) String() string

type TensorConnection

type TensorConnection struct {

	// A tensor name. The value of this tensor will be substituted for
	// the tensor named in `to_tensor`.
	FromTensor string `protobuf:"bytes,1,opt,name=from_tensor,json=fromTensor,proto3" json:"from_tensor,omitempty"`
	// A tensor name. The value of this tensor will be bound to the
	// value of the tensor named in `from_tensor`.
	ToTensor string `protobuf:"bytes,2,opt,name=to_tensor,json=toTensor,proto3" json:"to_tensor,omitempty"`
	// contains filtered or unexported fields
}

Defines a connection between two tensors in a `GraphDef`.
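
A minimal sketch; the tensor names are placeholders. TensorConnection entries are typically carried inside CallableOptions (documented elsewhere in this package).

// Illustrative only: substitute the value of "input_a:0" wherever "input_b:0" is read.
conn := &TensorConnection{
	FromTensor: "input_a:0",
	ToTensor:   "input_b:0",
}
_ = conn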

func (*TensorConnection) Descriptor deprecated

func (*TensorConnection) Descriptor() ([]byte, []int)

Deprecated: Use TensorConnection.ProtoReflect.Descriptor instead.

func (*TensorConnection) GetFromTensor

func (x *TensorConnection) GetFromTensor() string

func (*TensorConnection) GetToTensor

func (x *TensorConnection) GetToTensor() string

func (*TensorConnection) ProtoMessage

func (*TensorConnection) ProtoMessage()

func (*TensorConnection) ProtoReflect

func (x *TensorConnection) ProtoReflect() protoreflect.Message

func (*TensorConnection) Reset

func (x *TensorConnection) Reset()

func (*TensorConnection) String

func (x *TensorConnection) String() string

type TensorDebugMode

type TensorDebugMode int32

Available modes for extracting debugging information from a Tensor. TODO(cais): Document the detailed column names and semantics in a separate markdown file once the implementation settles.

const (
	TensorDebugMode_UNSPECIFIED TensorDebugMode = 0
	// Only records what tensors are computed, eagerly or in graphs.
	// No information regarding the value of the tensor is available.
	TensorDebugMode_NO_TENSOR TensorDebugMode = 1
	// A minimalist health summary for float-type tensors.
	// Contains information only about the presence/absence of pathological
	// values including Infinity and NaN.
	// Applicable only to float dtypes.
	TensorDebugMode_CURT_HEALTH TensorDebugMode = 2
	// A concise health summary for float-type tensors.
	// Contains more information than CURT_HEALTH.
	// Infinity and NaN are treated differently.
	// Applicable only to float and integer dtypes.
	TensorDebugMode_CONCISE_HEALTH TensorDebugMode = 3
	// A detailed health summary.
	// Contains more detailed information than `CONCISE_HEALTH`.
	// Information about device, dtype and shape are included.
	// Counts for various types of values (Infinity, NaN, negative, zero,
	// positive) are included.
	// Applicable to float, integer and boolean dtypes.
	TensorDebugMode_FULL_HEALTH TensorDebugMode = 4
	// Provides full runtime shape information, up to a maximum rank, beyond
	// which the dimension sizes are truncated.
	TensorDebugMode_SHAPE TensorDebugMode = 5
	// Full numeric summary.
	// Including device, dtype, shape, counts of various types of values
	// (Infinity, NaN, negative, zero, positive), and summary statistics
	// (minimum, maximum, mean and variance).
	// Applicable to float, integer and boolean dtypes.
	TensorDebugMode_FULL_NUMERICS TensorDebugMode = 6
	// Full tensor value.
	TensorDebugMode_FULL_TENSOR TensorDebugMode = 7
	// Reduce the elements of a tensor to a rank-1 tensor of shape [3], in which
	//   - the 1st element is -inf if any element of the tensor is -inf,
	//     or zero otherwise.
	//   - the 2nd element is +inf if any element of the tensor is +inf,
	//     or zero otherwise.
	//   - the 3rd element is nan if any element of the tensor is nan, or zero
	//     otherwise.
	TensorDebugMode_REDUCE_INF_NAN_THREE_SLOTS TensorDebugMode = 8
)

func (TensorDebugMode) Descriptor

func (TensorDebugMode) Enum

func (x TensorDebugMode) Enum() *TensorDebugMode

func (TensorDebugMode) EnumDescriptor deprecated

func (TensorDebugMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use TensorDebugMode.Descriptor instead.

func (TensorDebugMode) Number

func (TensorDebugMode) String

func (x TensorDebugMode) String() string

func (TensorDebugMode) Type

type TensorInfo

type TensorInfo struct {

	// Types that are assignable to Encoding:
	//
	//	*TensorInfo_Name
	//	*TensorInfo_CooSparse_
	//	*TensorInfo_CompositeTensor_
	Encoding isTensorInfo_Encoding   `protobuf_oneof:"encoding"`
	Dtype    types_go_proto.DataType `protobuf:"varint,2,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// The static shape should be recorded here, to the extent that it can
	// be known in advance.  In the case of a SparseTensor, this field describes
	// the logical shape of the represented tensor (aka dense_shape).
	TensorShape *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,3,opt,name=tensor_shape,json=tensorShape,proto3" json:"tensor_shape,omitempty"`
	// contains filtered or unexported fields
}

Information about a Tensor necessary for feeding or retrieval.
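
A sketch of a dense TensorInfo using the TensorInfo_Name encoding wrapper defined below; the tensor name and shape are illustrative.

// Illustrative only: a dense float32 tensor of shape [-1, 10].
info := &TensorInfo{
	Encoding: &TensorInfo_Name{Name: "logits:0"},
	Dtype:    types_go_proto.DataType_DT_FLOAT,
	TensorShape: &tensor_shape_go_proto.TensorShapeProto{
		Dim: []*tensor_shape_go_proto.TensorShapeProto_Dim{
			{Size: -1},
			{Size: 10},
		},
	},
}
_ = info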

func (*TensorInfo) Descriptor deprecated

func (*TensorInfo) Descriptor() ([]byte, []int)

Deprecated: Use TensorInfo.ProtoReflect.Descriptor instead.

func (*TensorInfo) GetCompositeTensor

func (x *TensorInfo) GetCompositeTensor() *TensorInfo_CompositeTensor

func (*TensorInfo) GetCooSparse

func (x *TensorInfo) GetCooSparse() *TensorInfo_CooSparse

func (*TensorInfo) GetDtype

func (x *TensorInfo) GetDtype() types_go_proto.DataType

func (*TensorInfo) GetEncoding

func (m *TensorInfo) GetEncoding() isTensorInfo_Encoding

func (*TensorInfo) GetName

func (x *TensorInfo) GetName() string

func (*TensorInfo) GetTensorShape

func (x *TensorInfo) GetTensorShape() *tensor_shape_go_proto.TensorShapeProto

func (*TensorInfo) ProtoMessage

func (*TensorInfo) ProtoMessage()

func (*TensorInfo) ProtoReflect

func (x *TensorInfo) ProtoReflect() protoreflect.Message

func (*TensorInfo) Reset

func (x *TensorInfo) Reset()

func (*TensorInfo) String

func (x *TensorInfo) String() string

type TensorInfo_CompositeTensor

type TensorInfo_CompositeTensor struct {

	// The serialized TypeSpec for the composite tensor.
	TypeSpec *TypeSpecProto `protobuf:"bytes,1,opt,name=type_spec,json=typeSpec,proto3" json:"type_spec,omitempty"`
	// A TensorInfo for each flattened component tensor.
	Components []*TensorInfo `protobuf:"bytes,2,rep,name=components,proto3" json:"components,omitempty"`
	// contains filtered or unexported fields
}

Generic encoding for composite tensors.

func (*TensorInfo_CompositeTensor) Descriptor deprecated

func (*TensorInfo_CompositeTensor) Descriptor() ([]byte, []int)

Deprecated: Use TensorInfo_CompositeTensor.ProtoReflect.Descriptor instead.

func (*TensorInfo_CompositeTensor) GetComponents

func (x *TensorInfo_CompositeTensor) GetComponents() []*TensorInfo

func (*TensorInfo_CompositeTensor) GetTypeSpec

func (x *TensorInfo_CompositeTensor) GetTypeSpec() *TypeSpecProto

func (*TensorInfo_CompositeTensor) ProtoMessage

func (*TensorInfo_CompositeTensor) ProtoMessage()

func (*TensorInfo_CompositeTensor) ProtoReflect

func (*TensorInfo_CompositeTensor) Reset

func (x *TensorInfo_CompositeTensor) Reset()

func (*TensorInfo_CompositeTensor) String

func (x *TensorInfo_CompositeTensor) String() string

type TensorInfo_CompositeTensor_

type TensorInfo_CompositeTensor_ struct {
	// Generic encoding for CompositeTensors.
	CompositeTensor *TensorInfo_CompositeTensor `protobuf:"bytes,5,opt,name=composite_tensor,json=compositeTensor,proto3,oneof"`
}

type TensorInfo_CooSparse

type TensorInfo_CooSparse struct {

	// The shape of the values Tensor is [?].  Its dtype must be the dtype of
	// the SparseTensor as a whole, given in the enclosing TensorInfo.
	ValuesTensorName string `protobuf:"bytes,1,opt,name=values_tensor_name,json=valuesTensorName,proto3" json:"values_tensor_name,omitempty"`
	// The indices Tensor must have dtype int64 and shape [?, ?].
	IndicesTensorName string `protobuf:"bytes,2,opt,name=indices_tensor_name,json=indicesTensorName,proto3" json:"indices_tensor_name,omitempty"`
	// The dynamic logical shape represented by the SparseTensor is recorded in
	// the Tensor referenced here.  It must have dtype int64 and shape [?].
	DenseShapeTensorName string `protobuf:"bytes,3,opt,name=dense_shape_tensor_name,json=denseShapeTensorName,proto3" json:"dense_shape_tensor_name,omitempty"`
	// contains filtered or unexported fields
}

For sparse tensors, the COO encoding stores a triple of values, indices, and shape.
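
A sketch of a TensorInfo carrying the COO encoding via the TensorInfo_CooSparse_ wrapper defined below; the three component tensor names are placeholders.

// Illustrative only: a sparse float32 tensor described by its three component tensors.
sparseInfo := &TensorInfo{
	Encoding: &TensorInfo_CooSparse_{
		CooSparse: &TensorInfo_CooSparse{
			ValuesTensorName:     "sp/values:0",
			IndicesTensorName:    "sp/indices:0",
			DenseShapeTensorName: "sp/dense_shape:0",
		},
	},
	Dtype: types_go_proto.DataType_DT_FLOAT,
}
_ = sparseInfo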

func (*TensorInfo_CooSparse) Descriptor deprecated

func (*TensorInfo_CooSparse) Descriptor() ([]byte, []int)

Deprecated: Use TensorInfo_CooSparse.ProtoReflect.Descriptor instead.

func (*TensorInfo_CooSparse) GetDenseShapeTensorName

func (x *TensorInfo_CooSparse) GetDenseShapeTensorName() string

func (*TensorInfo_CooSparse) GetIndicesTensorName

func (x *TensorInfo_CooSparse) GetIndicesTensorName() string

func (*TensorInfo_CooSparse) GetValuesTensorName

func (x *TensorInfo_CooSparse) GetValuesTensorName() string

func (*TensorInfo_CooSparse) ProtoMessage

func (*TensorInfo_CooSparse) ProtoMessage()

func (*TensorInfo_CooSparse) ProtoReflect

func (x *TensorInfo_CooSparse) ProtoReflect() protoreflect.Message

func (*TensorInfo_CooSparse) Reset

func (x *TensorInfo_CooSparse) Reset()

func (*TensorInfo_CooSparse) String

func (x *TensorInfo_CooSparse) String() string

type TensorInfo_CooSparse_

type TensorInfo_CooSparse_ struct {
	// There are many possible encodings of sparse matrices
	// (https://en.wikipedia.org/wiki/Sparse_matrix).  Currently, TensorFlow
	// uses only the COO encoding.  This is supported and documented in the
	// SparseTensor Python class.
	CooSparse *TensorInfo_CooSparse `protobuf:"bytes,4,opt,name=coo_sparse,json=cooSparse,proto3,oneof"`
}

type TensorInfo_Name

type TensorInfo_Name struct {
	// For dense `Tensor`s, the name of the tensor in the graph.
	Name string `protobuf:"bytes,1,opt,name=name,proto3,oneof"`
}

type TensorMetadata

type TensorMetadata struct {
	TensorShape *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,2,opt,name=tensor_shape,json=tensorShape,proto3" json:"tensor_shape,omitempty"`
	// Number of uncompressed bytes used to store the tensor representation.
	TensorSizeBytes int64 `protobuf:"varint,3,opt,name=tensor_size_bytes,json=tensorSizeBytes,proto3" json:"tensor_size_bytes,omitempty"`
	// contains filtered or unexported fields
}

Metadata for a single tensor in the Snapshot Record.

func (*TensorMetadata) Descriptor deprecated

func (*TensorMetadata) Descriptor() ([]byte, []int)

Deprecated: Use TensorMetadata.ProtoReflect.Descriptor instead.

func (*TensorMetadata) GetTensorShape

func (*TensorMetadata) GetTensorSizeBytes

func (x *TensorMetadata) GetTensorSizeBytes() int64

func (*TensorMetadata) ProtoMessage

func (*TensorMetadata) ProtoMessage()

func (*TensorMetadata) ProtoReflect

func (x *TensorMetadata) ProtoReflect() protoreflect.Message

func (*TensorMetadata) Reset

func (x *TensorMetadata) Reset()

func (*TensorMetadata) String

func (x *TensorMetadata) String() string

type TensorSpecProto

type TensorSpecProto struct {
	Name  string                                  `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Shape *tensor_shape_go_proto.TensorShapeProto `protobuf:"bytes,2,opt,name=shape,proto3" json:"shape,omitempty"`
	Dtype types_go_proto.DataType                 `protobuf:"varint,3,opt,name=dtype,proto3,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// contains filtered or unexported fields
}

A protobuf to represent tf.TensorSpec.
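
A sketch of a TensorSpecProto for a float32 tensor of shape [None, 28, 28]; the spec name is illustrative.

spec := &TensorSpecProto{
	Name:  "image",
	Dtype: types_go_proto.DataType_DT_FLOAT,
	Shape: &tensor_shape_go_proto.TensorShapeProto{
		Dim: []*tensor_shape_go_proto.TensorShapeProto_Dim{
			{Size: -1},
			{Size: 28},
			{Size: 28},
		},
	},
}
_ = spec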

func (*TensorSpecProto) Descriptor deprecated

func (*TensorSpecProto) Descriptor() ([]byte, []int)

Deprecated: Use TensorSpecProto.ProtoReflect.Descriptor instead.

func (*TensorSpecProto) GetDtype

func (x *TensorSpecProto) GetDtype() types_go_proto.DataType

func (*TensorSpecProto) GetName

func (x *TensorSpecProto) GetName() string

func (*TensorSpecProto) GetShape

func (*TensorSpecProto) ProtoMessage

func (*TensorSpecProto) ProtoMessage()

func (*TensorSpecProto) ProtoReflect

func (x *TensorSpecProto) ProtoReflect() protoreflect.Message

func (*TensorSpecProto) Reset

func (x *TensorSpecProto) Reset()

func (*TensorSpecProto) String

func (x *TensorSpecProto) String() string

type ThreadPoolOptionProto

type ThreadPoolOptionProto struct {

	// The number of threads in the pool.
	//
	// 0 means the system picks a value based on where this option proto is used
	// (see the declaration of the specific field for more info).
	NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,proto3" json:"num_threads,omitempty"`
	// The global name of the threadpool.
	//
	// If empty, then the threadpool is made and used according to the scope it's
	// in - e.g., for a session threadpool, it is used by that session only.
	//
	// If non-empty, then:
	//   - a global threadpool associated with this name is looked
	//     up or created. This allows, for example, sharing one threadpool across
	//     many sessions (e.g., like the default behavior, if
	//     inter_op_parallelism_threads is not configured), but still partitioning
	//     into a large and small pool.
	//   - if the threadpool for this global_name already exists, then it is an
	//     error if the existing pool was created using a different num_threads
	//     value than the one specified on this call.
	//   - threadpools created this way are never garbage collected.
	GlobalName string `protobuf:"bytes,2,opt,name=global_name,json=globalName,proto3" json:"global_name,omitempty"`
	// contains filtered or unexported fields
}
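
A sketch of the two usage patterns described in the field comments above: a session-scoped pool sized by the system, and a named global pool shared across sessions. The pool name and size are illustrative; wiring these into a ConfigProto (its session_inter_op_thread_pool list) is not shown here.

// Illustrative only.
sessionPool := &ThreadPoolOptionProto{NumThreads: 0} // system-chosen size, scoped to its session
sharedPool := &ThreadPoolOptionProto{
	NumThreads: 4,
	GlobalName: "shared_inter_op", // looked up or created once per process, never garbage collected
}
_, _ = sessionPool, sharedPool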

func (*ThreadPoolOptionProto) Descriptor deprecated

func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int)

Deprecated: Use ThreadPoolOptionProto.ProtoReflect.Descriptor instead.

func (*ThreadPoolOptionProto) GetGlobalName

func (x *ThreadPoolOptionProto) GetGlobalName() string

func (*ThreadPoolOptionProto) GetNumThreads

func (x *ThreadPoolOptionProto) GetNumThreads() int32

func (*ThreadPoolOptionProto) ProtoMessage

func (*ThreadPoolOptionProto) ProtoMessage()

func (*ThreadPoolOptionProto) ProtoReflect

func (x *ThreadPoolOptionProto) ProtoReflect() protoreflect.Message

func (*ThreadPoolOptionProto) Reset

func (x *ThreadPoolOptionProto) Reset()

func (*ThreadPoolOptionProto) String

func (x *ThreadPoolOptionProto) String() string

type TrackableObjectGraph

type TrackableObjectGraph struct {
	Nodes []*TrackableObjectGraph_TrackableObject `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph) Descriptor deprecated

func (*TrackableObjectGraph) Descriptor() ([]byte, []int)

Deprecated: Use TrackableObjectGraph.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph) GetNodes

func (*TrackableObjectGraph) ProtoMessage

func (*TrackableObjectGraph) ProtoMessage()

func (*TrackableObjectGraph) ProtoReflect

func (x *TrackableObjectGraph) ProtoReflect() protoreflect.Message

func (*TrackableObjectGraph) Reset

func (x *TrackableObjectGraph) Reset()

func (*TrackableObjectGraph) String

func (x *TrackableObjectGraph) String() string

type TrackableObjectGraph_TrackableObject

type TrackableObjectGraph_TrackableObject struct {

	// Objects which this object depends on.
	Children []*TrackableObjectGraph_TrackableObject_ObjectReference `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"`
	// Serialized data specific to this object.
	Attributes []*TrackableObjectGraph_TrackableObject_SerializedTensor `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// Slot variables owned by this object.
	SlotVariables []*TrackableObjectGraph_TrackableObject_SlotVariableReference `protobuf:"bytes,3,rep,name=slot_variables,json=slotVariables,proto3" json:"slot_variables,omitempty"`
	// The registered saver used to save this object. If this saver is not
	// present when loading the checkpoint, then loading will fail.
	RegisteredSaver *RegisteredSaver `protobuf:"bytes,4,opt,name=registered_saver,json=registeredSaver,proto3" json:"registered_saver,omitempty"`
	// Whether this object has checkpoint values or descendants with checkpoint
	// values. This is computed at save time to avoid traversing the entire
	// object graph proto when restoring (which also has to traverse the live
	// object graph).
	HasCheckpointValues *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=has_checkpoint_values,json=hasCheckpointValues,proto3" json:"has_checkpoint_values,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject) Descriptor deprecated

func (*TrackableObjectGraph_TrackableObject) Descriptor() ([]byte, []int)

Deprecated: Use TrackableObjectGraph_TrackableObject.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject) GetAttributes

func (*TrackableObjectGraph_TrackableObject) GetChildren

func (*TrackableObjectGraph_TrackableObject) GetHasCheckpointValues

func (x *TrackableObjectGraph_TrackableObject) GetHasCheckpointValues() *wrapperspb.BoolValue

func (*TrackableObjectGraph_TrackableObject) GetRegisteredSaver

func (x *TrackableObjectGraph_TrackableObject) GetRegisteredSaver() *RegisteredSaver

func (*TrackableObjectGraph_TrackableObject) GetSlotVariables

func (*TrackableObjectGraph_TrackableObject) ProtoMessage

func (*TrackableObjectGraph_TrackableObject) ProtoMessage()

func (*TrackableObjectGraph_TrackableObject) ProtoReflect

func (*TrackableObjectGraph_TrackableObject) Reset

func (*TrackableObjectGraph_TrackableObject) String

type TrackableObjectGraph_TrackableObject_ObjectReference

type TrackableObjectGraph_TrackableObject_ObjectReference struct {

	// An index into `TrackableObjectGraph.nodes`, indicating the object
	// being referenced.
	NodeId int32 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
	// A user-provided name for the edge.
	LocalName string `protobuf:"bytes,2,opt,name=local_name,json=localName,proto3" json:"local_name,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Descriptor deprecated

Deprecated: Use TrackableObjectGraph_TrackableObject_ObjectReference.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject_ObjectReference) GetLocalName

func (*TrackableObjectGraph_TrackableObject_ObjectReference) GetNodeId

func (*TrackableObjectGraph_TrackableObject_ObjectReference) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_ObjectReference) ProtoReflect

func (*TrackableObjectGraph_TrackableObject_ObjectReference) Reset

func (*TrackableObjectGraph_TrackableObject_ObjectReference) String

type TrackableObjectGraph_TrackableObject_SerializedTensor

type TrackableObjectGraph_TrackableObject_SerializedTensor struct {

	// A name for the Tensor. Simple variables have only one
	// `SerializedTensor` named "VARIABLE_VALUE" by convention. This value may
	// be restored on object creation as an optimization.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The full name of the variable/tensor, if applicable. Used to allow
	// name-based loading of checkpoints which were saved using an
	// object-based API. Should match the checkpoint key which would have been
	// assigned by tf.train.Saver.
	FullName string `protobuf:"bytes,2,opt,name=full_name,json=fullName,proto3" json:"full_name,omitempty"`
	// The generated name of the Tensor in the checkpoint.
	CheckpointKey string `protobuf:"bytes,3,opt,name=checkpoint_key,json=checkpointKey,proto3" json:"checkpoint_key,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Descriptor deprecated

Deprecated: Use TrackableObjectGraph_TrackableObject_SerializedTensor.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetCheckpointKey

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetFullName

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) GetName

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) ProtoReflect

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) Reset

func (*TrackableObjectGraph_TrackableObject_SerializedTensor) String

type TrackableObjectGraph_TrackableObject_SlotVariableReference

type TrackableObjectGraph_TrackableObject_SlotVariableReference struct {

	// An index into `TrackableObjectGraph.nodes`, indicating the
	// variable object this slot was created for.
	OriginalVariableNodeId int32 `` /* 132-byte string literal not displayed */
	// The name of the slot (e.g. "m"/"v").
	SlotName string `protobuf:"bytes,2,opt,name=slot_name,json=slotName,proto3" json:"slot_name,omitempty"`
	// An index into `TrackableObjectGraph.nodes`, indicating the
	// `Object` with the value of the slot variable.
	SlotVariableNodeId int32 `protobuf:"varint,3,opt,name=slot_variable_node_id,json=slotVariableNodeId,proto3" json:"slot_variable_node_id,omitempty"`
	// contains filtered or unexported fields
}

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Descriptor deprecated

Deprecated: Use TrackableObjectGraph_TrackableObject_SlotVariableReference.ProtoReflect.Descriptor instead.

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetOriginalVariableNodeId

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotName

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) GetSlotVariableNodeId

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) ProtoMessage

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) ProtoReflect

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) Reset

func (*TrackableObjectGraph_TrackableObject_SlotVariableReference) String

type TupleValue

type TupleValue struct {
	Values []*StructuredValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	// contains filtered or unexported fields
}

Represents a Python tuple.

func (*TupleValue) Descriptor deprecated

func (*TupleValue) Descriptor() ([]byte, []int)

Deprecated: Use TupleValue.ProtoReflect.Descriptor instead.

func (*TupleValue) GetValues

func (x *TupleValue) GetValues() []*StructuredValue

func (*TupleValue) ProtoMessage

func (*TupleValue) ProtoMessage()

func (*TupleValue) ProtoReflect

func (x *TupleValue) ProtoReflect() protoreflect.Message

func (*TupleValue) Reset

func (x *TupleValue) Reset()

func (*TupleValue) String

func (x *TupleValue) String() string

type TypeSpecProto

type TypeSpecProto struct {
	TypeSpecClass TypeSpecProto_TypeSpecClass `` /* 147-byte string literal not displayed */
	// The value returned by TypeSpec._serialize().
	TypeState *StructuredValue `protobuf:"bytes,2,opt,name=type_state,json=typeState,proto3" json:"type_state,omitempty"`
	// The name of the TypeSpec class.
	//   - If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is
	//     the one registered under this name. For types registered outside
	//     core TensorFlow by an add-on library, that library must be loaded
	//     before this value can be deserialized by nested_structure_coder.
	//   - If type_spec_class specifies a particular TypeSpec class, this field is
	//     redundant with the type_spec_class enum, and is only used for error
	//     reporting in older binaries that do not know the type_spec_class enum.
	TypeSpecClassName string `protobuf:"bytes,3,opt,name=type_spec_class_name,json=typeSpecClassName,proto3" json:"type_spec_class_name,omitempty"`
	// The number of flat tensor components required by this TypeSpec.
	NumFlatComponents int32 `protobuf:"varint,4,opt,name=num_flat_components,json=numFlatComponents,proto3" json:"num_flat_components,omitempty"`
	// contains filtered or unexported fields
}

Represents a tf.TypeSpec.
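
A sketch of a TypeSpecProto for a registered (add-on) TypeSpec, following the comment on type_spec_class_name above; the registration name, serialized state, and component count are hypothetical.

// Illustrative only: a TypeSpec registered outside core TensorFlow.
ts := &TypeSpecProto{
	TypeSpecClass:     TypeSpecProto_REGISTERED_TYPE_SPEC,
	TypeSpecClassName: "my_addon.MaskedTensorSpec", // hypothetical registration name
	TypeState: &StructuredValue{
		Kind: &StructuredValue_NoneValue{NoneValue: &NoneValue{}},
	},
	NumFlatComponents: 2,
}
_ = ts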

func (*TypeSpecProto) Descriptor deprecated

func (*TypeSpecProto) Descriptor() ([]byte, []int)

Deprecated: Use TypeSpecProto.ProtoReflect.Descriptor instead.

func (*TypeSpecProto) GetNumFlatComponents

func (x *TypeSpecProto) GetNumFlatComponents() int32

func (*TypeSpecProto) GetTypeSpecClass

func (x *TypeSpecProto) GetTypeSpecClass() TypeSpecProto_TypeSpecClass

func (*TypeSpecProto) GetTypeSpecClassName

func (x *TypeSpecProto) GetTypeSpecClassName() string

func (*TypeSpecProto) GetTypeState

func (x *TypeSpecProto) GetTypeState() *StructuredValue

func (*TypeSpecProto) ProtoMessage

func (*TypeSpecProto) ProtoMessage()

func (*TypeSpecProto) ProtoReflect

func (x *TypeSpecProto) ProtoReflect() protoreflect.Message

func (*TypeSpecProto) Reset

func (x *TypeSpecProto) Reset()

func (*TypeSpecProto) String

func (x *TypeSpecProto) String() string

type TypeSpecProto_TypeSpecClass

type TypeSpecProto_TypeSpecClass int32
const (
	TypeSpecProto_UNKNOWN              TypeSpecProto_TypeSpecClass = 0
	TypeSpecProto_SPARSE_TENSOR_SPEC   TypeSpecProto_TypeSpecClass = 1  // tf.SparseTensorSpec
	TypeSpecProto_INDEXED_SLICES_SPEC  TypeSpecProto_TypeSpecClass = 2  // tf.IndexedSlicesSpec
	TypeSpecProto_RAGGED_TENSOR_SPEC   TypeSpecProto_TypeSpecClass = 3  // tf.RaggedTensorSpec
	TypeSpecProto_TENSOR_ARRAY_SPEC    TypeSpecProto_TypeSpecClass = 4  // tf.TensorArraySpec
	TypeSpecProto_DATA_DATASET_SPEC    TypeSpecProto_TypeSpecClass = 5  // tf.data.DatasetSpec
	TypeSpecProto_DATA_ITERATOR_SPEC   TypeSpecProto_TypeSpecClass = 6  // IteratorSpec from data/ops/iterator_ops.py
	TypeSpecProto_OPTIONAL_SPEC        TypeSpecProto_TypeSpecClass = 7  // tf.OptionalSpec
	TypeSpecProto_PER_REPLICA_SPEC     TypeSpecProto_TypeSpecClass = 8  // PerReplicaSpec from distribute/values.py
	TypeSpecProto_VARIABLE_SPEC        TypeSpecProto_TypeSpecClass = 9  // tf.VariableSpec
	TypeSpecProto_ROW_PARTITION_SPEC   TypeSpecProto_TypeSpecClass = 10 // RowPartitionSpec from ragged/row_partition.py
	TypeSpecProto_REGISTERED_TYPE_SPEC TypeSpecProto_TypeSpecClass = 12 // The type registered as type_spec_class_name.
	TypeSpecProto_EXTENSION_TYPE_SPEC  TypeSpecProto_TypeSpecClass = 13 // Subclasses of tf.ExtensionType
)
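
As with every generated protobuf enum, the Number and String methods listed below can be used to inspect a value. A tiny sketch, again with a placeholder import path:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // placeholder import path
)

func main() {
	c := pb.TypeSpecProto_DATA_DATASET_SPEC
	// String() and Number() are generated for every protobuf enum type.
	fmt.Println(c.String(), c.Number()) // DATA_DATASET_SPEC 5
}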

func (TypeSpecProto_TypeSpecClass) Descriptor

func (TypeSpecProto_TypeSpecClass) Enum

func (TypeSpecProto_TypeSpecClass) EnumDescriptor deprecated

func (TypeSpecProto_TypeSpecClass) EnumDescriptor() ([]byte, []int)

Deprecated: Use TypeSpecProto_TypeSpecClass.Descriptor instead.

func (TypeSpecProto_TypeSpecClass) Number

func (TypeSpecProto_TypeSpecClass) String

func (TypeSpecProto_TypeSpecClass) Type

type UniformQuantizedConvolutionDimensionNumbersAttr added in v0.3.0

type UniformQuantizedConvolutionDimensionNumbersAttr struct {

	// The dimension that represents batch in the input.
	InputBatchDimension int64 `protobuf:"varint,1,opt,name=input_batch_dimension,json=inputBatchDimension,proto3" json:"input_batch_dimension,omitempty"`
	// The dimension that represents features in the input.
	InputFeatureDimension int64 `` /* 127-byte string literal not displayed */
	// The dimensions that represent spatial dimensions in the input. The length
	// must be (tensor rank - 2) for the Convolution op.
	InputSpatialDimensions []int64 `` /* 137-byte string literal not displayed */
	// The dimension that represents input features in the kernel (rhs).
	KernelInputFeatureDimension int64 `` /* 147-byte string literal not displayed */
	// The dimension that represents output features in the kernel (rhs).
	KernelOutputFeatureDimension int64 `` /* 150-byte string literal not displayed */
	// The dimensions that represent spatial dimensions in the kernel (rhs).
	// The length must be (tensor rank - 2) for the Convolution op.
	KernelSpatialDimensions []int64 `` /* 140-byte string literal not displayed */
	// The dimension that represents batch in the output.
	OutputBatchDimension int64 `protobuf:"varint,7,opt,name=output_batch_dimension,json=outputBatchDimension,proto3" json:"output_batch_dimension,omitempty"`
	// The dimension that represents features in the output.
	OutputFeatureDimension int64 `` /* 130-byte string literal not displayed */
	// The dimensions that represent spatial dimensions in the output. The
	// length must be (tensor rank - 2) for the Convolution op.
	OutputSpatialDimensions []int64 `` /* 140-byte string literal not displayed */
	// contains filtered or unexported fields
}

Describes the dimension numbers for the Convolution op. Corresponds to ::mlir::mhlo::ConvDimensionNumbersAttr.
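
A hedged sketch of populating the message for a 2-D convolution with NHWC input, HWIO kernel, and NHWC output; the dimension numbering is illustrative and the import path is a placeholder.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // placeholder import path
)

func main() {
	// Illustrative numbering: NHWC input, HWIO kernel, NHWC output, so the
	// spatial slices have length (tensor rank - 2) = 2.
	dims := &pb.UniformQuantizedConvolutionDimensionNumbersAttr{
		InputBatchDimension:          0,
		InputFeatureDimension:        3,
		InputSpatialDimensions:       []int64{1, 2},
		KernelInputFeatureDimension:  2,
		KernelOutputFeatureDimension: 3,
		KernelSpatialDimensions:      []int64{0, 1},
		OutputBatchDimension:         0,
		OutputFeatureDimension:       3,
		OutputSpatialDimensions:      []int64{1, 2},
	}
	fmt.Println(dims.GetInputSpatialDimensions(), dims.GetKernelSpatialDimensions())
}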

func (*UniformQuantizedConvolutionDimensionNumbersAttr) Descriptor deprecated added in v0.3.0

Deprecated: Use UniformQuantizedConvolutionDimensionNumbersAttr.ProtoReflect.Descriptor instead.

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetInputBatchDimension added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetInputBatchDimension() int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetInputFeatureDimension added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetInputFeatureDimension() int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetInputSpatialDimensions added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetInputSpatialDimensions() []int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetKernelInputFeatureDimension added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetKernelInputFeatureDimension() int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetKernelOutputFeatureDimension added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetKernelOutputFeatureDimension() int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetKernelSpatialDimensions added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetKernelSpatialDimensions() []int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetOutputBatchDimension added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetOutputBatchDimension() int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetOutputFeatureDimension added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetOutputFeatureDimension() int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) GetOutputSpatialDimensions added in v0.3.0

func (x *UniformQuantizedConvolutionDimensionNumbersAttr) GetOutputSpatialDimensions() []int64

func (*UniformQuantizedConvolutionDimensionNumbersAttr) ProtoMessage added in v0.3.0

func (*UniformQuantizedConvolutionDimensionNumbersAttr) ProtoReflect added in v0.3.0

func (*UniformQuantizedConvolutionDimensionNumbersAttr) Reset added in v0.3.0

func (*UniformQuantizedConvolutionDimensionNumbersAttr) String added in v0.3.0

type ValuesDef

type ValuesDef struct {

	// Value names that have been seen in this context.
	Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	// Value names referenced by but external to this context.
	ExternalValues map[string]string `` /* 191-byte string literal not displayed */
	// contains filtered or unexported fields
}

Protocol buffer representing the values in ControlFlowContext.
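
A minimal sketch; the tensor names are hypothetical and the import path is a placeholder.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // placeholder import path
)

func main() {
	// ExternalValues maps names used inside the context to the names they
	// refer to outside of it; all names here are hypothetical.
	vals := &pb.ValuesDef{
		Values: []string{"while/Merge:0", "while/Switch:1"},
		ExternalValues: map[string]string{
			"while/Enter:0": "x:0",
		},
	}
	fmt.Println(len(vals.GetValues()), vals.GetExternalValues()["while/Enter:0"])
}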

func (*ValuesDef) Descriptor deprecated

func (*ValuesDef) Descriptor() ([]byte, []int)

Deprecated: Use ValuesDef.ProtoReflect.Descriptor instead.

func (*ValuesDef) GetExternalValues

func (x *ValuesDef) GetExternalValues() map[string]string

func (*ValuesDef) GetValues

func (x *ValuesDef) GetValues() []string

func (*ValuesDef) ProtoMessage

func (*ValuesDef) ProtoMessage()

func (*ValuesDef) ProtoReflect

func (x *ValuesDef) ProtoReflect() protoreflect.Message

func (*ValuesDef) Reset

func (x *ValuesDef) Reset()

func (*ValuesDef) String

func (x *ValuesDef) String() string

type VerifierConfig

type VerifierConfig struct {

	// Deadline for completing all verification, i.e. all verifiers toggled ON
	// must finish execution within this time.
	VerificationTimeoutInMs int64 `` /* 135-byte string literal not displayed */
	// Perform structural validation on a TensorFlow graph. Default is OFF.
	StructureVerifier VerifierConfig_Toggle `` /* 151-byte string literal not displayed */
	// contains filtered or unexported fields
}

The config for graph verifiers.
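
A minimal sketch that enables the structure verifier with an illustrative deadline; the import path is a placeholder.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // placeholder import path
)

func main() {
	cfg := &pb.VerifierConfig{
		VerificationTimeoutInMs: 5000, // illustrative deadline
		StructureVerifier:       pb.VerifierConfig_ON,
	}
	fmt.Println(cfg.GetStructureVerifier()) // ON
}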

func (*VerifierConfig) Descriptor deprecated

func (*VerifierConfig) Descriptor() ([]byte, []int)

Deprecated: Use VerifierConfig.ProtoReflect.Descriptor instead.

func (*VerifierConfig) GetStructureVerifier

func (x *VerifierConfig) GetStructureVerifier() VerifierConfig_Toggle

func (*VerifierConfig) GetVerificationTimeoutInMs

func (x *VerifierConfig) GetVerificationTimeoutInMs() int64

func (*VerifierConfig) ProtoMessage

func (*VerifierConfig) ProtoMessage()

func (*VerifierConfig) ProtoReflect

func (x *VerifierConfig) ProtoReflect() protoreflect.Message

func (*VerifierConfig) Reset

func (x *VerifierConfig) Reset()

func (*VerifierConfig) String

func (x *VerifierConfig) String() string

type VerifierConfig_Toggle

type VerifierConfig_Toggle int32
const (
	VerifierConfig_DEFAULT VerifierConfig_Toggle = 0
	VerifierConfig_ON      VerifierConfig_Toggle = 1
	VerifierConfig_OFF     VerifierConfig_Toggle = 2
)

func (VerifierConfig_Toggle) Descriptor

func (VerifierConfig_Toggle) Enum

func (VerifierConfig_Toggle) EnumDescriptor deprecated

func (VerifierConfig_Toggle) EnumDescriptor() ([]byte, []int)

Deprecated: Use VerifierConfig_Toggle.Descriptor instead.

func (VerifierConfig_Toggle) Number

func (VerifierConfig_Toggle) String

func (x VerifierConfig_Toggle) String() string

func (VerifierConfig_Toggle) Type

type WhileContextDef

type WhileContextDef struct {

	// Name of the context.
	ContextName string `protobuf:"bytes,1,opt,name=context_name,json=contextName,proto3" json:"context_name,omitempty"`
	// The number of iterations allowed to run in parallel.
	ParallelIterations int32 `protobuf:"varint,2,opt,name=parallel_iterations,json=parallelIterations,proto3" json:"parallel_iterations,omitempty"`
	// Whether backprop is enabled for this while loop.
	BackProp bool `protobuf:"varint,3,opt,name=back_prop,json=backProp,proto3" json:"back_prop,omitempty"`
	// Whether GPU-CPU memory swap is enabled for this loop.
	SwapMemory bool `protobuf:"varint,4,opt,name=swap_memory,json=swapMemory,proto3" json:"swap_memory,omitempty"`
	// Name of the pivot tensor.
	PivotName string `protobuf:"bytes,5,opt,name=pivot_name,json=pivotName,proto3" json:"pivot_name,omitempty"`
	// Name of the pivot_for_pred tensor.
	PivotForPredName string `protobuf:"bytes,6,opt,name=pivot_for_pred_name,json=pivotForPredName,proto3" json:"pivot_for_pred_name,omitempty"`
	// Name of the pivot_for_body tensor.
	PivotForBodyName string `protobuf:"bytes,7,opt,name=pivot_for_body_name,json=pivotForBodyName,proto3" json:"pivot_for_body_name,omitempty"`
	// List of names for exit tensors.
	LoopExitNames []string `protobuf:"bytes,8,rep,name=loop_exit_names,json=loopExitNames,proto3" json:"loop_exit_names,omitempty"`
	// List of names for enter tensors.
	LoopEnterNames []string `protobuf:"bytes,10,rep,name=loop_enter_names,json=loopEnterNames,proto3" json:"loop_enter_names,omitempty"`
	// Values and external values in control flow context.
	ValuesDef *ValuesDef `protobuf:"bytes,9,opt,name=values_def,json=valuesDef,proto3" json:"values_def,omitempty"`
	// Optional name of the maximum_iterations tensor.
	MaximumIterationsName string `` /* 127-byte string literal not displayed */
	// Contexts contained inside this context (e.g. nested whiles).
	NestedContexts []*ControlFlowContextDef `protobuf:"bytes,12,rep,name=nested_contexts,json=nestedContexts,proto3" json:"nested_contexts,omitempty"`
	// contains filtered or unexported fields
}

Protocol buffer representing a WhileContext object.
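
A hedged sketch that fills in a few of the fields by hand; the tensor names are hypothetical, real graphs serialize these from an actual WhileContext, and the import path is a placeholder.

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // placeholder import path
)

func main() {
	// All names below are hypothetical placeholders.
	ctx := &pb.WhileContextDef{
		ContextName:        "while",
		ParallelIterations: 10,
		BackProp:           true,
		SwapMemory:         false,
		PivotName:          "while/LoopCond:0",
		LoopEnterNames:     []string{"while/Enter:0"},
		LoopExitNames:      []string{"while/Exit:0"},
		ValuesDef:          &pb.ValuesDef{Values: []string{"while/Enter:0"}},
	}
	fmt.Println(ctx.GetContextName(), ctx.GetParallelIterations())
}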

func (*WhileContextDef) Descriptor deprecated

func (*WhileContextDef) Descriptor() ([]byte, []int)

Deprecated: Use WhileContextDef.ProtoReflect.Descriptor instead.

func (*WhileContextDef) GetBackProp

func (x *WhileContextDef) GetBackProp() bool

func (*WhileContextDef) GetContextName

func (x *WhileContextDef) GetContextName() string

func (*WhileContextDef) GetLoopEnterNames

func (x *WhileContextDef) GetLoopEnterNames() []string

func (*WhileContextDef) GetLoopExitNames

func (x *WhileContextDef) GetLoopExitNames() []string

func (*WhileContextDef) GetMaximumIterationsName

func (x *WhileContextDef) GetMaximumIterationsName() string

func (*WhileContextDef) GetNestedContexts

func (x *WhileContextDef) GetNestedContexts() []*ControlFlowContextDef

func (*WhileContextDef) GetParallelIterations

func (x *WhileContextDef) GetParallelIterations() int32

func (*WhileContextDef) GetPivotForBodyName

func (x *WhileContextDef) GetPivotForBodyName() string

func (*WhileContextDef) GetPivotForPredName

func (x *WhileContextDef) GetPivotForPredName() string

func (*WhileContextDef) GetPivotName

func (x *WhileContextDef) GetPivotName() string

func (*WhileContextDef) GetSwapMemory

func (x *WhileContextDef) GetSwapMemory() bool

func (*WhileContextDef) GetValuesDef

func (x *WhileContextDef) GetValuesDef() *ValuesDef

func (*WhileContextDef) ProtoMessage

func (*WhileContextDef) ProtoMessage()

func (*WhileContextDef) ProtoReflect

func (x *WhileContextDef) ProtoReflect() protoreflect.Message

func (*WhileContextDef) Reset

func (x *WhileContextDef) Reset()

func (*WhileContextDef) String

func (x *WhileContextDef) String() string

type WorkerConfig

type WorkerConfig struct {

	// The port for the worker to bind to. A value of 0 indicates that the
	// worker may bind to any available port.
	Port int64 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
	// The protocol for the worker to use when connecting to the dispatcher.
	Protocol string `protobuf:"bytes,2,opt,name=protocol,proto3" json:"protocol,omitempty"`
	// The address of the dispatcher to register with.
	DispatcherAddress string `protobuf:"bytes,3,opt,name=dispatcher_address,json=dispatcherAddress,proto3" json:"dispatcher_address,omitempty"`
	// The address of the worker server. The substring "%port%", if specified,
	// will be replaced with the worker's bound port. This is useful when the port
	// is set to `0`.
	WorkerAddress string `protobuf:"bytes,4,opt,name=worker_address,json=workerAddress,proto3" json:"worker_address,omitempty"`
	// Tags attached to the worker. This allows reading from selected workers.
	// For example, by applying a "COLOCATED" tag, tf.data service is able to read
	// from the local tf.data worker if one exists, then from off-TF-host workers,
	// to avoid cross-TF-host reads.
	WorkerTags []string `protobuf:"bytes,10,rep,name=worker_tags,json=workerTags,proto3" json:"worker_tags,omitempty"`
	// How often the worker should heartbeat to the master. A value of 0 indicates
	// that the decision should be left up to the runtime.
	HeartbeatIntervalMs int64 `protobuf:"varint,5,opt,name=heartbeat_interval_ms,json=heartbeatIntervalMs,proto3" json:"heartbeat_interval_ms,omitempty"`
	// How long to retry requests to the dispatcher before giving up and reporting
	// an error. A value of 0 indicates that the decision should be left up to the
	// runtime.
	DispatcherTimeoutMs int64 `protobuf:"varint,6,opt,name=dispatcher_timeout_ms,json=dispatcherTimeoutMs,proto3" json:"dispatcher_timeout_ms,omitempty"`
	// The protocol for the worker to use when transferring data to clients.
	DataTransferProtocol string `protobuf:"bytes,7,opt,name=data_transfer_protocol,json=dataTransferProtocol,proto3" json:"data_transfer_protocol,omitempty"`
	// The data transfer address of the worker server. The substring "%port%", if
	// specified, will be replaced with the worker's bound port. This is useful
	// when the port is set to `0`.
	DataTransferAddress string `protobuf:"bytes,8,opt,name=data_transfer_address,json=dataTransferAddress,proto3" json:"data_transfer_address,omitempty"`
	// Maximum size of the cross-trainer cache in bytes. If enabled, make sure
	// your training job provides sufficient memory resources.
	CrossTrainerCacheSizeBytes int64 `` /* 147-byte string literal not displayed */
	// The maximum size of a distributed snapshot chunk file. A value of 0
	// indicates that the decision should be left up to the runtime.
	SnapshotMaxChunkSizeBytes int64 `` /* 144-byte string literal not displayed */
	// When shutting down a worker, how long to wait for the gRPC server to
	// process the final requests. This is used to achieve clean shutdown in unit
	// tests.
	ShutdownQuietPeriodMs int64 `` /* 129-byte string literal not displayed */
	// contains filtered or unexported fields
}

Configuration for a tf.data service WorkerServer. Next id: 13
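
A hedged sketch of a worker configuration rendered as a text proto; the protocol string and addresses are assumptions made for illustration, not defaults mandated by the message, and the import path is a placeholder.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"

	pb "example.com/tensorflow/core/protobuf/for_core_protos_go_proto" // placeholder import path
)

func main() {
	// Illustrative values only; the protocol and addresses are assumptions.
	cfg := &pb.WorkerConfig{
		Port:                0, // 0 means bind to any available port
		Protocol:            "grpc",
		DispatcherAddress:   "dispatcher.example.com:5050",
		WorkerAddress:       "localhost:%port%", // %port% is replaced with the bound port
		HeartbeatIntervalMs: 0,                  // 0 leaves the interval to the runtime
	}
	fmt.Println(prototext.Format(cfg))
}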

func (*WorkerConfig) Descriptor deprecated

func (*WorkerConfig) Descriptor() ([]byte, []int)

Deprecated: Use WorkerConfig.ProtoReflect.Descriptor instead.

func (*WorkerConfig) GetCrossTrainerCacheSizeBytes added in v0.2.0

func (x *WorkerConfig) GetCrossTrainerCacheSizeBytes() int64

func (*WorkerConfig) GetDataTransferAddress

func (x *WorkerConfig) GetDataTransferAddress() string

func (*WorkerConfig) GetDataTransferProtocol

func (x *WorkerConfig) GetDataTransferProtocol() string

func (*WorkerConfig) GetDispatcherAddress

func (x *WorkerConfig) GetDispatcherAddress() string

func (*WorkerConfig) GetDispatcherTimeoutMs

func (x *WorkerConfig) GetDispatcherTimeoutMs() int64

func (*WorkerConfig) GetHeartbeatIntervalMs

func (x *WorkerConfig) GetHeartbeatIntervalMs() int64

func (*WorkerConfig) GetPort

func (x *WorkerConfig) GetPort() int64

func (*WorkerConfig) GetProtocol

func (x *WorkerConfig) GetProtocol() string

func (*WorkerConfig) GetShutdownQuietPeriodMs

func (x *WorkerConfig) GetShutdownQuietPeriodMs() int64

func (*WorkerConfig) GetSnapshotMaxChunkSizeBytes added in v0.5.0

func (x *WorkerConfig) GetSnapshotMaxChunkSizeBytes() int64

func (*WorkerConfig) GetWorkerAddress

func (x *WorkerConfig) GetWorkerAddress() string

func (*WorkerConfig) GetWorkerTags

func (x *WorkerConfig) GetWorkerTags() []string

func (*WorkerConfig) ProtoMessage

func (*WorkerConfig) ProtoMessage()

func (*WorkerConfig) ProtoReflect

func (x *WorkerConfig) ProtoReflect() protoreflect.Message

func (*WorkerConfig) Reset

func (x *WorkerConfig) Reset()

func (*WorkerConfig) String

func (x *WorkerConfig) String() string
