protobuf

package
v0.0.0-...-d1695c4
Published: Sep 29, 2021 License: Apache-2.0 Imports: 8 Imported by: 0

Documentation

Overview

Package protobuf is a generated protocol buffer package.

It is generated from these files:

tensorflow/core/protobuf/cluster.proto
tensorflow/core/protobuf/config.proto
tensorflow/core/protobuf/control_flow.proto
tensorflow/core/protobuf/debug.proto
tensorflow/core/protobuf/device_properties.proto
tensorflow/core/protobuf/master.proto
tensorflow/core/protobuf/master_service.proto
tensorflow/core/protobuf/meta_graph.proto
tensorflow/core/protobuf/named_tensor.proto
tensorflow/core/protobuf/queue_runner.proto
tensorflow/core/protobuf/rewriter_config.proto
tensorflow/core/protobuf/saved_model.proto
tensorflow/core/protobuf/saver.proto
tensorflow/core/protobuf/tensor_bundle.proto
tensorflow/core/protobuf/tensorflow_server.proto
tensorflow/core/protobuf/worker.proto
tensorflow/core/protobuf/worker_service.proto

It has these top-level messages:

JobDef
ClusterDef
GPUOptions
OptimizerOptions
GraphOptions
ThreadPoolOptionProto
RPCOptions
ConfigProto
RunOptions
RunMetadata
ValuesDef
CondContextDef
WhileContextDef
DebugTensorWatch
DebugOptions
DeviceProperties
DeviceMap
CreateSessionRequest
CreateSessionResponse
ExtendSessionRequest
ExtendSessionResponse
RunStepRequest
RunStepResponse
PartialRunSetupRequest
PartialRunSetupResponse
CloseSessionRequest
CloseSessionResponse
ResetRequest
ResetResponse
ListDevicesRequest
ListDevicesResponse
MetaGraphDef
CollectionDef
TensorInfo
SignatureDef
AssetFileDef
NamedTensorProto
QueueRunnerDef
AutoParallelOptions
RewriterConfig
SavedModel
SaverDef
BundleHeaderProto
BundleEntryProto
ServerDef
GetStatusRequest
GetStatusResponse
CreateWorkerSessionRequest
CreateWorkerSessionResponse
RegisterGraphRequest
RegisterGraphResponse
DeregisterGraphRequest
DeregisterGraphResponse
CleanupAllRequest
CleanupAllResponse
ExecutorOpts
RunGraphRequest
RunGraphResponse
CleanupGraphRequest
CleanupGraphResponse
RecvTensorRequest
RecvTensorResponse
LoggingRequest
LabeledStepStats
LoggingResponse
TraceOpts
TracingRequest
TracingResponse

Index

Constants

This section is empty.

Variables

View Source
var BundleHeaderProto_Endianness_name = map[int32]string{
	0: "LITTLE",
	1: "BIG",
}
View Source
var BundleHeaderProto_Endianness_value = map[string]int32{
	"LITTLE": 0,
	"BIG":    1,
}
View Source
var OptimizerOptions_GlobalJitLevel_name = map[int32]string{
	0:  "DEFAULT",
	-1: "OFF",
	1:  "ON_1",
	2:  "ON_2",
}
View Source
var OptimizerOptions_GlobalJitLevel_value = map[string]int32{
	"DEFAULT": 0,
	"OFF":     -1,
	"ON_1":    1,
	"ON_2":    2,
}
View Source
var OptimizerOptions_Level_name = map[int32]string{
	0:  "L1",
	-1: "L0",
}
View Source
var OptimizerOptions_Level_value = map[string]int32{
	"L1": 0,
	"L0": -1,
}
View Source
var RewriterConfig_MemOptType_name = map[int32]string{
	0: "NO_MEM_OPT",
	1: "MANUAL",
	2: "HEURISTICS",
}
View Source
var RewriterConfig_MemOptType_value = map[string]int32{
	"NO_MEM_OPT": 0,
	"MANUAL":     1,
	"HEURISTICS": 2,
}
View Source
var RunOptions_TraceLevel_name = map[int32]string{
	0: "NO_TRACE",
	1: "SOFTWARE_TRACE",
	2: "HARDWARE_TRACE",
	3: "FULL_TRACE",
}
View Source
var RunOptions_TraceLevel_value = map[string]int32{
	"NO_TRACE":       0,
	"SOFTWARE_TRACE": 1,
	"HARDWARE_TRACE": 2,
	"FULL_TRACE":     3,
}
View Source
var SaverDef_CheckpointFormatVersion_name = map[int32]string{
	0: "LEGACY",
	1: "V1",
	2: "V2",
}
View Source
var SaverDef_CheckpointFormatVersion_value = map[string]int32{
	"LEGACY": 0,
	"V1":     1,
	"V2":     2,
}

Functions

func RegisterMasterServiceServer

func RegisterMasterServiceServer(s *grpc.Server, srv MasterServiceServer)

func RegisterWorkerServiceServer

func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer)
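
A hedged sketch of wiring these registration helpers into a gRPC server. The listening address is illustrative, and the concrete MasterServiceServer/WorkerServiceServer implementations are assumed to be supplied elsewhere; google.golang.org/grpc, net, and log are assumed imports:

var master MasterServiceServer // concrete implementation assumed to exist elsewhere
var worker WorkerServiceServer // concrete implementation assumed to exist elsewhere

s := grpc.NewServer()
RegisterMasterServiceServer(s, master)
RegisterWorkerServiceServer(s, worker)

lis, err := net.Listen("tcp", ":2222") // port chosen for illustration only
if err != nil {
	log.Fatal(err)
}
log.Fatal(s.Serve(lis))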

Types

type AssetFileDef

type AssetFileDef struct {
	// The tensor to bind the asset filename to.
	TensorInfo *TensorInfo `protobuf:"bytes,1,opt,name=tensor_info,json=tensorInfo" json:"tensor_info,omitempty"`
	// The filename within an assets directory. Note: does not include the path
	// prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename
	// would be "vocab.txt".
	Filename string `protobuf:"bytes,2,opt,name=filename" json:"filename,omitempty"`
}

An asset file def for a single file or a set of sharded files with the same name.

func (*AssetFileDef) Descriptor

func (*AssetFileDef) Descriptor() ([]byte, []int)

func (*AssetFileDef) GetFilename

func (m *AssetFileDef) GetFilename() string

func (*AssetFileDef) GetTensorInfo

func (m *AssetFileDef) GetTensorInfo() *TensorInfo

func (*AssetFileDef) ProtoMessage

func (*AssetFileDef) ProtoMessage()

func (*AssetFileDef) Reset

func (m *AssetFileDef) Reset()

func (*AssetFileDef) String

func (m *AssetFileDef) String() string

type AutoParallelOptions

type AutoParallelOptions struct {
	Enable      bool  `protobuf:"varint,1,opt,name=enable" json:"enable,omitempty"`
	NumReplicas int32 `protobuf:"varint,2,opt,name=num_replicas,json=numReplicas" json:"num_replicas,omitempty"`
}

func (*AutoParallelOptions) Descriptor

func (*AutoParallelOptions) Descriptor() ([]byte, []int)

func (*AutoParallelOptions) GetEnable

func (m *AutoParallelOptions) GetEnable() bool

func (*AutoParallelOptions) GetNumReplicas

func (m *AutoParallelOptions) GetNumReplicas() int32

func (*AutoParallelOptions) ProtoMessage

func (*AutoParallelOptions) ProtoMessage()

func (*AutoParallelOptions) Reset

func (m *AutoParallelOptions) Reset()

func (*AutoParallelOptions) String

func (m *AutoParallelOptions) String() string

type BundleEntryProto

type BundleEntryProto struct {
	// The tensor dtype and shape.
	Dtype tensorflow1.DataType         `protobuf:"varint,1,opt,name=dtype,enum=tensorflow.DataType" json:"dtype,omitempty"`
	Shape *tensorflow.TensorShapeProto `protobuf:"bytes,2,opt,name=shape" json:"shape,omitempty"`
	// The binary content of the tensor lies in:
	//   File "shard_id": bytes [offset, offset + size).
	ShardId int32 `protobuf:"varint,3,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"`
	Offset  int64 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"`
	Size    int64 `protobuf:"varint,5,opt,name=size" json:"size,omitempty"`
	// The CRC32C checksum of the tensor bytes.
	Crc32C uint32 `protobuf:"fixed32,6,opt,name=crc32c" json:"crc32c,omitempty"`
	// Iff present, this entry represents a partitioned tensor.  The previous
	// fields are interpreted as follows:
	//
	//   "dtype", "shape": describe the full tensor.
	//   "shard_id", "offset", "size", "crc32c": all IGNORED.
	//      This information for each slice can be looked up in its own
	//      BundleEntryProto, keyed by each "slice_name".
	Slices []*tensorflow15.TensorSliceProto `protobuf:"bytes,7,rep,name=slices" json:"slices,omitempty"`
}

Describes the metadata related to a checkpointed tensor.
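
A minimal sketch of how a reader would interpret these fields when locating the tensor bytes in a checkpoint shard; the field values are illustrative only, and fmt is an assumed import:

entry := &BundleEntryProto{
	ShardId: 0,
	Offset:  4096,
	Size:    256,
	Crc32C:  0xdeadbeef, // placeholder checksum
}
// The tensor bytes live in shard file 0 at [offset, offset+size).
fmt.Printf("read %d bytes at offset %d of shard %d\n",
	entry.GetSize(), entry.GetOffset(), entry.GetShardId())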

func (*BundleEntryProto) Descriptor

func (*BundleEntryProto) Descriptor() ([]byte, []int)

func (*BundleEntryProto) GetCrc32C

func (m *BundleEntryProto) GetCrc32C() uint32

func (*BundleEntryProto) GetDtype

func (m *BundleEntryProto) GetDtype() tensorflow1.DataType

func (*BundleEntryProto) GetOffset

func (m *BundleEntryProto) GetOffset() int64

func (*BundleEntryProto) GetShape

func (*BundleEntryProto) GetShardId

func (m *BundleEntryProto) GetShardId() int32

func (*BundleEntryProto) GetSize

func (m *BundleEntryProto) GetSize() int64

func (*BundleEntryProto) GetSlices

func (*BundleEntryProto) ProtoMessage

func (*BundleEntryProto) ProtoMessage()

func (*BundleEntryProto) Reset

func (m *BundleEntryProto) Reset()

func (*BundleEntryProto) String

func (m *BundleEntryProto) String() string

type BundleHeaderProto

type BundleHeaderProto struct {
	// Number of data files in the bundle.
	NumShards  int32                        `protobuf:"varint,1,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
	Endianness BundleHeaderProto_Endianness `protobuf:"varint,2,opt,name=endianness,enum=tensorflow.BundleHeaderProto_Endianness" json:"endianness,omitempty"`
	// Versioning of the tensor bundle format.
	Version *tensorflow9.VersionDef `protobuf:"bytes,3,opt,name=version" json:"version,omitempty"`
}

Special header that is associated with a bundle.

TODO(zongheng,zhifengc): maybe in the future, we can add information about which binary produced this checkpoint, timestamp, etc. Sometimes these can be valuable debugging information. And if needed, they can be used defensively to ensure that the reader (binary version) of the checkpoint and the writer (binary version) match within a certain range, etc.

func (*BundleHeaderProto) Descriptor

func (*BundleHeaderProto) Descriptor() ([]byte, []int)

func (*BundleHeaderProto) GetEndianness

func (*BundleHeaderProto) GetNumShards

func (m *BundleHeaderProto) GetNumShards() int32

func (*BundleHeaderProto) GetVersion

func (m *BundleHeaderProto) GetVersion() *tensorflow9.VersionDef

func (*BundleHeaderProto) ProtoMessage

func (*BundleHeaderProto) ProtoMessage()

func (*BundleHeaderProto) Reset

func (m *BundleHeaderProto) Reset()

func (*BundleHeaderProto) String

func (m *BundleHeaderProto) String() string

type BundleHeaderProto_Endianness

type BundleHeaderProto_Endianness int32

An enum indicating the endianness of the platform that produced this bundle. A bundle can only be read by a platform with matching endianness. Defaults to LITTLE, as most modern platforms are little-endian.

Affects the binary tensor data bytes only, not the metadata in protobufs.

const (
	BundleHeaderProto_LITTLE BundleHeaderProto_Endianness = 0
	BundleHeaderProto_BIG    BundleHeaderProto_Endianness = 1
)

func (BundleHeaderProto_Endianness) EnumDescriptor

func (BundleHeaderProto_Endianness) EnumDescriptor() ([]byte, []int)

func (BundleHeaderProto_Endianness) String

type CleanupAllRequest

type CleanupAllRequest struct {
	// A list of container names.
	//
	// If 'container' is not empty, releases resources in the given
	// containers in all devices.
	//
	// If 'container' is empty, releases resources in the default
	// container in all devices.
	Container []string `protobuf:"bytes,1,rep,name=container" json:"container,omitempty"`
}

func (*CleanupAllRequest) Descriptor

func (*CleanupAllRequest) Descriptor() ([]byte, []int)

func (*CleanupAllRequest) GetContainer

func (m *CleanupAllRequest) GetContainer() []string

func (*CleanupAllRequest) ProtoMessage

func (*CleanupAllRequest) ProtoMessage()

func (*CleanupAllRequest) Reset

func (m *CleanupAllRequest) Reset()

func (*CleanupAllRequest) String

func (m *CleanupAllRequest) String() string

type CleanupAllResponse

type CleanupAllResponse struct {
}

func (*CleanupAllResponse) Descriptor

func (*CleanupAllResponse) Descriptor() ([]byte, []int)

func (*CleanupAllResponse) ProtoMessage

func (*CleanupAllResponse) ProtoMessage()

func (*CleanupAllResponse) Reset

func (m *CleanupAllResponse) Reset()

func (*CleanupAllResponse) String

func (m *CleanupAllResponse) String() string

type CleanupGraphRequest

type CleanupGraphRequest struct {
	StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"`
}

func (*CleanupGraphRequest) Descriptor

func (*CleanupGraphRequest) Descriptor() ([]byte, []int)

func (*CleanupGraphRequest) GetStepId

func (m *CleanupGraphRequest) GetStepId() int64

func (*CleanupGraphRequest) ProtoMessage

func (*CleanupGraphRequest) ProtoMessage()

func (*CleanupGraphRequest) Reset

func (m *CleanupGraphRequest) Reset()

func (*CleanupGraphRequest) String

func (m *CleanupGraphRequest) String() string

type CleanupGraphResponse

type CleanupGraphResponse struct {
}

func (*CleanupGraphResponse) Descriptor

func (*CleanupGraphResponse) Descriptor() ([]byte, []int)

func (*CleanupGraphResponse) ProtoMessage

func (*CleanupGraphResponse) ProtoMessage()

func (*CleanupGraphResponse) Reset

func (m *CleanupGraphResponse) Reset()

func (*CleanupGraphResponse) String

func (m *CleanupGraphResponse) String() string

type CloseSessionRequest

type CloseSessionRequest struct {
	// REQUIRED: session_handle must be returned by a CreateSession call
	// to the same master service.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
}

func (*CloseSessionRequest) Descriptor

func (*CloseSessionRequest) Descriptor() ([]byte, []int)

func (*CloseSessionRequest) GetSessionHandle

func (m *CloseSessionRequest) GetSessionHandle() string

func (*CloseSessionRequest) ProtoMessage

func (*CloseSessionRequest) ProtoMessage()

func (*CloseSessionRequest) Reset

func (m *CloseSessionRequest) Reset()

func (*CloseSessionRequest) String

func (m *CloseSessionRequest) String() string

type CloseSessionResponse

type CloseSessionResponse struct {
}

func (*CloseSessionResponse) Descriptor

func (*CloseSessionResponse) Descriptor() ([]byte, []int)

func (*CloseSessionResponse) ProtoMessage

func (*CloseSessionResponse) ProtoMessage()

func (*CloseSessionResponse) Reset

func (m *CloseSessionResponse) Reset()

func (*CloseSessionResponse) String

func (m *CloseSessionResponse) String() string

type ClusterDef

type ClusterDef struct {
	// The jobs that comprise the cluster.
	Job []*JobDef `protobuf:"bytes,1,rep,name=job" json:"job,omitempty"`
}

Defines a TensorFlow cluster as a set of jobs.

func (*ClusterDef) Descriptor

func (*ClusterDef) Descriptor() ([]byte, []int)

func (*ClusterDef) GetJob

func (m *ClusterDef) GetJob() []*JobDef

func (*ClusterDef) ProtoMessage

func (*ClusterDef) ProtoMessage()

func (*ClusterDef) Reset

func (m *ClusterDef) Reset()

func (*ClusterDef) String

func (m *ClusterDef) String() string

type CollectionDef

type CollectionDef struct {
	// Types that are valid to be assigned to Kind:
	//	*CollectionDef_NodeList_
	//	*CollectionDef_BytesList_
	//	*CollectionDef_Int64List_
	//	*CollectionDef_FloatList_
	//	*CollectionDef_AnyList_
	Kind isCollectionDef_Kind `protobuf_oneof:"kind"`
}

CollectionDef should cover most collections. To add a user-defined collection, do one of the following:

  1. For simple data types, such as string, int, float: tf.add_to_collection("your_collection_name", your_simple_value). Strings will be stored as bytes_list.

2. For Protobuf types, there are three ways to add them:

  1. tf.add_to_collection("your_collection_name", your_proto.SerializeToString())

    collection_def { key: "user_defined_bytes_collection" value { bytes_list { value: "queue_name: \"test_queue\"\n" } } }

    or

  2. tf.add_to_collection("your_collection_name", str(your_proto))

    collection_def { key: "user_defined_string_collection" value { bytes_list { value: "\n\ntest_queue" } } }

    or

  3. any_buf = any_pb2.Any() tf.add_to_collection("your_collection_name", any_buf.Pack(your_proto))

    collection_def { key: "user_defined_any_collection" value { any_list { value { type_url: "type.googleapis.com/tensorflow.QueueRunnerDef" value: "\n\ntest_queue" } } } }

  3. For Python objects, implement to_proto() and from_proto(), and register them in the following manner: ops.register_proto_function("your_collection_name", proto_type, to_proto=YourPythonObject.to_proto, from_proto=YourPythonObject.from_proto) These functions will be invoked to serialize and deserialize the collection. For example, ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES, proto_type=variable_pb2.VariableDef, to_proto=Variable.to_proto, from_proto=Variable.from_proto)
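
On the Go side, a bytes_list collection like the ones above can be built directly through the generated oneof wrapper types. A minimal sketch (the collection contents are illustrative, and fmt is an assumed import):

coll := &CollectionDef{
	Kind: &CollectionDef_BytesList_{
		BytesList: &CollectionDef_BytesList{
			Value: [][]byte{[]byte("queue_name: \"test_queue\"\n")},
		},
	},
}
if bl := coll.GetBytesList(); bl != nil {
	fmt.Println(len(bl.GetValue())) // 1
}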

func (*CollectionDef) Descriptor

func (*CollectionDef) Descriptor() ([]byte, []int)

func (*CollectionDef) GetAnyList

func (m *CollectionDef) GetAnyList() *CollectionDef_AnyList

func (*CollectionDef) GetBytesList

func (m *CollectionDef) GetBytesList() *CollectionDef_BytesList

func (*CollectionDef) GetFloatList

func (m *CollectionDef) GetFloatList() *CollectionDef_FloatList

func (*CollectionDef) GetInt64List

func (m *CollectionDef) GetInt64List() *CollectionDef_Int64List

func (*CollectionDef) GetKind

func (m *CollectionDef) GetKind() isCollectionDef_Kind

func (*CollectionDef) GetNodeList

func (m *CollectionDef) GetNodeList() *CollectionDef_NodeList

func (*CollectionDef) ProtoMessage

func (*CollectionDef) ProtoMessage()

func (*CollectionDef) Reset

func (m *CollectionDef) Reset()

func (*CollectionDef) String

func (m *CollectionDef) String() string

func (*CollectionDef) XXX_OneofFuncs

func (*CollectionDef) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type CollectionDef_AnyList

type CollectionDef_AnyList struct {
	Value []*google_protobuf.Any `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"`
}

AnyList is used for collecting Any protos.

func (*CollectionDef_AnyList) Descriptor

func (*CollectionDef_AnyList) Descriptor() ([]byte, []int)

func (*CollectionDef_AnyList) GetValue

func (m *CollectionDef_AnyList) GetValue() []*google_protobuf.Any

func (*CollectionDef_AnyList) ProtoMessage

func (*CollectionDef_AnyList) ProtoMessage()

func (*CollectionDef_AnyList) Reset

func (m *CollectionDef_AnyList) Reset()

func (*CollectionDef_AnyList) String

func (m *CollectionDef_AnyList) String() string

type CollectionDef_AnyList_

type CollectionDef_AnyList_ struct {
	AnyList *CollectionDef_AnyList `protobuf:"bytes,5,opt,name=any_list,json=anyList,oneof"`
}

type CollectionDef_BytesList

type CollectionDef_BytesList struct {
	Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}

BytesList is used for collecting strings and serialized protobufs. For example:

collection_def {
  key: "trainable_variables"
  value {
    bytes_list {
      value: "\n\017conv1/weights:0\022\024conv1/weights/Assign
             \032\024conv1/weights/read:0"
      value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032
             \023conv1/biases/read:0"
    }
  }
}

func (*CollectionDef_BytesList) Descriptor

func (*CollectionDef_BytesList) Descriptor() ([]byte, []int)

func (*CollectionDef_BytesList) GetValue

func (m *CollectionDef_BytesList) GetValue() [][]byte

func (*CollectionDef_BytesList) ProtoMessage

func (*CollectionDef_BytesList) ProtoMessage()

func (*CollectionDef_BytesList) Reset

func (m *CollectionDef_BytesList) Reset()

func (*CollectionDef_BytesList) String

func (m *CollectionDef_BytesList) String() string

type CollectionDef_BytesList_

type CollectionDef_BytesList_ struct {
	BytesList *CollectionDef_BytesList `protobuf:"bytes,2,opt,name=bytes_list,json=bytesList,oneof"`
}

type CollectionDef_FloatList

type CollectionDef_FloatList struct {
	Value []float32 `protobuf:"fixed32,1,rep,packed,name=value" json:"value,omitempty"`
}

FloatList is used for collecting float values.

func (*CollectionDef_FloatList) Descriptor

func (*CollectionDef_FloatList) Descriptor() ([]byte, []int)

func (*CollectionDef_FloatList) GetValue

func (m *CollectionDef_FloatList) GetValue() []float32

func (*CollectionDef_FloatList) ProtoMessage

func (*CollectionDef_FloatList) ProtoMessage()

func (*CollectionDef_FloatList) Reset

func (m *CollectionDef_FloatList) Reset()

func (*CollectionDef_FloatList) String

func (m *CollectionDef_FloatList) String() string

type CollectionDef_FloatList_

type CollectionDef_FloatList_ struct {
	FloatList *CollectionDef_FloatList `protobuf:"bytes,4,opt,name=float_list,json=floatList,oneof"`
}

type CollectionDef_Int64List

type CollectionDef_Int64List struct {
	Value []int64 `protobuf:"varint,1,rep,packed,name=value" json:"value,omitempty"`
}

Int64List is used for collecting int, int64 and long values.

func (*CollectionDef_Int64List) Descriptor

func (*CollectionDef_Int64List) Descriptor() ([]byte, []int)

func (*CollectionDef_Int64List) GetValue

func (m *CollectionDef_Int64List) GetValue() []int64

func (*CollectionDef_Int64List) ProtoMessage

func (*CollectionDef_Int64List) ProtoMessage()

func (*CollectionDef_Int64List) Reset

func (m *CollectionDef_Int64List) Reset()

func (*CollectionDef_Int64List) String

func (m *CollectionDef_Int64List) String() string

type CollectionDef_Int64List_

type CollectionDef_Int64List_ struct {
	Int64List *CollectionDef_Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}

type CollectionDef_NodeList

type CollectionDef_NodeList struct {
	Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"`
}

NodeList is used for collecting nodes in graph. For example

collection_def {
  key: "summaries"
  value {
    node_list {
      value: "input_producer/ScalarSummary:0"
      value: "shuffle_batch/ScalarSummary:0"
      value: "ImageSummary:0"
    }
  }
}

func (*CollectionDef_NodeList) Descriptor

func (*CollectionDef_NodeList) Descriptor() ([]byte, []int)

func (*CollectionDef_NodeList) GetValue

func (m *CollectionDef_NodeList) GetValue() []string

func (*CollectionDef_NodeList) ProtoMessage

func (*CollectionDef_NodeList) ProtoMessage()

func (*CollectionDef_NodeList) Reset

func (m *CollectionDef_NodeList) Reset()

func (*CollectionDef_NodeList) String

func (m *CollectionDef_NodeList) String() string

type CollectionDef_NodeList_

type CollectionDef_NodeList_ struct {
	NodeList *CollectionDef_NodeList `protobuf:"bytes,1,opt,name=node_list,json=nodeList,oneof"`
}

type CondContextDef

type CondContextDef struct {
	// Name of the context.
	ContextName string `protobuf:"bytes,1,opt,name=context_name,json=contextName" json:"context_name,omitempty"`
	// Name of the pred tensor.
	PredName string `protobuf:"bytes,2,opt,name=pred_name,json=predName" json:"pred_name,omitempty"`
	// Name of the pivot tensor.
	PivotName string `protobuf:"bytes,3,opt,name=pivot_name,json=pivotName" json:"pivot_name,omitempty"`
	// Branch prediction. 0 or 1.
	Branch int32 `protobuf:"varint,4,opt,name=branch" json:"branch,omitempty"`
	// Values and external values in control flow context.
	ValuesDef *ValuesDef `protobuf:"bytes,5,opt,name=values_def,json=valuesDef" json:"values_def,omitempty"`
}

Protocol buffer representing a CondContext object.

func (*CondContextDef) Descriptor

func (*CondContextDef) Descriptor() ([]byte, []int)

func (*CondContextDef) GetBranch

func (m *CondContextDef) GetBranch() int32

func (*CondContextDef) GetContextName

func (m *CondContextDef) GetContextName() string

func (*CondContextDef) GetPivotName

func (m *CondContextDef) GetPivotName() string

func (*CondContextDef) GetPredName

func (m *CondContextDef) GetPredName() string

func (*CondContextDef) GetValuesDef

func (m *CondContextDef) GetValuesDef() *ValuesDef

func (*CondContextDef) ProtoMessage

func (*CondContextDef) ProtoMessage()

func (*CondContextDef) Reset

func (m *CondContextDef) Reset()

func (*CondContextDef) String

func (m *CondContextDef) String() string

type ConfigProto

type ConfigProto struct {
	// Map from device type name (e.g., "CPU" or "GPU") to maximum
	// number of devices of that type to use.  If a particular device
	// type is not found in the map, the system picks an appropriate
	// number.
	DeviceCount map[string]int32 `` /* 162-byte string literal not displayed */
	// The execution of an individual op (for some op types) can be
	// parallelized on a pool of intra_op_parallelism_threads.
	// 0 means the system picks an appropriate number.
	IntraOpParallelismThreads int32 `` /* 134-byte string literal not displayed */
	// Nodes that perform blocking operations are enqueued on a pool of
	// inter_op_parallelism_threads available in each process.
	//
	// 0 means the system picks an appropriate number.
	//
	// Note that the first Session created in the process sets the
	// number of threads for all future sessions unless use_per_session_threads is
	// true or session_inter_op_thread_pool is configured.
	InterOpParallelismThreads int32 `` /* 134-byte string literal not displayed */
	// If true, use a new set of threads for this session rather than the global
	// pool of threads. Only supported by direct sessions.
	//
	// If false, use the global threads created by the first session, or the
	// per-session thread pools configured by session_inter_op_thread_pool.
	//
	// This option is deprecated. The same effect can be achieved by setting
	// session_inter_op_thread_pool to have one element, whose num_threads equals
	// inter_op_parallelism_threads.
	UsePerSessionThreads bool `protobuf:"varint,9,opt,name=use_per_session_threads,json=usePerSessionThreads" json:"use_per_session_threads,omitempty"`
	// This option is experimental - it may be replaced with a different mechanism
	// in the future. The intended use is for when some session invocations need
	// to run in a background pool limited to a small number of threads.
	//
	// Configures session thread pools. If this is configured, then RunOptions for
	// a Run call can select the thread pool to use.
	//
	// If a pool's num_threads is 0, then inter_op_parallelism_threads is used.
	SessionInterOpThreadPool []*ThreadPoolOptionProto `` /* 133-byte string literal not displayed */
	// Assignment of Nodes to Devices is recomputed every placement_period
	// steps until the system warms up (at which point the recomputation
	// typically slows down automatically).
	PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod" json:"placement_period,omitempty"`
	// When any filters are present, sessions will ignore all devices that do not
	// match the filters. Each filter can be partially specified, e.g. "/job:ps"
	// "/job:worker/replica:3", etc.
	DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters" json:"device_filters,omitempty"`
	// Options that apply to all GPUs.
	GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions" json:"gpu_options,omitempty"`
	// Whether soft placement is allowed. If allow_soft_placement is true,
	// an op will be placed on CPU if
	//   1. there's no GPU implementation for the OP
	// or
	//   2. no GPU devices are known or registered
	// or
	//   3. need to co-locate with reftype input(s) which are from CPU.
	AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement" json:"allow_soft_placement,omitempty"`
	// Whether device placements should be logged.
	LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement" json:"log_device_placement,omitempty"`
	// Options that apply to all graphs.
	GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions" json:"graph_options,omitempty"`
	// Global timeout for all blocking operations in this session.  If non-zero,
	// and not overridden on a per-operation basis, this value will be used as the
	// deadline for all blocking operations.
	OperationTimeoutInMs int64 `protobuf:"varint,11,opt,name=operation_timeout_in_ms,json=operationTimeoutInMs" json:"operation_timeout_in_ms,omitempty"`
	// Options that apply when this session uses the distributed runtime.
	RpcOptions *RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions" json:"rpc_options,omitempty"`
	// Optional list of all workers to use in this session.
	ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef" json:"cluster_def,omitempty"`
}

Session configuration parameters. The system picks appropriate values for fields that are not set.
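
A minimal sketch of building and serializing a ConfigProto. The thread counts, device count, and timeout are illustrative, and proto.Marshal is assumed to come from the golang protobuf runtime used by this generated code (with fmt and log as assumed imports):

cfg := &ConfigProto{
	DeviceCount:               map[string]int32{"GPU": 1},
	IntraOpParallelismThreads: 4,
	InterOpParallelismThreads: 2,
	AllowSoftPlacement:        true,
	LogDevicePlacement:        true,
	OperationTimeoutInMs:      60000,
}
buf, err := proto.Marshal(cfg)
if err != nil {
	log.Fatal(err)
}
fmt.Println(len(buf))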

func (*ConfigProto) Descriptor

func (*ConfigProto) Descriptor() ([]byte, []int)

func (*ConfigProto) GetAllowSoftPlacement

func (m *ConfigProto) GetAllowSoftPlacement() bool

func (*ConfigProto) GetClusterDef

func (m *ConfigProto) GetClusterDef() *ClusterDef

func (*ConfigProto) GetDeviceCount

func (m *ConfigProto) GetDeviceCount() map[string]int32

func (*ConfigProto) GetDeviceFilters

func (m *ConfigProto) GetDeviceFilters() []string

func (*ConfigProto) GetGpuOptions

func (m *ConfigProto) GetGpuOptions() *GPUOptions

func (*ConfigProto) GetGraphOptions

func (m *ConfigProto) GetGraphOptions() *GraphOptions

func (*ConfigProto) GetInterOpParallelismThreads

func (m *ConfigProto) GetInterOpParallelismThreads() int32

func (*ConfigProto) GetIntraOpParallelismThreads

func (m *ConfigProto) GetIntraOpParallelismThreads() int32

func (*ConfigProto) GetLogDevicePlacement

func (m *ConfigProto) GetLogDevicePlacement() bool

func (*ConfigProto) GetOperationTimeoutInMs

func (m *ConfigProto) GetOperationTimeoutInMs() int64

func (*ConfigProto) GetPlacementPeriod

func (m *ConfigProto) GetPlacementPeriod() int32

func (*ConfigProto) GetRpcOptions

func (m *ConfigProto) GetRpcOptions() *RPCOptions

func (*ConfigProto) GetSessionInterOpThreadPool

func (m *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto

func (*ConfigProto) GetUsePerSessionThreads

func (m *ConfigProto) GetUsePerSessionThreads() bool

func (*ConfigProto) ProtoMessage

func (*ConfigProto) ProtoMessage()

func (*ConfigProto) Reset

func (m *ConfigProto) Reset()

func (*ConfigProto) String

func (m *ConfigProto) String() string

type CreateSessionRequest

type CreateSessionRequest struct {
	// The initial graph definition.
	GraphDef *tensorflow10.GraphDef `protobuf:"bytes,1,opt,name=graph_def,json=graphDef" json:"graph_def,omitempty"`
	// Configuration options.
	Config *ConfigProto `protobuf:"bytes,2,opt,name=config" json:"config,omitempty"`
	// The target string used from the client's perspective.
	Target string `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
}

func (*CreateSessionRequest) Descriptor

func (*CreateSessionRequest) Descriptor() ([]byte, []int)

func (*CreateSessionRequest) GetConfig

func (m *CreateSessionRequest) GetConfig() *ConfigProto

func (*CreateSessionRequest) GetGraphDef

func (m *CreateSessionRequest) GetGraphDef() *tensorflow10.GraphDef

func (*CreateSessionRequest) GetTarget

func (m *CreateSessionRequest) GetTarget() string

func (*CreateSessionRequest) ProtoMessage

func (*CreateSessionRequest) ProtoMessage()

func (*CreateSessionRequest) Reset

func (m *CreateSessionRequest) Reset()

func (*CreateSessionRequest) String

func (m *CreateSessionRequest) String() string

type CreateSessionResponse

type CreateSessionResponse struct {
	// The session handle to be used in subsequent calls for the created session.
	//
	// The client must arrange to call CloseSession with this returned
	// session handle to close the session.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// The initial version number for the graph, to be used in the next call
	// to ExtendSession.
	GraphVersion int64 `protobuf:"varint,2,opt,name=graph_version,json=graphVersion" json:"graph_version,omitempty"`
}

func (*CreateSessionResponse) Descriptor

func (*CreateSessionResponse) Descriptor() ([]byte, []int)

func (*CreateSessionResponse) GetGraphVersion

func (m *CreateSessionResponse) GetGraphVersion() int64

func (*CreateSessionResponse) GetSessionHandle

func (m *CreateSessionResponse) GetSessionHandle() string

func (*CreateSessionResponse) ProtoMessage

func (*CreateSessionResponse) ProtoMessage()

func (*CreateSessionResponse) Reset

func (m *CreateSessionResponse) Reset()

func (*CreateSessionResponse) String

func (m *CreateSessionResponse) String() string

type CreateWorkerSessionRequest

type CreateWorkerSessionRequest struct {
	// Sessions are identified by a given handle.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// Defines the configuration of a TensorFlow worker.
	ServerDef *ServerDef `protobuf:"bytes,2,opt,name=server_def,json=serverDef" json:"server_def,omitempty"`
}

func (*CreateWorkerSessionRequest) Descriptor

func (*CreateWorkerSessionRequest) Descriptor() ([]byte, []int)

func (*CreateWorkerSessionRequest) GetServerDef

func (m *CreateWorkerSessionRequest) GetServerDef() *ServerDef

func (*CreateWorkerSessionRequest) GetSessionHandle

func (m *CreateWorkerSessionRequest) GetSessionHandle() string

func (*CreateWorkerSessionRequest) ProtoMessage

func (*CreateWorkerSessionRequest) ProtoMessage()

func (*CreateWorkerSessionRequest) Reset

func (m *CreateWorkerSessionRequest) Reset()

func (*CreateWorkerSessionRequest) String

func (m *CreateWorkerSessionRequest) String() string

type CreateWorkerSessionResponse

type CreateWorkerSessionResponse struct {
}

func (*CreateWorkerSessionResponse) Descriptor

func (*CreateWorkerSessionResponse) Descriptor() ([]byte, []int)

func (*CreateWorkerSessionResponse) ProtoMessage

func (*CreateWorkerSessionResponse) ProtoMessage()

func (*CreateWorkerSessionResponse) Reset

func (m *CreateWorkerSessionResponse) Reset()

func (*CreateWorkerSessionResponse) String

func (m *CreateWorkerSessionResponse) String() string

type DebugOptions

type DebugOptions struct {
	// Debugging options
	DebugTensorWatchOpts []*DebugTensorWatch `protobuf:"bytes,4,rep,name=debug_tensor_watch_opts,json=debugTensorWatchOpts" json:"debug_tensor_watch_opts,omitempty"`
	// Caller-specified global step count.
	// Note that this is distinct from the session run count and the executor
	// step count.
	GlobalStep int64 `protobuf:"varint,10,opt,name=global_step,json=globalStep" json:"global_step,omitempty"`
}

EXPERIMENTAL. Options for initializing DebuggerState.

func (*DebugOptions) Descriptor

func (*DebugOptions) Descriptor() ([]byte, []int)

func (*DebugOptions) GetDebugTensorWatchOpts

func (m *DebugOptions) GetDebugTensorWatchOpts() []*DebugTensorWatch

func (*DebugOptions) GetGlobalStep

func (m *DebugOptions) GetGlobalStep() int64

func (*DebugOptions) ProtoMessage

func (*DebugOptions) ProtoMessage()

func (*DebugOptions) Reset

func (m *DebugOptions) Reset()

func (*DebugOptions) String

func (m *DebugOptions) String() string

type DebugTensorWatch

type DebugTensorWatch struct {
	// Name of the node to watch.
	NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName" json:"node_name,omitempty"`
	// Output slot to watch.
	// The semantics of output_slot == -1 is that the node is only watched for
	// completion, but not for any output tensors. See NodeCompletionCallback
	// in debug_gateway.h.
	// TODO(cais): Implement this semantics.
	OutputSlot int32 `protobuf:"varint,2,opt,name=output_slot,json=outputSlot" json:"output_slot,omitempty"`
	// Name(s) of the debugging op(s).
	// One or more than one probes on a tensor.
	// e.g., {"DebugIdentity", "DebugNanCount"}
	DebugOps []string `protobuf:"bytes,3,rep,name=debug_ops,json=debugOps" json:"debug_ops,omitempty"`
	// URL(s) for debug target(s).
	//   E.g., "file:///foo/tfdbg_dump", "grpc://localhost:11011"
	// Each debug op listed in debug_ops will publish its output tensor (debug
	// signal) to all URLs in debug_urls.
	//
	// N.B. Session::Run() supports concurrent invocations of the same inputs
	// (feed keys), outputs and target nodes. If such concurrent invocations
	// are to be debugged, the callers of Session::Run() must use distinct
	// debug_urls to make sure that the streamed or dumped events do not overlap
	// among the invocations.
	// TODO(cais): More visible documentation of this in g3docs.
	DebugUrls []string `protobuf:"bytes,4,rep,name=debug_urls,json=debugUrls" json:"debug_urls,omitempty"`
	// Do not error out if debug op creation fails (e.g., due to dtype
	// incompatibility). Instead, just log the failure.
	TolerateDebugOpCreationFailures bool `` /* 154-byte string literal not displayed */
}

EXPERIMENTAL. Option for watching a node.
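
A hedged sketch of attaching a watch to DebugOptions; the node name, debug ops, and dump URL are placeholders:

dbg := &DebugOptions{
	GlobalStep: 1,
	DebugTensorWatchOpts: []*DebugTensorWatch{
		{
			NodeName:   "hidden1/MatMul", // hypothetical node name
			OutputSlot: 0,
			DebugOps:   []string{"DebugIdentity", "DebugNanCount"},
			DebugUrls:  []string{"file:///tmp/tfdbg_dump"},
		},
	},
}
_ = dbg // pass dbg wherever DebugOptions is accepted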

func (*DebugTensorWatch) Descriptor

func (*DebugTensorWatch) Descriptor() ([]byte, []int)

func (*DebugTensorWatch) GetDebugOps

func (m *DebugTensorWatch) GetDebugOps() []string

func (*DebugTensorWatch) GetDebugUrls

func (m *DebugTensorWatch) GetDebugUrls() []string

func (*DebugTensorWatch) GetNodeName

func (m *DebugTensorWatch) GetNodeName() string

func (*DebugTensorWatch) GetOutputSlot

func (m *DebugTensorWatch) GetOutputSlot() int32

func (*DebugTensorWatch) GetTolerateDebugOpCreationFailures

func (m *DebugTensorWatch) GetTolerateDebugOpCreationFailures() bool

func (*DebugTensorWatch) ProtoMessage

func (*DebugTensorWatch) ProtoMessage()

func (*DebugTensorWatch) Reset

func (m *DebugTensorWatch) Reset()

func (*DebugTensorWatch) String

func (m *DebugTensorWatch) String() string

type DeregisterGraphRequest

type DeregisterGraphRequest struct {
	// The session_handle used when registering the graph. If session_handle is
	// empty, a single global namespace is used.
	SessionHandle string `protobuf:"bytes,2,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// REQUIRED: graph_handle must be returned by a RegisterGraph call
	// to the same WorkerService.
	GraphHandle string `protobuf:"bytes,1,opt,name=graph_handle,json=graphHandle" json:"graph_handle,omitempty"`
}

func (*DeregisterGraphRequest) Descriptor

func (*DeregisterGraphRequest) Descriptor() ([]byte, []int)

func (*DeregisterGraphRequest) GetGraphHandle

func (m *DeregisterGraphRequest) GetGraphHandle() string

func (*DeregisterGraphRequest) GetSessionHandle

func (m *DeregisterGraphRequest) GetSessionHandle() string

func (*DeregisterGraphRequest) ProtoMessage

func (*DeregisterGraphRequest) ProtoMessage()

func (*DeregisterGraphRequest) Reset

func (m *DeregisterGraphRequest) Reset()

func (*DeregisterGraphRequest) String

func (m *DeregisterGraphRequest) String() string

type DeregisterGraphResponse

type DeregisterGraphResponse struct {
}

func (*DeregisterGraphResponse) Descriptor

func (*DeregisterGraphResponse) Descriptor() ([]byte, []int)

func (*DeregisterGraphResponse) ProtoMessage

func (*DeregisterGraphResponse) ProtoMessage()

func (*DeregisterGraphResponse) Reset

func (m *DeregisterGraphResponse) Reset()

func (*DeregisterGraphResponse) String

func (m *DeregisterGraphResponse) String() string

type DeviceMap

type DeviceMap struct {
	NameAndDevice []*DeviceMap_NamedDevice `protobuf:"bytes,1,rep,name=name_and_device,json=nameAndDevice" json:"name_and_device,omitempty"`
}

func (*DeviceMap) Descriptor

func (*DeviceMap) Descriptor() ([]byte, []int)

func (*DeviceMap) GetNameAndDevice

func (m *DeviceMap) GetNameAndDevice() []*DeviceMap_NamedDevice

func (*DeviceMap) ProtoMessage

func (*DeviceMap) ProtoMessage()

func (*DeviceMap) Reset

func (m *DeviceMap) Reset()

func (*DeviceMap) String

func (m *DeviceMap) String() string

type DeviceMap_NamedDevice

type DeviceMap_NamedDevice struct {
	Name   string            `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Device *DeviceProperties `protobuf:"bytes,2,opt,name=device" json:"device,omitempty"`
}

func (*DeviceMap_NamedDevice) Descriptor

func (*DeviceMap_NamedDevice) Descriptor() ([]byte, []int)

func (*DeviceMap_NamedDevice) GetDevice

func (m *DeviceMap_NamedDevice) GetDevice() *DeviceProperties

func (*DeviceMap_NamedDevice) GetName

func (m *DeviceMap_NamedDevice) GetName() string

func (*DeviceMap_NamedDevice) ProtoMessage

func (*DeviceMap_NamedDevice) ProtoMessage()

func (*DeviceMap_NamedDevice) Reset

func (m *DeviceMap_NamedDevice) Reset()

func (*DeviceMap_NamedDevice) String

func (m *DeviceMap_NamedDevice) String() string

type DeviceProperties

type DeviceProperties struct {
	// Device type (CPU, GPU, ...)
	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
	// Vendor (Intel, nvidia, ...)
	Vendor string `protobuf:"bytes,2,opt,name=vendor" json:"vendor,omitempty"`
	// Model (Haswell, K40, ...)
	Model string `protobuf:"bytes,3,opt,name=model" json:"model,omitempty"`
	// Core frequency in MHz
	Frequency int64 `protobuf:"varint,4,opt,name=frequency" json:"frequency,omitempty"`
	// Number of cores
	NumCores int64 `protobuf:"varint,5,opt,name=num_cores,json=numCores" json:"num_cores,omitempty"`
	// Version of the tools and libraries used with this device (e.g. gcc 4.9,
	// cudnn 5.1)
	Environment map[string]string `` /* 142-byte string literal not displayed */
	// Number of registers per core.
	NumRegisters int64 `protobuf:"varint,7,opt,name=num_registers,json=numRegisters" json:"num_registers,omitempty"`
	// L1 cache size in bytes
	L1CacheSize int64 `protobuf:"varint,8,opt,name=l1_cache_size,json=l1CacheSize" json:"l1_cache_size,omitempty"`
	// L2 cache size in bytes
	L2CacheSize int64 `protobuf:"varint,9,opt,name=l2_cache_size,json=l2CacheSize" json:"l2_cache_size,omitempty"`
	// L3 cache size in bytes
	L3CacheSize int64 `protobuf:"varint,10,opt,name=l3_cache_size,json=l3CacheSize" json:"l3_cache_size,omitempty"`
	// Shared memory size per multiprocessor in bytes. This field is
	// applicable to GPUs only.
	SharedMemorySizePerMultiprocessor int64 `` /* 161-byte string literal not displayed */
	// Memory size in bytes
	MemorySize int64 `protobuf:"varint,12,opt,name=memory_size,json=memorySize" json:"memory_size,omitempty"`
	// Memory bandwidth in KB/s
	Bandwidth int64 `protobuf:"varint,13,opt,name=bandwidth" json:"bandwidth,omitempty"`
}

func (*DeviceProperties) Descriptor

func (*DeviceProperties) Descriptor() ([]byte, []int)

func (*DeviceProperties) GetBandwidth

func (m *DeviceProperties) GetBandwidth() int64

func (*DeviceProperties) GetEnvironment

func (m *DeviceProperties) GetEnvironment() map[string]string

func (*DeviceProperties) GetFrequency

func (m *DeviceProperties) GetFrequency() int64

func (*DeviceProperties) GetL1CacheSize

func (m *DeviceProperties) GetL1CacheSize() int64

func (*DeviceProperties) GetL2CacheSize

func (m *DeviceProperties) GetL2CacheSize() int64

func (*DeviceProperties) GetL3CacheSize

func (m *DeviceProperties) GetL3CacheSize() int64

func (*DeviceProperties) GetMemorySize

func (m *DeviceProperties) GetMemorySize() int64

func (*DeviceProperties) GetModel

func (m *DeviceProperties) GetModel() string

func (*DeviceProperties) GetNumCores

func (m *DeviceProperties) GetNumCores() int64

func (*DeviceProperties) GetNumRegisters

func (m *DeviceProperties) GetNumRegisters() int64

func (*DeviceProperties) GetSharedMemorySizePerMultiprocessor

func (m *DeviceProperties) GetSharedMemorySizePerMultiprocessor() int64

func (*DeviceProperties) GetType

func (m *DeviceProperties) GetType() string

func (*DeviceProperties) GetVendor

func (m *DeviceProperties) GetVendor() string

func (*DeviceProperties) ProtoMessage

func (*DeviceProperties) ProtoMessage()

func (*DeviceProperties) Reset

func (m *DeviceProperties) Reset()

func (*DeviceProperties) String

func (m *DeviceProperties) String() string

type ExecutorOpts

type ExecutorOpts struct {
	RecordCosts    bool `protobuf:"varint,1,opt,name=record_costs,json=recordCosts" json:"record_costs,omitempty"`
	RecordTimeline bool `protobuf:"varint,3,opt,name=record_timeline,json=recordTimeline" json:"record_timeline,omitempty"`
}

Options specific to the execution of a single step.

func (*ExecutorOpts) Descriptor

func (*ExecutorOpts) Descriptor() ([]byte, []int)

func (*ExecutorOpts) GetRecordCosts

func (m *ExecutorOpts) GetRecordCosts() bool

func (*ExecutorOpts) GetRecordTimeline

func (m *ExecutorOpts) GetRecordTimeline() bool

func (*ExecutorOpts) ProtoMessage

func (*ExecutorOpts) ProtoMessage()

func (*ExecutorOpts) Reset

func (m *ExecutorOpts) Reset()

func (*ExecutorOpts) String

func (m *ExecutorOpts) String() string

type ExtendSessionRequest

type ExtendSessionRequest struct {
	// REQUIRED: session_handle must be returned by a CreateSession call
	// to the same master service.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// REQUIRED: The nodes to be added to the session's graph. If any node has
	// the same name as an existing node, the operation will fail with
	// ILLEGAL_ARGUMENT.
	GraphDef *tensorflow10.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef" json:"graph_def,omitempty"`
	// REQUIRED: The version number of the graph to be extended. This will be
	// tested against the current server-side version number, and the operation
	// will fail with FAILED_PRECONDITION if they do not match.
	CurrentGraphVersion int64 `protobuf:"varint,3,opt,name=current_graph_version,json=currentGraphVersion" json:"current_graph_version,omitempty"`
}

func (*ExtendSessionRequest) Descriptor

func (*ExtendSessionRequest) Descriptor() ([]byte, []int)

func (*ExtendSessionRequest) GetCurrentGraphVersion

func (m *ExtendSessionRequest) GetCurrentGraphVersion() int64

func (*ExtendSessionRequest) GetGraphDef

func (m *ExtendSessionRequest) GetGraphDef() *tensorflow10.GraphDef

func (*ExtendSessionRequest) GetSessionHandle

func (m *ExtendSessionRequest) GetSessionHandle() string

func (*ExtendSessionRequest) ProtoMessage

func (*ExtendSessionRequest) ProtoMessage()

func (*ExtendSessionRequest) Reset

func (m *ExtendSessionRequest) Reset()

func (*ExtendSessionRequest) String

func (m *ExtendSessionRequest) String() string

type ExtendSessionResponse

type ExtendSessionResponse struct {
	// The new version number for the extended graph, to be used in the next call
	// to ExtendSession.
	NewGraphVersion int64 `protobuf:"varint,4,opt,name=new_graph_version,json=newGraphVersion" json:"new_graph_version,omitempty"`
}

func (*ExtendSessionResponse) Descriptor

func (*ExtendSessionResponse) Descriptor() ([]byte, []int)

func (*ExtendSessionResponse) GetNewGraphVersion

func (m *ExtendSessionResponse) GetNewGraphVersion() int64

func (*ExtendSessionResponse) ProtoMessage

func (*ExtendSessionResponse) ProtoMessage()

func (*ExtendSessionResponse) Reset

func (m *ExtendSessionResponse) Reset()

func (*ExtendSessionResponse) String

func (m *ExtendSessionResponse) String() string

type GPUOptions

type GPUOptions struct {
	// A value between 0 and 1 that indicates what fraction of the
	// available GPU memory to pre-allocate for each process.  1 means
	// to pre-allocate all of the GPU memory, 0.5 means the process
	// allocates ~50% of the available GPU memory.
	PerProcessGpuMemoryFraction float64 `` /* 143-byte string literal not displayed */
	// The type of GPU allocation strategy to use.
	//
	// Allowed values:
	// "": The empty string (default) uses a system-chosen default
	//     which may change over time.
	//
	// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
	//        version of dlmalloc.
	AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType" json:"allocator_type,omitempty"`
	// Delay deletion of up to this many bytes to reduce the number of
	// interactions with gpu driver code.  If 0, the system chooses
	// a reasonable default (several MBs).
	DeferredDeletionBytes int64 `protobuf:"varint,3,opt,name=deferred_deletion_bytes,json=deferredDeletionBytes" json:"deferred_deletion_bytes,omitempty"`
	// If true, the allocator does not pre-allocate the entire specified
	// GPU memory region, instead starting small and growing as needed.
	AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth" json:"allow_growth,omitempty"`
	// A comma-separated list of GPU ids that determines the 'visible'
	// to 'virtual' mapping of GPU devices.  For example, if TensorFlow
	// can see 8 GPU devices in the process, and one wanted to map
	// visible GPU devices 5 and 3 as "/gpu:0", and "/gpu:1", then one
	// would specify this field as "5,3".  This field is similar in
	// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
	// it applies to the visible GPU devices in the process.
	//
	// NOTE: The GPU driver provides the process with the visible GPUs
	// in an order which is not guaranteed to have any correlation to
	// the *physical* GPU id in the machine.  This field is used for
	// remapping "visible" to "virtual", which means this operates only
	// after the process starts.  Users are required to use vendor
	// specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
	// physical to visible device mapping prior to invoking TensorFlow.
	VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList" json:"visible_device_list,omitempty"`
	// In the event polling loop, sleep this many microseconds between
	// PollEvents calls, when the queue is not empty.  If value is not
	// set or set to 0, gets set to a non-zero default.
	PollingActiveDelayUsecs int32 `` /* 128-byte string literal not displayed */
	// In the event polling loop, sleep this many milliseconds between
	// PollEvents calls, when the queue is empty.  If value is not
	// set or set to 0, gets set to a non-zero default.
	PollingInactiveDelayMsecs int32 `` /* 134-byte string literal not displayed */
	// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
	// enabling this option forces all CPU tensors to be allocated with Cuda
	// pinned memory. Normally, TensorFlow will infer which tensors should be
	// allocated as pinned memory. But in cases where the inference is
	// incomplete, this option can significantly speed up cross-device memory
	// copy performance as long as it fits in memory.
	// Note that this option is not something that should be
	// enabled by default for unknown or very large models, since all Cuda pinned
	// memory is unpageable, having too much pinned memory might negatively impact
	// the overall host system performance.
	ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible" json:"force_gpu_compatible,omitempty"`
}
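
A minimal sketch of the fields described in the comments above; the memory fraction and device list are illustrative:

gpu := &GPUOptions{
	PerProcessGpuMemoryFraction: 0.5,   // pre-allocate roughly half of the GPU memory
	AllowGrowth:                 true,  // grow allocations on demand within that limit
	VisibleDeviceList:           "5,3", // map visible GPUs 5 and 3 to /gpu:0 and /gpu:1
}
cfg := &ConfigProto{GpuOptions: gpu}
_ = cfg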

func (*GPUOptions) Descriptor

func (*GPUOptions) Descriptor() ([]byte, []int)

func (*GPUOptions) GetAllocatorType

func (m *GPUOptions) GetAllocatorType() string

func (*GPUOptions) GetAllowGrowth

func (m *GPUOptions) GetAllowGrowth() bool

func (*GPUOptions) GetDeferredDeletionBytes

func (m *GPUOptions) GetDeferredDeletionBytes() int64

func (*GPUOptions) GetForceGpuCompatible

func (m *GPUOptions) GetForceGpuCompatible() bool

func (*GPUOptions) GetPerProcessGpuMemoryFraction

func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64

func (*GPUOptions) GetPollingActiveDelayUsecs

func (m *GPUOptions) GetPollingActiveDelayUsecs() int32

func (*GPUOptions) GetPollingInactiveDelayMsecs

func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32

func (*GPUOptions) GetVisibleDeviceList

func (m *GPUOptions) GetVisibleDeviceList() string

func (*GPUOptions) ProtoMessage

func (*GPUOptions) ProtoMessage()

func (*GPUOptions) Reset

func (m *GPUOptions) Reset()

func (*GPUOptions) String

func (m *GPUOptions) String() string

type GetStatusRequest

type GetStatusRequest struct {
}

func (*GetStatusRequest) Descriptor

func (*GetStatusRequest) Descriptor() ([]byte, []int)

func (*GetStatusRequest) ProtoMessage

func (*GetStatusRequest) ProtoMessage()

func (*GetStatusRequest) Reset

func (m *GetStatusRequest) Reset()

func (*GetStatusRequest) String

func (m *GetStatusRequest) String() string

type GetStatusResponse

type GetStatusResponse struct {
	DeviceAttributes []*tensorflow14.DeviceAttributes `protobuf:"bytes,1,rep,name=device_attributes,json=deviceAttributes" json:"device_attributes,omitempty"`
}

func (*GetStatusResponse) Descriptor

func (*GetStatusResponse) Descriptor() ([]byte, []int)

func (*GetStatusResponse) GetDeviceAttributes

func (m *GetStatusResponse) GetDeviceAttributes() []*tensorflow14.DeviceAttributes

func (*GetStatusResponse) ProtoMessage

func (*GetStatusResponse) ProtoMessage()

func (*GetStatusResponse) Reset

func (m *GetStatusResponse) Reset()

func (*GetStatusResponse) String

func (m *GetStatusResponse) String() string

type GraphOptions

type GraphOptions struct {
	// If true, use control flow to schedule the activation of Recv nodes.
	// (Currently ignored.)
	EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling" json:"enable_recv_scheduling,omitempty"`
	// Options controlling how graph is optimized.
	OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions" json:"optimizer_options,omitempty"`
	// The number of steps to run before returning a cost model detailing
	// the memory usage and performance of each node of the graph. 0 means
	// no cost model.
	BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel" json:"build_cost_model,omitempty"`
	// The number of steps to skip before collecting statistics for the
	// cost model.
	BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter" json:"build_cost_model_after,omitempty"`
	// Annotate each Node with Op output shape data, to the extent it can
	// be statically inferred.
	InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes" json:"infer_shapes,omitempty"`
	// Only place the subgraphs that are run, rather than the entire graph.
	//
	// This is useful for interactive graph building, where one might
	// produce graphs that cannot be placed during the debugging
	// process.  In particular, it allows the client to continue work in
	// a session after adding a node to a graph whose placement
	// constraints are unsatisfiable.
	PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph" json:"place_pruned_graph,omitempty"`
	// If true, transfer float values between processes as bfloat16.
	EnableBfloat16Sendrecv bool `protobuf:"varint,7,opt,name=enable_bfloat16_sendrecv,json=enableBfloat16Sendrecv" json:"enable_bfloat16_sendrecv,omitempty"`
	// If > 0, record a timeline every this many steps.
	// EXPERIMENTAL: This currently has no effect in MasterSession.
	TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep" json:"timeline_step,omitempty"`
	// Options that control the type and amount of graph rewriting.
	// Not currently configurable via the public Python API (i.e. there is no API
	// stability guarantee if you import RewriterConfig explicitly).
	RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions" json:"rewrite_options,omitempty"`
}

func (*GraphOptions) Descriptor

func (*GraphOptions) Descriptor() ([]byte, []int)

func (*GraphOptions) GetBuildCostModel

func (m *GraphOptions) GetBuildCostModel() int64

func (*GraphOptions) GetBuildCostModelAfter

func (m *GraphOptions) GetBuildCostModelAfter() int64

func (*GraphOptions) GetEnableBfloat16Sendrecv

func (m *GraphOptions) GetEnableBfloat16Sendrecv() bool

func (*GraphOptions) GetEnableRecvScheduling

func (m *GraphOptions) GetEnableRecvScheduling() bool

func (*GraphOptions) GetInferShapes

func (m *GraphOptions) GetInferShapes() bool

func (*GraphOptions) GetOptimizerOptions

func (m *GraphOptions) GetOptimizerOptions() *OptimizerOptions

func (*GraphOptions) GetPlacePrunedGraph

func (m *GraphOptions) GetPlacePrunedGraph() bool

func (*GraphOptions) GetRewriteOptions

func (m *GraphOptions) GetRewriteOptions() *RewriterConfig

func (*GraphOptions) GetTimelineStep

func (m *GraphOptions) GetTimelineStep() int32

func (*GraphOptions) ProtoMessage

func (*GraphOptions) ProtoMessage()

func (*GraphOptions) Reset

func (m *GraphOptions) Reset()

func (*GraphOptions) String

func (m *GraphOptions) String() string

type JobDef

type JobDef struct {
	// The name of this job.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Mapping from task ID to "hostname:port" string.
	//
	// If the `name` field contains "worker", and the `tasks` map contains a
	// mapping from 7 to "example.org:2222", then the device prefix
	// "/job:worker/task:7" will be assigned to "example.org:2222".
	Tasks map[int32]string `` /* 131-byte string literal not displayed */
}

Defines a single job in a TensorFlow cluster.
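
A minimal sketch of a two-job cluster following the tasks-map convention described above; host names and ports are placeholders, and fmt is an assumed import:

cluster := &ClusterDef{
	Job: []*JobDef{
		{
			Name:  "worker",
			Tasks: map[int32]string{7: "example.org:2222"}, // /job:worker/task:7
		},
		{
			Name:  "ps",
			Tasks: map[int32]string{0: "ps0.example.org:2222"},
		},
	},
}
fmt.Println(cluster.GetJob()[0].GetTasks()[7]) // example.org:2222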

func (*JobDef) Descriptor

func (*JobDef) Descriptor() ([]byte, []int)

func (*JobDef) GetName

func (m *JobDef) GetName() string

func (*JobDef) GetTasks

func (m *JobDef) GetTasks() map[int32]string

func (*JobDef) ProtoMessage

func (*JobDef) ProtoMessage()

func (*JobDef) Reset

func (m *JobDef) Reset()

func (*JobDef) String

func (m *JobDef) String() string

type LabeledStepStats

type LabeledStepStats struct {
	StepId    int64                   `protobuf:"varint,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"`
	StepStats *tensorflow13.StepStats `protobuf:"bytes,2,opt,name=step_stats,json=stepStats" json:"step_stats,omitempty"`
}

func (*LabeledStepStats) Descriptor

func (*LabeledStepStats) Descriptor() ([]byte, []int)

func (*LabeledStepStats) GetStepId

func (m *LabeledStepStats) GetStepId() int64

func (*LabeledStepStats) GetStepStats

func (m *LabeledStepStats) GetStepStats() *tensorflow13.StepStats

func (*LabeledStepStats) ProtoMessage

func (*LabeledStepStats) ProtoMessage()

func (*LabeledStepStats) Reset

func (m *LabeledStepStats) Reset()

func (*LabeledStepStats) String

func (m *LabeledStepStats) String() string

type ListDevicesRequest

type ListDevicesRequest struct {
	// Optional: session_handle must be returned by a CreateSession call to the
	// same master service.
	//
	// When session_handle is empty, the ClusterSpec provided when the master was
	// started is used to compute the available devices. If the session_handle is
	// provided but not recognized, an error is returned. Finally, if a valid
	// session_handle is provided, the cluster configuration for that session is
	// used when computing the response.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
}

func (*ListDevicesRequest) Descriptor

func (*ListDevicesRequest) Descriptor() ([]byte, []int)

func (*ListDevicesRequest) GetSessionHandle

func (m *ListDevicesRequest) GetSessionHandle() string

func (*ListDevicesRequest) ProtoMessage

func (*ListDevicesRequest) ProtoMessage()

func (*ListDevicesRequest) Reset

func (m *ListDevicesRequest) Reset()

func (*ListDevicesRequest) String

func (m *ListDevicesRequest) String() string

type ListDevicesResponse

type ListDevicesResponse struct {
	LocalDevice  []*tensorflow14.DeviceAttributes `protobuf:"bytes,1,rep,name=local_device,json=localDevice" json:"local_device,omitempty"`
	RemoteDevice []*tensorflow14.DeviceAttributes `protobuf:"bytes,2,rep,name=remote_device,json=remoteDevice" json:"remote_device,omitempty"`
}

func (*ListDevicesResponse) Descriptor

func (*ListDevicesResponse) Descriptor() ([]byte, []int)

func (*ListDevicesResponse) GetLocalDevice

func (m *ListDevicesResponse) GetLocalDevice() []*tensorflow14.DeviceAttributes

func (*ListDevicesResponse) GetRemoteDevice

func (m *ListDevicesResponse) GetRemoteDevice() []*tensorflow14.DeviceAttributes

func (*ListDevicesResponse) ProtoMessage

func (*ListDevicesResponse) ProtoMessage()

func (*ListDevicesResponse) Reset

func (m *ListDevicesResponse) Reset()

func (*ListDevicesResponse) String

func (m *ListDevicesResponse) String() string

type LoggingRequest

type LoggingRequest struct {
	// If true, RPC logging will be activated.
	RpcLogging bool `protobuf:"varint,1,opt,name=rpc_logging,json=rpcLogging" json:"rpc_logging,omitempty"`
	// If true, discard any saved logging data (for all steps).
	Clear bool `protobuf:"varint,2,opt,name=clear" json:"clear,omitempty"`
	// When set, requests all saved log data pertaining to the step.
	// Any log data retrieved is eliminated from the store and cannot be
	// retrieved again.
	FetchStepId []int64 `protobuf:"varint,3,rep,packed,name=fetch_step_id,json=fetchStepId" json:"fetch_step_id,omitempty"`
}

Out-of-band request to begin or end logging, or to retrieve logs for particular steps.
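
As a sketch, the two common uses of this message are enabling RPC logging and fetching the saved log data for a single step; the step id 42 below is illustrative:

enable := &LoggingRequest{RpcLogging: true}
fetch := &LoggingRequest{FetchStepId: []int64{42}} // data for step 42 is returned once, then discarded from the store
_, _ = enable, fetch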

func (*LoggingRequest) Descriptor

func (*LoggingRequest) Descriptor() ([]byte, []int)

func (*LoggingRequest) GetClear

func (m *LoggingRequest) GetClear() bool

func (*LoggingRequest) GetFetchStepId

func (m *LoggingRequest) GetFetchStepId() []int64

func (*LoggingRequest) GetRpcLogging

func (m *LoggingRequest) GetRpcLogging() bool

func (*LoggingRequest) ProtoMessage

func (*LoggingRequest) ProtoMessage()

func (*LoggingRequest) Reset

func (m *LoggingRequest) Reset()

func (*LoggingRequest) String

func (m *LoggingRequest) String() string

type LoggingResponse

type LoggingResponse struct {
	Step []*LabeledStepStats `protobuf:"bytes,1,rep,name=step" json:"step,omitempty"`
}

func (*LoggingResponse) Descriptor

func (*LoggingResponse) Descriptor() ([]byte, []int)

func (*LoggingResponse) GetStep

func (m *LoggingResponse) GetStep() []*LabeledStepStats

func (*LoggingResponse) ProtoMessage

func (*LoggingResponse) ProtoMessage()

func (*LoggingResponse) Reset

func (m *LoggingResponse) Reset()

func (*LoggingResponse) String

func (m *LoggingResponse) String() string

type MasterServiceClient

type MasterServiceClient interface {
	// Creates a session.
	CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error)
	// Extends a session.
	ExtendSession(ctx context.Context, in *ExtendSessionRequest, opts ...grpc.CallOption) (*ExtendSessionResponse, error)
	// Prepares future partial run calls.
	PartialRunSetup(ctx context.Context, in *PartialRunSetupRequest, opts ...grpc.CallOption) (*PartialRunSetupResponse, error)
	// Drives the graph computation.
	RunStep(ctx context.Context, in *RunStepRequest, opts ...grpc.CallOption) (*RunStepResponse, error)
	// Closes a session.
	CloseSession(ctx context.Context, in *CloseSessionRequest, opts ...grpc.CallOption) (*CloseSessionResponse, error)
	// List the devices usable by the master.
	ListDevices(ctx context.Context, in *ListDevicesRequest, opts ...grpc.CallOption) (*ListDevicesResponse, error)
	// Close and abandon all existing sessions.  Ongoing computations
	// will no longer affect fresh ones via the resources in containers listed in
	// the ResetRequest.  See ResetRequest for more details.
	Reset(ctx context.Context, in *ResetRequest, opts ...grpc.CallOption) (*ResetResponse, error)
}

func NewMasterServiceClient

func NewMasterServiceClient(cc *grpc.ClientConn) MasterServiceClient

type MasterServiceServer

type MasterServiceServer interface {
	// Creates a session.
	CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error)
	// Extends a session.
	ExtendSession(context.Context, *ExtendSessionRequest) (*ExtendSessionResponse, error)
	// Prepares future partial run calls.
	PartialRunSetup(context.Context, *PartialRunSetupRequest) (*PartialRunSetupResponse, error)
	// Drives the graph computation.
	RunStep(context.Context, *RunStepRequest) (*RunStepResponse, error)
	// Closes a session.
	CloseSession(context.Context, *CloseSessionRequest) (*CloseSessionResponse, error)
	// List the devices usable by the master.
	ListDevices(context.Context, *ListDevicesRequest) (*ListDevicesResponse, error)
	// Close and abandon all existing sessions.  Ongoing computations
	// will no longer affect fresh ones via the resources in containers listed in
	// the ResetRequest.  See ResetRequest for more details.
	Reset(context.Context, *ResetRequest) (*ResetResponse, error)
}

type MetaGraphDef

type MetaGraphDef struct {
	MetaInfoDef *MetaGraphDef_MetaInfoDef `protobuf:"bytes,1,opt,name=meta_info_def,json=metaInfoDef" json:"meta_info_def,omitempty"`
	// GraphDef.
	GraphDef *tensorflow10.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef" json:"graph_def,omitempty"`
	// SaverDef.
	SaverDef *SaverDef `protobuf:"bytes,3,opt,name=saver_def,json=saverDef" json:"saver_def,omitempty"`
	// collection_def: Map from collection name to collections.
	// See CollectionDef section for details.
	CollectionDef map[string]*CollectionDef `` /* 167-byte string literal not displayed */
	// signature_def: Map from user supplied key for a signature to a single
	// SignatureDef.
	SignatureDef map[string]*SignatureDef `` /* 164-byte string literal not displayed */
	// Asset file def to be used with the defined graph.
	AssetFileDef []*AssetFileDef `protobuf:"bytes,6,rep,name=asset_file_def,json=assetFileDef" json:"asset_file_def,omitempty"`
}

NOTE: This protocol buffer is evolving, and will go through revisions in the coming months.

Protocol buffer containing the following, which are necessary to restart training or run inference. It can be used to serialize/de-serialize memory objects necessary for running computation in a graph when crossing the process boundary. It can be used for long-term storage of graphs, cross-language execution of graphs, etc.

MetaInfoDef
GraphDef
SaverDef
CollectionDef
TensorInfo
SignatureDef
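
As a sketch under stated assumptions, a serialized MetaGraphDef can be decoded with the github.com/golang/protobuf runtime used by this generated package; the byte slice is assumed to already hold a serialized MetaGraphDef:

import "github.com/golang/protobuf/proto"

func decodeMetaGraph(data []byte) (*MetaGraphDef, error) {
	mg := &MetaGraphDef{}
	if err := proto.Unmarshal(data, mg); err != nil {
		return nil, err
	}
	return mg, nil
}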

func (*MetaGraphDef) Descriptor

func (*MetaGraphDef) Descriptor() ([]byte, []int)

func (*MetaGraphDef) GetAssetFileDef

func (m *MetaGraphDef) GetAssetFileDef() []*AssetFileDef

func (*MetaGraphDef) GetCollectionDef

func (m *MetaGraphDef) GetCollectionDef() map[string]*CollectionDef

func (*MetaGraphDef) GetGraphDef

func (m *MetaGraphDef) GetGraphDef() *tensorflow10.GraphDef

func (*MetaGraphDef) GetMetaInfoDef

func (m *MetaGraphDef) GetMetaInfoDef() *MetaGraphDef_MetaInfoDef

func (*MetaGraphDef) GetSaverDef

func (m *MetaGraphDef) GetSaverDef() *SaverDef

func (*MetaGraphDef) GetSignatureDef

func (m *MetaGraphDef) GetSignatureDef() map[string]*SignatureDef

func (*MetaGraphDef) ProtoMessage

func (*MetaGraphDef) ProtoMessage()

func (*MetaGraphDef) Reset

func (m *MetaGraphDef) Reset()

func (*MetaGraphDef) String

func (m *MetaGraphDef) String() string

type MetaGraphDef_MetaInfoDef

type MetaGraphDef_MetaInfoDef struct {
	// User specified Version string. Can be the name of the model and revision,
	// steps this model has been trained to, etc.
	MetaGraphVersion string `protobuf:"bytes,1,opt,name=meta_graph_version,json=metaGraphVersion" json:"meta_graph_version,omitempty"`
	// A copy of the OpDefs used by the producer of this graph_def.
	// Descriptions and Ops not used in graph_def are stripped out.
	StrippedOpList *tensorflow7.OpList `protobuf:"bytes,2,opt,name=stripped_op_list,json=strippedOpList" json:"stripped_op_list,omitempty"`
	// A serialized protobuf. Can be the time this meta graph was created or
	// modified, or the name of the model.
	AnyInfo *google_protobuf.Any `protobuf:"bytes,3,opt,name=any_info,json=anyInfo" json:"any_info,omitempty"`
	// User supplied tag(s) on the meta_graph and included graph_def.
	//
	// MetaGraphDefs should be tagged with their capabilities or use-cases.
	// Examples: "train", "serve", "gpu", "tpu", etc.
	// These tags enable loaders to access the MetaGraph(s) appropriate for a
	// specific use-case or runtime environment.
	Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"`
	// The __version__ string of the tensorflow build used to write this graph.
	// This will be populated by the framework, which will overwrite any user
	// supplied value.
	TensorflowVersion string `protobuf:"bytes,5,opt,name=tensorflow_version,json=tensorflowVersion" json:"tensorflow_version,omitempty"`
	// The __git_version__ string of the tensorflow build used to write this
	// graph. This will be populated by the framework, which will overwrite any
	// user supplied value.
	TensorflowGitVersion string `protobuf:"bytes,6,opt,name=tensorflow_git_version,json=tensorflowGitVersion" json:"tensorflow_git_version,omitempty"`
}

Meta information regarding the graph to be exported. To be used by users of this protocol buffer to encode information regarding their meta graph.

func (*MetaGraphDef_MetaInfoDef) Descriptor

func (*MetaGraphDef_MetaInfoDef) Descriptor() ([]byte, []int)

func (*MetaGraphDef_MetaInfoDef) GetAnyInfo

func (m *MetaGraphDef_MetaInfoDef) GetAnyInfo() *google_protobuf.Any

func (*MetaGraphDef_MetaInfoDef) GetMetaGraphVersion

func (m *MetaGraphDef_MetaInfoDef) GetMetaGraphVersion() string

func (*MetaGraphDef_MetaInfoDef) GetStrippedOpList

func (m *MetaGraphDef_MetaInfoDef) GetStrippedOpList() *tensorflow7.OpList

func (*MetaGraphDef_MetaInfoDef) GetTags

func (m *MetaGraphDef_MetaInfoDef) GetTags() []string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion

func (m *MetaGraphDef_MetaInfoDef) GetTensorflowGitVersion() string

func (*MetaGraphDef_MetaInfoDef) GetTensorflowVersion

func (m *MetaGraphDef_MetaInfoDef) GetTensorflowVersion() string

func (*MetaGraphDef_MetaInfoDef) ProtoMessage

func (*MetaGraphDef_MetaInfoDef) ProtoMessage()

func (*MetaGraphDef_MetaInfoDef) Reset

func (m *MetaGraphDef_MetaInfoDef) Reset()

func (*MetaGraphDef_MetaInfoDef) String

func (m *MetaGraphDef_MetaInfoDef) String() string

type NamedTensorProto

type NamedTensorProto struct {
	// Name of the tensor.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// The client can populate a TensorProto using a `tensorflow::Tensor`, or
	// directly using the protobuf field accessors.
	//
	// The client specifies whether the returned tensor values should be
	// filled tensor fields (float_val, int_val, etc.) or encoded in a
	// compact form in tensor.tensor_content.
	Tensor *tensorflow4.TensorProto `protobuf:"bytes,2,opt,name=tensor" json:"tensor,omitempty"`
}

A pair of tensor name and tensor values.

func (*NamedTensorProto) Descriptor

func (*NamedTensorProto) Descriptor() ([]byte, []int)

func (*NamedTensorProto) GetName

func (m *NamedTensorProto) GetName() string

func (*NamedTensorProto) GetTensor

func (m *NamedTensorProto) GetTensor() *tensorflow4.TensorProto

func (*NamedTensorProto) ProtoMessage

func (*NamedTensorProto) ProtoMessage()

func (*NamedTensorProto) Reset

func (m *NamedTensorProto) Reset()

func (*NamedTensorProto) String

func (m *NamedTensorProto) String() string

type OptimizerOptions

type OptimizerOptions struct {
	// If true, optimize the graph using common subexpression elimination.
	DoCommonSubexpressionElimination bool `` /* 155-byte string literal not displayed */
	// If true, perform constant folding optimization on the graph.
	DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding" json:"do_constant_folding,omitempty"`
	// If true, perform function inlining on the graph.
	DoFunctionInlining bool                            `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining" json:"do_function_inlining,omitempty"`
	OptLevel           OptimizerOptions_Level          `protobuf:"varint,3,opt,name=opt_level,json=optLevel,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"`
	GlobalJitLevel     OptimizerOptions_GlobalJitLevel `` /* 147-byte string literal not displayed */
}

Options passed to the graph optimizer
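
For example, a sketch that enables the basic rewrites and turns JIT compilation on at its first level, using the enum constants documented below:

opts := &OptimizerOptions{
	DoCommonSubexpressionElimination: true,
	DoConstantFolding:                true,
	OptLevel:                         OptimizerOptions_L1,
	GlobalJitLevel:                   OptimizerOptions_ON_1,
}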

func (*OptimizerOptions) Descriptor

func (*OptimizerOptions) Descriptor() ([]byte, []int)

func (*OptimizerOptions) GetDoCommonSubexpressionElimination

func (m *OptimizerOptions) GetDoCommonSubexpressionElimination() bool

func (*OptimizerOptions) GetDoConstantFolding

func (m *OptimizerOptions) GetDoConstantFolding() bool

func (*OptimizerOptions) GetDoFunctionInlining

func (m *OptimizerOptions) GetDoFunctionInlining() bool

func (*OptimizerOptions) GetGlobalJitLevel

func (m *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel

func (*OptimizerOptions) GetOptLevel

func (m *OptimizerOptions) GetOptLevel() OptimizerOptions_Level

func (*OptimizerOptions) ProtoMessage

func (*OptimizerOptions) ProtoMessage()

func (*OptimizerOptions) Reset

func (m *OptimizerOptions) Reset()

func (*OptimizerOptions) String

func (m *OptimizerOptions) String() string

type OptimizerOptions_GlobalJitLevel

type OptimizerOptions_GlobalJitLevel int32

Control the use of the compiler/jit. Experimental.

const (
	OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0
	OptimizerOptions_OFF     OptimizerOptions_GlobalJitLevel = -1
	// The following settings turn on compilation, with higher values being
	// more aggressive.  Higher values may reduce opportunities for parallelism
	// and may use more memory.  (At present, there is no distinction, but this
	// is expected to change.)
	OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1
	OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2
)

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor

func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int)

func (OptimizerOptions_GlobalJitLevel) String

func (x OptimizerOptions_GlobalJitLevel) String() string

type OptimizerOptions_Level

type OptimizerOptions_Level int32

Optimization level

const (
	// L1 is the default level.
	// Optimization performed at L1 :
	// 1. Common subexpression elimination
	// 2. Constant folding
	OptimizerOptions_L1 OptimizerOptions_Level = 0
	// No optimizations
	OptimizerOptions_L0 OptimizerOptions_Level = -1
)

func (OptimizerOptions_Level) EnumDescriptor

func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int)

func (OptimizerOptions_Level) String

func (x OptimizerOptions_Level) String() string

type PartialRunSetupRequest

type PartialRunSetupRequest struct {
	// REQUIRED: session_handle must be returned by a CreateSession call
	// to the same master service.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// Tensors to be fed in future steps.
	Feed []string `protobuf:"bytes,2,rep,name=feed" json:"feed,omitempty"`
	// Fetches. A list of tensor names. The caller expects a tensor to be returned
	// for each fetch[i] (see RunStepResponse.tensor), for corresponding partial
	// RunStepRequests. The order of specified fetches does not change the
	// execution order.
	Fetch []string `protobuf:"bytes,3,rep,name=fetch" json:"fetch,omitempty"`
	// Target Nodes. A list of node names. The named nodes will be run in future
	// steps, but their outputs will not be fetched.
	Target []string `protobuf:"bytes,4,rep,name=target" json:"target,omitempty"`
}

func (*PartialRunSetupRequest) Descriptor

func (*PartialRunSetupRequest) Descriptor() ([]byte, []int)

func (*PartialRunSetupRequest) GetFeed

func (m *PartialRunSetupRequest) GetFeed() []string

func (*PartialRunSetupRequest) GetFetch

func (m *PartialRunSetupRequest) GetFetch() []string

func (*PartialRunSetupRequest) GetSessionHandle

func (m *PartialRunSetupRequest) GetSessionHandle() string

func (*PartialRunSetupRequest) GetTarget

func (m *PartialRunSetupRequest) GetTarget() []string

func (*PartialRunSetupRequest) ProtoMessage

func (*PartialRunSetupRequest) ProtoMessage()

func (*PartialRunSetupRequest) Reset

func (m *PartialRunSetupRequest) Reset()

func (*PartialRunSetupRequest) String

func (m *PartialRunSetupRequest) String() string

type PartialRunSetupResponse

type PartialRunSetupResponse struct {
	// The unique handle corresponding to the ongoing partial run call set up by
	// the invocation to PartialRunSetup. This handle may be passed to
	// RunStepRequest to send and receive tensors for this partial run.
	PartialRunHandle string `protobuf:"bytes,1,opt,name=partial_run_handle,json=partialRunHandle" json:"partial_run_handle,omitempty"`
}

func (*PartialRunSetupResponse) Descriptor

func (*PartialRunSetupResponse) Descriptor() ([]byte, []int)

func (*PartialRunSetupResponse) GetPartialRunHandle

func (m *PartialRunSetupResponse) GetPartialRunHandle() string

func (*PartialRunSetupResponse) ProtoMessage

func (*PartialRunSetupResponse) ProtoMessage()

func (*PartialRunSetupResponse) Reset

func (m *PartialRunSetupResponse) Reset()

func (*PartialRunSetupResponse) String

func (m *PartialRunSetupResponse) String() string

type QueueRunnerDef

type QueueRunnerDef struct {
	// Queue name.
	QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName" json:"queue_name,omitempty"`
	// A list of enqueue operations.
	EnqueueOpName []string `protobuf:"bytes,2,rep,name=enqueue_op_name,json=enqueueOpName" json:"enqueue_op_name,omitempty"`
	// The operation to run to close the queue.
	CloseOpName string `protobuf:"bytes,3,opt,name=close_op_name,json=closeOpName" json:"close_op_name,omitempty"`
	// The operation to run to cancel the queue.
	CancelOpName string `protobuf:"bytes,4,opt,name=cancel_op_name,json=cancelOpName" json:"cancel_op_name,omitempty"`
	// A list of exception types considered to signal a safely closed queue
	// if raised during enqueue operations.
	QueueClosedExceptionTypes []tensorflow_error.Code `` /* 168-byte string literal not displayed */
}

Protocol buffer representing a QueueRunner.
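
A minimal sketch of a QueueRunnerDef; the queue and operation names are illustrative and would normally come from the graph being exported (queue_closed_exception_types is omitted):

qr := &QueueRunnerDef{
	QueueName:     "input_queue",
	EnqueueOpName: []string{"input_queue/enqueue"},
	CloseOpName:   "input_queue/close",
	CancelOpName:  "input_queue/cancel",
}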

func (*QueueRunnerDef) Descriptor

func (*QueueRunnerDef) Descriptor() ([]byte, []int)

func (*QueueRunnerDef) GetCancelOpName

func (m *QueueRunnerDef) GetCancelOpName() string

func (*QueueRunnerDef) GetCloseOpName

func (m *QueueRunnerDef) GetCloseOpName() string

func (*QueueRunnerDef) GetEnqueueOpName

func (m *QueueRunnerDef) GetEnqueueOpName() []string

func (*QueueRunnerDef) GetQueueClosedExceptionTypes

func (m *QueueRunnerDef) GetQueueClosedExceptionTypes() []tensorflow_error.Code

func (*QueueRunnerDef) GetQueueName

func (m *QueueRunnerDef) GetQueueName() string

func (*QueueRunnerDef) ProtoMessage

func (*QueueRunnerDef) ProtoMessage()

func (*QueueRunnerDef) Reset

func (m *QueueRunnerDef) Reset()

func (*QueueRunnerDef) String

func (m *QueueRunnerDef) String() string

type RPCOptions

type RPCOptions struct {
	// If true, always use RPC to contact the session target.
	//
	// If false (the default option), TensorFlow may use an optimized
	// transport for client-master communication that avoids the RPC
	// stack. This option is primarily used for testing the RPC stack.
	UseRpcForInprocessMaster bool `` /* 133-byte string literal not displayed */
}

func (*RPCOptions) Descriptor

func (*RPCOptions) Descriptor() ([]byte, []int)

func (*RPCOptions) GetUseRpcForInprocessMaster

func (m *RPCOptions) GetUseRpcForInprocessMaster() bool

func (*RPCOptions) ProtoMessage

func (*RPCOptions) ProtoMessage()

func (*RPCOptions) Reset

func (m *RPCOptions) Reset()

func (*RPCOptions) String

func (m *RPCOptions) String() string

type RecvTensorRequest

type RecvTensorRequest struct {
	// The step in which the tensor will be produced.
	//
	// REQUIRED: This must eventually correspond to the `step_id` passed
	// into a RunGraph call on the same WorkerService.
	StepId int64 `protobuf:"varint,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"`
	// A key that identifies the tensor to be received.
	RendezvousKey string `protobuf:"bytes,2,opt,name=rendezvous_key,json=rendezvousKey" json:"rendezvous_key,omitempty"`
	// If true, use an out-of-band DMA mechanism to transfer the
	// received tensor.
	DmaOk bool `protobuf:"varint,3,opt,name=dma_ok,json=dmaOk" json:"dma_ok,omitempty"`
	// Optional information on client-side device locality.
	ClientLocality *tensorflow14.DeviceLocality `protobuf:"bytes,4,opt,name=client_locality,json=clientLocality" json:"client_locality,omitempty"`
	// Optional information on server-side device locality.
	ServerLocality *tensorflow14.DeviceLocality `protobuf:"bytes,5,opt,name=server_locality,json=serverLocality" json:"server_locality,omitempty"`
	// Optional information needed by the RPC subsystem.
	TransportOptions *google_protobuf.Any `protobuf:"bytes,6,opt,name=transport_options,json=transportOptions" json:"transport_options,omitempty"`
}

func (*RecvTensorRequest) Descriptor

func (*RecvTensorRequest) Descriptor() ([]byte, []int)

func (*RecvTensorRequest) GetClientLocality

func (m *RecvTensorRequest) GetClientLocality() *tensorflow14.DeviceLocality

func (*RecvTensorRequest) GetDmaOk

func (m *RecvTensorRequest) GetDmaOk() bool

func (*RecvTensorRequest) GetRendezvousKey

func (m *RecvTensorRequest) GetRendezvousKey() string

func (*RecvTensorRequest) GetServerLocality

func (m *RecvTensorRequest) GetServerLocality() *tensorflow14.DeviceLocality

func (*RecvTensorRequest) GetStepId

func (m *RecvTensorRequest) GetStepId() int64

func (*RecvTensorRequest) GetTransportOptions

func (m *RecvTensorRequest) GetTransportOptions() *google_protobuf.Any

func (*RecvTensorRequest) ProtoMessage

func (*RecvTensorRequest) ProtoMessage()

func (*RecvTensorRequest) Reset

func (m *RecvTensorRequest) Reset()

func (*RecvTensorRequest) String

func (m *RecvTensorRequest) String() string

type RecvTensorResponse

type RecvTensorResponse struct {
	// The tensor as a proto.
	Tensor *tensorflow4.TensorProto `protobuf:"bytes,1,opt,name=tensor" json:"tensor,omitempty"`
	// If true, this tensor was the output of a dead node, and the
	// content is invalid.
	IsDead bool `protobuf:"varint,2,opt,name=is_dead,json=isDead" json:"is_dead,omitempty"`
	// The time at which the tensor was available and started to be returned.
	SendStartMicros int64 `protobuf:"varint,3,opt,name=send_start_micros,json=sendStartMicros" json:"send_start_micros,omitempty"`
	// Optional additional information about how to receive the tensor,
	// e.g. in the event that `RecvTensorRequest.dma_ok` was true.
	TransportOptions *google_protobuf.Any `protobuf:"bytes,4,opt,name=transport_options,json=transportOptions" json:"transport_options,omitempty"`
}

func (*RecvTensorResponse) Descriptor

func (*RecvTensorResponse) Descriptor() ([]byte, []int)

func (*RecvTensorResponse) GetIsDead

func (m *RecvTensorResponse) GetIsDead() bool

func (*RecvTensorResponse) GetSendStartMicros

func (m *RecvTensorResponse) GetSendStartMicros() int64

func (*RecvTensorResponse) GetTensor

func (m *RecvTensorResponse) GetTensor() *tensorflow4.TensorProto

func (*RecvTensorResponse) GetTransportOptions

func (m *RecvTensorResponse) GetTransportOptions() *google_protobuf.Any

func (*RecvTensorResponse) ProtoMessage

func (*RecvTensorResponse) ProtoMessage()

func (*RecvTensorResponse) Reset

func (m *RecvTensorResponse) Reset()

func (*RecvTensorResponse) String

func (m *RecvTensorResponse) String() string

type RegisterGraphRequest

type RegisterGraphRequest struct {
	// Subgraphs are scoped within one session.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// "graph_def" has the subgraph of nodes for this worker, with each node
	// having its device_name filled in.
	GraphDef *tensorflow10.GraphDef `protobuf:"bytes,2,opt,name=graph_def,json=graphDef" json:"graph_def,omitempty"`
	// True iff the graph (before partitioning) contains control flow nodes.
	//
	// As of 01/11/2015, this is no longer set by clients.
	HasControlFlow bool `protobuf:"varint,3,opt,name=has_control_flow,json=hasControlFlow" json:"has_control_flow,omitempty"`
	// Configuration options for the session in which this graph was created.
	GraphOptions *GraphOptions `protobuf:"bytes,4,opt,name=graph_options,json=graphOptions" json:"graph_options,omitempty"`
	// Field(s) used by TensorFlow Debugger (tfdbg).
	DebugOptions *DebugOptions `protobuf:"bytes,5,opt,name=debug_options,json=debugOptions" json:"debug_options,omitempty"`
}

func (*RegisterGraphRequest) Descriptor

func (*RegisterGraphRequest) Descriptor() ([]byte, []int)

func (*RegisterGraphRequest) GetDebugOptions

func (m *RegisterGraphRequest) GetDebugOptions() *DebugOptions

func (*RegisterGraphRequest) GetGraphDef

func (m *RegisterGraphRequest) GetGraphDef() *tensorflow10.GraphDef

func (*RegisterGraphRequest) GetGraphOptions

func (m *RegisterGraphRequest) GetGraphOptions() *GraphOptions

func (*RegisterGraphRequest) GetHasControlFlow

func (m *RegisterGraphRequest) GetHasControlFlow() bool

func (*RegisterGraphRequest) GetSessionHandle

func (m *RegisterGraphRequest) GetSessionHandle() string

func (*RegisterGraphRequest) ProtoMessage

func (*RegisterGraphRequest) ProtoMessage()

func (*RegisterGraphRequest) Reset

func (m *RegisterGraphRequest) Reset()

func (*RegisterGraphRequest) String

func (m *RegisterGraphRequest) String() string

type RegisterGraphResponse

type RegisterGraphResponse struct {
	// If the registration succeeds, returns an opaque graph_handle to
	// the master. The master calls RunGraph with graph_handle to
	// compute different steps.
	GraphHandle string `protobuf:"bytes,1,opt,name=graph_handle,json=graphHandle" json:"graph_handle,omitempty"`
}

func (*RegisterGraphResponse) Descriptor

func (*RegisterGraphResponse) Descriptor() ([]byte, []int)

func (*RegisterGraphResponse) GetGraphHandle

func (m *RegisterGraphResponse) GetGraphHandle() string

func (*RegisterGraphResponse) ProtoMessage

func (*RegisterGraphResponse) ProtoMessage()

func (*RegisterGraphResponse) Reset

func (m *RegisterGraphResponse) Reset()

func (*RegisterGraphResponse) String

func (m *RegisterGraphResponse) String() string

type ResetRequest

type ResetRequest struct {
	// A list of container names, which may be empty.
	//
	// If 'container' is not empty, releases resources in the given
	// containers in all devices.
	//
	// If 'container' is empty, releases resources in the default
	// container in all devices.
	Container []string `protobuf:"bytes,1,rep,name=container" json:"container,omitempty"`
	// When any filters are present, only devices that match the filters
	// will be reset. Each filter can be partially specified,
	// e.g. "/job:ps" "/job:worker/replica:3", etc.
	DeviceFilters []string `protobuf:"bytes,2,rep,name=device_filters,json=deviceFilters" json:"device_filters,omitempty"`
}

Reset() allows misbehaving or slow sessions to be aborted and closed, and causes their resources eventually to be released. Reset() does not wait for the computations in old sessions to cease; it merely starts the process of tearing them down. However, if a new session is started after a Reset(), the new session is isolated from changes that old sessions (started prior to the Reset()) may continue to make to resources, provided all those resources are in containers listed in "containers".

Old sessions may continue to have side-effects on resources not in containers listed in "containers", and thus may affect future sessions' results in ways that are hard to predict. Thus, if well-defined behavior is desired, it is recommended that all containers be listed in "containers". Similarly, if a device_filter is specified, results may be hard to predict.
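
As a sketch under stated assumptions, resetting a master through the MasterServiceClient defined above; the target address and container name are illustrative, and grpc.WithInsecure is used only to keep the example short:

import (
	"context"

	"google.golang.org/grpc"
)

func resetMaster(target string) error {
	cc, err := grpc.Dial(target, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer cc.Close()
	master := NewMasterServiceClient(cc)
	_, err = master.Reset(context.Background(), &ResetRequest{
		Container: []string{"experiment0"}, // illustrative container name
	})
	return err
}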

func (*ResetRequest) Descriptor

func (*ResetRequest) Descriptor() ([]byte, []int)

func (*ResetRequest) GetContainer

func (m *ResetRequest) GetContainer() []string

func (*ResetRequest) GetDeviceFilters

func (m *ResetRequest) GetDeviceFilters() []string

func (*ResetRequest) ProtoMessage

func (*ResetRequest) ProtoMessage()

func (*ResetRequest) Reset

func (m *ResetRequest) Reset()

func (*ResetRequest) String

func (m *ResetRequest) String() string

type ResetResponse

type ResetResponse struct {
}

func (*ResetResponse) Descriptor

func (*ResetResponse) Descriptor() ([]byte, []int)

func (*ResetResponse) ProtoMessage

func (*ResetResponse) ProtoMessage()

func (*ResetResponse) Reset

func (m *ResetResponse) Reset()

func (*ResetResponse) String

func (m *ResetResponse) String() string

type RewriterConfig

type RewriterConfig struct {
	OptimizeTensorLayout bool `protobuf:"varint,1,opt,name=optimize_tensor_layout,json=optimizeTensorLayout" json:"optimize_tensor_layout,omitempty"`
	DisableModelPruning  bool `protobuf:"varint,2,opt,name=disable_model_pruning,json=disableModelPruning" json:"disable_model_pruning,omitempty"`
	ConstantFolding      bool `protobuf:"varint,3,opt,name=constant_folding,json=constantFolding" json:"constant_folding,omitempty"`
	// Configures memory optimization passes through the meta-optimizer. Has no
	// effect on manually requested memory optimization passes in the optimizers
	// field.
	MemoryOptimization RewriterConfig_MemOptType `` /* 151-byte string literal not displayed */
	// Configures AutoParallel optimization passes either through the
	// meta-optimizer or when manually specified through the optimizers field.
	AutoParallel *AutoParallelOptions `protobuf:"bytes,5,opt,name=auto_parallel,json=autoParallel" json:"auto_parallel,omitempty"`
	// If non-empty, will use this as an alternative way to specify a list of
	// optimizations to turn on and the order of the optimizations (replacing the
	// meta-optimizer).
	//
	// Of the RewriterConfig options, only the AutoParallel configuration options
	// (the auto_parallel field) apply to manually requested optimization passes
	// ("autoparallel"). Memory optimization passes ("memory") invoked here are
	// not configurable (in contrast to memory optimization passes through the
	// meta-optimizer) and act only on manual op annotations.
	Optimizers []string `protobuf:"bytes,100,rep,name=optimizers" json:"optimizers,omitempty"`
}

func (*RewriterConfig) Descriptor

func (*RewriterConfig) Descriptor() ([]byte, []int)

func (*RewriterConfig) GetAutoParallel

func (m *RewriterConfig) GetAutoParallel() *AutoParallelOptions

func (*RewriterConfig) GetConstantFolding

func (m *RewriterConfig) GetConstantFolding() bool

func (*RewriterConfig) GetDisableModelPruning

func (m *RewriterConfig) GetDisableModelPruning() bool

func (*RewriterConfig) GetMemoryOptimization

func (m *RewriterConfig) GetMemoryOptimization() RewriterConfig_MemOptType

func (*RewriterConfig) GetOptimizeTensorLayout

func (m *RewriterConfig) GetOptimizeTensorLayout() bool

func (*RewriterConfig) GetOptimizers

func (m *RewriterConfig) GetOptimizers() []string

func (*RewriterConfig) ProtoMessage

func (*RewriterConfig) ProtoMessage()

func (*RewriterConfig) Reset

func (m *RewriterConfig) Reset()

func (*RewriterConfig) String

func (m *RewriterConfig) String() string

type RewriterConfig_MemOptType

type RewriterConfig_MemOptType int32
const (
	// Disabled in the meta-optimizer.
	RewriterConfig_NO_MEM_OPT RewriterConfig_MemOptType = 0
	// Driven by manual op-level annotations.
	RewriterConfig_MANUAL RewriterConfig_MemOptType = 1
	// Driven by heuristics. The behavior of these heuristics is subject to
	// change. Currently includes an experimental recomputation heuristic.
	RewriterConfig_HEURISTICS RewriterConfig_MemOptType = 2
)

func (RewriterConfig_MemOptType) EnumDescriptor

func (RewriterConfig_MemOptType) EnumDescriptor() ([]byte, []int)

func (RewriterConfig_MemOptType) String

func (x RewriterConfig_MemOptType) String() string

type RunGraphRequest

type RunGraphRequest struct {
	// session_handle is the master-generated unique id for this session.
	// If session_handle is non-empty, it must be the same as used when
	// registering the graph. If it is empty, a single global namespace is used to
	// search for the graph_handle.
	SessionHandle string `protobuf:"bytes,8,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// REQUIRED: graph_handle must be returned by a RegisterGraph call
	// to the same WorkerService.
	GraphHandle string `protobuf:"bytes,1,opt,name=graph_handle,json=graphHandle" json:"graph_handle,omitempty"`
	// A unique ID to distinguish different runs of the same graph.
	//
	// The master generates a global unique `step_id` to distinguish
	// different runs of the graph computation. Subgraphs communicate
	// (e.g., send/recv ops) with each other using `step_id` to
	// distinguish tensors generated by different runs.
	StepId int64 `protobuf:"varint,2,opt,name=step_id,json=stepId" json:"step_id,omitempty"`
	// Options for this step.
	ExecOpts *ExecutorOpts `protobuf:"bytes,5,opt,name=exec_opts,json=execOpts" json:"exec_opts,omitempty"`
	// Runs the graph.
	//
	// Sends the tensors in "send" into the graph before the run and
	// fetches the keys into `RunGraphResponse.recv` after the run.
	Send    []*NamedTensorProto `protobuf:"bytes,3,rep,name=send" json:"send,omitempty"`
	RecvKey []string            `protobuf:"bytes,4,rep,name=recv_key,json=recvKey" json:"recv_key,omitempty"`
	// True if the RunGraphRequest is a partial run request.
	IsPartial bool `protobuf:"varint,6,opt,name=is_partial,json=isPartial" json:"is_partial,omitempty"`
	// True if this is the last partial run request in a sequence of requests.
	IsLastPartialRun bool `protobuf:"varint,7,opt,name=is_last_partial_run,json=isLastPartialRun" json:"is_last_partial_run,omitempty"`
}

func (*RunGraphRequest) Descriptor

func (*RunGraphRequest) Descriptor() ([]byte, []int)

func (*RunGraphRequest) GetExecOpts

func (m *RunGraphRequest) GetExecOpts() *ExecutorOpts

func (*RunGraphRequest) GetGraphHandle

func (m *RunGraphRequest) GetGraphHandle() string

func (*RunGraphRequest) GetIsLastPartialRun

func (m *RunGraphRequest) GetIsLastPartialRun() bool

func (*RunGraphRequest) GetIsPartial

func (m *RunGraphRequest) GetIsPartial() bool

func (*RunGraphRequest) GetRecvKey

func (m *RunGraphRequest) GetRecvKey() []string

func (*RunGraphRequest) GetSend

func (m *RunGraphRequest) GetSend() []*NamedTensorProto

func (*RunGraphRequest) GetSessionHandle

func (m *RunGraphRequest) GetSessionHandle() string

func (*RunGraphRequest) GetStepId

func (m *RunGraphRequest) GetStepId() int64

func (*RunGraphRequest) ProtoMessage

func (*RunGraphRequest) ProtoMessage()

func (*RunGraphRequest) Reset

func (m *RunGraphRequest) Reset()

func (*RunGraphRequest) String

func (m *RunGraphRequest) String() string

type RunGraphResponse

type RunGraphResponse struct {
	// A list of tensors corresponding to those requested by
	// `RunGraphRequest.recv_key`.
	Recv []*NamedTensorProto `protobuf:"bytes,1,rep,name=recv" json:"recv,omitempty"`
	// If the request asked for execution stats or cost graph, these are returned
	// here.
	StepStats *tensorflow13.StepStats   `protobuf:"bytes,2,opt,name=step_stats,json=stepStats" json:"step_stats,omitempty"`
	CostGraph *tensorflow2.CostGraphDef `protobuf:"bytes,3,opt,name=cost_graph,json=costGraph" json:"cost_graph,omitempty"`
}

func (*RunGraphResponse) Descriptor

func (*RunGraphResponse) Descriptor() ([]byte, []int)

func (*RunGraphResponse) GetCostGraph

func (m *RunGraphResponse) GetCostGraph() *tensorflow2.CostGraphDef

func (*RunGraphResponse) GetRecv

func (m *RunGraphResponse) GetRecv() []*NamedTensorProto

func (*RunGraphResponse) GetStepStats

func (m *RunGraphResponse) GetStepStats() *tensorflow13.StepStats

func (*RunGraphResponse) ProtoMessage

func (*RunGraphResponse) ProtoMessage()

func (*RunGraphResponse) Reset

func (m *RunGraphResponse) Reset()

func (*RunGraphResponse) String

func (m *RunGraphResponse) String() string

type RunMetadata

type RunMetadata struct {
	// Statistics traced for this step. Populated if tracing is turned on via the
	// "RunOptions" proto.
	// EXPERIMENTAL: The format and set of events may change in future versions.
	StepStats *tensorflow13.StepStats `protobuf:"bytes,1,opt,name=step_stats,json=stepStats" json:"step_stats,omitempty"`
	// The cost graph for the computation defined by the run call.
	CostGraph *tensorflow2.CostGraphDef `protobuf:"bytes,2,opt,name=cost_graph,json=costGraph" json:"cost_graph,omitempty"`
	// Graphs of the partitions executed by executors.
	PartitionGraphs []*tensorflow10.GraphDef `protobuf:"bytes,3,rep,name=partition_graphs,json=partitionGraphs" json:"partition_graphs,omitempty"`
}

Metadata output (i.e., non-Tensor) for a single Run() call.

func (*RunMetadata) Descriptor

func (*RunMetadata) Descriptor() ([]byte, []int)

func (*RunMetadata) GetCostGraph

func (m *RunMetadata) GetCostGraph() *tensorflow2.CostGraphDef

func (*RunMetadata) GetPartitionGraphs

func (m *RunMetadata) GetPartitionGraphs() []*tensorflow10.GraphDef

func (*RunMetadata) GetStepStats

func (m *RunMetadata) GetStepStats() *tensorflow13.StepStats

func (*RunMetadata) ProtoMessage

func (*RunMetadata) ProtoMessage()

func (*RunMetadata) Reset

func (m *RunMetadata) Reset()

func (*RunMetadata) String

func (m *RunMetadata) String() string

type RunOptions

type RunOptions struct {
	TraceLevel RunOptions_TraceLevel `protobuf:"varint,1,opt,name=trace_level,json=traceLevel,enum=tensorflow.RunOptions_TraceLevel" json:"trace_level,omitempty"`
	// Time to wait for operation to complete in milliseconds.
	TimeoutInMs int64 `protobuf:"varint,2,opt,name=timeout_in_ms,json=timeoutInMs" json:"timeout_in_ms,omitempty"`
	// The thread pool to use, if session_inter_op_thread_pool is configured.
	InterOpThreadPool int32 `protobuf:"varint,3,opt,name=inter_op_thread_pool,json=interOpThreadPool" json:"inter_op_thread_pool,omitempty"`
	// Whether the partition graph(s) executed by the executor(s) should be
	// output via RunMetadata.
	OutputPartitionGraphs bool `protobuf:"varint,5,opt,name=output_partition_graphs,json=outputPartitionGraphs" json:"output_partition_graphs,omitempty"`
	// EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
	DebugOptions *DebugOptions `protobuf:"bytes,6,opt,name=debug_options,json=debugOptions" json:"debug_options,omitempty"`
}

Options for a single Run() call.
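
For example, a sketch of building a RunStepRequest that asks for full tracing with a 10-second timeout; the fetch name is illustrative and the handle comes from a prior CreateSession call:

func fullTraceRequest(handle string) *RunStepRequest {
	return &RunStepRequest{
		SessionHandle: handle,
		Fetch:         []string{"softmax:0"}, // illustrative tensor name
		Options: &RunOptions{
			TraceLevel:  RunOptions_FULL_TRACE,
			TimeoutInMs: 10000, // 10 seconds
		},
	}
}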

func (*RunOptions) Descriptor

func (*RunOptions) Descriptor() ([]byte, []int)

func (*RunOptions) GetDebugOptions

func (m *RunOptions) GetDebugOptions() *DebugOptions

func (*RunOptions) GetInterOpThreadPool

func (m *RunOptions) GetInterOpThreadPool() int32

func (*RunOptions) GetOutputPartitionGraphs

func (m *RunOptions) GetOutputPartitionGraphs() bool

func (*RunOptions) GetTimeoutInMs

func (m *RunOptions) GetTimeoutInMs() int64

func (*RunOptions) GetTraceLevel

func (m *RunOptions) GetTraceLevel() RunOptions_TraceLevel

func (*RunOptions) ProtoMessage

func (*RunOptions) ProtoMessage()

func (*RunOptions) Reset

func (m *RunOptions) Reset()

func (*RunOptions) String

func (m *RunOptions) String() string

type RunOptions_TraceLevel

type RunOptions_TraceLevel int32

TODO(pbar) Turn this into a TraceOptions proto which allows tracing to be controlled in a more orthogonal manner?

const (
	RunOptions_NO_TRACE       RunOptions_TraceLevel = 0
	RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1
	RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2
	RunOptions_FULL_TRACE     RunOptions_TraceLevel = 3
)

func (RunOptions_TraceLevel) EnumDescriptor

func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int)

func (RunOptions_TraceLevel) String

func (x RunOptions_TraceLevel) String() string

type RunStepRequest

type RunStepRequest struct {
	// REQUIRED: session_handle must be returned by a CreateSession call
	// to the same master service.
	SessionHandle string `protobuf:"bytes,1,opt,name=session_handle,json=sessionHandle" json:"session_handle,omitempty"`
	// Tensors to be fed in the step. Each feed is a named tensor.
	Feed []*NamedTensorProto `protobuf:"bytes,2,rep,name=feed" json:"feed,omitempty"`
	// Fetches. A list of tensor names. The caller expects a tensor to
	// be returned for each fetch[i] (see RunStepResponse.tensor). The
	// order of specified fetches does not change the execution order.
	Fetch []string `protobuf:"bytes,3,rep,name=fetch" json:"fetch,omitempty"`
	// Target Nodes. A list of node names. The named nodes will be run,
	// but their outputs will not be fetched.
	Target []string `protobuf:"bytes,4,rep,name=target" json:"target,omitempty"`
	// Options for the run call.
	Options *RunOptions `protobuf:"bytes,5,opt,name=options" json:"options,omitempty"`
	// Partial run handle (optional). If specified, this will be a partial run
	// execution, run up to the specified fetches.
	PartialRunHandle string `protobuf:"bytes,6,opt,name=partial_run_handle,json=partialRunHandle" json:"partial_run_handle,omitempty"`
}

func (*RunStepRequest) Descriptor

func (*RunStepRequest) Descriptor() ([]byte, []int)

func (*RunStepRequest) GetFeed

func (m *RunStepRequest) GetFeed() []*NamedTensorProto

func (*RunStepRequest) GetFetch

func (m *RunStepRequest) GetFetch() []string

func (*RunStepRequest) GetOptions

func (m *RunStepRequest) GetOptions() *RunOptions

func (*RunStepRequest) GetPartialRunHandle

func (m *RunStepRequest) GetPartialRunHandle() string

func (*RunStepRequest) GetSessionHandle

func (m *RunStepRequest) GetSessionHandle() string

func (*RunStepRequest) GetTarget

func (m *RunStepRequest) GetTarget() []string

func (*RunStepRequest) ProtoMessage

func (*RunStepRequest) ProtoMessage()

func (*RunStepRequest) Reset

func (m *RunStepRequest) Reset()

func (*RunStepRequest) String

func (m *RunStepRequest) String() string

type RunStepResponse

type RunStepResponse struct {
	// NOTE: The order of the returned tensors may or may not match
	// the fetch order specified in RunStepRequest.
	Tensor []*NamedTensorProto `protobuf:"bytes,1,rep,name=tensor" json:"tensor,omitempty"`
	// Returned metadata if requested in the options.
	Metadata *RunMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"`
}

func (*RunStepResponse) Descriptor

func (*RunStepResponse) Descriptor() ([]byte, []int)

func (*RunStepResponse) GetMetadata

func (m *RunStepResponse) GetMetadata() *RunMetadata

func (*RunStepResponse) GetTensor

func (m *RunStepResponse) GetTensor() []*NamedTensorProto

func (*RunStepResponse) ProtoMessage

func (*RunStepResponse) ProtoMessage()

func (*RunStepResponse) Reset

func (m *RunStepResponse) Reset()

func (*RunStepResponse) String

func (m *RunStepResponse) String() string

type SavedModel

type SavedModel struct {
	// The schema version of the SavedModel instance. Used for versioning when
	// making future changes to the specification/implementation. Initial value
	// at release will be 1.
	SavedModelSchemaVersion int64 `` /* 128-byte string literal not displayed */
	// One or more MetaGraphs.
	MetaGraphs []*MetaGraphDef `protobuf:"bytes,2,rep,name=meta_graphs,json=metaGraphs" json:"meta_graphs,omitempty"`
}

SavedModel is the high level serialization format for TensorFlow Models. See [todo: doc links, similar to session_bundle] for more information.
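
As a sketch, a saved_model.pb file can be loaded into this message with the github.com/golang/protobuf runtime; the path is illustrative:

import (
	"io/ioutil"

	"github.com/golang/protobuf/proto"
)

func loadSavedModel(path string) (*SavedModel, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	sm := &SavedModel{}
	if err := proto.Unmarshal(data, sm); err != nil {
		return nil, err
	}
	// Each entry of sm.GetMetaGraphs() can then be selected by the tags in
	// its MetaInfoDef.
	return sm, nil
}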

func (*SavedModel) Descriptor

func (*SavedModel) Descriptor() ([]byte, []int)

func (*SavedModel) GetMetaGraphs

func (m *SavedModel) GetMetaGraphs() []*MetaGraphDef

func (*SavedModel) GetSavedModelSchemaVersion

func (m *SavedModel) GetSavedModelSchemaVersion() int64

func (*SavedModel) ProtoMessage

func (*SavedModel) ProtoMessage()

func (*SavedModel) Reset

func (m *SavedModel) Reset()

func (*SavedModel) String

func (m *SavedModel) String() string

type SaverDef

type SaverDef struct {
	// The name of the tensor in which to specify the filename when saving or
	// restoring a model checkpoint.
	FilenameTensorName string `protobuf:"bytes,1,opt,name=filename_tensor_name,json=filenameTensorName" json:"filename_tensor_name,omitempty"`
	// The operation to run when saving a model checkpoint.
	SaveTensorName string `protobuf:"bytes,2,opt,name=save_tensor_name,json=saveTensorName" json:"save_tensor_name,omitempty"`
	// The operation to run when restoring a model checkpoint.
	RestoreOpName string `protobuf:"bytes,3,opt,name=restore_op_name,json=restoreOpName" json:"restore_op_name,omitempty"`
	// Maximum number of checkpoints to keep.  If 0, no checkpoints are deleted.
	MaxToKeep int32 `protobuf:"varint,4,opt,name=max_to_keep,json=maxToKeep" json:"max_to_keep,omitempty"`
	// Shard the save files, one per device that has Variable nodes.
	Sharded bool `protobuf:"varint,5,opt,name=sharded" json:"sharded,omitempty"`
	// How often to keep an additional checkpoint. If not specified, only the last
	// "max_to_keep" checkpoints are kept; if specified, in addition to keeping
	// the last "max_to_keep" checkpoints, an additional checkpoint will be kept
	// for every n hours of training.
	KeepCheckpointEveryNHours float32                          `` /* 137-byte string literal not displayed */
	Version                   SaverDef_CheckpointFormatVersion `protobuf:"varint,7,opt,name=version,enum=tensorflow.SaverDef_CheckpointFormatVersion" json:"version,omitempty"`
}

Protocol buffer representing the configuration of a Saver.
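
For example, a sketch of a SaverDef that uses the V2 checkpoint format, keeps at most five checkpoints, and additionally keeps one checkpoint every two hours; the tensor and op names are illustrative:

saver := &SaverDef{
	FilenameTensorName:        "save/Const:0",
	SaveTensorName:            "save/SaveV2",
	RestoreOpName:             "save/restore_all",
	MaxToKeep:                 5,
	KeepCheckpointEveryNHours: 2,
	Version:                   SaverDef_V2,
}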

func (*SaverDef) Descriptor

func (*SaverDef) Descriptor() ([]byte, []int)

func (*SaverDef) GetFilenameTensorName

func (m *SaverDef) GetFilenameTensorName() string

func (*SaverDef) GetKeepCheckpointEveryNHours

func (m *SaverDef) GetKeepCheckpointEveryNHours() float32

func (*SaverDef) GetMaxToKeep

func (m *SaverDef) GetMaxToKeep() int32

func (*SaverDef) GetRestoreOpName

func (m *SaverDef) GetRestoreOpName() string

func (*SaverDef) GetSaveTensorName

func (m *SaverDef) GetSaveTensorName() string

func (*SaverDef) GetSharded

func (m *SaverDef) GetSharded() bool

func (*SaverDef) GetVersion

func (m *SaverDef) GetVersion() SaverDef_CheckpointFormatVersion

func (*SaverDef) ProtoMessage

func (*SaverDef) ProtoMessage()

func (*SaverDef) Reset

func (m *SaverDef) Reset()

func (*SaverDef) String

func (m *SaverDef) String() string

type SaverDef_CheckpointFormatVersion

type SaverDef_CheckpointFormatVersion int32

A version number that identifies a different on-disk checkpoint format. Usually, each subclass of BaseSaverBuilder works with a particular version/format. However, it is possible that the same builder may be upgraded to support a newer checkpoint format in the future.

const (
	// Internal legacy format.
	SaverDef_LEGACY SaverDef_CheckpointFormatVersion = 0
	// Current format: tf.Saver() which works with tensorflow::table::Table.
	SaverDef_V1 SaverDef_CheckpointFormatVersion = 1
	// Experimental format under development.
	SaverDef_V2 SaverDef_CheckpointFormatVersion = 2
)

func (SaverDef_CheckpointFormatVersion) EnumDescriptor

func (SaverDef_CheckpointFormatVersion) EnumDescriptor() ([]byte, []int)

func (SaverDef_CheckpointFormatVersion) String

func (x SaverDef_CheckpointFormatVersion) String() string

type ServerDef

type ServerDef struct {
	// The cluster of which this server is a member.
	Cluster *ClusterDef `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"`
	// The name of the job of which this server is a member.
	//
	// NOTE(mrry): The `cluster` field must contain a `JobDef` with a `name` field
	// that matches this name.
	JobName string `protobuf:"bytes,2,opt,name=job_name,json=jobName" json:"job_name,omitempty"`
	// The task index of this server in its job.
	//
	// NOTE: The `cluster` field must contain a `JobDef` with a matching `name`
	// and a mapping in its `tasks` field for this index.
	TaskIndex int32 `protobuf:"varint,3,opt,name=task_index,json=taskIndex" json:"task_index,omitempty"`
	// The default configuration for sessions that run on this server.
	DefaultSessionConfig *ConfigProto `protobuf:"bytes,4,opt,name=default_session_config,json=defaultSessionConfig" json:"default_session_config,omitempty"`
	// The protocol to be used by this server.
	//
	// Acceptable values include: "grpc".
	Protocol string `protobuf:"bytes,5,opt,name=protocol" json:"protocol,omitempty"`
}

Defines the configuration of a single TensorFlow server.
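
A sketch of a ServerDef for task 0 of a two-task "worker" job; the addresses are illustrative, and the ClusterDef/JobDef field names follow the messages documented in this package:

server := &ServerDef{
	Cluster: &ClusterDef{
		Job: []*JobDef{{
			Name: "worker",
			Tasks: map[int32]string{
				0: "worker0.example.com:2222",
				1: "worker1.example.com:2222",
			},
		}},
	},
	JobName:   "worker",
	TaskIndex: 0,
	Protocol:  "grpc",
}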

func (*ServerDef) Descriptor

func (*ServerDef) Descriptor() ([]byte, []int)

func (*ServerDef) GetCluster

func (m *ServerDef) GetCluster() *ClusterDef

func (*ServerDef) GetDefaultSessionConfig

func (m *ServerDef) GetDefaultSessionConfig() *ConfigProto

func (*ServerDef) GetJobName

func (m *ServerDef) GetJobName() string

func (*ServerDef) GetProtocol

func (m *ServerDef) GetProtocol() string

func (*ServerDef) GetTaskIndex

func (m *ServerDef) GetTaskIndex() int32

func (*ServerDef) ProtoMessage

func (*ServerDef) ProtoMessage()

func (*ServerDef) Reset

func (m *ServerDef) Reset()

func (*ServerDef) String

func (m *ServerDef) String() string

type SignatureDef

type SignatureDef struct {
	// Named input parameters.
	Inputs map[string]*TensorInfo `` /* 132-byte string literal not displayed */
	// Named output parameters.
	Outputs map[string]*TensorInfo `` /* 134-byte string literal not displayed */
	// Extensible method_name information enabling third-party users to mark a
	// SignatureDef as supporting a particular method. This enables producers and
	// consumers of SignatureDefs, e.g. a model definition library and a serving
	// library to have a clear hand-off regarding the semantics of a computation.
	//
	// Note that multiple SignatureDefs in a single MetaGraphDef may have the same
	// method_name. This is commonly used to support multi-headed computation,
	// where a single graph computation may return multiple results.
	MethodName string `protobuf:"bytes,3,opt,name=method_name,json=methodName" json:"method_name,omitempty"`
}

SignatureDef defines the signature of a computation supported by a TensorFlow graph.

For example, a model with two loss computations, sharing a single input, might have the following signature_def map.

Note that across the two SignatureDefs "loss_A" and "loss_B", the input key, output key, and method_name are identical, and will be used by system(s) that implement or rely upon this particular loss method. The output tensor names differ, demonstrating how different outputs can exist for the same method.

signature_def {
  key: "loss_A"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_A:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
  }
  ...
  method_name: "some/package/compute_loss"
}
signature_def {
  key: "loss_B"
  value {
    inputs {
      key: "input"
      value {
        name: "input:0"
        dtype: DT_STRING
        tensor_shape: ...
      }
    }
    outputs {
      key: "loss_output"
      value {
        name: "loss_output_B:0"
        dtype: DT_FLOAT
        tensor_shape: ...
      }
    }
  }
  ...
  method_name: "some/package/compute_loss"
}
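
The same "loss_A" entry, sketched as a Go value; the dtype and tensor_shape fields are omitted for brevity and the names are taken from the example above:

sig := &SignatureDef{
	Inputs: map[string]*TensorInfo{
		"input": {Encoding: &TensorInfo_Name{Name: "input:0"}},
	},
	Outputs: map[string]*TensorInfo{
		"loss_output": {Encoding: &TensorInfo_Name{Name: "loss_output_A:0"}},
	},
	MethodName: "some/package/compute_loss",
}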

func (*SignatureDef) Descriptor

func (*SignatureDef) Descriptor() ([]byte, []int)

func (*SignatureDef) GetInputs

func (m *SignatureDef) GetInputs() map[string]*TensorInfo

func (*SignatureDef) GetMethodName

func (m *SignatureDef) GetMethodName() string

func (*SignatureDef) GetOutputs

func (m *SignatureDef) GetOutputs() map[string]*TensorInfo

func (*SignatureDef) ProtoMessage

func (*SignatureDef) ProtoMessage()

func (*SignatureDef) Reset

func (m *SignatureDef) Reset()

func (*SignatureDef) String

func (m *SignatureDef) String() string

type TensorInfo

type TensorInfo struct {
	// Types that are valid to be assigned to Encoding:
	//	*TensorInfo_Name
	//	*TensorInfo_CooSparse_
	Encoding isTensorInfo_Encoding `protobuf_oneof:"encoding"`
	Dtype    tensorflow1.DataType  `protobuf:"varint,2,opt,name=dtype,enum=tensorflow.DataType" json:"dtype,omitempty"`
	// The static shape should be recorded here, to the extent that it can
	// be known in advance.  In the case of a SparseTensor, this field describes
	// the logical shape of the represented tensor (aka dense_shape).
	TensorShape *tensorflow.TensorShapeProto `protobuf:"bytes,3,opt,name=tensor_shape,json=tensorShape" json:"tensor_shape,omitempty"`
}

Information about a Tensor necessary for feeding or retrieval.

func (*TensorInfo) Descriptor

func (*TensorInfo) Descriptor() ([]byte, []int)

func (*TensorInfo) GetCooSparse

func (m *TensorInfo) GetCooSparse() *TensorInfo_CooSparse

func (*TensorInfo) GetDtype

func (m *TensorInfo) GetDtype() tensorflow1.DataType

func (*TensorInfo) GetEncoding

func (m *TensorInfo) GetEncoding() isTensorInfo_Encoding

func (*TensorInfo) GetName

func (m *TensorInfo) GetName() string

func (*TensorInfo) GetTensorShape

func (m *TensorInfo) GetTensorShape() *tensorflow.TensorShapeProto

func (*TensorInfo) ProtoMessage

func (*TensorInfo) ProtoMessage()

func (*TensorInfo) Reset

func (m *TensorInfo) Reset()

func (*TensorInfo) String

func (m *TensorInfo) String() string

func (*TensorInfo) XXX_OneofFuncs

func (*TensorInfo) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

type TensorInfo_CooSparse

type TensorInfo_CooSparse struct {
	// The shape of the values Tensor is [?].  Its dtype must be the dtype of
	// the SparseTensor as a whole, given in the enclosing TensorInfo.
	ValuesTensorName string `protobuf:"bytes,1,opt,name=values_tensor_name,json=valuesTensorName" json:"values_tensor_name,omitempty"`
	// The indices Tensor must have dtype int64 and shape [?, ?].
	IndicesTensorName string `protobuf:"bytes,2,opt,name=indices_tensor_name,json=indicesTensorName" json:"indices_tensor_name,omitempty"`
	// The dynamic logical shape represented by the SparseTensor is recorded in
	// the Tensor referenced here.  It must have dtype int64 and shape [?].
	DenseShapeTensorName string `protobuf:"bytes,3,opt,name=dense_shape_tensor_name,json=denseShapeTensorName" json:"dense_shape_tensor_name,omitempty"`
}

For sparse tensors, the COO encoding stores a triple of values, indices, and shape.
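
A sketch of a TensorInfo that uses the COO encoding via the TensorInfo_CooSparse_ oneof wrapper; the three tensor names are illustrative:

info := &TensorInfo{
	Encoding: &TensorInfo_CooSparse_{
		CooSparse: &TensorInfo_CooSparse{
			ValuesTensorName:     "sparse/values:0",
			IndicesTensorName:    "sparse/indices:0",
			DenseShapeTensorName: "sparse/dense_shape:0",
		},
	},
}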

func (*TensorInfo_CooSparse) Descriptor

func (*TensorInfo_CooSparse) Descriptor() ([]byte, []int)

func (*TensorInfo_CooSparse) GetDenseShapeTensorName

func (m *TensorInfo_CooSparse) GetDenseShapeTensorName() string

func (*TensorInfo_CooSparse) GetIndicesTensorName

func (m *TensorInfo_CooSparse) GetIndicesTensorName() string

func (*TensorInfo_CooSparse) GetValuesTensorName

func (m *TensorInfo_CooSparse) GetValuesTensorName() string

func (*TensorInfo_CooSparse) ProtoMessage

func (*TensorInfo_CooSparse) ProtoMessage()

func (*TensorInfo_CooSparse) Reset

func (m *TensorInfo_CooSparse) Reset()

func (*TensorInfo_CooSparse) String

func (m *TensorInfo_CooSparse) String() string

type TensorInfo_CooSparse_

type TensorInfo_CooSparse_ struct {
	CooSparse *TensorInfo_CooSparse `protobuf:"bytes,4,opt,name=coo_sparse,json=cooSparse,oneof"`
}

type TensorInfo_Name

type TensorInfo_Name struct {
	Name string `protobuf:"bytes,1,opt,name=name,oneof"`
}

type ThreadPoolOptionProto

type ThreadPoolOptionProto struct {
	// The number of threads in the pool.
	//
	// 0 means the system picks a value based on where this option proto is used
	// (see the declaration of the specific field for more info).
	NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads" json:"num_threads,omitempty"`
}

func (*ThreadPoolOptionProto) Descriptor

func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int)

func (*ThreadPoolOptionProto) GetNumThreads

func (m *ThreadPoolOptionProto) GetNumThreads() int32

func (*ThreadPoolOptionProto) ProtoMessage

func (*ThreadPoolOptionProto) ProtoMessage()

func (*ThreadPoolOptionProto) Reset

func (m *ThreadPoolOptionProto) Reset()

func (*ThreadPoolOptionProto) String

func (m *ThreadPoolOptionProto) String() string
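
A hedged sketch of round-tripping this message through the classic github.com/golang/protobuf runtime that generated code of this vintage targets; the generated-package import path below is again a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	// NumThreads == 0 would let the system choose a pool size.
	opt := &pb.ThreadPoolOptionProto{NumThreads: 4}

	// Generated messages round-trip through the standard proto wire format.
	buf, err := proto.Marshal(opt)
	if err != nil {
		log.Fatal(err)
	}

	var decoded pb.ThreadPoolOptionProto
	if err := proto.Unmarshal(buf, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetNumThreads()) // 4
}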

type TraceOpts

type TraceOpts struct {
	// Length of the trace to be taken, in seconds.
	Duration float64 `protobuf:"fixed64,1,opt,name=duration" json:"duration,omitempty"`
	// If true, capture step profile locally in each worker. Currently
	// unimplemented.
	UseStepProfiler bool `protobuf:"varint,2,opt,name=use_step_profiler,json=useStepProfiler" json:"use_step_profiler,omitempty"`
	// If true, capture kernel events from each worker.
	UseKernelProfiler bool `protobuf:"varint,3,opt,name=use_kernel_profiler,json=useKernelProfiler" json:"use_kernel_profiler,omitempty"`
	// If true, capture extended profiling events from the TensorFlow process.
	UseExtendedProfiler bool `protobuf:"varint,4,opt,name=use_extended_profiler,json=useExtendedProfiler" json:"use_extended_profiler,omitempty"`
	// If true, capture GPU profiling events locally on each
	// machine. Currently unimplemented.
	UseGpuProfiler bool `protobuf:"varint,5,opt,name=use_gpu_profiler,json=useGpuProfiler" json:"use_gpu_profiler,omitempty"`
	// If true, collect sampled profile events. Currently unimplemented.
	UseSampleProfiler bool `protobuf:"varint,6,opt,name=use_sample_profiler,json=useSampleProfiler" json:"use_sample_profiler,omitempty"`
}

func (*TraceOpts) Descriptor

func (*TraceOpts) Descriptor() ([]byte, []int)

func (*TraceOpts) GetDuration

func (m *TraceOpts) GetDuration() float64

func (*TraceOpts) GetUseExtendedProfiler

func (m *TraceOpts) GetUseExtendedProfiler() bool

func (*TraceOpts) GetUseGpuProfiler

func (m *TraceOpts) GetUseGpuProfiler() bool

func (*TraceOpts) GetUseKernelProfiler

func (m *TraceOpts) GetUseKernelProfiler() bool

func (*TraceOpts) GetUseSampleProfiler

func (m *TraceOpts) GetUseSampleProfiler() bool

func (*TraceOpts) GetUseStepProfiler

func (m *TraceOpts) GetUseStepProfiler() bool

func (*TraceOpts) ProtoMessage

func (*TraceOpts) ProtoMessage()

func (*TraceOpts) Reset

func (m *TraceOpts) Reset()

func (*TraceOpts) String

func (m *TraceOpts) String() string

type TracingRequest

type TracingRequest struct {
	Options *TraceOpts `protobuf:"bytes,1,opt,name=options" json:"options,omitempty"`
}

Out-of-band request to configure distributed tracing.

func (*TracingRequest) Descriptor

func (*TracingRequest) Descriptor() ([]byte, []int)

func (*TracingRequest) GetOptions

func (m *TracingRequest) GetOptions() *TraceOpts

func (*TracingRequest) ProtoMessage

func (*TracingRequest) ProtoMessage()

func (*TracingRequest) Reset

func (m *TracingRequest) Reset()

func (*TracingRequest) String

func (m *TracingRequest) String() string
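
A minimal sketch of building a TracingRequest that wraps a TraceOpts; the field values are illustrative and the import path is a placeholder, as in the earlier sketches:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	// Ask each worker to trace for ten seconds, collecting kernel events;
	// the remaining profiler flags stay at their false defaults.
	req := &pb.TracingRequest{
		Options: &pb.TraceOpts{
			Duration:          10,
			UseKernelProfiler: true,
		},
	}
	fmt.Println(req.GetOptions().GetDuration()) // 10
}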

type TracingResponse

type TracingResponse struct {
}

func (*TracingResponse) Descriptor

func (*TracingResponse) Descriptor() ([]byte, []int)

func (*TracingResponse) ProtoMessage

func (*TracingResponse) ProtoMessage()

func (*TracingResponse) Reset

func (m *TracingResponse) Reset()

func (*TracingResponse) String

func (m *TracingResponse) String() string

type ValuesDef

type ValuesDef struct {
	// Value names that have been seen in this context.
	Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
	// Value names referenced by but external to this context.
	ExternalValues map[string]string `` /* 170-byte string literal not displayed */
}

Protocol buffer representing the values in ControlFlowContext.

func (*ValuesDef) Descriptor

func (*ValuesDef) Descriptor() ([]byte, []int)

func (*ValuesDef) GetExternalValues

func (m *ValuesDef) GetExternalValues() map[string]string

func (*ValuesDef) GetValues

func (m *ValuesDef) GetValues() []string

func (*ValuesDef) ProtoMessage

func (*ValuesDef) ProtoMessage()

func (*ValuesDef) Reset

func (m *ValuesDef) Reset()

func (*ValuesDef) String

func (m *ValuesDef) String() string
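
A short sketch of populating ValuesDef; the tensor names and the key/value pairing in ExternalValues are illustrative only, and the import path is the same placeholder used above:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	vd := &pb.ValuesDef{
		// Value names seen inside this control-flow context.
		Values: []string{"while/Merge:0", "while/Switch:1"},
		// Names referenced by, but external to, this context
		// (the pairing shown here is illustrative).
		ExternalValues: map[string]string{"x:0": "while/Enter:0"},
	}
	fmt.Println(len(vd.GetExternalValues())) // 1
}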

type WhileContextDef

type WhileContextDef struct {
	// Name of the context.
	ContextName string `protobuf:"bytes,1,opt,name=context_name,json=contextName" json:"context_name,omitempty"`
	// The number of iterations allowed to run in parallel.
	ParallelIterations int32 `protobuf:"varint,2,opt,name=parallel_iterations,json=parallelIterations" json:"parallel_iterations,omitempty"`
	// Whether backprop is enabled for this while loop.
	BackProp bool `protobuf:"varint,3,opt,name=back_prop,json=backProp" json:"back_prop,omitempty"`
	// Whether GPU-CPU memory swap is enabled for this loop.
	SwapMemory bool `protobuf:"varint,4,opt,name=swap_memory,json=swapMemory" json:"swap_memory,omitempty"`
	// Name of the pivot tensor.
	PivotName string `protobuf:"bytes,5,opt,name=pivot_name,json=pivotName" json:"pivot_name,omitempty"`
	// Name of the pivot_for_pred tensor.
	PivotForPredName string `protobuf:"bytes,6,opt,name=pivot_for_pred_name,json=pivotForPredName" json:"pivot_for_pred_name,omitempty"`
	// Name of the pivot_for_body tensor.
	PivotForBodyName string `protobuf:"bytes,7,opt,name=pivot_for_body_name,json=pivotForBodyName" json:"pivot_for_body_name,omitempty"`
	// List of names for exit tensors.
	LoopExitNames []string `protobuf:"bytes,8,rep,name=loop_exit_names,json=loopExitNames" json:"loop_exit_names,omitempty"`
	// List of names for enter tensors.
	LoopEnterNames []string `protobuf:"bytes,10,rep,name=loop_enter_names,json=loopEnterNames" json:"loop_enter_names,omitempty"`
	// Values and external values in control flow context.
	ValuesDef *ValuesDef `protobuf:"bytes,9,opt,name=values_def,json=valuesDef" json:"values_def,omitempty"`
}

Protocol buffer representing a WhileContext object.

func (*WhileContextDef) Descriptor

func (*WhileContextDef) Descriptor() ([]byte, []int)

func (*WhileContextDef) GetBackProp

func (m *WhileContextDef) GetBackProp() bool

func (*WhileContextDef) GetContextName

func (m *WhileContextDef) GetContextName() string

func (*WhileContextDef) GetLoopEnterNames

func (m *WhileContextDef) GetLoopEnterNames() []string

func (*WhileContextDef) GetLoopExitNames

func (m *WhileContextDef) GetLoopExitNames() []string

func (*WhileContextDef) GetParallelIterations

func (m *WhileContextDef) GetParallelIterations() int32

func (*WhileContextDef) GetPivotForBodyName

func (m *WhileContextDef) GetPivotForBodyName() string

func (*WhileContextDef) GetPivotForPredName

func (m *WhileContextDef) GetPivotForPredName() string

func (*WhileContextDef) GetPivotName

func (m *WhileContextDef) GetPivotName() string

func (*WhileContextDef) GetSwapMemory

func (m *WhileContextDef) GetSwapMemory() bool

func (*WhileContextDef) GetValuesDef

func (m *WhileContextDef) GetValuesDef() *ValuesDef

func (*WhileContextDef) ProtoMessage

func (*WhileContextDef) ProtoMessage()

func (*WhileContextDef) Reset

func (m *WhileContextDef) Reset()

func (*WhileContextDef) String

func (m *WhileContextDef) String() string
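
A hedged sketch of a WhileContextDef as a graph builder might record it, embedding the ValuesDef shown earlier; every name below is illustrative and the import path remains a placeholder:

package main

import (
	"fmt"

	pb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	wc := &pb.WhileContextDef{
		ContextName:        "while/while_context",
		ParallelIterations: 10,
		BackProp:           true,
		PivotName:          "while/LoopCond:0",
		LoopEnterNames:     []string{"while/Enter:0"},
		LoopExitNames:      []string{"while/Exit:0"},
		ValuesDef: &pb.ValuesDef{
			Values: []string{"while/Enter:0", "while/Exit:0"},
		},
	}
	fmt.Println(wc.GetParallelIterations()) // 10
}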

type WorkerServiceClient

type WorkerServiceClient interface {
	// See worker.proto for details.
	GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error)
	// See worker.proto for details.
	CreateWorkerSession(ctx context.Context, in *CreateWorkerSessionRequest, opts ...grpc.CallOption) (*CreateWorkerSessionResponse, error)
	// See worker.proto for details.
	RegisterGraph(ctx context.Context, in *RegisterGraphRequest, opts ...grpc.CallOption) (*RegisterGraphResponse, error)
	// See worker.proto for details.
	DeregisterGraph(ctx context.Context, in *DeregisterGraphRequest, opts ...grpc.CallOption) (*DeregisterGraphResponse, error)
	// See worker.proto for details.
	RunGraph(ctx context.Context, in *RunGraphRequest, opts ...grpc.CallOption) (*RunGraphResponse, error)
	// See worker.proto for details.
	CleanupGraph(ctx context.Context, in *CleanupGraphRequest, opts ...grpc.CallOption) (*CleanupGraphResponse, error)
	// See worker.proto for details.
	CleanupAll(ctx context.Context, in *CleanupAllRequest, opts ...grpc.CallOption) (*CleanupAllResponse, error)
	// See worker.proto for details.
	RecvTensor(ctx context.Context, in *RecvTensorRequest, opts ...grpc.CallOption) (*RecvTensorResponse, error)
	// See worker.proto for details.
	Logging(ctx context.Context, in *LoggingRequest, opts ...grpc.CallOption) (*LoggingResponse, error)
	// See worker.proto for details.
	Tracing(ctx context.Context, in *TracingRequest, opts ...grpc.CallOption) (*TracingResponse, error)
}

func NewWorkerServiceClient

func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient
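
A minimal sketch of dialing a worker and issuing one of the calls above; the worker address and the generated-package import path are placeholders:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	pb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

func main() {
	// Placeholder worker address; real deployments get this from the ClusterDef.
	conn, err := grpc.Dial("localhost:2222", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewWorkerServiceClient(conn)
	status, err := client.GetStatus(context.Background(), &pb.GetStatusRequest{})
	if err != nil {
		log.Fatalf("GetStatus: %v", err)
	}
	log.Printf("worker status: %v", status)
}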

type WorkerServiceServer

type WorkerServiceServer interface {
	// See worker.proto for details.
	GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error)
	// See worker.proto for details.
	CreateWorkerSession(context.Context, *CreateWorkerSessionRequest) (*CreateWorkerSessionResponse, error)
	// See worker.proto for details.
	RegisterGraph(context.Context, *RegisterGraphRequest) (*RegisterGraphResponse, error)
	// See worker.proto for details.
	DeregisterGraph(context.Context, *DeregisterGraphRequest) (*DeregisterGraphResponse, error)
	// See worker.proto for details.
	RunGraph(context.Context, *RunGraphRequest) (*RunGraphResponse, error)
	// See worker.proto for details.
	CleanupGraph(context.Context, *CleanupGraphRequest) (*CleanupGraphResponse, error)
	// See worker.proto for details.
	CleanupAll(context.Context, *CleanupAllRequest) (*CleanupAllResponse, error)
	// See worker.proto for details.
	RecvTensor(context.Context, *RecvTensorRequest) (*RecvTensorResponse, error)
	// See worker.proto for details.
	Logging(context.Context, *LoggingRequest) (*LoggingResponse, error)
	// See worker.proto for details.
	Tracing(context.Context, *TracingRequest) (*TracingResponse, error)
}
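
A sketch of a no-op WorkerServiceServer implementation. RegisterWorkerServiceServer is the registration helper normally emitted alongside this interface; it is assumed here because it does not appear in this part of the listing, and the listen address is illustrative:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	pb "example.com/tensorflow/core/protobuf" // hypothetical import path
)

// workerStub satisfies WorkerServiceServer by returning empty responses.
type workerStub struct{}

func (workerStub) GetStatus(ctx context.Context, in *pb.GetStatusRequest) (*pb.GetStatusResponse, error) {
	return &pb.GetStatusResponse{}, nil
}

func (workerStub) CreateWorkerSession(ctx context.Context, in *pb.CreateWorkerSessionRequest) (*pb.CreateWorkerSessionResponse, error) {
	return &pb.CreateWorkerSessionResponse{}, nil
}

func (workerStub) RegisterGraph(ctx context.Context, in *pb.RegisterGraphRequest) (*pb.RegisterGraphResponse, error) {
	return &pb.RegisterGraphResponse{}, nil
}

func (workerStub) DeregisterGraph(ctx context.Context, in *pb.DeregisterGraphRequest) (*pb.DeregisterGraphResponse, error) {
	return &pb.DeregisterGraphResponse{}, nil
}

func (workerStub) RunGraph(ctx context.Context, in *pb.RunGraphRequest) (*pb.RunGraphResponse, error) {
	return &pb.RunGraphResponse{}, nil
}

func (workerStub) CleanupGraph(ctx context.Context, in *pb.CleanupGraphRequest) (*pb.CleanupGraphResponse, error) {
	return &pb.CleanupGraphResponse{}, nil
}

func (workerStub) CleanupAll(ctx context.Context, in *pb.CleanupAllRequest) (*pb.CleanupAllResponse, error) {
	return &pb.CleanupAllResponse{}, nil
}

func (workerStub) RecvTensor(ctx context.Context, in *pb.RecvTensorRequest) (*pb.RecvTensorResponse, error) {
	return &pb.RecvTensorResponse{}, nil
}

func (workerStub) Logging(ctx context.Context, in *pb.LoggingRequest) (*pb.LoggingResponse, error) {
	return &pb.LoggingResponse{}, nil
}

func (workerStub) Tracing(ctx context.Context, in *pb.TracingRequest) (*pb.TracingResponse, error) {
	return &pb.TracingResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":2222") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Assumed generated registration helper (not shown in this excerpt).
	pb.RegisterWorkerServiceServer(s, workerStub{})
	log.Fatal(s.Serve(lis))
}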
