Package runtime
v0.2300.10
Published: Mar 4, 2024 License: Apache-2.0 Imports: 87 Imported by: 0

Documentation

Index

Constants

const (
	// LogEventTrustRootChangeNoTrust is the event emitted when a compute
	// worker or a key manager node fails to initialize the verifier as there
	// is not enough trust in the new light block.
	LogEventTrustRootChangeNoTrust = "consensus/cometbft/verifier/chain_context/no_trust"

	// LogEventTrustRootChangeFailed is the event emitted when a compute
	// worker or a key manager node fails to initialize the verifier as
	// the new light block is invalid, e.g. has lower height than the last
	// known trusted block.
	LogEventTrustRootChangeFailed = "consensus/cometbft/verifier/chain_context/failed"
)

Keep the above two constants in sync with the Rust part of the code in runtime/src/consensus/tendermint/verifier/mod.rs.
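
These event names are matched against node logs by the scenario log watchers. A minimal sketch of asserting on the no-trust event — assuming the oasis package exposes a LogAssertEvent(event, message) helper returning a log.WatcherHandlerFactory, which is an assumption not confirmed by this page:

	// Assumed helper: fails the scenario unless the given event appears in the logs.
	noTrust := oasis.LogAssertEvent(
		LogEventTrustRootChangeNoTrust,
		"expected verifier initialization to fail with insufficient trust",
	)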

Variables

var (
	// ByzantineExecutorHonest is a scenario in which the Byzantine node acts
	// as the primary worker, backup scheduler, and is honest.
	ByzantineExecutorHonest scenario.Scenario = newByzantineImpl(
		"primary-worker/backup-scheduler/honest",
		"executor",
		nil,
		oasis.ByzantineSlot1IdentitySeed,
		false,
		nil,
		nil,
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: backupSchedulerIndex,
		},
	)
	// ByzantineExecutorSchedulerHonest is a scenario in which the Byzantine node acts
	// as the primary worker, primary scheduler, and is honest.
	ByzantineExecutorSchedulerHonest scenario.Scenario = newByzantineImpl(
		"primary-worker/primary-scheduler/honest",
		"executor",
		nil,
		oasis.ByzantineDefaultIdentitySeed,
		false,
		nil,
		[]oasis.Argument{
			{Name: byzantine.CfgPrimarySchedulerExpected},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: primarySchedulerIndex,
		},
	)
	// ByzantineExecutorDishonest is a scenario in which the Byzantine node acts
	// as the primary worker, backup scheduler, and is dishonest.
	ByzantineExecutorDishonest scenario.Scenario = newByzantineImpl(
		"primary-worker/backup-scheduler/dishonest",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertNoTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeIncorrectResults: 1,
			staking.SlashRuntimeLiveness:         1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorDishonest.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: backupSchedulerIndex,
		},
	)
	// ByzantineExecutorSchedulerRunaway is a scenario in which the Byzantine node acts
	// as the primary worker, primary scheduler, and runs away after publishing a proposal.
	ByzantineExecutorSchedulerRunaway scenario.Scenario = newByzantineImpl(
		"primary-worker/primary-scheduler/runaway",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgPrimarySchedulerExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorRunaway.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: primarySchedulerIndex,
		},
	)
	// ByzantineExecutorSchedulerBogus is a scenario in which the Byzantine node acts
	// as the primary worker, primary scheduler, and schedules bogus transactions.
	ByzantineExecutorSchedulerBogus scenario.Scenario = newByzantineImpl(
		"primary-worker/primary-scheduler/bogus",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineSlot1IdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgPrimarySchedulerExpected},
			{Name: byzantine.CfgExecutorProposeBogusTx},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: primarySchedulerIndex,
		},
	)
	// ByzantineExecutorStraggler is a scenario in which the Byzantine node acts
	// as the primary worker, backup scheduler, and a straggler.
	ByzantineExecutorStraggler scenario.Scenario = newByzantineImpl(
		"primary-worker/backup-scheduler/straggler",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: backupSchedulerIndex,
		},
	)
	// ByzantineExecutorSchedulerStraggler is a scenario in which the Byzantine node acts
	// as the primary worker, primary scheduler, and a straggler.
	ByzantineExecutorSchedulerStraggler scenario.Scenario = newByzantineImpl(
		"primary-worker/primary-scheduler/straggler",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgPrimarySchedulerExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: primarySchedulerIndex,
		},
	)
	// ByzantineExecutorStragglerAllowed is a scenario in which the Byzantine node acts
	// as the primary worker, backup scheduler, and a straggler. One straggler is allowed.
	ByzantineExecutorStragglerAllowed scenario.Scenario = newByzantineImpl(
		"primary-worker/backup-scheduler/straggler-allowed",
		"executor",
		nil,
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: backupSchedulerIndex,
		},
		withCustomRuntimeConfig(func(rt *oasis.RuntimeFixture) {
			rt.Executor.AllowedStragglers = 1
		}),
	)
	// ByzantineExecutorSchedulerStragglerAllowed is a scenario in which the Byzantine node acts
	// as the primary worker, primary scheduler, and a straggler. One straggler is allowed.
	ByzantineExecutorSchedulerStragglerAllowed scenario.Scenario = newByzantineImpl(
		"primary-worker/primary-scheduler/straggler-allowed",
		"executor",
		nil,
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgPrimarySchedulerExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: primarySchedulerIndex,
		},
		withCustomRuntimeConfig(func(rt *oasis.RuntimeFixture) {
			rt.Executor.AllowedStragglers = 1
		}),
	)
	// ByzantineExecutorBackupStraggler is a scenario in which the Byzantine node acts
	// as the primary and backup worker, backup scheduler, and a straggler.
	ByzantineExecutorBackupStraggler scenario.Scenario = newByzantineImpl(
		"primary-backup-worker/backup-scheduler/straggler",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker, scheduler.RoleBackupWorker},
			Index: backupSchedulerIndex,
		},
		withCustomRuntimeConfig(func(rt *oasis.RuntimeFixture) {
			rt.Executor.GroupBackupSize = 3
		}),
	)
	// ByzantineExecutorBackupSchedulerStraggler is a scenario in which the Byzantine node acts
	// as the primary and backup worker, primary scheduler, and a straggler.
	ByzantineExecutorBackupSchedulerStraggler scenario.Scenario = newByzantineImpl(
		"primary-backup-worker/primary-scheduler/straggler",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker, scheduler.RoleBackupWorker},
			Index: primarySchedulerIndex,
		},
		withCustomRuntimeConfig(func(rt *oasis.RuntimeFixture) {
			rt.Executor.GroupBackupSize = 3
		}),
	)
	// ByzantineExecutorFailureIndicating is a scenario in which the Byzantine node acts
	// as the primary worker, backup scheduler, and submits a failure-indicating commitment.
	ByzantineExecutorFailureIndicating scenario.Scenario = newByzantineImpl(
		"primary-worker/backup-scheduler/failure-indicating",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertNoTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorFailureIndicating.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: backupSchedulerIndex,
		},
	)
	// ByzantineExecutorSchedulerFailureIndicating is a scenario in which the Byzantine node acts
	// as the primary worker, primary scheduler, and submits a failure-indicating commitment.
	ByzantineExecutorSchedulerFailureIndicating scenario.Scenario = newByzantineImpl(
		"primary-worker/primary-scheduler/failure-indicating",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineSlot1IdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgPrimarySchedulerExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorFailureIndicating.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: primarySchedulerIndex,
		},
	)
	// ByzantineExecutorCorruptGetDiff is the Byzantine executor node scenario that corrupts GetDiff
	// responses.
	ByzantineExecutorCorruptGetDiff scenario.Scenario = newByzantineImpl(
		"primary-worker/backup-scheduler/corrupt-getdiff",
		"executor",

		nil,
		oasis.ByzantineDefaultIdentitySeed,
		false,
		nil,
		[]oasis.Argument{

			{Name: byzantine.CfgCorruptGetDiff},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
			Index: backupSchedulerIndex,
		},
	)
)

var (
	// DumpRestore is the dump and restore scenario.
	DumpRestore scenario.Scenario = newDumpRestoreImpl("dump-restore", nil)

	// DumpRestoreRuntimeRoundAdvance is the scenario where additional rounds are simulated after
	// the runtime stopped in the old network (so storage node state is behind).
	DumpRestoreRuntimeRoundAdvance scenario.Scenario = newDumpRestoreImpl(
		"dump-restore/runtime-round-advance",
		func(doc *genesis.Document) {
			for _, st := range doc.RootHash.RuntimeStates {
				st.Round += 10
			}
		},
	)
)

var (
	// EarlyQuery is the early query scenario where we query a validator node before the network
	// has started and there are no committed blocks.
	EarlyQuery scenario.Scenario = &earlyQueryImpl{
		Scenario: e2e.NewScenario("early-query"),
	}

	// EarlyQueryInitHeight is the same as EarlyQuery scenario but with an initial height set.
	EarlyQueryInitHeight scenario.Scenario = &earlyQueryImpl{
		Scenario:      e2e.NewScenario("early-query/init-height"),
		initialHeight: 42,
	}

	// EarlyQueryRuntime is the early query scenario where we query a runtime node.
	EarlyQueryRuntime scenario.Scenario = &earlyQueryImpl{
		Scenario: NewScenario("early-query", nil),
		runtime:  true,
	}
)

var (
	// GovernanceConsensusUpgrade is the governance consensus upgrade scenario.
	GovernanceConsensusUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, false)
	// GovernanceConsensusFailUpgrade is the governance consensus upgrade scenario
	// where node should fail the upgrade.
	GovernanceConsensusFailUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(false, false)
	// GovernanceConsensusCancelUpgrade is the governance consensus upgrade scenario
	// where the pending upgrade is canceled.
	GovernanceConsensusCancelUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, true)
)

var (
	// HaltRestore is the halt and restore scenario.
	HaltRestore scenario.Scenario = newHaltRestoreImpl(false)
	// HaltRestoreSuspended is the halt and restore scenario with a suspended runtime.
	HaltRestoreSuspended scenario.Scenario = newHaltRestoreImpl(true)
)

var (
	// KeyValueRuntimeBinary is the name of the simple key/value runtime binary.
	KeyValueRuntimeBinary = "simple-keyvalue"
	// KeyValueRuntimeUpgradeBinary is the name of the upgraded simple key/value runtime binary.
	KeyValueRuntimeUpgradeBinary = "simple-keyvalue-upgrade"
	// KeyManagerRuntimeBinary is the name of the simple key manager runtime binary.
	KeyManagerRuntimeBinary = "simple-keymanager"
	// KeyManagerRuntimeUpgradeBinary is the name of the upgraded simple key manager runtime binary.
	KeyManagerRuntimeUpgradeBinary = "simple-keymanager-upgrade"

	// KeyValueRuntimeID is the ID of the simple key/value runtime.
	KeyValueRuntimeID common.Namespace
	// KeyManagerRuntimeID is the ID of the key manager runtime.
	KeyManagerRuntimeID common.Namespace
)

var (
	// ParamsDummyScenario is a dummy instance of runtimeImpl used to register global e2e/runtime flags.
	ParamsDummyScenario = NewScenario("", nil)

	// Runtime is the basic network + client test case with runtime support.
	Runtime scenario.Scenario = NewScenario(
		"runtime",
		NewTestClient().WithScenario(SimpleKeyValueScenario),
	)

	// RuntimeEncryption is the basic network + client with encryption test case.
	RuntimeEncryption scenario.Scenario = NewScenario(
		"runtime-encryption",
		NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
	)

	// DefaultRuntimeLogWatcherHandlerFactories is a list of default log watcher
	// handler factories for the basic scenario.
	DefaultRuntimeLogWatcherHandlerFactories = []log.WatcherHandlerFactory{
		oasis.LogAssertNoTimeouts(),
		oasis.LogAssertNoRoundFailures(),
		oasis.LogAssertNoExecutionDiscrepancyDetected(),
	}
)
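
Scenarios in this block are composed from NewScenario plus a TestClient. A minimal sketch following the pattern of the Runtime variable above; the Example name and the "example" scenario name are hypothetical:

var Example scenario.Scenario = NewScenario(
	"example",
	NewTestClient().WithScenario(SimpleKeyValueScenario),
)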

var (
	InsertKeyValueScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", false, 0},
		GetKeyValueTx{"my_key", "my_value", false, 0},
	})

	InsertKeyValueEncScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", true, 0},
		GetKeyValueTx{"my_key", "my_value", true, 0},
	})

	RemoveKeyValueScenario = NewTestClientScenario([]interface{}{
		GetKeyValueTx{"my_key", "my_value", false, 0},
	})

	RemoveKeyValueEncScenario = NewTestClientScenario([]interface{}{
		GetKeyValueTx{"my_key", "my_value", true, 0},
	})

	InsertTransferKeyValueScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", false, 0},
		GetKeyValueTx{"my_key", "my_value", false, 0},
		ConsensusTransferTx{},
	})

	InsertRemoveKeyValueEncScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", true, 0},
		GetKeyValueTx{"my_key", "my_value", true, 0},
		RemoveKeyValueTx{"my_key", "my_value", true, 0},
		GetKeyValueTx{"my_key", "", true, 0},
	})

	InsertRemoveKeyValueEncScenarioV2 = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key2", "my_value2", "", true, 0},
		GetKeyValueTx{"my_key2", "my_value2", true, 0},
		RemoveKeyValueTx{"my_key2", "my_value2", true, 0},
		GetKeyValueTx{"my_key2", "", true, 0},
	})

	SimpleKeyValueScenario = newSimpleKeyValueScenario(false, false)

	SimpleKeyValueEncScenario = newSimpleKeyValueScenario(false, true)

	SimpleKeyValueScenarioRepeated = newSimpleKeyValueScenario(true, false)
)

var (
	// TrustRootChangeTest is a happy path scenario which tests if trust
	// can be transferred to a new light block when consensus chain context
	// changes, e.g. on dump-restore network upgrades.
	TrustRootChangeTest scenario.Scenario = newTrustRootChangeImpl(
		"change",
		NewTestClient().WithScenario(InsertKeyValueEncScenario),
		true,
	)

	// TrustRootChangeFailsTest is an unhappy path scenario which tests
	// that trust is never transferred to untrusted or invalid light blocks when
	// consensus chain context changes.
	TrustRootChangeFailsTest scenario.Scenario = newTrustRootChangeImpl(
		"change-fails",
		NewTestClient().WithScenario(SimpleKeyValueEncScenario),
		false,
	)
)

var ArchiveAPI scenario.Scenario = &archiveAPI{
	Scenario: *NewScenario(
		"archive-api",
		NewTestClient().WithScenario(InsertTransferKeyValueScenario),
	),
}

ArchiveAPI is the scenario where archive node control, consensus and runtime APIs are tested.

var GasFeesRuntimes scenario.Scenario = &gasFeesRuntimesImpl{
	Scenario: *NewScenario("gas-fees/runtimes", nil),
}

GasFeesRuntimes is the runtime gas fees scenario.

var HaltRestoreNonMock scenario.Scenario = newHaltRestoreNonMockImpl()

HaltRestoreNonMock is the halt and restore scenario that uses the real beacon backend.

var HistoryReindex scenario.Scenario = newHistoryReindexImpl()

HistoryReindex is the scenario that triggers roothash history reindexing.

var KeymanagerDumpRestore scenario.Scenario = newKmDumpRestoreImpl()

KeymanagerDumpRestore is the keymanager dump restore rotation scenario.

In this scenario we test whether the deployment of the master secret rotation feature is backwards compatible. The old key managers, which are already initialized with the first master secret, should be able to rotate secrets once rotation is enabled via the policy.

var KeymanagerEphemeralSecrets scenario.Scenario = newKmEphemeralSecretsImpl()

KeymanagerEphemeralSecrets is the keymanager ephemeral secret and ephemeral key generation scenario.

It uses encryption and decryption transactions provided by the simple key/value runtime to test whether the key manager client can retrieve private and public ephemeral keys from the key manager, and whether the latter generates them according to the specifications.

Scenario:

  • Start one key manager and test ephemeral secrets.
  • Restart the manager and test that the first secret was lost.
  • Start all managers and test that ephemeral secrets can be replicated.
  • Run managers for a few epochs and test that everything works.
  • Publish transactions that use ephemeral keys to encrypt/decrypt messages.

var KeymanagerMasterSecrets scenario.Scenario = newKmMasterSecretsImpl()

KeymanagerMasterSecrets is the keymanager master secret rotation scenario.

var KeymanagerReplicate scenario.Scenario = newKmReplicateImpl()

KeymanagerReplicate is the keymanager replication scenario.

var KeymanagerReplicateMany scenario.Scenario = newKmReplicateManyImpl()

KeymanagerReplicateMany is a scenario where a large number of master secrets are generated and replicated. Its purpose is to benchmark how long replication takes on a local SGX machine.

Scenario:

  • Start the first two key managers.
  • Generate N master secrets.
  • Start the last two key managers.
  • Start a timer.
  • Wait until the master secrets are replicated.
  • Stop the timer.
  • Verify that all key managers possess the same secrets.
  • Verify that master secret generation still works.

var KeymanagerRestart scenario.Scenario = newKmRestartImpl()

KeymanagerRestart is the keymanager restart scenario.

var KeymanagerRotationFailure scenario.Scenario = newKmRotationFailureImpl()

KeymanagerRotationFailure is a scenario where the first master secret proposal is rejected because not enough nodes have replicated the secret. The second proposal is accepted, ensuring that nodes can properly handle potential reverts.

Scenario:

  • Start all key managers.
  • Verify that master secret generation works.
  • Stop the third key manager.
  • Verify that the next proposal is not accepted.
  • Repeat these steps N times.

var KeymanagerUpgrade scenario.Scenario = NewKmUpgradeImpl()

KeymanagerUpgrade is the keymanager upgrade scenario.

var LateStart scenario.Scenario = newLateStartImpl("late-start")

LateStart is the LateStart node basic scenario.

var MultipleRuntimes = func() scenario.Scenario {
	sc := &multipleRuntimesImpl{
		Scenario: *NewScenario("multiple-runtimes", nil),
	}
	sc.Flags.Int(cfgNumComputeRuntimes, 2, "number of compute runtimes per worker")
	sc.Flags.Int(cfgNumComputeRuntimeTxns, 2, "number of transactions to perform")
	sc.Flags.Int(cfgNumComputeWorkers, 2, "number of workers to initiate")
	sc.Flags.Uint16(cfgExecutorGroupSize, 2, "number of executor workers in committee")

	return sc
}()

MultipleRuntimes is a scenario which tests running multiple runtimes on one node.

var NodeShutdown scenario.Scenario = newNodeShutdownImpl()

NodeShutdown is the node shutdown scenario.

var OffsetRestart scenario.Scenario = newOffsetRestartImpl()

OffsetRestart is the offset restart scenario.

var RuntimeDynamic scenario.Scenario = newRuntimeDynamicImpl()

RuntimeDynamic is the dynamic runtime registration scenario.

var RuntimeGovernance = func() scenario.Scenario {
	sc := &runtimeGovernanceImpl{
		Scenario: *NewScenario("runtime-governance", nil),
	}
	return sc
}()

RuntimeGovernance is a scenario which tests runtime governance.

Two runtimes with the runtime governance model are created at genesis time. We submit an update_runtime runtime transaction with a slightly modified runtime descriptor to the first runtime. This transaction triggers the runtime to emit an update_runtime message, which in turn causes the runtime to be re-registered with the updated descriptor specified in the message. After an epoch transition, we fetch the runtime descriptor from the registry and check if the modification took place or not.

Additionally, we test that a runtime cannot update another runtime by passing a modified descriptor of that other runtime to its own update_runtime call.

var RuntimeMessage scenario.Scenario = newRuntimeMessage()

RuntimeMessage is the runtime message scenario.

var RuntimePrune scenario.Scenario = newRuntimePruneImpl()

RuntimePrune is the runtime prune scenario.

var RuntimeUpgrade scenario.Scenario = newRuntimeUpgradeImpl()

RuntimeUpgrade is the runtime upgrade scenario.

var (
	// Sentry is the Sentry node basic scenario.
	Sentry scenario.Scenario = newSentryImpl()
)

var StorageEarlyStateSync scenario.Scenario = newStorageEarlyStateSyncImpl()

StorageEarlyStateSync is the scenario where a runtime is registered first and is not yet operational; a while later, an executor node uses consensus layer state sync to catch up, but by then the runtime has already advanced some epoch transition rounds and is no longer at genesis.

var StorageSync scenario.Scenario = newStorageSyncImpl()

StorageSync is the storage sync scenario.

var StorageSyncFromRegistered scenario.Scenario = newStorageSyncFromRegisteredImpl()

StorageSyncFromRegistered is the storage sync scenario which tests syncing from registered nodes not in committee.

var StorageSyncInconsistent scenario.Scenario = newStorageSyncInconsistentImpl()

StorageSyncInconsistent is the inconsistent storage sync scenario.

TrustRoot is the consensus trust root verification scenario.

var TxSourceMulti scenario.Scenario = &txSourceImpl{
	Scenario: *NewScenario("txsource-multi", nil),
	clientWorkloads: []string{
		workload.NameCommission,
		workload.NameDelegation,
		workload.NameOversized,
		workload.NameParallel,
		workload.NameRegistration,
		workload.NameRuntime,
		workload.NameTransfer,
		workload.NameGovernance,
	},
	allNodeWorkloads: []string{
		workload.NameQueries,
	},
	timeLimit:                         timeLimitLong,
	nodeRestartInterval:               nodeRestartIntervalLong,
	nodeLongRestartInterval:           nodeLongRestartInterval,
	nodeLongRestartDuration:           nodeLongRestartDuration,
	livenessCheckInterval:             livenessCheckInterval,
	consensusPruneDisabledProbability: 0.1,
	consensusPruneMinKept:             100,
	consensusPruneMaxKept:             1000,
	enableCrashPoints:                 true,
	cmtRecoverCorruptedWAL:            true,

	numValidatorNodes:  4,
	numKeyManagerNodes: 2,
	numComputeNodes:    5,
	numClientNodes:     2,
}

TxSourceMulti uses multiple workloads.

var TxSourceMultiShort scenario.Scenario = &txSourceImpl{
	Scenario: *NewScenario("txsource-multi-short", nil),
	clientWorkloads: []string{
		workload.NameCommission,
		workload.NameDelegation,
		workload.NameOversized,
		workload.NameParallel,
		workload.NameRegistration,
		workload.NameRuntime,
		workload.NameTransfer,
		workload.NameGovernance,
	},
	allNodeWorkloads: []string{
		workload.NameQueries,
	},
	timeLimit:                         timeLimitShort,
	livenessCheckInterval:             livenessCheckInterval,
	consensusPruneDisabledProbability: 0.1,
	consensusPruneMinKept:             100,
	consensusPruneMaxKept:             200,
	numValidatorNodes:                 4,
	numKeyManagerNodes:                2,
	numComputeNodes:                   4,
	numClientNodes:                    2,
}

TxSourceMultiShort uses multiple workloads for a short time.

var TxSourceMultiShortSGX scenario.Scenario = &txSourceImpl{
	Scenario: *NewScenario("txsource-multi-short-sgx", nil),
	clientWorkloads: []string{
		workload.NameCommission,
		workload.NameDelegation,
		workload.NameOversized,
		workload.NameParallel,
		workload.NameRegistration,
		workload.NameRuntime,
		workload.NameTransfer,
		workload.NameGovernance,
	},
	allNodeWorkloads: []string{
		workload.NameQueries,
	},
	timeLimit:                         timeLimitShortSGX,
	livenessCheckInterval:             livenessCheckInterval,
	consensusPruneDisabledProbability: 0.1,
	consensusPruneMinKept:             100,
	consensusPruneMaxKept:             200,

	numValidatorNodes:  2,
	numKeyManagerNodes: 1,
	numComputeNodes:    2,
	numClientNodes:     1,
}

TxSourceMultiShortSGX uses multiple workloads for a short time.

Functions

func NewKmUpgradeImpl added in v0.2300.0

func NewKmUpgradeImpl() scenario.Scenario

NewKmUpgradeImpl creates a new base scenario for oasis-node keymanager upgrade end-to-end tests.

func RegisterScenarios

func RegisterScenarios() error

RegisterScenarios registers all end-to-end scenarios.
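
A minimal sketch of the intended call site: a test-runner binary registers every scenario once at startup. The main function and import alias are hypothetical; the import path is this package's path as shown on this page:

import (
	runtimeScenarios "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario/e2e/runtime"
)

func main() {
	// Register all end-to-end runtime scenarios before running any of them.
	if err := runtimeScenarios.RegisterScenarios(); err != nil {
		panic(err)
	}
	// ... hand control to the oasis-test-runner harness ...
}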

Types

type ConsensusAccountsTx added in v0.2202.9

type ConsensusAccountsTx struct{}

ConsensusAccountsTx tests consensus account query.

type ConsensusTransferTx added in v0.2202.9

type ConsensusTransferTx struct{}

ConsensusTransferTx submits an empty consensus staking transfer.

type EncryptDecryptTx added in v0.2300.1

type EncryptDecryptTx struct {
	Message   []byte
	KeyPairID string
	Epoch     beacon.EpochTime
}

EncryptDecryptTx encrypts and decrypts a message while verifying if the original message matches the decrypted result.

type GetKeyValueTx added in v0.2202.9

type GetKeyValueTx struct {
	Key        string
	Response   string
	Encrypted  bool
	Generation uint64
}

GetKeyValueTx retrieves the value stored under the given key from the database, and verifies that the response (current value) contains the expected data.

type GetRuntimeIDTx added in v0.2202.9

type GetRuntimeIDTx struct{}

GetRuntimeIDTx retrieves the runtime ID.

type InsertKeyValueTx added in v0.2202.9

type InsertKeyValueTx struct {
	Key        string
	Value      string
	Response   string
	Encrypted  bool
	Generation uint64
}

InsertKeyValueTx inserts a key/value pair to the database, and verifies that the response (previous value) contains the expected data.

type InsertMsg added in v0.2202.9

type InsertMsg struct {
	Key        string
	Value      string
	Encrypted  bool
	Generation uint64
}

InsertMsg inserts an incoming runtime message.

type KeyValueQuery added in v0.2202.10

type KeyValueQuery struct {
	Key      string
	Response string
	Round    uint64
}

KeyValueQuery queries the value stored under the given key for the specified round from the database, and verifies that the response (current value) contains the expected data.

type KmUpgradeImpl added in v0.2300.0

type KmUpgradeImpl struct {
	Scenario
	// contains filtered or unexported fields
}

KmUpgradeImpl is a base class for keymanager upgrade end-to-end tests.

func (*KmUpgradeImpl) Clone added in v0.2300.0

func (sc *KmUpgradeImpl) Clone() scenario.Scenario

func (*KmUpgradeImpl) Fixture added in v0.2300.0

func (sc *KmUpgradeImpl) Fixture() (*oasis.NetworkFixture, error)

func (*KmUpgradeImpl) Run added in v0.2300.0

func (sc *KmUpgradeImpl) Run(ctx context.Context, childEnv *env.Env) error

type RemoveKeyValueTx added in v0.2202.9

type RemoveKeyValueTx struct {
	Key        string
	Response   string
	Encrypted  bool
	Generation uint64
}

RemoveKeyValueTx removes the value stored under the given key from the database.

type Scenario added in v0.2202.9

type Scenario struct {
	e2e.Scenario

	TestClient *TestClient
	// contains filtered or unexported fields
}

Scenario is a base class for tests involving oasis-node with runtime.

func NewScenario added in v0.2202.9

func NewScenario(name string, testClient *TestClient) *Scenario

NewScenario creates a new base scenario for oasis-node runtime end-to-end tests.

func (*Scenario) ApplyKeyManagerPolicy added in v0.2300.0

func (sc *Scenario) ApplyKeyManagerPolicy(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rotationInterval beacon.EpochTime, policies map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, nonce uint64) error

ApplyKeyManagerPolicy applies the given policy to the simple key manager runtime.

func (*Scenario) BuildAllEnclavePolicies added in v0.2300.0

func (sc *Scenario) BuildAllEnclavePolicies() (map[common.Namespace]map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, error)

BuildAllEnclavePolicies builds enclave policies for all key manager runtimes.

Policies are built from the fixture and adhere to the following rules:

  • Each SGX runtime must have only one deployment and a distinct enclave identity.
  • Key manager enclaves are not allowed to replicate the master secrets.
  • All compute runtime enclaves are allowed to query key manager enclaves.

func (*Scenario) BuildAllRuntimes added in v0.2300.0

func (sc *Scenario) BuildAllRuntimes(childEnv *env.Env, trustRoot *e2e.TrustRoot) error

BuildAllRuntimes builds all runtime binaries, i.e. the key/value and the key manager runtime.

func (*Scenario) BuildEnclavePolicies added in v0.2300.0

func (sc *Scenario) BuildEnclavePolicies() (map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX, error)

BuildEnclavePolicies builds enclave policies for the simple key manager runtime.

If the simple key manager runtime does not exist or is not running on an SGX platform, it returns nil.

func (*Scenario) BuildRuntimes added in v0.2300.0

func (sc *Scenario) BuildRuntimes(childEnv *env.Env, runtimes map[common.Namespace]string, trustRoot *e2e.TrustRoot) error

BuildRuntimes builds the specified runtime binaries using the provided trust root, if given.

func (*Scenario) BuildTargetDirs added in v0.2300.0

func (sc *Scenario) BuildTargetDirs() (string, string, error)

BuildTargetDirs returns the configured build and target directories.

func (*Scenario) Clone added in v0.2202.9

func (sc *Scenario) Clone() scenario.Scenario

func (*Scenario) CompareLongtermPublicKeys added in v0.2300.0

func (sc *Scenario) CompareLongtermPublicKeys(ctx context.Context, idxs []int) error

CompareLongtermPublicKeys compares long-term public keys generated by the specified key manager nodes.

func (*Scenario) EnableRuntimeDeployment added in v0.2300.0

func (sc *Scenario) EnableRuntimeDeployment(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rt *oasis.Runtime, deploymentIndex int, nonce uint64) error

EnableRuntimeDeployment registers the specified runtime deployment, updates the key manager policy, and waits until the deployment becomes active.

func (*Scenario) EnsureActiveVersionForComputeWorker added in v0.2300.0

func (sc *Scenario) EnsureActiveVersionForComputeWorker(ctx context.Context, node *oasis.Compute, rt *oasis.Runtime, v version.Version) error

EnsureActiveVersionForComputeWorker ensures that the specified compute worker has the correct active version of the given runtime.

func (*Scenario) EnsureActiveVersionForComputeWorkers added in v0.2300.0

func (sc *Scenario) EnsureActiveVersionForComputeWorkers(ctx context.Context, rt *oasis.Runtime, v version.Version) error

EnsureActiveVersionForComputeWorkers ensures that all compute workers have the correct active version of the given runtime.

func (*Scenario) EnsureActiveVersionForKeyManager added in v0.2300.0

func (sc *Scenario) EnsureActiveVersionForKeyManager(ctx context.Context, node *oasis.Keymanager, id common.Namespace, v version.Version) error

EnsureActiveVersionForKeyManager ensures that the specified key manager has the correct active version of the given runtime.

func (*Scenario) EnsureActiveVersionForKeyManagers added in v0.2300.0

func (sc *Scenario) EnsureActiveVersionForKeyManagers(ctx context.Context, id common.Namespace, v version.Version) error

EnsureActiveVersionForKeyManagers ensures that all key managers have the correct active version of the given runtime.

func (*Scenario) Fixture added in v0.2202.9

func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error)

func (*Scenario) KeyManagerStatus added in v0.2300.0

func (sc *Scenario) KeyManagerStatus(ctx context.Context) (*keymanager.Status, error)

KeyManagerStatus returns the latest key manager status.

func (*Scenario) KeymanagerInitResponse added in v0.2300.0

func (sc *Scenario) KeymanagerInitResponse(ctx context.Context, idx int) (*keymanager.InitResponse, error)

KeymanagerInitResponse returns InitResponse of the specified key manager node.

func (*Scenario) MasterSecret added in v0.2300.0

MasterSecret returns the key manager master secret.

func (*Scenario) PreInit added in v0.2202.9

func (sc *Scenario) PreInit() error

func (*Scenario) ResolveRuntimeBinaries added in v0.2202.12

func (sc *Scenario) ResolveRuntimeBinaries(baseRuntimeBinary string) map[node.TEEHardware]string

ResolveRuntimeBinaries returns the paths to the runtime binaries.

func (*Scenario) ResolveRuntimeBinary added in v0.2202.12

func (sc *Scenario) ResolveRuntimeBinary(runtimeBinary string, tee node.TEEHardware) string

ResolveRuntimeBinary returns the path to the runtime binary.

func (*Scenario) RestartAndWaitKeymanagers added in v0.2300.0

func (sc *Scenario) RestartAndWaitKeymanagers(ctx context.Context, idxs []int) error

RestartAndWaitKeymanagers restarts the specified key manager nodes and waits for them to become ready.

func (*Scenario) RestartKeymanagers added in v0.2300.0

func (sc *Scenario) RestartKeymanagers(ctx context.Context, idxs []int) error

RestartKeymanagers restarts the specified key manager nodes.

func (*Scenario) Run added in v0.2202.9

func (sc *Scenario) Run(ctx context.Context, childEnv *env.Env) error

func (*Scenario) RunTestClientAndCheckLogs added in v0.2300.0

func (sc *Scenario) RunTestClientAndCheckLogs(ctx context.Context, childEnv *env.Env) error

RunTestClientAndCheckLogs initializes and starts the runtime test client, waits for it to finish its work, and then verifies the logs.

func (*Scenario) StartAndWaitKeymanagers added in v0.2300.0

func (sc *Scenario) StartAndWaitKeymanagers(ctx context.Context, idxs []int) error

StartAndWaitKeymanagers starts the specified key manager nodes and waits for them to become ready.

func (*Scenario) StartKeymanagers added in v0.2300.0

func (sc *Scenario) StartKeymanagers(idxs []int) error

StartKeymanagers starts the specified key manager nodes.

func (*Scenario) StartNetworkAndTestClient added in v0.2202.9

func (sc *Scenario) StartNetworkAndTestClient(ctx context.Context, childEnv *env.Env) error

StartNetworkAndTestClient starts the network and the runtime test client.

func (*Scenario) StartNetworkAndWaitForClientSync added in v0.2202.9

func (sc *Scenario) StartNetworkAndWaitForClientSync(ctx context.Context) error

StartNetworkAndWaitForClientSync starts the network and waits for the client node to sync.

func (*Scenario) StartTestClient added in v0.2300.0

func (sc *Scenario) StartTestClient(ctx context.Context, childEnv *env.Env) error

StartTestClient initializes and starts the runtime test client.

func (*Scenario) StopKeymanagers added in v0.2300.0

func (sc *Scenario) StopKeymanagers(idxs []int) error

StopKeymanagers stops the specified key manager nodes.

func (*Scenario) TEEHardware added in v0.2300.0

func (sc *Scenario) TEEHardware() (node.TEEHardware, error)

TEEHardware returns the configured TEE hardware.

func (*Scenario) UpdateEnclavePolicies added in v0.2300.0

func (sc *Scenario) UpdateEnclavePolicies(rt *oasis.Runtime, deploymentIndex int, policies map[sgx.EnclaveIdentity]*keymanager.EnclavePolicySGX)

UpdateEnclavePolicies updates enclave policies with a new runtime deployment.

func (*Scenario) UpdateRotationInterval added in v0.2300.0

func (sc *Scenario) UpdateRotationInterval(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, rotationInterval beacon.EpochTime, nonce uint64) error

UpdateRotationInterval updates the master secret rotation interval in the key manager policy.

func (*Scenario) UpgradeComputeRuntime added in v0.2300.0

func (sc *Scenario) UpgradeComputeRuntime(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, idx int, nonce uint64) error

UpgradeComputeRuntime upgrades the specified compute runtime.

func (*Scenario) UpgradeComputeRuntimeFixture added in v0.2300.0

func (sc *Scenario) UpgradeComputeRuntimeFixture(f *oasis.NetworkFixture) (int, error)

UpgradeComputeRuntimeFixture selects the first compute runtime and prepares it for the upgrade.

func (*Scenario) UpgradeKeyManager added in v0.2300.0

func (sc *Scenario) UpgradeKeyManager(ctx context.Context, childEnv *env.Env, cli *cli.Helpers, idx int, nonce uint64) error

func (*Scenario) UpgradeKeyManagerFixture added in v0.2300.0

func (sc *Scenario) UpgradeKeyManagerFixture(f *oasis.NetworkFixture) (int, error)

UpgradeKeyManagerFixture selects the first key manager runtime and prepares it for the upgrade.

func (*Scenario) WaitEphemeralSecrets added in v0.2300.0

func (sc *Scenario) WaitEphemeralSecrets(ctx context.Context, n int) (*keymanager.SignedEncryptedEphemeralSecret, error)

WaitEphemeralSecrets waits for the specified number of ephemeral secrets to be generated.

func (*Scenario) WaitForClientSync added in v0.2300.0

func (sc *Scenario) WaitForClientSync(ctx context.Context) error

WaitForClientSync waits for the first client to sync.

func (*Scenario) WaitKeymanagers added in v0.2300.0

func (sc *Scenario) WaitKeymanagers(ctx context.Context, idxs []int) error

WaitKeymanagers waits for the specified key manager nodes to become ready.

func (*Scenario) WaitMasterSecret added in v0.2300.0

func (sc *Scenario) WaitMasterSecret(ctx context.Context, generation uint64) (*keymanager.Status, error)

WaitMasterSecret waits until the specified generation of the master secret is generated.

func (*Scenario) WaitNextRuntimeBlock added in v0.2300.0

func (sc *Scenario) WaitNextRuntimeBlock(ch <-chan *roothash.AnnotatedBlock) (*roothash.AnnotatedBlock, error)

func (*Scenario) WaitNodesSynced added in v0.2300.0

func (sc *Scenario) WaitNodesSynced(ctx context.Context) error

WaitNodesSynced waits for all the nodes to sync.

func (*Scenario) WaitRuntimeBlock added in v0.2300.0

func (sc *Scenario) WaitRuntimeBlock(ch <-chan *roothash.AnnotatedBlock, round uint64) (*roothash.AnnotatedBlock, error)

func (*Scenario) WaitTestClient added in v0.2300.0

func (sc *Scenario) WaitTestClient() error

WaitTestClient waits for the runtime test client to finish its work.

func (*Scenario) WaitTestClientAndCheckLogs added in v0.2300.0

func (sc *Scenario) WaitTestClientAndCheckLogs() error

WaitTestClientAndCheckLogs waits for the runtime test client to finish its work and then verifies the logs.

type TestClient added in v0.2103.0

type TestClient struct {
	// contains filtered or unexported fields
}

TestClient is a client that exercises a pre-determined workload against the simple key-value runtime.

func NewTestClient added in v0.2300.0

func NewTestClient() *TestClient

func (*TestClient) Clone added in v0.2103.0

func (cli *TestClient) Clone() *TestClient

Clone returns a clone of a test client instance, in a state that is ready for Init.

func (*TestClient) Init added in v0.2103.0

func (cli *TestClient) Init(scenario *Scenario) error

Init initializes the test client.

func (*TestClient) Start added in v0.2103.0

func (cli *TestClient) Start(ctx context.Context, _ *env.Env) error

Start starts the test client in the background.

func (*TestClient) Stop added in v0.2300.0

func (cli *TestClient) Stop() error

Stop stops the client.

func (*TestClient) Wait added in v0.2103.0

func (cli *TestClient) Wait() error

Wait waits for the client to finish its work.

func (*TestClient) WithScenario added in v0.2300.0

func (cli *TestClient) WithScenario(scenario TestClientScenario) *TestClient

WithScenario sets the scenario.

func (*TestClient) WithSeed added in v0.2300.0

func (cli *TestClient) WithSeed(seed string) *TestClient

WithSeed sets the seed.
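
Both setters return the client itself, so configuration calls chain; a short usage sketch (the seed string is arbitrary):

client := NewTestClient().
	WithSeed("example-seed").
	WithScenario(InsertKeyValueEncScenario)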

type TestClientScenario added in v0.2202.9

type TestClientScenario func(submit func(req interface{}) error) error

TestClientScenario is a test scenario for a key-value runtime test client.
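
Because a TestClientScenario is just a function over a submit callback, one can also be written by hand instead of via NewTestClientScenario; a hypothetical example that inserts a key and reads it back:

var manualScenario TestClientScenario = func(submit func(req interface{}) error) error {
	// Insert a key/value pair (unencrypted, generation 0) ...
	if err := submit(InsertKeyValueTx{"example_key", "example_value", "", false, 0}); err != nil {
		return err
	}
	// ... then verify that it reads back.
	return submit(GetKeyValueTx{"example_key", "example_value", false, 0})
}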

func JoinTestClientScenarios added in v0.2300.1

func JoinTestClientScenarios(scenarios ...TestClientScenario) TestClientScenario

JoinTestClientScenarios joins an arbitrary number of test client scenarios into a single scenario that executes them in the order they were provided.
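
For example, two of the scenarios defined above can be run back to back; the combined name is hypothetical:

combined := JoinTestClientScenarios(
	InsertKeyValueScenario,
	RemoveKeyValueScenario,
)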

func NewTestClientScenario added in v0.2202.9

func NewTestClientScenario(requests []interface{}) TestClientScenario

NewTestClientScenario creates a new test client scenario.
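
This mirrors how package-level scenario variables such as InsertKeyValueScenario are built; a hypothetical composite that inserts, reads back, removes, and re-reads a key (unencrypted, generation 0):

exampleScenario := NewTestClientScenario([]interface{}{
	InsertKeyValueTx{"example_key", "example_value", "", false, 0},
	GetKeyValueTx{"example_key", "example_value", false, 0},
	RemoveKeyValueTx{"example_key", "example_value", false, 0},
	GetKeyValueTx{"example_key", "", false, 0},
})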

type TrustRootImpl added in v0.2202.9

type TrustRootImpl struct {
	Scenario
}

func NewTrustRootImpl added in v0.2202.9

func NewTrustRootImpl(name string, testClient *TestClient) *TrustRootImpl

func (*TrustRootImpl) Clone added in v0.2202.9

func (sc *TrustRootImpl) Clone() scenario.Scenario

func (*TrustRootImpl) Fixture added in v0.2202.9

func (sc *TrustRootImpl) Fixture() (*oasis.NetworkFixture, error)

func (*TrustRootImpl) PostRun added in v0.2202.10

func (sc *TrustRootImpl) PostRun(_ context.Context, childEnv *env.Env) error

PostRun re-builds simple key/value and key manager runtimes.

func (*TrustRootImpl) PreRun added in v0.2202.10

func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err error)

PreRun starts the network, prepares a trust root, builds simple key/value and key manager runtimes, prepares runtime bundles, and runs the test client.

func (*TrustRootImpl) Run added in v0.2202.9

func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error)

type TxnCall added in v0.2103.0

type TxnCall struct {
	// Nonce is the transaction nonce.
	Nonce uint64 `json:"nonce"`
	// Method is the called method name.
	Method string `json:"method"`
	// Args are the method arguments.
	Args interface{} `json:"args"`
}

TxnCall is a transaction call in the test runtime.
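
A sketch of constructing a call; the "insert" method name and the argument shape are assumptions about the simple key/value test runtime, not confirmed by this page:

call := TxnCall{
	Nonce:  42,
	Method: "insert", // hypothetical method name
	Args: map[string]string{
		"key":   "example_key",
		"value": "example_value",
	},
}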

type TxnOutput added in v0.2103.0

type TxnOutput struct {
	// Success can be of any type.
	Success cbor.RawMessage
	// Error is a string describing the error message.
	Error *string
}

TxnOutput is a transaction call output in the test runtime.
