ethereum

package
v0.2.2 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 12, 2017 License: MIT Imports: 21 Imported by: 10

Documentation

Overview

Package ethereum contains all necessary components to plug into smartpool to work with the Ethereum blockchain, such as Contract, Network Client, and Share Receiver. This package also provides interfaces for different Ethereum clients to be able to work with smartpool.

Index

Constants

View Source
const (
	FullBlockSolution int = 2
	ValidShare        int = 1
	InvalidShare      int = 0
)

Variables

This section is empty.

Functions

This section is empty.

Types

type Contract

type Contract struct {
	// contains filtered or unexported fields
}

func NewContract

func NewContract(client ContractClient) *Contract

func (*Contract) CanRegister

func (c *Contract) CanRegister() bool

func (*Contract) GetShareIndex

func (c *Contract) GetShareIndex(claim smartpool.Claim) *big.Int

func (*Contract) IsRegistered

func (c *Contract) IsRegistered() bool

func (*Contract) Register

func (c *Contract) Register(paymentAddress common.Address) error

func (*Contract) SetEpochData

func (c *Contract) SetEpochData(epochs ...int) error

func (*Contract) SubmitClaim

func (c *Contract) SubmitClaim(claim smartpool.Claim) error

func (*Contract) VerifyClaim

func (c *Contract) VerifyClaim(shareIndex *big.Int, claim smartpool.Claim) error

func (*Contract) Version

func (c *Contract) Version() string

type ContractClient

type ContractClient interface {
	Version() string
	IsRegistered() bool
	CanRegister() bool
	Register(paymentAddress common.Address) error
	GetClaimSeed() *big.Int
	SubmitClaim(
		numShares *big.Int,
		difficulty *big.Int,
		min *big.Int,
		max *big.Int,
		augMerkle *big.Int) error
	VerifyClaim(
		rlpHeader []byte,
		nonce *big.Int,
		shareIndex *big.Int,
		dataSetLookup []*big.Int,
		witnessForLookup []*big.Int,
		augCountersBranch []*big.Int,
		augHashesBranch []*big.Int) error
	SetEpochData(
		merkleRoot []*big.Int,
		fullSizeIn128Resolution []uint64,
		branchDepth []uint64,
		epoch []*big.Int) error
}

type DAGReader

type DAGReader interface {
	// NextWord returns the next data chunk of the DAG dataset. The first
	// 8 bytes must be ignored.
	NextWord() ([]byte, error)
}

DAGReader provides a way for smartpool to retrieve the DAG dataset. How the DAG is retrieved is up to the structs implementing the interface.

type NetworkClient

type NetworkClient struct {
	// contains filtered or unexported fields
}

func NewNetworkClient

func NewNetworkClient(rpc RPCClient, workpool *WorkPool) *NetworkClient

func (*NetworkClient) Configure added in v0.2.0

func (nc *NetworkClient) Configure(etherbase common.Address, extradata string) error

func (*NetworkClient) GetWork

func (nc *NetworkClient) GetWork() smartpool.Work

func (*NetworkClient) ReadyToMine added in v0.2.0

func (nc *NetworkClient) ReadyToMine() bool

func (*NetworkClient) SubmitHashrate

func (nc *NetworkClient) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool

func (*NetworkClient) SubmitSolution

func (nc *NetworkClient) SubmitSolution(s smartpool.Solution) bool

type RPCClient

type RPCClient interface {
	ClientVersion() (string, error)
	GetWork() *Work
	SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool
	SubmitWork(nonce types.BlockNonce, hash, mixDigest common.Hash) bool
	IsVerified(h common.Hash) bool
	Syncing() bool
	BlockNumber() (*big.Int, error)
	GetLog(from *big.Int, event *big.Int, sender *big.Int) (*big.Int, *big.Int)
	SetEtherbase(etherbase common.Address) error
	SetExtradata(extradata string) error
	Broadcast(raw []byte) (common.Hash, error)
}

type Share

type Share struct {
	SolutionState int
	// contains filtered or unexported fields
}

func NewShare

func NewShare(h *types.Header, dif *big.Int) *Share

func (*Share) BlockHeader

func (s *Share) BlockHeader() *types.Header

func (*Share) Counter

func (s *Share) Counter() *big.Int

We use the concatenation of timestamp and nonce as the share counter. The nonce in Ethereum is 8 bytes, so counter = timestamp << 64 + nonce.

func (*Share) DAGElementArray

func (s *Share) DAGElementArray() []*big.Int

func (*Share) DAGProofArray

func (s *Share) DAGProofArray() []*big.Int

func (*Share) Difficulty

func (s *Share) Difficulty() *big.Int

func (*Share) FullSolution added in v0.2.0

func (s *Share) FullSolution() bool

func (*Share) Hash

func (s *Share) Hash() (result smartpool.SPHash)

func (*Share) HashNoNonce

func (s *Share) HashNoNonce() common.Hash

func (*Share) MixDigest

func (s *Share) MixDigest() common.Hash

func (*Share) Nonce

func (s *Share) Nonce() uint64

func (*Share) NonceBig

func (s *Share) NonceBig() *big.Int

func (*Share) NumberU64

func (s *Share) NumberU64() uint64

func (*Share) RlpHeaderWithoutNonce

func (s *Share) RlpHeaderWithoutNonce() ([]byte, error)

func (*Share) ShareDifficulty

func (s *Share) ShareDifficulty() *big.Int

func (*Share) Timestamp

func (s *Share) Timestamp() *big.Int

type Solution

type Solution struct {
	Nonce     types.BlockNonce
	Hash      common.Hash
	MixDigest common.Hash
}

func (*Solution) WorkID

func (s *Solution) WorkID() string

type TimestampClaimRepo added in v0.2.0

type TimestampClaimRepo struct {
	// contains filtered or unexported fields
}

TimestampClaimRepo only selects shares that don't have the most recent timestamp, in order to make sure that incoming shares' counters are greater than those of the selected shares.

func NewTimestampClaimRepo added in v0.2.0

func NewTimestampClaimRepo() *TimestampClaimRepo

TODO: Load from persistent storage

func (*TimestampClaimRepo) AddShare added in v0.2.0

func (cr *TimestampClaimRepo) AddShare(s smartpool.Share)

func (*TimestampClaimRepo) GetCurrentClaim added in v0.2.0

func (cr *TimestampClaimRepo) GetCurrentClaim(threshold int) smartpool.Claim

type Work

type Work struct {
	// contains filtered or unexported fields
}

Work represents Ethereum pow work

func NewWork

func NewWork(h *types.Header, ph string, sh string, diff *big.Int) *Work

func (*Work) AcceptSolution

func (w *Work) AcceptSolution(sol smartpool.Solution) smartpool.Share

func (Work) BlockHeader

func (w Work) BlockHeader() *types.Header

func (*Work) CreatedAt

func (w *Work) CreatedAt() time.Time

func (*Work) ID

func (w *Work) ID() string

func (*Work) PoWHash

func (w *Work) PoWHash() common.Hash

func (Work) SeedHash

func (w Work) SeedHash() string

func (Work) ShareDifficulty

func (w Work) ShareDifficulty() *big.Int

type WorkPool

type WorkPool map[string]*Work

WorkPool keeps track of pending works to ensure that each submitted solution corresponds to a real pow work. WorkPool also implements the ShareReceiver interface.

func (WorkPool) AcceptSolution

func (wp WorkPool) AcceptSolution(s smartpool.Solution) smartpool.Share

AcceptSolution takes a solution, finds the corresponding work, and returns the associated share. It returns nil if the work is not found.

func (WorkPool) AddWork

func (wp WorkPool) AddWork(w *Work)

func (WorkPool) Cleanning

func (wp WorkPool) Cleanning()

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL