Documentation ¶
Index ¶
- Constants
- type BlockingOPRMaker
- type CommandBuilder
- func (b *CommandBuilder) Aggregator(w IEntryWriter) *CommandBuilder
- func (b *CommandBuilder) Build() *MinerCommand
- func (b *CommandBuilder) MinimumDifficulty(min uint64) *CommandBuilder
- func (b *CommandBuilder) NewOPRHash(oprhash []byte) *CommandBuilder
- func (b *CommandBuilder) PauseMining() *CommandBuilder
- func (b *CommandBuilder) ResetRecords() *CommandBuilder
- func (b *CommandBuilder) ResumeMining() *CommandBuilder
- func (b *CommandBuilder) StatsAggregator(w chan *SingleMinerStats) *CommandBuilder
- func (b *CommandBuilder) SubmitNonces() *CommandBuilder
- type ControlledMiner
- type EntryForwarder
- type EntryWriter
- func (w *EntryWriter) AddMiner() chan<- *opr.NonceRanking
- func (w *EntryWriter) Cancel()
- func (w *EntryWriter) CollectAndWrite(blocking bool)
- func (w *EntryWriter) ECBalance() (int64, error)
- func (w *EntryWriter) NextBlockWriter() IEntryWriter
- func (w *EntryWriter) PopulateECAddress() error
- func (w *EntryWriter) SetOPR(opr *opr.OraclePriceRecord)
- type GlobalStatTracker
- func (t *GlobalStatTracker) Collect(ctx context.Context)
- func (t *GlobalStatTracker) FetchAllStats() []*StatisticBucket
- func (t *GlobalStatTracker) FetchStats(height int) *StatisticBucket
- func (g *GlobalStatTracker) GetUpstream(id string) (upstream chan *GroupMinerStats)
- func (t *GlobalStatTracker) InsertStats(g *GroupMinerStats)
- func (g *GlobalStatTracker) StopUpstream(id string)
- type GroupMinerStats
- type IEntryWriter
- type IOPRMaker
- type MinerCommand
- type MinerSubmission
- type MiningCoordinator
- type MiningIdentity
- type NonceIncrementer
- type OPRMaker
- type PegnetMiner
- type SingleMinerStats
- type StatisticBucket
Constants ¶
const ( BatchCommand NewOPRHash ResetRecords MinimumAccept RecordsToKeep RecordAggregator StatsAggregator SubmitNonces PauseMining ResumeMining )
const (
// MaxGlobalStatsBuckets tells us when to garbage collect
MaxGlobalStatsBuckets = 250
)
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type BlockingOPRMaker ¶
type BlockingOPRMaker struct {
// contains filtered or unexported fields
}
func NewBlockingOPRMaker ¶
func NewBlockingOPRMaker() *BlockingOPRMaker
func (*BlockingOPRMaker) Drain ¶
func (b *BlockingOPRMaker) Drain()
Drain everything from the channels
func (*BlockingOPRMaker) NewOPR ¶
func (b *BlockingOPRMaker) NewOPR(ctx context.Context, minerNumber int, dbht int32, config *config.Config, alert chan *opr.OPRs) (*opr.OraclePriceRecord, error)
func (*BlockingOPRMaker) RecOPR ¶
func (b *BlockingOPRMaker) RecOPR(opr *opr.OraclePriceRecord)
type CommandBuilder ¶
type CommandBuilder struct {
// contains filtered or unexported fields
}
CommandBuilder lets us use builder syntax to construct commands
func BuildCommand ¶
func BuildCommand() *CommandBuilder
func (*CommandBuilder) Aggregator ¶
func (b *CommandBuilder) Aggregator(w IEntryWriter) *CommandBuilder
func (*CommandBuilder) Build ¶
func (b *CommandBuilder) Build() *MinerCommand
func (*CommandBuilder) MinimumDifficulty ¶
func (b *CommandBuilder) MinimumDifficulty(min uint64) *CommandBuilder
func (*CommandBuilder) NewOPRHash ¶
func (b *CommandBuilder) NewOPRHash(oprhash []byte) *CommandBuilder
func (*CommandBuilder) PauseMining ¶
func (b *CommandBuilder) PauseMining() *CommandBuilder
func (*CommandBuilder) ResetRecords ¶
func (b *CommandBuilder) ResetRecords() *CommandBuilder
func (*CommandBuilder) ResumeMining ¶
func (b *CommandBuilder) ResumeMining() *CommandBuilder
func (*CommandBuilder) StatsAggregator ¶
func (b *CommandBuilder) StatsAggregator(w chan *SingleMinerStats) *CommandBuilder
func (*CommandBuilder) SubmitNonces ¶
func (b *CommandBuilder) SubmitNonces() *CommandBuilder
type ControlledMiner ¶
type ControlledMiner struct { Miner *PegnetMiner CommandChannel chan *MinerCommand }
func (*ControlledMiner) SendCommand ¶
func (c *ControlledMiner) SendCommand(command *MinerCommand)
type EntryForwarder ¶
type EntryForwarder struct { *EntryWriter Next *EntryForwarder // contains filtered or unexported fields }
EntryForwarder is a wrapper for network based miners to rely on a coordinator to write entries
func NewEntryForwarder ¶
func NewEntryForwarder(config *config.Config, keep int, entryChannel chan *factom.Entry) *EntryForwarder
func (*EntryForwarder) ECBalance ¶
func (w *EntryForwarder) ECBalance() (int64, error)
ECBalance is always positive; the coordinator will stop us mining if it runs out
func (*EntryForwarder) NextBlockWriter ¶
func (w *EntryForwarder) NextBlockWriter() IEntryWriter
NextBlockWriter gets the next block writer to use for the miner.
Because all miners will share a block writer, we make this call idempotent
type EntryWriter ¶
type EntryWriter struct { Keep int Next *EntryWriter EntryWritingFunction func(unique *opr.UniqueOPRData) error sync.Mutex sync.Once // contains filtered or unexported fields }
EntryWriter writes the best OPRs to factom once all the mining is done
func NewEntryWriter ¶
func NewEntryWriter(config *config.Config, keep int) *EntryWriter
func (*EntryWriter) AddMiner ¶
func (w *EntryWriter) AddMiner() chan<- *opr.NonceRanking
AddMiner will add a miner to listen to for this block, and return the channel they should talk to us on.
func (*EntryWriter) Cancel ¶
func (w *EntryWriter) Cancel()
Cancel will cancel a miner's write. If the miner was stopped, we should not expect its write.
func (*EntryWriter) CollectAndWrite ¶
func (w *EntryWriter) CollectAndWrite(blocking bool)
CollectAndWrite will write the block when we collected all the miner data
The blocking is mainly for unit tests.
func (*EntryWriter) ECBalance ¶
func (w *EntryWriter) ECBalance() (int64, error)
func (*EntryWriter) NextBlockWriter ¶
func (w *EntryWriter) NextBlockWriter() IEntryWriter
NextBlockWriter gets the next block writer to use for the miner.
Because all miners will share a block writer, we make this call idempotent
func (*EntryWriter) PopulateECAddress ¶
func (w *EntryWriter) PopulateECAddress() error
PopulateECAddress only needs to be called once
func (*EntryWriter) SetOPR ¶
func (w *EntryWriter) SetOPR(opr *opr.OraclePriceRecord)
SetOPR is here because we need an opr to create the entry.
type GlobalStatTracker ¶
type GlobalStatTracker struct { MiningStatsChannel chan *GroupMinerStats // contains filtered or unexported fields }
GlobalStatTracker is the global tracker used by the APIs and related consumers.
It has thread-safe, queryable stats for the miners and their block heights.
func NewGlobalStatTracker ¶
func NewGlobalStatTracker() *GlobalStatTracker
func (*GlobalStatTracker) Collect ¶
func (t *GlobalStatTracker) Collect(ctx context.Context)
Collect listens for new stats, and manages them
ctx can be cancelled
func (*GlobalStatTracker) FetchAllStats ¶
func (t *GlobalStatTracker) FetchAllStats() []*StatisticBucket
FetchAllStats is really for unit tests
func (*GlobalStatTracker) FetchStats ¶
func (t *GlobalStatTracker) FetchStats(height int) *StatisticBucket
func (*GlobalStatTracker) GetUpstream ¶
func (g *GlobalStatTracker) GetUpstream(id string) (upstream chan *GroupMinerStats)
func (*GlobalStatTracker) InsertStats ¶
func (t *GlobalStatTracker) InsertStats(g *GroupMinerStats)
func (*GlobalStatTracker) StopUpstream ¶
func (g *GlobalStatTracker) StopUpstream(id string)
type GroupMinerStats ¶
type GroupMinerStats struct { Miners map[int]*SingleMinerStats `json:"miners"` BlockHeight int `json:"blockheight"` ID string `json:"id"` Tags map[string]string `json:"tags"` }
GroupMinerStats has the stats for all miners running from a coordinator. It will do aggregation for simple global stats
func NewGroupMinerStats ¶
func NewGroupMinerStats(id string, height int) *GroupMinerStats
func (*GroupMinerStats) AvgDurationPerMiner ¶ added in v0.2.1
func (g *GroupMinerStats) AvgDurationPerMiner() time.Duration
AvgDurationPerMiner is the average duration of mining across all miners.
func (*GroupMinerStats) AvgHashRatePerMiner ¶
func (g *GroupMinerStats) AvgHashRatePerMiner() float64
func (*GroupMinerStats) LogFields ¶
func (g *GroupMinerStats) LogFields() log.Fields
func (*GroupMinerStats) TotalHashPower ¶
func (g *GroupMinerStats) TotalHashPower() float64
TotalHashPower is the sum of all miners' hash power
type IEntryWriter ¶
type IEntryWriter interface { PopulateECAddress() error NextBlockWriter() IEntryWriter AddMiner() chan<- *opr.NonceRanking SetOPR(opr *opr.OraclePriceRecord) CollectAndWrite(blocking bool) ECBalance() (int64, error) }
type MinerCommand ¶
type MinerCommand struct { Command int Data interface{} }
type MinerSubmission ¶
type MiningCoordinator ¶
type MiningCoordinator struct { // Factom blockchain related alerts FactomMonitor common.IMonitor OPRGrader opr.IGrader // Miners mine the opr hashes Miners []*ControlledMiner // FactomEntryWriter writes the oprs to chain FactomEntryWriter IEntryWriter // Who we submit our stats too StatTracker *GlobalStatTracker // Used when going over the network OPRMaker IOPRMaker // contains filtered or unexported fields }
MiningCoordinator can coordinate multiple miners. This object will poll data from exchange sources, make an OPR, get the OPR hash, and send it to miners for them to work on. Once the miners get a top X records, we will aggregate and submit.
TODO: Make the coordinator look at the difficulties in the last block, and determine a minimum based on that.
func NewMiningCoordinatorFromConfig ¶
func NewMiningCoordinatorFromConfig(config *config.Config, monitor common.IMonitor, grader opr.IGrader, s *GlobalStatTracker) *MiningCoordinator
func NewNetworkedMiningCoordinatorFromConfig ¶
func NewNetworkedMiningCoordinatorFromConfig(config *config.Config, monitor common.IMonitor, grader opr.IGrader, s *GlobalStatTracker) *MiningCoordinator
func (*MiningCoordinator) InitMinters ¶
func (c *MiningCoordinator) InitMinters() error
func (*MiningCoordinator) LaunchMiners ¶
func (c *MiningCoordinator) LaunchMiners(ctx context.Context)
func (*MiningCoordinator) NewMiner ¶
func (c *MiningCoordinator) NewMiner(id int) *ControlledMiner
type MiningIdentity ¶
type MiningIdentity struct { Identity string Best *opr.NonceRanking }
type NonceIncrementer ¶
type NonceIncrementer struct { Nonce []byte // contains filtered or unexported fields }
NonceIncrementer is a simple helper for incrementing nonces
func NewNonceIncrementer ¶
func NewNonceIncrementer(id int) *NonceIncrementer
func (*NonceIncrementer) NextNonce ¶
func (i *NonceIncrementer) NextNonce()
NextNonce simply counts up to produce the next nonce. We preserve the first byte, as that is our ID and gives us our nonce space
So []byte(ID, 255) -> []byte(ID, 1, 0) -> []byte(ID, 1, 1)
type OPRMaker ¶
type OPRMaker struct { }
OPRMaker TODO: Should we change this at all?
func NewOPRMaker ¶
func NewOPRMaker() *OPRMaker
type PegnetMiner ¶
type PegnetMiner struct { // ID is the miner number, starting with "1". Every miner launched gets the next // sequential number. ID int `json:"id"` Config *config.Config `json:"-"` // The config of the miner using the record // All the state variables PER oprhash. // Typically want to update these all in parallel MiningState oprMiningState // contains filtered or unexported fields }
PegnetMiner mines an OPRhash
func NewPegnetMinerFromConfig ¶
func NewPegnetMinerFromConfig(c *config.Config, id int, commands <-chan *MinerCommand) *PegnetMiner
func (*PegnetMiner) HandleCommand ¶
func (p *PegnetMiner) HandleCommand(c *MinerCommand)
func (*PegnetMiner) Mine ¶
func (p *PegnetMiner) Mine(ctx context.Context)
func (*PegnetMiner) ResetNonce ¶
func (p *PegnetMiner) ResetNonce()
type SingleMinerStats ¶
type SingleMinerStats struct { ID int `json:"id"` TotalHashes int64 `json:"totalhashes"` BestDifficulty uint64 `json:"bestdifficulty"` Start time.Time `json:"start"` Stop time.Time `json:"stop"` }
SingleMinerStats is the stats of a single miner
func NewSingleMinerStats ¶
func NewSingleMinerStats() *SingleMinerStats
func (*SingleMinerStats) NewDifficulty ¶
func (s *SingleMinerStats) NewDifficulty(diff uint64)
type StatisticBucket ¶
type StatisticBucket struct { // A statistic collection of each group GroupStats map[string]*GroupMinerStats `json:"allgroupstats"` BlockHeight int `json:"blockheight"` }