neuralnetwork

package
v0.0.0-...-beb861e
Published: Jul 11, 2020 License: MIT Imports: 18 Imported by: 0

Documentation

Overview

Package neuralnetwork reproduces the multilayer perceptron based on Andreas Mueller's implementation, adding a float32 implementation, weight decay, and batch normalization.
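
A minimal usage sketch in the style of the runnable examples below (the hidden layer size, iteration count and accuracy threshold are illustrative, not tuned):

mlp := NewMLPClassifier([]int{8}, "relu", "adam", 1e-4)
mlp.MaxIter = 200
ds := datasets.LoadIris()
mlp.Fit(ds.X, ds.Y)
fmt.Println(mlp.Score(ds.X, ds.Y) > .8) // true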

Index

Examples

Constants

This section is empty.

Variables

var Activations32 = map[string]func(z blas32General){
	"identity": func(z blas32General) {},
	"logistic": func(z blas32General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			for col := 0; col < z.Cols; col++ {
				z.Data[zpos+col] = 1 / (1 + M32.Exp(-z.Data[zpos+col]))
			}
		}
	},
	"tanh": func(z blas32General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			for col := 0; col < z.Cols; col++ {
				z.Data[zpos+col] = M32.Tanh(z.Data[zpos+col])
			}
		}
	},
	"relu": func(z blas32General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			for col := 0; col < z.Cols; col++ {
				if z.Data[zpos+col] < 0 {
					z.Data[zpos+col] = 0
				}
			}
		}
	},
	"softmax": func(z blas32General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			sum := float32(0)
			for col := 0; col < z.Cols; col++ {

				z.Data[zpos+col] = M32.Exp(z.Data[zpos+col])
				sum += z.Data[zpos+col]
			}
			for col := 0; col < z.Cols; col++ {
				z.Data[zpos+col] /= sum
			}
		}
	},
}

Activations32 is a map of the in-place activation functions
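
A small sketch applying the in-place "relu" activation to a General32 via its RawMatrix view:

// relu zeroes the negative entries of z in place; Data is shared with g.
g := General32{Rows: 1, Cols: 3, Stride: 3, Data: []float32{-1, 0, 2}}
Activations32["relu"](g.RawMatrix())
fmt.Println(g.Data) // [0 0 2]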

var Activations64 = map[string]func(z blas64General){
	"identity": func(z blas64General) {},
	"logistic": func(z blas64General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			for col := 0; col < z.Cols; col++ {
				z.Data[zpos+col] = 1 / (1 + M64.Exp(-z.Data[zpos+col]))
			}
		}
	},
	"tanh": func(z blas64General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			for col := 0; col < z.Cols; col++ {
				z.Data[zpos+col] = M64.Tanh(z.Data[zpos+col])
			}
		}
	},
	"relu": func(z blas64General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			for col := 0; col < z.Cols; col++ {
				if z.Data[zpos+col] < 0 {
					z.Data[zpos+col] = 0
				}
			}
		}
	},
	"softmax": func(z blas64General) {
		for row, zpos := 0, 0; row < z.Rows; row, zpos = row+1, zpos+z.Stride {
			sum := float64(0)
			for col := 0; col < z.Cols; col++ {

				z.Data[zpos+col] = M64.Exp(z.Data[zpos+col])
				sum += z.Data[zpos+col]
			}
			for col := 0; col < z.Cols; col++ {
				z.Data[zpos+col] /= sum
			}
		}
	},
}

Activations64 is a map of the in-place activation functions

var Derivatives32 = map[string]func(Z, deltas blas32General){
	"identity": func(Z, deltas blas32General) {
	},
	"logistic": func(Z, deltas blas32General) {
		for row, zpos, dpos := 0, 0, 0; row < Z.Rows; row, zpos, dpos = row+1, zpos+Z.Stride, dpos+deltas.Stride {
			for col := 0; col < Z.Cols; col++ {
				z := Z.Data[zpos+col]
				deltas.Data[dpos+col] *= z * (1 - z)
			}
		}
	},
	"tanh": func(Z, deltas blas32General) {
		for row, zpos, dpos := 0, 0, 0; row < Z.Rows; row, zpos, dpos = row+1, zpos+Z.Stride, dpos+deltas.Stride {
			for col := 0; col < Z.Cols; col++ {
				z := Z.Data[zpos+col]
				deltas.Data[dpos+col] *= 1 - z*z
			}
		}
	},
	"relu": func(Z, deltas blas32General) {
		for row, zpos, dpos := 0, 0, 0; row < Z.Rows; row, zpos, dpos = row+1, zpos+Z.Stride, dpos+deltas.Stride {
			for col := 0; col < Z.Cols; col++ {
				if Z.Data[zpos+col] == 0 {
					deltas.Data[dpos+col] = 0
				}
			}
		}
	},
}

Derivatives32 is a map of functions that multiply deltas in place by the derivative of the activation function

var Derivatives64 = map[string]func(Z, deltas blas64General){
	"identity": func(Z, deltas blas64General) {
	},
	"logistic": func(Z, deltas blas64General) {
		for row, zpos, dpos := 0, 0, 0; row < Z.Rows; row, zpos, dpos = row+1, zpos+Z.Stride, dpos+deltas.Stride {
			for col := 0; col < Z.Cols; col++ {
				z := Z.Data[zpos+col]
				deltas.Data[dpos+col] *= z * (1 - z)
			}
		}
	},
	"tanh": func(Z, deltas blas64General) {
		for row, zpos, dpos := 0, 0, 0; row < Z.Rows; row, zpos, dpos = row+1, zpos+Z.Stride, dpos+deltas.Stride {
			for col := 0; col < Z.Cols; col++ {
				z := Z.Data[zpos+col]
				deltas.Data[dpos+col] *= 1 - z*z
			}
		}
	},
	"relu": func(Z, deltas blas64General) {
		for row, zpos, dpos := 0, 0, 0; row < Z.Rows; row, zpos, dpos = row+1, zpos+Z.Stride, dpos+deltas.Stride {
			for col := 0; col < Z.Cols; col++ {
				if Z.Data[zpos+col] == 0 {
					deltas.Data[dpos+col] = 0
				}
			}
		}
	},
}

Derivatives64 is a map of functions that multiply deltas in place by the derivative of the activation function
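
A backpropagation sketch: with logistic outputs z, each delta is scaled by z*(1-z) in place (the exact printed values are subject to floating-point rounding):

// Z holds logistic activations; deltas start as ones.
Z := General64{Rows: 1, Cols: 2, Stride: 2, Data: []float64{0.5, 0.9}}
deltas := General64{Rows: 1, Cols: 2, Stride: 2, Data: []float64{1, 1}}
Derivatives64["logistic"](Z.RawMatrix(), deltas.RawMatrix())
fmt.Println(deltas.Data) // approximately [0.25 0.09]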

var LossFunctions32 = map[string]func(y, h blas32General) float32{
	"square_loss": func(y, h blas32General) float32 {
		sum := float32(0)
		for row, hpos, ypos := 0, 0, 0; row < y.Rows; row, hpos, ypos = row+1, hpos+h.Stride, ypos+y.Stride {
			for col := 0; col < y.Cols; col++ {
				e := h.Data[hpos+col] - y.Data[ypos+col]
				sum += e * e
			}
		}
		return sum / 2 / float32(h.Rows)
	},
	"log_loss": func(y, h blas32General) float32 {
		sum := float32(0)
		hmin, hmax := M32.Nextafter(0, 1), M32.Nextafter(1, 0)
		for row, hpos, ypos := 0, 0, 0; row < y.Rows; row, hpos, ypos = row+1, hpos+h.Stride, ypos+y.Stride {
			for col := 0; col < y.Cols; col++ {
				hval := h.Data[hpos+col]
				if hval < hmin {
					hval = hmin
				} else if hval > hmax {
					hval = hmax
				}
				if y.Data[ypos+col] != 0 {
					sum += -y.Data[ypos+col] * M32.Log(hval)
				}
			}
		}
		return sum / float32(h.Rows)
	},
	"binary_log_loss": func(y, h blas32General) float32 {
		sum := float32(0)
		hmin, hmax := M32.Nextafter(0, 1), M32.Nextafter(1, 0)
		for row, hpos, ypos := 0, 0, 0; row < y.Rows; row, hpos, ypos = row+1, hpos+h.Stride, ypos+y.Stride {
			for col := 0; col < y.Cols; col++ {
				hval := h.Data[hpos+col]
				if hval < hmin {
					hval = hmin
				} else if hval > hmax {
					hval = hmax
				}
				sum += -y.Data[ypos+col]*M32.Log(hval) - (1-y.Data[ypos+col])*M32.Log1p(-hval)
			}
		}
		return sum / float32(h.Rows)
	},
}

LossFunctions32 is a map of loss functions

var LossFunctions64 = map[string]func(y, h blas64General) float64{
	"square_loss": func(y, h blas64General) float64 {
		sum := float64(0)
		for row, hpos, ypos := 0, 0, 0; row < y.Rows; row, hpos, ypos = row+1, hpos+h.Stride, ypos+y.Stride {
			for col := 0; col < y.Cols; col++ {
				e := h.Data[hpos+col] - y.Data[ypos+col]
				sum += e * e
			}
		}
		return sum / 2 / float64(h.Rows)
	},
	"log_loss": func(y, h blas64General) float64 {
		sum := float64(0)
		hmin, hmax := M64.Nextafter(0, 1), M64.Nextafter(1, 0)
		for row, hpos, ypos := 0, 0, 0; row < y.Rows; row, hpos, ypos = row+1, hpos+h.Stride, ypos+y.Stride {
			for col := 0; col < y.Cols; col++ {
				hval := h.Data[hpos+col]
				if hval < hmin {
					hval = hmin
				} else if hval > hmax {
					hval = hmax
				}
				if y.Data[ypos+col] != 0 {
					sum += -y.Data[ypos+col] * M64.Log(hval)
				}
			}
		}
		return sum / float64(h.Rows)
	},
	"binary_log_loss": func(y, h blas64General) float64 {
		sum := float64(0)
		hmin, hmax := M64.Nextafter(0, 1), M64.Nextafter(1, 0)
		for row, hpos, ypos := 0, 0, 0; row < y.Rows; row, hpos, ypos = row+1, hpos+h.Stride, ypos+y.Stride {
			for col := 0; col < y.Cols; col++ {
				hval := h.Data[hpos+col]
				if hval < hmin {
					hval = hmin
				} else if hval > hmax {
					hval = hmax
				}
				sum += -y.Data[ypos+col]*M64.Log(hval) - (1-y.Data[ypos+col])*M64.Log1p(-hval)
			}
		}
		return sum / float64(h.Rows)
	},
}

LossFunctions64 is a map of loss functions
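
A sketch of binary_log_loss on a two-sample batch; the result is the mean of -y*log(h) - (1-y)*log(1-h):

y := General64{Rows: 2, Cols: 1, Stride: 1, Data: []float64{1, 0}}
h := General64{Rows: 2, Cols: 1, Stride: 1, Data: []float64{0.9, 0.2}}
loss := LossFunctions64["binary_log_loss"](y.RawMatrix(), h.RawMatrix())
fmt.Println(loss) // -(log(.9)+log(.8))/2, approximately 0.164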

var M32 = struct {
	Ceil       func(float32) float32
	Sqrt       func(float32) float32
	Pow        func(float32, float32) float32
	IsInf      func(float32, int) bool
	Abs        func(float32) float32
	Exp        func(float32) float32
	Tanh       func(float32) float32
	Log        func(float32) float32
	Log1p      func(float32) float32
	MaxFloat32 float32
	Inf        func(int) float32
	IsNaN      func(float32) bool
	Nextafter  func(x, y float32) float32
	MaxFloatXX floatXX
}{
	Ceil: m32.Ceil, Sqrt: m32.Sqrt, Pow: m32.Pow, IsInf: m32.IsInf, Abs: m32.Abs, Exp: m32.Exp, Tanh: m32.Tanh, Log: m32.Log, Log1p: m32.Log1p,
	MaxFloat32: m32.MaxFloat32, Inf: m32.Inf, IsNaN: m32.IsNaN, Nextafter: m32.Nextafter, MaxFloatXX: m32.MaxFloat32}

M32 has funcs for float32 math
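
For example, the loss functions above use M32.Nextafter to clamp predictions into the open interval (0, 1):

// Clamp bounds used by log_loss above: the float32 values closest to 0 and 1.
hmin, hmax := M32.Nextafter(0, 1), M32.Nextafter(1, 0)
fmt.Println(hmin > 0, hmax < 1) // true true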

var M64 = struct {
	Ceil       func(float64) float64
	Sqrt       func(float64) float64
	Pow        func(float64, float64) float64
	IsInf      func(float64, int) bool
	Abs        func(float64) float64
	Exp        func(float64) float64
	Tanh       func(float64) float64
	Log        func(float64) float64
	Log1p      func(float64) float64
	MaxFloat64 float64
	Inf        func(int) float64
	IsNaN      func(float64) bool
	Nextafter  func(x, y float64) float64
}{Ceil: m64.Ceil, Sqrt: m64.Sqrt, Pow: m64.Pow, IsInf: m64.IsInf, Abs: m64.Abs, Exp: m64.Exp, Tanh: m64.Tanh, Log: m64.Log, Log1p: m64.Log1p,
	MaxFloat64: m64.MaxFloat64, Inf: m64.Inf, IsNaN: m64.IsNaN, Nextafter: m64.Nextafter}

M64 has funcs for float64 math

var MXX = M32

MXX has funcs for floatXX math

var MaxIdxXX = MaxIdx32

MaxIdxXX is the floatXX version of MaxIdx32

var Regressors = []base.Predicter{&MLPRegressor{}}

Regressors is the list of regressors in this package

Functions

func MaxIdx32

func MaxIdx32(a []float32) int

MaxIdx32 returns the index of the maximum value in a
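
Assuming MaxIdx32 returns the index of the maximum element (as its name suggests), it can pick the predicted class from a row of probabilities:

// Index of the largest value, e.g. choosing a class from softmax outputs.
fmt.Println(MaxIdx32([]float32{.1, .7, .2})) // 1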

func MaxIdx64

func MaxIdx64(a []float64) int

MaxIdx64 returns the index of the maximum value in a

Types

type AdamOptimizer32

type AdamOptimizer32 struct {
	Params                []float32
	LearningRateInit      float32
	LearningRate          float32
	Beta1, Beta2, Epsilon float32
	// contains filtered or unexported fields
}

AdamOptimizer32 is the stochastic Adam optimizer

type AdamOptimizer64

type AdamOptimizer64 struct {
	Params                []float64
	LearningRateInit      float64
	LearningRate          float64
	Beta1, Beta2, Epsilon float64
	// contains filtered or unexported fields
}

AdamOptimizer64 is the stochastic Adam optimizer

type BaseMultilayerPerceptron32

type BaseMultilayerPerceptron32 struct {
	Activation         string  `json:"activation"`
	Solver             string  `json:"solver"`
	Alpha              float32 `json:"alpha"`
	WeightDecay        float32 `json:"weight_decay"`
	BatchSize          int     `json:"batch_size"`
	BatchNormalize     bool
	LearningRate       string           `json:"learning_rate"`
	LearningRateInit   float32          `json:"learning_rate_init"`
	PowerT             float32          `json:"power_t"`
	MaxIter            int              `json:"max_iter"`
	LossFuncName       string           `json:"loss_func_name"`
	HiddenLayerSizes   []int            `json:"hidden_layer_sizes"`
	Shuffle            bool             `json:"shuffle"`
	RandomState        base.RandomState `json:"random_state"`
	Tol                float32          `json:"tol"`
	Verbose            bool             `json:"verbose"`
	WarmStart          bool             `json:"warm_start"`
	Momentum           float32          `json:"momentum"`
	NesterovsMomentum  bool             `json:"nesterovs_momentum"`
	EarlyStopping      bool             `json:"early_stopping"`
	ValidationFraction float32          `json:"validation_fraction"`
	Beta1              float32          `json:"beta_1"`
	Beta2              float32          `json:"beta_2"`
	Epsilon            float32          `json:"epsilon"`
	NIterNoChange      int              `json:"n_iter_no_change"`

	// Outputs
	NLayers       int
	NIter         int
	NOutputs      int
	Intercepts    [][]float32     `json:"intercepts_"`
	Coefs         []blas32General `json:"coefs_"`
	OutActivation string          `json:"out_activation_"`
	Loss          float32

	LossCurve           []float32
	ValidationScores    []float32
	BestValidationScore float32
	BestLoss            float32
	NoImprovementCount  int
	// contains filtered or unexported fields
}

BaseMultilayerPerceptron32 closely matches sklearn/neural_network/multilayer_perceptron.py

func NewBaseMultilayerPerceptron32

func NewBaseMultilayerPerceptron32() *BaseMultilayerPerceptron32

NewBaseMultilayerPerceptron32 returns a BaseMultilayerPerceptron32 with defaults

func (*BaseMultilayerPerceptron32) Fit

func (mlp *BaseMultilayerPerceptron32) Fit(X, Y Matrix)

Fit computes Coefs and Intercepts

Example (Mnist)
X, Y := datasets.LoadMnist()
mlp := NewBaseMultilayerPerceptron32()
mlp.HiddenLayerSizes = []int{25}
mlp.MaxIter = 200
mlp.RandomState = base.NewLockedSource(7)
mlp.Shuffle = true
mlp.BatchNormalize = true
expectedMinAccuracy := .988

if testing.Short() {
	mlp.MaxIter = 20
	expectedMinAccuracy = .93
}
mlp.Fit(X, Y)
accuracy := mlp.Score(X, Y)
if accuracy < expectedMinAccuracy {
	fmt.Println("accuracy", accuracy)
} else {
	fmt.Println("ok")
}
Output:

ok

func (*BaseMultilayerPerceptron32) GetNOutputs

func (mlp *BaseMultilayerPerceptron32) GetNOutputs() int

GetNOutputs returns the number of output columns for the Y to pass to Predict

func (*BaseMultilayerPerceptron32) IsClassifier

func (mlp *BaseMultilayerPerceptron32) IsClassifier() bool

IsClassifier returns true if LossFuncName is not square_loss

func (*BaseMultilayerPerceptron32) Predict

func (mlp *BaseMultilayerPerceptron32) Predict(X mat.Matrix, Y Mutable)

Predict does a forward pass and fills Y (Y must be Mutable)

func (*BaseMultilayerPerceptron32) Score

func (mlp *BaseMultilayerPerceptron32) Score(Xmatrix, Ymatrix mat.Matrix) float64

Score for BaseMultilayerPerceptron32 is R2Score or Accuracy depending on LossFuncName

func (*BaseMultilayerPerceptron32) SetParams

func (mlp *BaseMultilayerPerceptron32) SetParams(params map[string]interface{})

SetParams allows setting params from a map (used by Unmarshal)

func (*BaseMultilayerPerceptron32) Unmarshal

func (mlp *BaseMultilayerPerceptron32) Unmarshal(buf []byte) error

Unmarshal initializes params, intercepts_ and coefs_ from JSON
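
A sketch of loading params from JSON, assuming the keys match the json struct tags shown above:

mlp := NewBaseMultilayerPerceptron32()
buf := []byte(`{"activation":"relu","alpha":0.001,"hidden_layer_sizes":[16]}`)
if err := mlp.Unmarshal(buf); err != nil {
	panic(err)
}
fmt.Println(mlp.Activation, mlp.HiddenLayerSizes) // relu [16]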

type BaseMultilayerPerceptron64

type BaseMultilayerPerceptron64 struct {
	Activation         string  `json:"activation"`
	Solver             string  `json:"solver"`
	Alpha              float64 `json:"alpha"`
	WeightDecay        float64 `json:"weight_decay"`
	BatchSize          int     `json:"batch_size"`
	BatchNormalize     bool
	LearningRate       string           `json:"learning_rate"`
	LearningRateInit   float64          `json:"learning_rate_init"`
	PowerT             float64          `json:"power_t"`
	MaxIter            int              `json:"max_iter"`
	LossFuncName       string           `json:"loss_func_name"`
	HiddenLayerSizes   []int            `json:"hidden_layer_sizes"`
	Shuffle            bool             `json:"shuffle"`
	RandomState        base.RandomState `json:"random_state"`
	Tol                float64          `json:"tol"`
	Verbose            bool             `json:"verbose"`
	WarmStart          bool             `json:"warm_start"`
	Momentum           float64          `json:"momentum"`
	NesterovsMomentum  bool             `json:"nesterovs_momentum"`
	EarlyStopping      bool             `json:"early_stopping"`
	ValidationFraction float64          `json:"validation_fraction"`
	Beta1              float64          `json:"beta_1"`
	Beta2              float64          `json:"beta_2"`
	Epsilon            float64          `json:"epsilon"`
	NIterNoChange      int              `json:"n_iter_no_change"`

	// Outputs
	NLayers       int
	NIter         int
	NOutputs      int
	Intercepts    [][]float64     `json:"intercepts_"`
	Coefs         []blas64General `json:"coefs_"`
	OutActivation string          `json:"out_activation_"`
	Loss          float64

	LossCurve           []float64
	ValidationScores    []float64
	BestValidationScore float64
	BestLoss            float64
	NoImprovementCount  int
	// contains filtered or unexported fields
}

BaseMultilayerPerceptron64 closely matches sklearn/neural_network/multilayer_perceptron.py

func NewBaseMultilayerPerceptron64

func NewBaseMultilayerPerceptron64() *BaseMultilayerPerceptron64

NewBaseMultilayerPerceptron64 returns a BaseMultilayerPerceptron64 with defaults

func (*BaseMultilayerPerceptron64) Fit

func (mlp *BaseMultilayerPerceptron64) Fit(X, Y Matrix)

Fit computes Coefs and Intercepts

func (*BaseMultilayerPerceptron64) GetNOutputs

func (mlp *BaseMultilayerPerceptron64) GetNOutputs() int

GetNOutputs returns the number of output columns for the Y to pass to Predict

func (*BaseMultilayerPerceptron64) IsClassifier

func (mlp *BaseMultilayerPerceptron64) IsClassifier() bool

IsClassifier returns true if LossFuncName is not square_loss

func (*BaseMultilayerPerceptron64) Predict

func (mlp *BaseMultilayerPerceptron64) Predict(X mat.Matrix, Y Mutable)

Predict does a forward pass and fills Y (Y must be Mutable)

func (*BaseMultilayerPerceptron64) Score

func (mlp *BaseMultilayerPerceptron64) Score(Xmatrix, Ymatrix mat.Matrix) float64

Score for BaseMultilayerPerceptron64 is R2Score or Accuracy depending on LossFuncName

func (*BaseMultilayerPerceptron64) SetParams

func (mlp *BaseMultilayerPerceptron64) SetParams(params map[string]interface{})

SetParams allows setting params from a map (used by Unmarshal)

func (*BaseMultilayerPerceptron64) Unmarshal

func (mlp *BaseMultilayerPerceptron64) Unmarshal(buf []byte) error

Unmarshal initializes params, intercepts_ and coefs_ from JSON

type Float32Slice

type Float32Slice []float32

Float32Slice implements sort.Interface.

func (Float32Slice) Len

func (p Float32Slice) Len() int

func (Float32Slice) Less

func (p Float32Slice) Less(i, j int) bool

func (Float32Slice) Swap

func (p Float32Slice) Swap(i, j int)
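
Since Float32Slice implements sort.Interface, it can be passed to the standard sort package:

// Sort a float32 slice ascending via sort.Interface.
a := Float32Slice{3, 1, 2}
sort.Sort(a)
fmt.Println(a) // [1 2 3]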

type Float64Slice

type Float64Slice []float64

Float64Slice implements sort.Interface.

func (Float64Slice) Len

func (p Float64Slice) Len() int

func (Float64Slice) Less

func (p Float64Slice) Less(i, j int) bool

func (Float64Slice) Swap

func (p Float64Slice) Swap(i, j int)

type General32

type General32 blas32.General

General32 is like blas32.General
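
A small sketch of direct General32 use; the fields are those of blas32.General, and At/Set use float64 for mat.Matrix compatibility:

g := General32{Rows: 2, Cols: 2, Stride: 2, Data: []float32{1, 2, 3, 4}}
g.Set(0, 1, 5) // overwrite row 0, col 1
r, c := g.Dims()
fmt.Println(r, c, g.At(1, 0)) // 2 2 3
fmt.Println(g.RawRowView(0))  // [1 5]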

func FromDense32

func FromDense32(dst Mutable, dense General32) General32

FromDense32 fills dst (a Mutable) with the contents of dense (a General32)

func ToDense32

func ToDense32(m Matrix) General32

ToDense32 returns a view of m if m is a RawMatrixer, otherwise returns a dense copy of m

func (General32) At

func (mat General32) At(r, c int) float64

At returns value at row,col

func (*General32) Copy

func (mat *General32) Copy(a Matrix)

Copy fills receiver with input matrix

func (General32) Dims

func (mat General32) Dims() (r, c int)

Dims returns the number of rows and columns

func (General32) Len

func (mat General32) Len() int

Len returns row count

func (General32) Less

func (mat General32) Less(i, j int) bool

Less compares rows; panics if Cols != 1

func (General32) RawMatrix

func (mat General32) RawMatrix() blas32General

RawMatrix returns the raw blas matrix

func (General32) RawRowView

func (mat General32) RawRowView(i int) []float32

RawRowView returns row as a float slice

func (General32) RowSlice

func (mat General32) RowSlice(i, j int) General32

RowSlice provides a view on the submatrix (startRow, endRow) as a General32; the returned matrix can be cast to *General32

func (General32) Set

func (mat General32) Set(r, c int, v float64)

Set sets the value at row,col

func (General32) Slice

func (mat General32) Slice(i, j, k, l int) Matrix

Slice provides a view on the submatrix (startRow, endRow, startCol, endCol); the returned matrix can be cast to *General32

func (*General32) SumRows

func (mat *General32) SumRows(a General32)

SumRows sums rows of a into mat

func (General32) Swap

func (mat General32) Swap(i, j int)

Swap permutes two rows

func (General32) T

func (mat General32) T() Matrix

T returns transposed Matrix

type General64

type General64 blas64.General

General64 is like blas64.General

func FromDense64

func FromDense64(dst Mutable, dense General64) General64

FromDense64 fills dst (a Mutable) with the contents of dense (a General64)

func ToDense64

func ToDense64(m Matrix) General64

ToDense64 returns a view of m if m is a RawMatrixer, otherwise returns a dense copy of m

func (General64) At

func (mat General64) At(r, c int) float64

At returns value at row,col

func (*General64) Copy

func (mat *General64) Copy(a Matrix)

Copy fills receiver with input matrix

func (General64) Dims

func (mat General64) Dims() (r, c int)

Dims returns the number of rows and columns

func (General64) Len

func (mat General64) Len() int

Len returns row count

func (General64) Less

func (mat General64) Less(i, j int) bool

Less compares rows; panics if Cols != 1

func (General64) RawMatrix

func (mat General64) RawMatrix() blas64General

RawMatrix returns the raw blas matrix

func (General64) RawRowView

func (mat General64) RawRowView(i int) []float64

RawRowView returns row as a float slice

func (General64) RowSlice

func (mat General64) RowSlice(i, j int) General64

RowSlice provides a view on the submatrix (startRow, endRow) as a General64; the returned matrix can be cast to *General64

func (General64) Set

func (mat General64) Set(r, c int, v float64)

Set sets the value at row,col

func (General64) Slice

func (mat General64) Slice(i, j, k, l int) Matrix

Slice provides a view on the submatrix (startRow, endRow, startCol, endCol); the returned matrix can be cast to *General64

func (*General64) SumRows

func (mat *General64) SumRows(a General64)

SumRows sums rows of a into mat

func (General64) Swap

func (mat General64) Swap(i, j int)

Swap permutes two rows

func (General64) T

func (mat General64) T() Matrix

T returns transposed Matrix

type GeneralXX

type GeneralXX = General32

GeneralXX is for easy transposition to float32 or float64

type LabelBinarizer32

type LabelBinarizer32 struct {
	NegLabel, PosLabel float32
	Classes            [][]float32
}

LabelBinarizer32 binarizes labels in a one-vs-all fashion

func NewLabelBinarizer32

func NewLabelBinarizer32(NegLabel, PosLabel float32) *LabelBinarizer32

NewLabelBinarizer32 returns a *LabelBinarizer32 with the given NegLabel and PosLabel

func (*LabelBinarizer32) Fit

func (m *LabelBinarizer32) Fit(Xmatrix, Ymatrix mat.Matrix) base.Fiter

Fit for the binarizer registers the classes

func (*LabelBinarizer32) FitTransform

func (m *LabelBinarizer32) FitTransform(X, Y mat.Matrix) (Xout, Yout General32)

FitTransform fits to the data, then transforms it
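
A sketch, assuming a one-column Y of class labels expands to one column per class while X passes through:

X := mat.NewDense(3, 1, []float64{10, 20, 30})
Y := mat.NewDense(3, 1, []float64{0, 1, 2})
lb := NewLabelBinarizer32(0, 1)
_, Ybin := lb.FitTransform(X, Y)
fmt.Println(Ybin.Dims()) // 3 3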

func (*LabelBinarizer32) InverseTransform

func (m *LabelBinarizer32) InverseTransform(X, Y General32) (Xout, Yout General32)

InverseTransform for LabelBinarizer32

func (*LabelBinarizer32) Transform

func (m *LabelBinarizer32) Transform(X, Y mat.Matrix) (Xout, Yout General32)

Transform for LabelBinarizer32

func (*LabelBinarizer32) TransformerClone

func (m *LabelBinarizer32) TransformerClone() *LabelBinarizer32

TransformerClone returns a (possibly unfitted) copy of the transformer

type LabelBinarizer64

type LabelBinarizer64 struct {
	NegLabel, PosLabel float64
	Classes            [][]float64
}

LabelBinarizer64 binarizes labels in a one-vs-all fashion

func NewLabelBinarizer64

func NewLabelBinarizer64(NegLabel, PosLabel float64) *LabelBinarizer64

NewLabelBinarizer64 returns a *LabelBinarizer64 with the given NegLabel and PosLabel

func (*LabelBinarizer64) Fit

func (m *LabelBinarizer64) Fit(Xmatrix, Ymatrix mat.Matrix) base.Fiter

Fit for the binarizer registers the classes

func (*LabelBinarizer64) FitTransform

func (m *LabelBinarizer64) FitTransform(X, Y mat.Matrix) (Xout, Yout General64)

FitTransform fits to the data, then transforms it

func (*LabelBinarizer64) InverseTransform

func (m *LabelBinarizer64) InverseTransform(X, Y General64) (Xout, Yout General64)

InverseTransform for LabelBinarizer64

func (*LabelBinarizer64) Transform

func (m *LabelBinarizer64) Transform(X, Y mat.Matrix) (Xout, Yout General64)

Transform for LabelBinarizer64

func (*LabelBinarizer64) TransformerClone

func (m *LabelBinarizer64) TransformerClone() *LabelBinarizer64

TransformerClone returns a (possibly unfitted) copy of the transformer

type MLPClassifier

type MLPClassifier struct{ BaseMultilayerPerceptron64 }

MLPClassifier is a multilayer perceptron classifier

func NewMLPClassifier

func NewMLPClassifier(hiddenLayerSizes []int, activation string, solver string, Alpha float64) *MLPClassifier

NewMLPClassifier returns a *MLPClassifier with defaults. activation is one of logistic, tanh, relu. solver is one of sgd, adagrad, rmsprop, adadelta, adam (one of the keys of base.Solvers); defaults to "adam". Alpha is the regularization parameter. lossName is one of square, log, cross-entropy (one of the keys of lm.LossFunctions); defaults to "log".

func (*MLPClassifier) Fit

func (mlp *MLPClassifier) Fit(Xmatrix, Ymatrix mat.Matrix) base.Fiter

Fit ...

Example (Breast_cancer)
ds := datasets.LoadBreastCancer()

scaler := preprocessing.NewStandardScaler()
scaler.Fit(ds.X, ds.Y)
X0, Y0 := scaler.Transform(ds.X, ds.Y)
nSamples, _ := Y0.Dims()
pca := preprocessing.NewPCA()
pca.Fit(X0, Y0)
X1, Y1 := pca.Transform(X0, Y0)
thres := .995
ExplainedVarianceRatio := 0.
var nComponents int
for nComponents = 0; nComponents < len(pca.ExplainedVarianceRatio) && ExplainedVarianceRatio < thres; nComponents++ {
	ExplainedVarianceRatio += pca.ExplainedVarianceRatio[nComponents]
}
fmt.Printf("ExplainedVarianceRatio %.3f %.3f\n", ExplainedVarianceRatio, pca.ExplainedVarianceRatio[0:nComponents])
fmt.Printf("%d components explain %.2f%% of variance\n", nComponents, thres*100.)
X1 = X1.Slice(0, nSamples, 0, nComponents).(*mat.Dense)
poly := preprocessing.NewPolynomialFeatures(2)
poly.IncludeBias = false

poly.Fit(X1, Y1)
X2, Y2 := poly.Transform(X1, Y1)

m := NewMLPClassifier([]int{}, "logistic", "adam", 0.)
m.RandomState = base.NewLockedSource(1)
m.LearningRateInit = .02
m.WeightDecay = .001
m.MaxIter = 300

log.SetPrefix("ExampleMLPClassifier_Fit_breast_cancer:")
defer log.SetPrefix("")
m.Fit(X2, Y2)
accuracy := m.Score(X2, Y2)
if accuracy <= .999 {
	fmt.Printf("accuracy:%.9f\n", accuracy)
} else {
	fmt.Println("accuracy>0.999 ? true")
}
Output:

ExplainedVarianceRatio 0.996 [0.443 0.190 0.094 0.066 0.055 0.040 0.023 0.016 0.014 0.012 0.010 0.009 0.008 0.005 0.003 0.003 0.002 0.002 0.002 0.001]
20 components explain 99.50% of variance
accuracy>0.999 ? true
Example (Iris)
// adapted from http://scikit-learn.org/stable/_downloads/plot_iris_logistic.ipynb
ds := datasets.LoadIris()

// we only take the first two features.
nSamples, _ := ds.X.Dims()
X, YTrueClasses := ds.X.Slice(0, nSamples, 0, 2).(*mat.Dense), ds.Y
h := .02 // step size in the mesh

mlp := NewMLPClassifier([]int{}, "logistic", "lbfgs", 1e-5)

log.SetPrefix("ExampleMLPClassifier_Fit_iris:")
defer log.SetPrefix("")

// we create an instance of our Classifier and fit the data.
mlp.Fit(X, YTrueClasses)

accuracy := mlp.Score(X, YTrueClasses)
if accuracy >= 0.833 {
	fmt.Println("ok")
} else {
	fmt.Printf("Accuracy:%.3f\n", accuracy)
}

// Put the result into a color plot
if *visualDebug {
	// Plot the decision boundary. For that, we will assign a color to each point in the mesh [x_min, x_max]x[y_min, y_max].
	var xmin, xmax = mat.Min(X.ColView(0)) - .5, mat.Max(X.ColView(0)) + .5

	var ymin, ymax = mat.Min(X.ColView(1)) - .5, mat.Max(X.ColView(1)) + .5

	// xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
	nparange := func(min, max, h float64) []float64 {
		c := make([]float64, 0)
		for v := min; v <= max; v += h {
			c = append(c, v)
		}
		return c
	}
	npmeshgrid := func(xrange, yrange []float64) (xx, yy []float64) {
		for y := ymin; y <= ymax; y += h {
			for x := xmin; x <= xmax; x += h {
				xx = append(xx, x)
				yy = append(yy, y)
			}
		}
		return
	}
	npc := func(c ...[]float64) (XZ *mat.Dense) {
		XZ = mat.NewDense(len(c[0]), len(c), nil)
		for j, src := range c {
			XZ.SetCol(j, src)
		}
		return
	}
	var xx, yy = npmeshgrid(nparange(xmin, xmax, h), nparange(ymin, ymax, h))
	Xgrid := npc(xx, yy)
	Z := &mat.Dense{}
	mlp.Predict(Xgrid, Z)

	plt, _ := plot.New()
	xys := func(X, Y mat.Matrix, cls int) (xy plotter.XYs) {
		imax, _ := Y.Dims()
		for i := 0; i < imax; i++ {
			if int(Y.At(i, 0)) == cls {
				xy = append(xy, struct{ X, Y float64 }{X.At(i, 0), X.At(i, 1)})
			}
		}
		return
	}
	colors1 := []color.RGBA{{166, 206, 227, 255}, {253, 191, 111, 255}, {177, 89, 40, 255}}
	for cls := 0; cls <= 2; cls++ {
		s, _ := plotter.NewScatter(xys(Xgrid, Z, cls))
		s.GlyphStyle.Shape = draw.BoxGlyph{}
		s.GlyphStyle.Color = colors1[cls]
		s.GlyphStyle.Radius = 1
		plt.Add(s)

		s1, _ := plotter.NewScatter(xys(X, YTrueClasses, cls))
		s1.GlyphStyle.Shape = draw.CircleGlyph{}
		s1.GlyphStyle.Radius = 4
		s1.GlyphStyle.Color = colors1[cls]
		plt.Add(s1)
		plt.Legend.Add(ds.TargetNames[cls], s1)
	}
	plt.X.Label.Text = ds.FeatureNames[0]
	plt.Y.Label.Text = ds.FeatureNames[1]
	// Save the plot to a PNG file.
	pngfile := "/tmp/ExampleMLPClassifier_Fit_iris.png"
	os.Remove(pngfile)
	if err := plt.Save(7*vg.Inch, 7*vg.Inch, pngfile); err != nil {
		panic(err)
	}
	cmd := exec.Command("display", pngfile)
	err := cmd.Start()
	if err != nil {
		fmt.Println(err.Error())
	}
	time.Sleep(200 * time.Millisecond)
	os.Remove(pngfile)

}
Output:

ok
Example (Mnist)
// fitting mnist with random state 7, shuffle, batch normalization and 400 iterations should allow accuracy 99.96%; uses the embedded label binarizer

MaxIter := 400
expectedMinAccuracy := .999
if testing.Short() {
	log.Println("ExampleMLPClassifier_Fit_mnist reduced iterations because testing with -short")
	MaxIter = 40
	expectedMinAccuracy = .95

}
X, Y := datasets.LoadMnist()
mlp := NewMLPClassifier([]int{25}, "logistic", "adam", 0)
mlp.RandomState = base.NewLockedSource(7)
mlp.Shuffle = true
mlp.BatchNormalize = true
mlp.MaxIter = MaxIter

log.SetPrefix("ExampleMLPClassifier_Fit_mnist:")
defer log.SetPrefix("")

mlp.Fit(X, Y)
pred := mlp.Predict(X, nil)
acc := metrics.AccuracyScore(Y, pred, true, nil)
if acc < expectedMinAccuracy {
	fmt.Printf("Accuracy:%.2f%%\n", acc*100)
} else {
	fmt.Println("ok")
}
Output:

ok

func (*MLPClassifier) IsClassifier

func (*MLPClassifier) IsClassifier() bool

IsClassifier returns true for MLPClassifier

func (*MLPClassifier) Predict

func (mlp *MLPClassifier) Predict(X mat.Matrix, Ymutable mat.Mutable) *mat.Dense

Predict returns the forward result for MLPClassifier

Example (Mnist)
X, Y := datasets.LoadMnist()
lb := preprocessing.NewLabelBinarizer(0, 1)
X, Ybin := lb.FitTransform(X, Y)
Theta1T, Theta2T := datasets.LoadMnistWeights()
mlp := NewMLPClassifier([]int{25}, "logistic", "adam", 0)
mlp.Shuffle = false
mlp.initialize(Ybin.RawMatrix().Cols, []int{400, 25, 10}, true, true)
mat.NewDense(401, 25, mlp.packedParameters[:401*25]).Copy(Theta1T.T())
mat.NewDense(26, 10, mlp.packedParameters[401*25:]).Copy(Theta2T.T())
mlp.WarmStart = true

predBin := mat.NewDense(Ybin.RawMatrix().Rows, Ybin.RawMatrix().Cols, nil)
mlp.Predict(X, predBin)
//_, pred := lb.InverseTransform(nil, predBin)
acc := metrics.AccuracyScore(Ybin, predBin, true, nil)
fmt.Printf("Accuracy:%.2f%%\n", acc*100)
Output:

Accuracy:97.52%

func (*MLPClassifier) PredicterClone

func (mlp *MLPClassifier) PredicterClone() base.Predicter

PredicterClone returns a (possibly unfitted) copy of the predicter

func (*MLPClassifier) Score

func (mlp *MLPClassifier) Score(Xmatrix, Ymatrix mat.Matrix) float64

Score for MLPClassifier computes accuracy score

type MLPRegressor

type MLPRegressor struct{ BaseMultilayerPerceptron64 }

MLPRegressor is a multilayer perceptron regressor

func NewMLPRegressor

func NewMLPRegressor(hiddenLayerSizes []int, activation string, solver string, Alpha float64) *MLPRegressor

NewMLPRegressor returns a *MLPRegressor with defaults. activation is one of identity, logistic, tanh, relu. solver is one of sgd, adam; defaults to "adam". Alpha is the regularization parameter.

func (*MLPRegressor) Fit

func (mlp *MLPRegressor) Fit(Xmatrix, Ymatrix mat.Matrix) base.Fiter

Fit ...

Example (Boston)
// example inspired by https://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/
// with wider_model
// added weight decay and reduced epochs from 100 to 20
ds := datasets.LoadBoston()
X, Y := ds.X, ds.Y

mlp := NewMLPRegressor([]int{20}, "relu", "adam", 0)
mlp.RandomState = base.NewLockedSource(1)
mlp.LearningRateInit = .05
mlp.WeightDecay = .01
mlp.Shuffle = false
mlp.BatchSize = 5
mlp.MaxIter = 100
m := pipeline.MakePipeline(preprocessing.NewStandardScaler(), mlp)
_ = m
randomState := rand.New(base.NewLockedSource(7))
scorer := func(Y, Ypred mat.Matrix) float64 {
	e := metrics.MeanSquaredError(Y, Ypred, nil, "").At(0, 0)
	return e
}
mean := func(x []float64) float64 { return floats.Sum(x) / float64(len(x)) }

log.SetPrefix("ExampleMLPRegressor_Fit_boston:")
defer log.SetPrefix("")
res := modelselection.CrossValidate(m, X, Y,
	nil,
	scorer,
	&modelselection.KFold{NSplits: 10, Shuffle: true, RandomState: randomState}, 10)
fmt.Println(math.Sqrt(mean(res.TestScore)) < 20)
Output:

true

func (*MLPRegressor) IsClassifier

func (*MLPRegressor) IsClassifier() bool

IsClassifier returns false for MLPRegressor

func (*MLPRegressor) Predict

func (mlp *MLPRegressor) Predict(X mat.Matrix, Ymutable mat.Mutable) *mat.Dense

Predict returns the forward result

func (*MLPRegressor) PredicterClone

func (mlp *MLPRegressor) PredicterClone() base.Predicter

PredicterClone allows cloning the predicter for pipeline or model_selection

func (*MLPRegressor) Score

func (mlp *MLPRegressor) Score(X, Y mat.Matrix) float64

Score for MLPRegressor returns R2Score

type Matrix

type Matrix = mat.Matrix

Matrix interface (identical to gonum/mat's)

type Mutable

type Mutable interface {
	Set(i, j int, v float64)
	Matrix
}

Mutable provides Set to set the value at row,col

type Optimizer32

type Optimizer32 interface {
	// contains filtered or unexported methods
}

Optimizer32 is an interface for stochastic optimizers

type Optimizer64

type Optimizer64 interface {
	// contains filtered or unexported methods
}

Optimizer64 is an interface for stochastic optimizers

type RawMatrixer32

type RawMatrixer32 interface {
	RawMatrix() blas32General
}

RawMatrixer32 provides access to the blas matrix

type RawMatrixer64

type RawMatrixer64 interface {
	RawMatrix() blas64General
}

RawMatrixer64 provides access to the blas matrix

type RawRowViewer

type RawRowViewer = RawRowViewer64

RawRowViewer returns row as a float slice

type RawRowViewer32

type RawRowViewer32 interface {
	RawRowView(i int) []float32
}

RawRowViewer32 returns row as a float slice

type RawRowViewer64

type RawRowViewer64 interface {
	RawRowView(i int) []float64
}

RawRowViewer64 returns row as a float slice

type RawRowViewerXX

type RawRowViewerXX = RawRowViewer32

RawRowViewerXX returns row as a float slice

type SGDOptimizer32

type SGDOptimizer32 struct {
	Params           []float32
	LearningRateInit float32
	LearningRate     float32
	PowerT           float32
	LRSchedule       string
	Momentum         float32
	Nesterov         bool
	// contains filtered or unexported fields
}

SGDOptimizer32 is the stochastic gradient descent optimizer

type SGDOptimizer64

type SGDOptimizer64 struct {
	Params           []float64
	LearningRateInit float64
	LearningRate     float64
	PowerT           float64
	LRSchedule       string
	Momentum         float64
	Nesterov         bool
	// contains filtered or unexported fields
}

SGDOptimizer64 is the stochastic gradient descent optimizer

type Slicer

type Slicer interface {
	Slice(i, j, k, l int) Matrix
}

Slicer provides Slice(startRow,endRow,startCol,endCol)
