orm

package v0.0.0-...-2684b26

Published: May 2, 2024 License: MIT Imports: 12 Imported by: 0

Documentation

Index

Constants

const BackRepoDataTemplateCode = `// generated code - do not edit
package orm

type BackRepoData struct {
	// insertion point for slices{{` + string(rune(BackRepoDataSlice)) + `}}
}

func CopyBackRepoToBackRepoData(backRepo *BackRepoStruct, backRepoData *BackRepoData) {
	// insertion point for slices copies{{` + string(rune(BackRepoDataSliceCopies)) + `}}
}
`
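
The insertion-point markers in the template above are literal "{{<rune>}}" sequences built from the enumeration values listed under Types, and the sub-template maps listed under Variables are keyed by the same string(rune(...)) values. Below is a minimal, self-contained sketch of the expansion mechanism; the helper expandInsertionPoint is hypothetical, the actual generator is MultiCodeGeneratorBackRepo.

package main

import (
	"fmt"
	"strings"
)

// expandInsertionPoint replaces the "{{<rune>}}" marker of one insertion
// point with the sub-template, instantiated once per struct name with its
// {{Structname}}/{{structname}} placeholders filled in.
func expandInsertionPoint(template string, point rune, subTemplate string, structNames []string) string {
	var generated strings.Builder
	for _, name := range structNames {
		code := strings.ReplaceAll(subTemplate, "{{Structname}}", name)
		code = strings.ReplaceAll(code, "{{structname}}", strings.ToLower(name[:1])+name[1:])
		generated.WriteString(code)
	}
	return strings.ReplaceAll(template, "{{"+string(point)+"}}", generated.String())
}

func main() {
	const backRepoDataSlice = 0 // stands in for BackRepoDataSlice
	template := "type BackRepoData struct {\n\t// insertion point for slices{{" + string(rune(backRepoDataSlice)) + "}}\n}"
	sub := "\n\n\t{{Structname}}APIs []*{{Structname}}API"
	fmt.Println(expandInsertionPoint(template, rune(backRepoDataSlice), sub, []string{"Astruct", "Bstruct"}))
}
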
const BackRepoPerStructTemplateCode = `// generated by stacks/gong/go/models/orm_file_per_struct_back_repo.go
package orm

import (
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sort"
	"time"

	"gorm.io/gorm"

	"github.com/tealeg/xlsx/v3"

	"{{PkgPathRoot}}"
)

// dummy variables to have the import declarations without compile failure (even if no code needing these imports is generated)
var dummy_{{Structname}}_sql sql.NullBool
var dummy_{{Structname}}_time time.Duration
var dummy_{{Structname}}_sort sort.Float64Slice

// {{Structname}}API is the input in POST API
//
// for the POST API, one needs the fields of the model as well as the fields
// from associations ("Has One" and "Has Many") that are generated to
// fulfill the ORM requirements for associations
//
// swagger:model {{structname}}API
type {{Structname}}API struct {
	gorm.Model

	models.{{Structname}}_WOP

	// encoding of pointers
	// for API, it cannot be embedded
	{{Structname}}PointersEncoding {{Structname}}PointersEncoding
}

// {{Structname}}PointersEncoding encodes pointers to Struct and
// reverse pointers of slices of pointers to Struct
type {{Structname}}PointersEncoding struct {
	// insertion for pointer fields encoding declaration{{` + string(rune(BackRepoPointerEncodingFieldsDeclaration)) + `}}
}

// {{Structname}}DB describes a {{structname}} in the database
//
// It incorporates the GORM ID, basic fields from the model (because they can be serialized),
// the encoded version of pointers
//
// swagger:model {{structname}}DB
type {{Structname}}DB struct {
	gorm.Model

	// insertion for basic fields declaration{{` + string(rune(BackRepoBasicFieldsDeclaration)) + `}}
	
	// encoding of pointers
	// for GORM serialization, it is necessary to embed the Pointer Encoding declaration
	{{Structname}}PointersEncoding
}

// {{Structname}}DBs is a slice of {{Structname}}DB
// swagger:response {{structname}}DBsResponse
type {{Structname}}DBs []{{Structname}}DB

// {{Structname}}DBResponse provides a response
// swagger:response {{structname}}DBResponse
type {{Structname}}DBResponse struct {
	{{Structname}}DB
}

// {{Structname}}WOP is a {{Structname}} without pointers (WOP is an acronym for "Without Pointers")
// it holds the same basic fields but pointers are encoded into uint
type {{Structname}}WOP struct {
	ID int{{` + string(rune(BackRepoWOPInitialIndex)) + `}}

	// insertion for WOP basic fields{{` + string(rune(BackRepoBasicAndTimeFieldsWOPDeclaration)) + `}}
	// insertion for WOP pointer fields{{` + string(rune(BackRepoPointerEncodingFieldsWOPDeclaration)) + `}}
}

var {{Structname}}_Fields = []string{
	// insertion for WOP basic fields{{` + string(rune(BackRepoBasicAndTimeFieldsName)) + `}}
}

type BackRepo{{Structname}}Struct struct {
	// stores {{Structname}}DB according to their gorm ID
	Map_{{Structname}}DBID_{{Structname}}DB map[uint]*{{Structname}}DB

	// stores {{Structname}}DB ID according to {{Structname}} address
	Map_{{Structname}}Ptr_{{Structname}}DBID map[*models.{{Structname}}]uint

	// stores {{Structname}} according to their gorm ID
	Map_{{Structname}}DBID_{{Structname}}Ptr map[uint]*models.{{Structname}}

	db *gorm.DB

	stage *models.StageStruct
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) GetStage() (stage *models.StageStruct) {
	stage = backRepo{{Structname}}.stage
	return
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) GetDB() *gorm.DB {
	return backRepo{{Structname}}.db
}

// Get{{Structname}}DBFrom{{Structname}}Ptr is a handy function to access the back repo instance from the stage instance
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) Get{{Structname}}DBFrom{{Structname}}Ptr({{structname}} *models.{{Structname}}) ({{structname}}DB *{{Structname}}DB) {
	id := backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]
	{{structname}}DB = backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[id]
	return
}

// BackRepo{{Structname}}.CommitPhaseOne commits all staged instances of {{Structname}} to the BackRepo
// Phase One is the creation of each instance in the database, if not already done, in order to get a unique ID for each staged instance
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseOne(stage *models.StageStruct) (Error error) {

	for {{structname}} := range stage.{{Structname}}s {
		backRepo{{Structname}}.CommitPhaseOneInstance({{structname}})
	}

	// parse all backRepo instances and check whether some instances have been unstaged
	// in this case, remove them from the back repo
	for id, {{structname}} := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr {
		if _, ok := stage.{{Structname}}s[{{structname}}]; !ok {
			backRepo{{Structname}}.CommitDeleteInstance(id)
		}
	}

	return
}

// BackRepo{{Structname}}.CommitDeleteInstance commits deletion of {{Structname}} to the BackRepo
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitDeleteInstance(id uint) (Error error) {

	{{structname}} := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[id]

	// {{structname}} is not staged anymore, remove {{structname}}DB
	{{structname}}DB := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[id]
	query := backRepo{{Structname}}.db.Unscoped().Delete(&{{structname}}DB)
	if query.Error != nil {
		log.Fatal(query.Error)
	}

	// update stores
	delete(backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID, {{structname}})
	delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr, id)
	delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB, id)

	return
}

// BackRepo{{Structname}}.CommitPhaseOneInstance commits a single staged instance of {{Structname}} to the BackRepo
// Phase One is the creation of the instance in the database, if not already done, in order to get its unique ID
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseOneInstance({{structname}} *models.{{Structname}}) (Error error) {

	// check if the {{structname}} is not committed yet
	if _, ok := backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {
		return
	}

	// initialize {{structname}}DB
	var {{structname}}DB {{Structname}}DB
	{{structname}}DB.CopyBasicFieldsFrom{{Structname}}({{structname}})

	query := backRepo{{Structname}}.db.Create(&{{structname}}DB)
	if query.Error != nil {
		log.Fatal(query.Error)
	}

	// update stores
	backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}] = {{structname}}DB.ID
	backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID] = {{structname}}
	backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = &{{structname}}DB

	return
}

// BackRepo{{Structname}}.CommitPhaseTwo commits all staged instances of {{Structname}} to the BackRepo
// Phase Two is the update of the instance fields in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseTwo(backRepo *BackRepoStruct) (Error error) {

	for idx, {{structname}} := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr {
		backRepo{{Structname}}.CommitPhaseTwoInstance(backRepo, idx, {{structname}})
	}

	return
}

// BackRepo{{Structname}}.CommitPhaseTwoInstance commits a single instance {{structname}} of models.{{Structname}} to the BackRepo
// Phase Two is the update of the instance fields in the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CommitPhaseTwoInstance(backRepo *BackRepoStruct, idx uint, {{structname}} *models.{{Structname}}) (Error error) {

	// fetch matching {{structname}}DB
	if {{structname}}DB, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[idx]; ok {

		{{structname}}DB.CopyBasicFieldsFrom{{Structname}}({{structname}})

		// insertion point for translating pointers encodings into actual pointers{{` + string(rune(BackRepoPointerEncodingFieldsCommit)) + `}}
		query := backRepo{{Structname}}.db.Save(&{{structname}}DB)
		if query.Error != nil {
			log.Fatalln(query.Error)
		}

	} else {
		err := fmt.Errorf("unknown {{Structname}} instance %s", {{structname}}.Name)
		return err
	}

	return
}

// BackRepo{{Structname}}.CheckoutPhaseOne checks out all BackRepo instances to the Stage
//
// Phase One will result in having instances on the stage aligned with the back repo
// pointers are not initialized yet (this is for phase two)
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseOne() (Error error) {

	{{structname}}DBArray := make([]{{Structname}}DB, 0)
	query := backRepo{{Structname}}.db.Find(&{{structname}}DBArray)
	if query.Error != nil {
		return query.Error
	}

	// list of instances to be removed
	// start from the initial map on the stage and remove instances that have been checked out
	{{structname}}InstancesToBeRemovedFromTheStage := make(map[*models.{{Structname}}]any)
	for key, value := range backRepo{{Structname}}.stage.{{Structname}}s {
		{{structname}}InstancesToBeRemovedFromTheStage[key] = value
	}

	// copy orm objects to the map
	for _, {{structname}}DB := range {{structname}}DBArray {
		backRepo{{Structname}}.CheckoutPhaseOneInstance(&{{structname}}DB)

		// do not remove this instance from the stage, therefore
		// remove the instance from the list of instances to be removed from the stage
		{{structname}}, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID]
		if ok {
			delete({{structname}}InstancesToBeRemovedFromTheStage, {{structname}})
		}
	}

	// remove from stage and back repo's 3 maps all {{structname}}s that are not in the checkout
	for {{structname}} := range {{structname}}InstancesToBeRemovedFromTheStage {
		{{structname}}.Unstage(backRepo{{Structname}}.GetStage())

		// remove instance from the back repo 3 maps
		{{structname}}ID := backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]
		delete(backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID, {{structname}})
		delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB, {{structname}}ID)
		delete(backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr, {{structname}}ID)
	}

	return
}

// CheckoutPhaseOneInstance takes a {{structname}}DB that has been found in the DB, updates the backRepo and stages the
// models version of the {{structname}}DB
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseOneInstance({{structname}}DB *{{Structname}}DB) (Error error) {

	{{structname}}, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID]
	if !ok {
		{{structname}} = new(models.{{Structname}})

		backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID] = {{structname}}
		backRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}] = {{structname}}DB.ID

		// append model store with the new element
		{{structname}}.Name = {{structname}}DB.Name_Data.String
		{{structname}}.Stage(backRepo{{Structname}}.GetStage())
	}
	{{structname}}DB.CopyBasicFieldsTo{{Structname}}({{structname}})

	// in some cases, the instance might have been unstaged. It is necessary to stage it again
	{{structname}}.Stage(backRepo{{Structname}}.GetStage())

	// preserve a copy of {{structname}}DB. Otherwise, the pointer is recycled across loop iterations
	// and the map Map_{{Structname}}DBID_{{Structname}}DB would hold pointers to varying content
	{{structname}}DB_Data := *{{structname}}DB
	preservedPtrTo{{Structname}} := &{{structname}}DB_Data
	backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = preservedPtrTo{{Structname}}

	return
}

// BackRepo{{Structname}}.CheckoutPhaseTwo checks out all BackRepo instances of {{Structname}} to the Stage
// Phase Two is the update of the instances' pointer fields from the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseTwo(backRepo *BackRepoStruct) (Error error) {

	// parse all DB instance and update all pointer fields of the translated models instance
	for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
		backRepo{{Structname}}.CheckoutPhaseTwoInstance(backRepo, {{structname}}DB)
	}
	return
}

// BackRepo{{Structname}}.CheckoutPhaseTwoInstance checks out a single BackRepo instance of {{Structname}} to the Stage
// Phase Two is the update of the instance's pointer fields from the database
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) CheckoutPhaseTwoInstance(backRepo *BackRepoStruct, {{structname}}DB *{{Structname}}DB) (Error error) {

	{{structname}} := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr[{{structname}}DB.ID]

	{{structname}}DB.DecodePointers(backRepo, {{structname}})

	return
}

func ({{structname}}DB *{{Structname}}DB) DecodePointers(backRepo *BackRepoStruct, {{structname}} *models.{{Structname}}) {

	// insertion point for checkout of pointer encoding{{` + string(rune(BackRepoPointerEncodingFieldsCheckout)) + `}}
	return
}

// Commit{{Structname}} allows commit of a single {{structname}} (if already staged)
func (backRepo *BackRepoStruct) Commit{{Structname}}({{structname}} *models.{{Structname}}) {
	backRepo.BackRepo{{Structname}}.CommitPhaseOneInstance({{structname}})
	if id, ok := backRepo.BackRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {
		backRepo.BackRepo{{Structname}}.CommitPhaseTwoInstance(backRepo, id, {{structname}})
	}
	backRepo.CommitFromBackNb = backRepo.CommitFromBackNb + 1
}

// Checkout{{Structname}} allows checkout of a single {{structname}} (if already staged and with a BackRepo id)
func (backRepo *BackRepoStruct) Checkout{{Structname}}({{structname}} *models.{{Structname}}) {
	// check if the {{structname}} is staged
	if _, ok := backRepo.BackRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {

		if id, ok := backRepo.BackRepo{{Structname}}.Map_{{Structname}}Ptr_{{Structname}}DBID[{{structname}}]; ok {
			var {{structname}}DB {{Structname}}DB
			{{structname}}DB.ID = id

			if err := backRepo.BackRepo{{Structname}}.db.First(&{{structname}}DB, id).Error; err != nil {
				log.Fatalln("Checkout{{Structname}} : Problem with getting object with id:", id)
			}
			backRepo.BackRepo{{Structname}}.CheckoutPhaseOneInstance(&{{structname}}DB)
			backRepo.BackRepo{{Structname}}.CheckoutPhaseTwoInstance(backRepo, &{{structname}}DB)
		}
	}
}

// CopyBasicFieldsFrom{{Structname}}
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsFrom{{Structname}}({{structname}} *models.{{Structname}}) {
	// insertion point for fields commit{{` + string(rune(BackRepoBasicFieldsCommit)) + `}}
}

// CopyBasicFieldsFrom{{Structname}}_WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsFrom{{Structname}}_WOP({{structname}} *models.{{Structname}}_WOP) {
	// insertion point for fields commit{{` + string(rune(BackRepoBasicFieldsCommit)) + `}}
}

// CopyBasicFieldsFrom{{Structname}}WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsFrom{{Structname}}WOP({{structname}} *{{Structname}}WOP) {
	// insertion point for fields commit{{` + string(rune(BackRepoBasicFieldsCommit)) + `}}
}

// CopyBasicFieldsTo{{Structname}}
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsTo{{Structname}}({{structname}} *models.{{Structname}}) {
	// insertion point for checkout of basic fields (back repo to stage){{` + string(rune(BackRepoBasicFieldsCheckout)) + `}}
}

// CopyBasicFieldsTo{{Structname}}_WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsTo{{Structname}}_WOP({{structname}} *models.{{Structname}}_WOP) {
	// insertion point for checkout of basic fields (back repo to stage){{` + string(rune(BackRepoBasicFieldsCheckout)) + `}}
}

// CopyBasicFieldsTo{{Structname}}WOP
func ({{structname}}DB *{{Structname}}DB) CopyBasicFieldsTo{{Structname}}WOP({{structname}} *{{Structname}}WOP) {
	{{structname}}.ID = int({{structname}}DB.ID)
	// insertion point for checkout of basic fields (back repo to stage){{` + string(rune(BackRepoBasicFieldsCheckout)) + `}}
}

// Backup generates a json file from a slice of all {{Structname}}DB instances in the backrepo
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) Backup(dirPath string) {

	filename := filepath.Join(dirPath, "{{Structname}}DB.json")

	// organize the map into an array with increasing IDs, in order to have a reproducible
	// backup file
	forBackup := make([]*{{Structname}}DB, 0)
	for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
		forBackup = append(forBackup, {{structname}}DB)
	}

	sort.Slice(forBackup[:], func(i, j int) bool {
		return forBackup[i].ID < forBackup[j].ID
	})

	file, err := json.MarshalIndent(forBackup, "", " ")

	if err != nil {
		log.Fatal("Cannot json {{Structname}} ", filename, " ", err.Error())
	}

	err = ioutil.WriteFile(filename, file, 0644)
	if err != nil {
		log.Fatal("Cannot write the json {{Structname}} file", err.Error())
	}
}

// BackupXL generates an xlsx sheet from a slice of all {{Structname}}DB instances in the backrepo
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) BackupXL(file *xlsx.File) {

	// organize the map into an array with increasing IDs, in order to have a reproducible
	// backup file
	forBackup := make([]*{{Structname}}DB, 0)
	for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {
		forBackup = append(forBackup, {{structname}}DB)
	}

	sort.Slice(forBackup[:], func(i, j int) bool {
		return forBackup[i].ID < forBackup[j].ID
	})

	sh, err := file.AddSheet("{{Structname}}")
	if err != nil {
		log.Fatal("Cannot add XL file", err.Error())
	}

	row := sh.AddRow()
	row.WriteSlice(&{{Structname}}_Fields, -1)
	for _, {{structname}}DB := range forBackup {

		var {{structname}}WOP {{Structname}}WOP
		{{structname}}DB.CopyBasicFieldsTo{{Structname}}WOP(&{{structname}}WOP)

		row := sh.AddRow()
		row.WriteStruct(&{{structname}}WOP, -1)
	}
}

// RestoreXLPhaseOne restores all {{Structname}}DB instances from the "{{Structname}}" sheet
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) RestoreXLPhaseOne(file *xlsx.File) {

	// resets the map
	BackRepo{{Structname}}id_atBckpTime_newID = make(map[uint]uint)

	sh, ok := file.Sheet["{{Structname}}"]
	if !ok {
		log.Fatal(errors.New("sheet not found"))
	}

	// log.Println("Max row is", sh.MaxRow)
	err := sh.ForEachRow(backRepo{{Structname}}.rowVisitor{{Structname}})
	if err != nil {
		log.Fatal("Err=", err)
	}
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) rowVisitor{{Structname}}(row *xlsx.Row) error {

	log.Printf("row line %d\n", row.GetCoordinate())
	log.Println(row)

	// skip first line
	if row.GetCoordinate() > 0 {
		var {{structname}}WOP {{Structname}}WOP
		row.ReadStruct(&{{structname}}WOP)

		// add the unmarshalled struct to the stage
		{{structname}}DB := new({{Structname}}DB)
		{{structname}}DB.CopyBasicFieldsFrom{{Structname}}WOP(&{{structname}}WOP)

		{{structname}}DB_ID_atBackupTime := {{structname}}DB.ID
		{{structname}}DB.ID = 0
		query := backRepo{{Structname}}.db.Create({{structname}}DB)
		if query.Error != nil {
			log.Fatal(query.Error)
		}
		backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = {{structname}}DB
		BackRepo{{Structname}}id_atBckpTime_newID[{{structname}}DB_ID_atBackupTime] = {{structname}}DB.ID
	}
	return nil
}

// RestorePhaseOne reads the file "{{Structname}}DB.json" in dirPath that stores an array
// of {{Structname}}DB and restores it into the database
// the map BackRepo{{Structname}}id_atBckpTime_newID is updated accordingly
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) RestorePhaseOne(dirPath string) {

	// resets the map
	BackRepo{{Structname}}id_atBckpTime_newID = make(map[uint]uint)

	filename := filepath.Join(dirPath, "{{Structname}}DB.json")
	jsonFile, err := os.Open(filename)
	// if os.Open returns an error then handle it
	if err != nil {
		log.Fatal("Cannot restore/open the json {{Structname}} file", filename, " ", err.Error())
	}

	// read our opened jsonFile as a byte array.
	byteValue, _ := ioutil.ReadAll(jsonFile)

	var forRestore []*{{Structname}}DB

	err = json.Unmarshal(byteValue, &forRestore)
	if err != nil {
		log.Fatal("Cannot restore/unmarshal json {{Structname}} file", err.Error())
	}

	// fill up Map_{{Structname}}DBID_{{Structname}}DB
	for _, {{structname}}DB := range forRestore {

		{{structname}}DB_ID_atBackupTime := {{structname}}DB.ID
		{{structname}}DB.ID = 0
		query := backRepo{{Structname}}.db.Create({{structname}}DB)
		if query.Error != nil {
			log.Fatal(query.Error)
		}
		backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[{{structname}}DB.ID] = {{structname}}DB
		BackRepo{{Structname}}id_atBckpTime_newID[{{structname}}DB_ID_atBackupTime] = {{structname}}DB.ID
	}
}

// RestorePhaseTwo uses the map BackRepo{{Structname}}id_atBckpTime_newID
// to compute the new indexes
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) RestorePhaseTwo() {

	for _, {{structname}}DB := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {

		// next line of code is to avert unused variable compilation error
		_ = {{structname}}DB

		// insertion point for reindexing pointers encoding{{` + string(rune(BackRepoPointerEncodingFieldsReindexing)) + `}}
		// update database with new index encoding
		query := backRepo{{Structname}}.db.Model({{structname}}DB).Updates(*{{structname}}DB)
		if query.Error != nil {
			log.Fatal(query.Error)
		}
	}

}

// BackRepo{{Structname}}.ResetReversePointers resets the reverse pointers of all
// BackRepo instances of {{Structname}}
func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) ResetReversePointers(backRepo *BackRepoStruct) (Error error) {

	for idx, {{structname}} := range backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}Ptr {
		backRepo{{Structname}}.ResetReversePointersInstance(backRepo, idx, {{structname}})
	}

	return
}

func (backRepo{{Structname}} *BackRepo{{Structname}}Struct) ResetReversePointersInstance(backRepo *BackRepoStruct, idx uint, {{structname}} *models.{{Structname}}) (Error error) {

	// fetch matching {{structname}}DB
	if {{structname}}DB, ok := backRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB[idx]; ok {
		_ = {{structname}}DB // to avoid unused variable error if there are no reverse to reset

		// insertion point for reverse pointers reset{{` + string(rune(BackRepoPointerReversePointersReseting)) + `}}
		// end of insertion point for reverse pointers reset
	}

	return
}

// this map is used during the restoration process.
// it stores the ID at the backup time and is used for renumbering
var BackRepo{{Structname}}id_atBckpTime_newID map[uint]uint
`
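
Why two commit phases? A pointer field can only be encoded as a database ID once the pointed-to instance has received its ID, and with cyclic references no single-pass ordering works. The toy sketch below illustrates the same idea outside the generated code; Node and NodeRow are illustrative and not part of the package.

package main

import "fmt"

// Node is a toy staged instance with one pointer field.
type Node struct {
	Name string
	Next *Node
}

// NodeRow is its toy database row; the pointer is encoded as an ID.
type NodeRow struct {
	ID     uint
	Name   string
	NextID uint // 0 encodes a nil pointer
}

func main() {
	a := &Node{Name: "a"}
	b := &Node{Name: "b", Next: a}
	a.Next = b // cycle: no single-pass commit order can resolve both pointers

	staged := []*Node{a, b}
	ids := map[*Node]uint{}
	rows := map[uint]*NodeRow{}

	// phase one: create a row per instance to obtain its unique ID
	for i, n := range staged {
		id := uint(i + 1)
		ids[n] = id
		rows[id] = &NodeRow{ID: id, Name: n.Name}
	}

	// phase two: every target now has an ID, so encode the pointer fields
	for _, n := range staged {
		if n.Next != nil {
			rows[ids[n]].NextID = ids[n.Next]
		}
	}

	fmt.Printf("%+v\n%+v\n", *rows[1], *rows[2])
}
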
const BackRepoTemplateCode = `// do not modify, generated file
package orm

import (
	"bufio"
	"bytes"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sync"

	"{{PkgPathRoot}}/models"

	"github.com/tealeg/xlsx/v3"

	"github.com/glebarez/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/schema"
)

// BackRepoStruct supports callback functions
type BackRepoStruct struct {
	// insertion point for per struct back repo declarations{{` + string(rune(BackRepoPerStructDeclarations)) + `}}
	CommitFromBackNb uint // records commit increments when performed by the back

	PushFromFrontNb uint // records commit increments when performed by the front

	stage *models.StageStruct

	// the back repo can broadcast the CommitFromBackNb to all interested subscribers
	rwMutex     sync.RWMutex
	subscribers []chan int
}

func NewBackRepo(stage *models.StageStruct, filename string) (backRepo *BackRepoStruct) {

	// adjust naming strategy to the stack
	gormConfig := &gorm.Config{
		NamingStrategy: schema.NamingStrategy{
			TablePrefix: "github_com_fullstack_lang_gong_test_go_", // table name prefix
		},
	}
	db, err := gorm.Open(sqlite.Open(filename), gormConfig)
	if err != nil {
		panic("Failed to connect to database!")
	}

	// since testsim is a multi-threaded application, it is important to set up
	// only one open connection at a time
	dbDB_inMemory, err := db.DB()
	if err != nil {
		panic("cannot access DB of db" + err.Error())
	}
	// it is mandatory to serialize access through a single connection, otherwise bizarre errors occur
	dbDB_inMemory.SetMaxOpenConns(1)

	// adjust naming strategy to the stack
	db.Config.NamingStrategy = &schema.NamingStrategy{
		TablePrefix: "github_com_fullstack_lang_gong_test_go_", // table name prefix
	}

	err = db.AutoMigrate( // insertion point for reference to structs{{` + string(rune(BackRepoPerStructRefToStructDB)) + `}}
	)

	if err != nil {
		msg := err.Error()
		panic("problem with migration " + msg + " on package github.com/fullstack-lang/gong/test/go")
	}

	backRepo = new(BackRepoStruct)

	// insertion point for per struct back repo declarations{{` + string(rune(BackRepoPerStructInits)) + `}}

	stage.BackRepo = backRepo
	backRepo.stage = stage

	return
}

func (backRepo *BackRepoStruct) GetStage() (stage *models.StageStruct) {
	stage = backRepo.stage
	return
}

func (backRepo *BackRepoStruct) GetLastCommitFromBackNb() uint {
	return backRepo.CommitFromBackNb
}

func (backRepo *BackRepoStruct) GetLastPushFromFrontNb() uint {
	return backRepo.PushFromFrontNb
}

func (backRepo *BackRepoStruct) IncrementCommitFromBackNb() uint {
	if backRepo.stage.OnInitCommitCallback != nil {
		backRepo.stage.OnInitCommitCallback.BeforeCommit(backRepo.stage)
	}
	if backRepo.stage.OnInitCommitFromBackCallback != nil {
		backRepo.stage.OnInitCommitFromBackCallback.BeforeCommit(backRepo.stage)
	}
	backRepo.CommitFromBackNb = backRepo.CommitFromBackNb + 1

	backRepo.broadcastNbCommitToBack()
	
	return backRepo.CommitFromBackNb
}

func (backRepo *BackRepoStruct) IncrementPushFromFrontNb() uint {
	if backRepo.stage.OnInitCommitCallback != nil {
		backRepo.stage.OnInitCommitCallback.BeforeCommit(backRepo.stage)
	}
	if backRepo.stage.OnInitCommitFromFrontCallback != nil {
		backRepo.stage.OnInitCommitFromFrontCallback.BeforeCommit(backRepo.stage)
	}
	backRepo.PushFromFrontNb = backRepo.PushFromFrontNb + 1
	return backRepo.PushFromFrontNb
}

// Commit the BackRepoStruct inner variables and link to the database
func (backRepo *BackRepoStruct) Commit(stage *models.StageStruct) {
	// insertion point for per struct back repo phase one commit{{` + string(rune(BackRepoPerStructPhaseOneCommits)) + `}}

	// insertion point for per struct back repo phase two commit{{` + string(rune(BackRepoPerStructPhaseTwoCommits)) + `}}

	backRepo.IncrementCommitFromBackNb()
}

// Checkout the database into the stage
func (backRepo *BackRepoStruct) Checkout(stage *models.StageStruct) {
	// insertion point for per struct back repo phase one checkout{{` + string(rune(BackRepoPerStructPhaseOneCheckouts)) + `}}

	// insertion point for per struct back repo phase two checkout{{` + string(rune(BackRepoPerStructPhaseTwoCheckouts)) + `}}
}

// Backup the BackRepoStruct
func (backRepo *BackRepoStruct) Backup(stage *models.StageStruct, dirPath string) {
	os.MkdirAll(dirPath, os.ModePerm)

	// insertion point for per struct backup{{` + string(rune(BackRepoBackup)) + `}}
}

// Backup in XL the BackRepoStruct
func (backRepo *BackRepoStruct) BackupXL(stage *models.StageStruct, dirPath string) {
	os.MkdirAll(dirPath, os.ModePerm)

	// create a new XL file
	file := xlsx.NewFile()

	// insertion point for per struct backup{{` + string(rune(BackRepoBackupXL)) + `}}

	var b bytes.Buffer
	writer := bufio.NewWriter(&b)
	file.Write(writer)
	writer.Flush()
	theBytes := b.Bytes()

	filename := filepath.Join(dirPath, "bckp.xlsx")
	err := ioutil.WriteFile(filename, theBytes, 0644)
	if err != nil {
		log.Panic("Cannot write the XL file", err.Error())
	}
}

// Restore the database into the back repo
func (backRepo *BackRepoStruct) Restore(stage *models.StageStruct, dirPath string) {
	backRepo.stage.Commit()
	backRepo.stage.Reset()
	backRepo.stage.Checkout()

	//
	// restoration first phase (create DB instances with new IDs)
	//

	// insertion point for per struct backup{{` + string(rune(BackRepoRestorePhaseOne)) + `}}

	//
	// restoration second phase (reindex pointers with the new IDs)
	//

	// insertion point for per struct backup{{` + string(rune(BackRepoRestorePhaseTwo)) + `}}

	backRepo.stage.Checkout()
}

// RestoreXL restores the database into the back repo from an XL file
func (backRepo *BackRepoStruct) RestoreXL(stage *models.StageStruct, dirPath string) {

	// clean the stage
	backRepo.stage.Reset()

	// commit the cleaned stage
	backRepo.stage.Commit()

	// open an existing file
	filename := filepath.Join(dirPath, "bckp.xlsx")
	file, err := xlsx.OpenFile(filename)
	_ = file

	if err != nil {
		log.Panic("Cannot read the XL file", err.Error())
	}

	//
	// restoration first phase (create DB instances with new IDs)
	//

	// insertion point for per struct backup{{` + string(rune(BackRepoRestoreXLPhaseOne)) + `}}

	// commit the restored stage
	backRepo.stage.Commit()
}

func (backRepoStruct *BackRepoStruct) SubscribeToCommitNb() <-chan int {
	backRepoStruct.rwMutex.Lock()
	defer backRepoStruct.rwMutex.Unlock()

	ch := make(chan int)
	backRepoStruct.subscribers = append(backRepoStruct.subscribers, ch)
	return ch
}

func (backRepoStruct *BackRepoStruct) broadcastNbCommitToBack() {
	backRepoStruct.rwMutex.RLock()
	defer backRepoStruct.rwMutex.RUnlock()

	activeChannels := make([]chan int, 0)

	for _, ch := range backRepoStruct.subscribers {
		select {
		case ch <- int(backRepoStruct.CommitFromBackNb):
			activeChannels = append(activeChannels, ch)
		default:
			// Assume channel is no longer active; don't add to activeChannels
			log.Println("Channel no longer active")
			close(ch)
		}
	}
	backRepoStruct.subscribers = activeChannels
}
`
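
A usage sketch of the API this template generates, written against a hypothetical gong-generated stack; the import paths are placeholders and the construction of the *models.StageStruct is left to the generated models package.

package app

import (
	"example.com/app/go/models" // placeholder: a gong-generated models package
	"example.com/app/go/orm"    // placeholder: its generated orm package
)

// syncStage shows the back repo lifecycle for an already constructed stage.
func syncStage(stage *models.StageStruct) {
	// open (or create) the sqlite file backing the repo and run AutoMigrate
	backRepo := orm.NewBackRepo(stage, "stage.db")

	// persist staged instances: phase one creates rows and IDs,
	// phase two encodes pointer fields as IDs
	backRepo.Commit(stage)

	// rebuild the stage from the database content
	backRepo.Checkout(stage)

	// serialize each *DB table to a JSON file under ./bckp
	backRepo.Backup(stage, "bckp")

	// subscribe to the commit notifications broadcast by the back repo
	commits := backRepo.SubscribeToCommitNb()
	go func() {
		for range commits {
			// react to each CommitFromBackNb increment
		}
	}()
}
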
const GetInstanceDBFromInstanceTemplateCode = `// generated code - do not edit
package orm

import (
	"{{PkgPathRoot}}/models"
)

type GongstructDB interface {
	// insertion point for generic types
	// "int" is present to handle the case when no struct is present
	int{{` + string(rune(GetInstanceDBFromInstanceGonstructDBDefinition)) + `}}
}

func GetInstanceDBFromInstance[T models.Gongstruct, T2 GongstructDB](
	stage *models.StageStruct,
	backRepo *BackRepoStruct,
	instance *T) (ret *T2) {

	switch concreteInstance := any(instance).(type) {
	// insertion point for per struct backup{{` + string(rune(GetInstanceDBFromInstanceSwitchCaseGet)) + `}}
	default:
		_ = concreteInstance
	}
	return
}

func GetID[T models.Gongstruct](
	stage *models.StageStruct,
	backRepo *BackRepoStruct,
	instance *T) (id int) {

	switch inst := any(instance).(type) {
	// insertion point for per struct backup{{` + string(rune(GetInstanceDBFromInstanceSwitchCaseGetID)) + `}}
	default:
		_ = inst
	}
	return
}

func GetIDPointer[T models.PointerToGongstruct](
	stage *models.StageStruct,
	backRepo *BackRepoStruct,
	instance T) (id int) {

	switch inst := any(instance).(type) {
	// insertion point for per struct backup{{` + string(rune(GetInstanceDBFromInstanceSwitchCaseGetID)) + `}}
	default:
		_ = inst
	}
	return
}
`
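
A hedged sketch of calling the generated generics; Foo, its FooDB counterpart, and the import paths are placeholders for a generated stack.

package app

import (
	"example.com/app/go/models" // placeholder: a gong-generated models package
	"example.com/app/go/orm"    // placeholder: its generated orm package
)

// lookupIDs resolves a committed instance to its database ID and its *DB row
// without per-struct boilerplate, via the generated type switches.
func lookupIDs(stage *models.StageStruct, backRepo *orm.BackRepoStruct, foo *models.Foo) (int, *orm.FooDB) {
	id := orm.GetID(stage, backRepo, foo)
	fooDB := orm.GetInstanceDBFromInstance[models.Foo, orm.FooDB](stage, backRepo, foo)
	return id, fooDB
}
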
const GetReverseFieldOwnerName = `// generated code - do not edit
package orm

import (
	"{{PkgPathRoot}}/models"
)

func GetReverseFieldOwnerName[T models.Gongstruct](
	stage *models.StageStruct,
	backRepo *BackRepoStruct,
	instance *T,
	reverseField *models.ReverseField) (res string) {

	res = ""
	switch inst := any(instance).(type) {
	// insertion point{{` + string(rune(GetReverseFieldOwnerNameSwitch)) + `}}
	default:
		_ = inst
	}
	return
}

func GetReverseFieldOwner[T models.Gongstruct](
	stage *models.StageStruct,
	backRepo *BackRepoStruct,
	instance *T,
	reverseField *models.ReverseField) (res any) {

	res = nil
	switch inst := any(instance).(type) {
	// insertion point{{` + string(rune(GetReverseFieldOwnerSwitch)) + `}}
	default:
		_ = inst
	}
	return res
}
`
const IntSliceTemplateCode = `` /* 530-byte string literal not displayed */

Variables

var BackRepoDataSubTemplate map[string]string = map[string]string{

	string(rune(BackRepoDataSlice)): `

	{{Structname}}APIs []*{{Structname}}API`,

	string(rune(BackRepoDataSliceCopies)): `
	for _, {{structname}}DB := range backRepo.BackRepo{{Structname}}.Map_{{Structname}}DBID_{{Structname}}DB {

		var {{structname}}API {{Structname}}API
		{{structname}}API.ID = {{structname}}DB.ID
		{{structname}}API.{{Structname}}PointersEncoding = {{structname}}DB.{{Structname}}PointersEncoding
		{{structname}}DB.CopyBasicFieldsTo{{Structname}}_WOP(&{{structname}}API.{{Structname}}_WOP)

		backRepoData.{{Structname}}APIs = append(backRepoData.{{Structname}}APIs, &{{structname}}API)
	}
`,
}
var BackRepoFieldSubTemplateCode map[BackRepoPerStructSubTemplate]string = map[BackRepoPerStructSubTemplate]string{

	BackRepoDeclarationBasicField: `

	// Declaration for basic field {{structname}}DB.{{FieldName}}
	{{FieldName}}_Data sql.{{SqlNullType}}`,

	BackRepoDeclarationTimeField: `

	// Declaration for basic field {{structname}}DB.{{FieldName}}
	{{FieldName}}_Data sql.NullTime`,

	BackRepoDeclarationBasicBooleanField: `

	// Declaration for basic field {{structname}}DB.{{FieldName}}
	// provides the sql storage for the boolean
	{{FieldName}}_Data sql.NullBool`,

	BackRepoPointerEncoding: `

	// field {{FieldName}} is a pointer to another Struct (optional or 0..1)
	// This field is generated into another field to enable the HAS ONE association
	{{FieldName}}ID sql.NullInt64`,

	BackRepoSliceOfPointersEncoding: `

	// field {{FieldName}} is a slice of pointers to another Struct (0..n)
	{{FieldName}} IntSlice` + " `" + `gorm:"type:TEXT"` + "`",

	BackRepoCommitBasicField: `

	{{structname}}DB.{{FieldName}}_Data.{{SqlNullType}} = {{structname}}.{{FieldName}}
	{{structname}}DB.{{FieldName}}_Data.Valid = true`,

	BackRepoCommitBasicFieldEnum: `

	{{structname}}DB.{{FieldName}}_Data.String = {{structname}}.{{FieldName}}.ToString()
	{{structname}}DB.{{FieldName}}_Data.Valid = true`,

	BackRepoCommitBasicFieldInt: `

	{{structname}}DB.{{FieldName}}_Data.Int64 = int64({{structname}}.{{FieldName}})
	{{structname}}DB.{{FieldName}}_Data.Valid = true`,

	BackRepoCommitTimeField: `

	{{structname}}DB.{{FieldName}}_Data.Time = {{structname}}.{{FieldName}}
	{{structname}}DB.{{FieldName}}_Data.Valid = true`,

	BackRepoCommitBasicBooleanField: `

	{{structname}}DB.{{FieldName}}_Data.Bool = {{structname}}.{{FieldName}}
	{{structname}}DB.{{FieldName}}_Data.Valid = true`,

	BackRepoCommitPointerToStructField: `
		// commit pointer value {{structname}}.{{FieldName}} translates to updating {{structname}}DB.{{FieldName}}ID
		{{structname}}DB.{{FieldName}}ID.Valid = true // allow for a 0 value (nil association)
		if {{structname}}.{{FieldName}} != nil {
			if {{FieldName}}Id, ok := backRepo.BackRepo{{AssociationStructName}}.Map_{{AssociationStructName}}Ptr_{{AssociationStructName}}DBID[{{structname}}.{{FieldName}}]; ok {
				{{structname}}DB.{{FieldName}}ID.Int64 = int64({{FieldName}}Id)
				{{structname}}DB.{{FieldName}}ID.Valid = true
			}
		} else {
			{{structname}}DB.{{FieldName}}ID.Int64 = 0
			{{structname}}DB.{{FieldName}}ID.Valid = true
		}
`,

	BackRepoCommitSliceOfPointerToStructField: `
		// 1. reset
		{{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}} = make([]int, 0)
		// 2. encode
		for _, {{associationStructName}}AssocEnd := range {{structname}}.{{FieldName}} {
			{{associationStructName}}AssocEnd_DB :=
				backRepo.BackRepo{{AssociationStructName}}.Get{{AssociationStructName}}DBFrom{{AssociationStructName}}Ptr({{associationStructName}}AssocEnd)
			
			// the stage might be inconsistent, meaning that the {{associationStructName}}AssocEnd_DB might
			// be missing from the back repo. In this case, the commit operation stays robust and skips it.
			// An alternative would be to crash here to reveal the missing element.
			if {{associationStructName}}AssocEnd_DB == nil {
				continue
			}
			
			{{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}} =
				append({{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}}, int({{associationStructName}}AssocEnd_DB.ID))
		}
`,

	BackRepoCheckoutBasicField: `
	{{structname}}.{{FieldName}} = {{structname}}DB.{{FieldName}}_Data.{{SqlNullType}}`,

	BackRepoCheckoutTimeField: `
	{{structname}}.{{FieldName}} = {{structname}}DB.{{FieldName}}_Data.Time`,

	BackRepoCheckoutBasicFieldEnum: `
	{{structname}}.{{FieldName}}.FromString({{structname}}DB.{{FieldName}}_Data.String)`,

	BackRepoCheckoutBasicFieldInt: `
	{{structname}}.{{FieldName}} = {{FieldType}}({{structname}}DB.{{FieldName}}_Data.Int64)`,

	BackRepoCheckoutBasicFieldIntEnum: `
	{{structname}}.{{FieldName}} = models.{{FieldType}}({{structname}}DB.{{FieldName}}_Data.Int64)`,

	BackRepoCheckoutBasicFieldBoolean: `
	{{structname}}.{{FieldName}} = {{structname}}DB.{{FieldName}}_Data.Bool`,

	BackRepoCheckoutPointerToStructStageField: `
	// {{FieldName}} field
	{{structname}}.{{FieldName}} = nil
	if {{structname}}DB.{{FieldName}}ID.Int64 != 0 {
		{{structname}}.{{FieldName}} = backRepo.BackRepo{{AssociationStructName}}.Map_{{AssociationStructName}}DBID_{{AssociationStructName}}Ptr[uint({{structname}}DB.{{FieldName}}ID.Int64)]
	}`,

	BackRepoReindexingPointerToStruct: `
		// reindexing {{FieldName}} field
		if {{structname}}DB.{{FieldName}}ID.Int64 != 0 {
			{{structname}}DB.{{FieldName}}ID.Int64 = int64(BackRepo{{AssociationStructName}}id_atBckpTime_newID[uint({{structname}}DB.{{FieldName}}ID.Int64)])
			{{structname}}DB.{{FieldName}}ID.Valid = true
		}
`,

	BackRepoCheckoutSliceOfPointerToStructStageField: `
	// This loop redeems {{structname}}.{{FieldName}} in the stage from the encoding in the back repo:
	// it parses the encoded slice of {{AssociationStructName}} IDs and appends the matching
	// stage instances
	// 1. reset the slice
	{{structname}}.{{FieldName}} = {{structname}}.{{FieldName}}[:0]
	for _, _{{AssociationStructName}}id := range {{structname}}DB.{{Structname}}PointersEncoding.{{FieldName}} {
		{{structname}}.{{FieldName}} = append({{structname}}.{{FieldName}}, backRepo.BackRepo{{AssociationStructName}}.Map_{{AssociationStructName}}DBID_{{AssociationStructName}}Ptr[uint(_{{AssociationStructName}}id)])
	}
`,
}
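
To see a field sub-template in action, the runnable snippet below applies the placeholder substitution to BackRepoCommitBasicField for a hypothetical Foo struct with a "Name string" field; the replacement values ("foo", "Name", "String") are illustrative, as the generator chooses them per field and per field type.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// BackRepoCommitBasicField sub-template, copied from the map above
	sub := "\n\t{{structname}}DB.{{FieldName}}_Data.{{SqlNullType}} = {{structname}}.{{FieldName}}\n\t{{structname}}DB.{{FieldName}}_Data.Valid = true"

	r := strings.NewReplacer(
		"{{structname}}", "foo",
		"{{FieldName}}", "Name",
		"{{SqlNullType}}", "String",
	)

	// prints the two lines the generator would splice into
	// CopyBasicFieldsFromFoo for a "Name string" field
	fmt.Println(r.Replace(sub))
}
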
var BackRepoSubTemplate map[string]string = map[string]string{

	string(rune(BackRepoPerStructDeclarations)): `
	BackRepo{{Structname}} BackRepo{{Structname}}Struct
`,

	string(rune(BackRepoPerStructInits)): `
	backRepo.BackRepo{{Structname}} = BackRepo{{Structname}}Struct{
		Map_{{Structname}}DBID_{{Structname}}Ptr: make(map[uint]*models.{{Structname}}, 0),
		Map_{{Structname}}DBID_{{Structname}}DB:  make(map[uint]*{{Structname}}DB, 0),
		Map_{{Structname}}Ptr_{{Structname}}DBID: make(map[*models.{{Structname}}]uint, 0),

		db:    db,
		stage: stage,
	}`,

	string(rune(BackRepoPerStructRefToStructDB)): `
		&{{Structname}}DB{},`,

	string(rune(BackRepoPerStructPhaseOneCommits)): `
	backRepo.BackRepo{{Structname}}.CommitPhaseOne(stage)`,

	string(rune(BackRepoPerStructPhaseTwoCommits)): `
	backRepo.BackRepo{{Structname}}.CommitPhaseTwo(backRepo)`,

	string(rune(BackRepoPerStructPhaseOneCheckouts)): `
	backRepo.BackRepo{{Structname}}.CheckoutPhaseOne()`,

	string(rune(BackRepoPerStructPhaseTwoCheckouts)): `
	backRepo.BackRepo{{Structname}}.CheckoutPhaseTwo(backRepo)`,

	string(rune(BackRepoInitAndCommit)): `
	map_{{Structname}}DBID_{{Structname}}DB = nil
	map_{{Structname}}Ptr_{{Structname}}DBID = nil
	map_{{Structname}}DBID_{{Structname}}Ptr = nil
	if err := BackRepo{{Structname}}Init(
		CreateMode,
		db); err != nil {
		return err
	}
`,

	string(rune(BackRepoInitAndCheckout)): `
	map_{{Structname}}DBID_{{Structname}}DB = nil
	map_{{Structname}}Ptr_{{Structname}}DBID = nil
	map_{{Structname}}DBID_{{Structname}}Ptr = nil
	if err := BackRepo{{Structname}}Init(
		CreateMode,
		db); err != nil {
		err := errors.New("AllORMToModels, CreateMode Translation of {{Structname}} failed")
		return err
	}
`,

	string(rune(BackRepoCheckout)): `
	if err := BackRepo{{Structname}}Init(
		UpdateMode,
		db); err != nil {
		err := errors.New("AllORMToModels, UpdateMode Translation of {{Structname}} failed")
		return err
	}
`,

	string(rune(BackRepoCommit)): `
	if err := BackRepo{{Structname}}Init(
		UpdateMode,
		db); err != nil {
		return err
	}
`,

	string(rune(BackRepoBackup)): `
	backRepo.BackRepo{{Structname}}.Backup(dirPath)`,

	string(rune(BackRepoBackupXL)): `
	backRepo.BackRepo{{Structname}}.BackupXL(file)`,

	string(rune(BackRepoRestorePhaseOne)): `
	backRepo.BackRepo{{Structname}}.RestorePhaseOne(dirPath)`,

	string(rune(BackRepoRestoreXLPhaseOne)): `
	backRepo.BackRepo{{Structname}}.RestoreXLPhaseOne(file)`,

	string(rune(BackRepoRestorePhaseTwo)): `
	backRepo.BackRepo{{Structname}}.RestorePhaseTwo()`,
}
var GetInstanceDBFromInstanceSubTemplate map[string]string = map[string]string{

	string(rune(GetInstanceDBFromInstanceSwitchCaseGetID)): `
	case *models.{{Structname}}:
		tmp := GetInstanceDBFromInstance[models.{{Structname}}, {{Structname}}DB](
			stage, backRepo, inst,
		)
		id = int(tmp.ID)`,
	string(rune(GetInstanceDBFromInstanceSwitchCaseGet)): `
	case *models.{{Structname}}:
		{{structname}}Instance := any(concreteInstance).(*models.{{Structname}})
		ret2 := backRepo.BackRepo{{Structname}}.Get{{Structname}}DBFrom{{Structname}}Ptr({{structname}}Instance)
		ret = any(ret2).(*T2)`,
	string(rune(GetInstanceDBFromInstanceGonstructDBDefinition)): ` | {{Structname}}DB`,
}
var GetReverseFieldOwnerNameSubSubTemplateCode map[GetReverseFieldOwnerNameSubTemplateId]string = map[GetReverseFieldOwnerNameSubTemplateId]string{

	GetReverseFieldOwnerNameMasterSwitchCodeStart: `
		case "{{AssocStructName}}":
			switch reverseField.Fieldname {`,
	GetReverseFieldOwnerNameSwitchCode: `
			case "{{FieldName}}":
				if _{{assocStructName}}, ok := stage.{{AssocStructName}}_{{FieldName}}_reverseMap[inst]; ok {
					res = _{{assocStructName}}.Name
				}`,
	GetReverseFieldOwnerSwitchCode: `
			case "{{FieldName}}":
				res = stage.{{AssocStructName}}_{{FieldName}}_reverseMap[inst]`,
	GetReverseFieldOwnerNameMasterSwitchCodeEnd: `
			}`,
}
var GetReverseFieldOwnerNameSubTemplateCode map[GetReverseFieldOwnerNameId]string = map[GetReverseFieldOwnerNameId]string{
	GetReverseFieldOwnerNameSwitch: `
	case *models.{{Structname}}:
		tmp := GetInstanceDBFromInstance[models.{{Structname}}, {{Structname}}DB](
			stage, backRepo, inst,
		)
		_ = tmp
		switch reverseField.GongstructName {
		// insertion point{{fieldToFormCodeName}}
		}
`,
	GetReverseFieldOwnerSwitch: `
	case *models.{{Structname}}:
		tmp := GetInstanceDBFromInstance[models.{{Structname}}, {{Structname}}DB](
			stage, backRepo, inst,
		)
		_ = tmp
		switch reverseField.GongstructName {
		// insertion point{{fieldToFormCode}}
		}
`,
}

Functions

func CodeGeneratorGetReverseFieldOwnerName

func CodeGeneratorGetReverseFieldOwnerName(
	modelPkg *models.ModelPkg,
	pkgName string,
	pkgPath string,
	pkgGoPath string)

func MultiCodeGeneratorBackRepo

func MultiCodeGeneratorBackRepo(
	modelPkg *models.ModelPkg,
	pkgName string,
	pkgGoPath string,
	dirPath string)

MultiCodeGeneratorBackRepo parses modelPkg and generates the code for the back repository

func ReplaceInFile

func ReplaceInFile(filePath string, toReplace string, replacement string) error
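
Only the signature of ReplaceInFile is documented above; the following is a plausible sketch of such a helper, given as an assumption rather than the package's actual implementation.

package app

import (
	"os"
	"strings"
)

// replaceInFile is a hypothetical stand-in for ReplaceInFile: it reads the
// whole file, substitutes every occurrence of toReplace, and writes the
// result back.
func replaceInFile(filePath string, toReplace string, replacement string) error {
	content, err := os.ReadFile(filePath)
	if err != nil {
		return err
	}
	updated := strings.ReplaceAll(string(content), toReplace, replacement)
	return os.WriteFile(filePath, []byte(updated), 0644)
}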

Types

type BackRepoDataSubTemplateInsertion

type BackRepoDataSubTemplateInsertion int
const (
	BackRepoDataSlice BackRepoDataSubTemplateInsertion = iota
	BackRepoDataSliceCopies
)

type BackRepoInsertionPoint

type BackRepoInsertionPoint int

insertion points

const (
	BackRepoBasicFieldsDeclaration BackRepoInsertionPoint = iota
	BackRepoBasicAndTimeFieldsName
	BackRepoWOPInitialIndex
	BackRepoBasicAndTimeFieldsWOPDeclaration
	BackRepoPointerEncodingFieldsDeclaration
	BackRepoPointerEncodingFieldsWOPDeclaration
	BackRepoBasicFieldsCommit
	BackRepoPointerEncodingFieldsCommit
	BackRepoBasicFieldsCheckout
	BackRepoPointerEncodingFieldsCheckout
	BackRepoPointerEncodingFieldsReindexing
	BackRepoPointerReversePointersReseting

	BackRepoNbInsertionPoints
)

type BackRepoPerStructSubTemplate

type BackRepoPerStructSubTemplate int
const (
	BackRepoDeclarationBasicField BackRepoPerStructSubTemplate = iota
	BackRepoCommitBasicField
	BackRepoCheckoutBasicField

	BackRepoDeclarationTimeField
	BackRepoCommitTimeField
	BackRepoCheckoutTimeField

	BackRepoCommitBasicFieldEnum
	BackRepoCheckoutBasicFieldEnum

	BackRepoCommitBasicFieldInt
	BackRepoCheckoutBasicFieldInt
	BackRepoCheckoutBasicFieldIntEnum

	BackRepoDeclarationBasicBooleanField
	BackRepoCommitBasicBooleanField
	BackRepoCheckoutBasicFieldBoolean

	BackRepoPointerEncoding
	BackRepoSliceOfPointersEncoding

	BackRepoCommitPointerToStructField
	BackRepoCheckoutPointerToStructStageField
	BackRepoReindexingPointerToStruct

	BackRepoCommitSliceOfPointerToStructField
	BackRepoCheckoutSliceOfPointerToStructStageField
)

type BackRepoSubTemplateInsertion

type BackRepoSubTemplateInsertion int
const (
	BackRepoPerStructDeclarations BackRepoSubTemplateInsertion = iota
	BackRepoPerStructInits
	BackRepoPerStructRefToStructDB
	BackRepoPerStructPhaseOneCommits
	BackRepoPerStructPhaseTwoCommits
	BackRepoPerStructPhaseOneCheckouts
	BackRepoPerStructPhaseTwoCheckouts
	BackRepoInitAndCommit
	BackRepoInitAndCheckout
	BackRepoCommit
	BackRepoCheckout
	BackRepoBackup
	BackRepoBackupXL
	BackRepoRestorePhaseOne
	BackRepoRestoreXLPhaseOne
	BackRepoRestorePhaseTwo
)

type GetInstanceDBFromInstanceSubTemplateInsertion

type GetInstanceDBFromInstanceSubTemplateInsertion int
const (
	GetInstanceDBFromInstanceSwitchCaseGetID GetInstanceDBFromInstanceSubTemplateInsertion = iota
	GetInstanceDBFromInstanceSwitchCaseGet
	GetInstanceDBFromInstanceGonstructDBDefinition
)

type GetReverseFieldOwnerNameId

type GetReverseFieldOwnerNameId int
const (
	GetReverseFieldOwnerNameSwitch GetReverseFieldOwnerNameId = iota
	GetReverseFieldOwnerSwitch
	GetReverseFieldOwnerNameNb
)

type GetReverseFieldOwnerNameSubTemplateId

type GetReverseFieldOwnerNameSubTemplateId int
const (
	GetReverseFieldOwnerNameSwitchCode GetReverseFieldOwnerNameSubTemplateId = iota
	GetReverseFieldOwnerSwitchCode
	GetReverseFieldOwnerNameMasterSwitchCodeStart
	GetReverseFieldOwnerNameMasterSwitchCodeEnd
)
