Diffstat (limited to 'pkg/blockchain')
-rw-r--r-- | pkg/blockchain/blockchain.go | 261
-rw-r--r-- | pkg/blockchain/blockinfodatabase/blockinfodatabase.go | 32
-rw-r--r-- | pkg/blockchain/blockinfodatabase/blockrecord.go | 65
-rw-r--r-- | pkg/blockchain/blockinfodatabase/config.go | 12
-rw-r--r-- | pkg/blockchain/chainwriter/chainwriter.go | 127
-rw-r--r-- | pkg/blockchain/chainwriter/config.go | 23
-rw-r--r-- | pkg/blockchain/chainwriter/fileinfo.go | 8
-rw-r--r-- | pkg/blockchain/chainwriter/readwrite.go | 41
-rw-r--r-- | pkg/blockchain/chainwriter/undoblock.go | 60
-rw-r--r-- | pkg/blockchain/coindatabase/coin.go | 29
-rw-r--r-- | pkg/blockchain/coindatabase/coindatabase.go | 265
-rw-r--r-- | pkg/blockchain/coindatabase/coinrecord.go | 48
-rw-r--r-- | pkg/blockchain/coindatabase/config.go | 15
-rw-r--r-- | pkg/blockchain/config.go | 39
14 files changed, 1025 insertions, 0 deletions
diff --git a/pkg/blockchain/blockchain.go b/pkg/blockchain/blockchain.go
new file mode 100644
index 0000000..c456be1
--- /dev/null
+++ b/pkg/blockchain/blockchain.go
@@ -0,0 +1,261 @@
+package blockchain
+
+import (
+	"Chain/pkg/block"
+	"Chain/pkg/blockchain/blockinfodatabase"
+	"Chain/pkg/blockchain/chainwriter"
+	"Chain/pkg/blockchain/coindatabase"
+	"Chain/pkg/utils"
+)
+
+// BlockChain is the main type of this project.
+// Length is the length of the active chain.
+// LastBlock is the last block of the active chain.
+// LastHash is the hash of the last block of the active chain.
+// UnsafeHashes are the hashes of the "unsafe" blocks on the
+// active chain. These "unsafe" blocks may be reverted during a
+// fork.
+// maxHashes is the number of unsafe hashes that the chain keeps track of.
+// BlockInfoDB is a pointer to a block info database.
+// ChainWriter is a pointer to a chain writer.
+// CoinDB is a pointer to a coin database.
+type BlockChain struct {
+	Length uint32
+	LastBlock *block.Block
+	LastHash string
+	UnsafeHashes []string
+	maxHashes int
+
+	BlockInfoDB *blockinfodatabase.BlockInfoDatabase
+	ChainWriter *chainwriter.ChainWriter
+	CoinDB *coindatabase.CoinDatabase
+}
+
+// New returns a blockchain given a Config.
+func New(config *Config) *BlockChain {
+	genBlock := GenesisBlock(config)
+	hash := genBlock.Hash()
+	bc := &BlockChain{
+		Length: 1,
+		LastBlock: genBlock,
+		LastHash: hash,
+		UnsafeHashes: []string{},
+		maxHashes: 6,
+		BlockInfoDB: blockinfodatabase.New(blockinfodatabase.DefaultConfig()),
+		ChainWriter: chainwriter.New(chainwriter.DefaultConfig()),
+		CoinDB: coindatabase.New(coindatabase.DefaultConfig()),
+	}
+	// have to store the genesis block
+	bc.CoinDB.StoreBlock(genBlock.Transactions, true)
+	ub := &chainwriter.UndoBlock{}
+	br := bc.ChainWriter.StoreBlock(genBlock, ub, 1)
+	bc.BlockInfoDB.StoreBlockRecord(hash, br)
+	return bc
+}
+
+// GenesisBlock creates the genesis Block, using the Config's
+// InitialSubsidy and GenesisPublicKey.
+func GenesisBlock(config *Config) *block.Block {
+	txo := &block.TransactionOutput{
+		Amount: config.InitialSubsidy,
+		LockingScript: config.GenesisPublicKey,
+	}
+	genTx := &block.Transaction{
+		Version: 0,
+		Inputs: nil,
+		Outputs: []*block.TransactionOutput{txo},
+		LockTime: 0,
+	}
+	return &block.Block{
+		Header: &block.Header{
+			Version: 0,
+			PreviousHash: "",
+			MerkleRoot: "",
+			DifficultyTarget: "",
+			Nonce: 0,
+			Timestamp: 0,
+		},
+		Transactions: []*block.Transaction{genTx},
+	}
+}
+
+// HandleBlock handles a new Block. At a high level, it:
+// (1) Validates and stores the Block.
+// (2) Stores the Block and resulting UndoBlock to Disk.
+// (3) Stores the BlockRecord in the BlockInfoDatabase.
+// (4) Handles a fork, if necessary.
+// (5) Updates the BlockChain's fields.
+func (bc *BlockChain) HandleBlock(b *block.Block) {
+	//TODO
+}
+
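HandleBlock is left as a stub in this commit. Purely as an illustration of how the five steps above could be wired together from helpers that appear elsewhere in this diff (ValidateBlock, makeUndoBlock, the two StoreBlock methods, StoreBlockRecord, appendsToActiveChain, getForkedBlocks, getBlocksAndUndoBlocks, UndoCoins), here is a hedged sketch, not the committed implementation; the height bookkeeping and the unsafe-hash/fork policy in particular are assumptions.

// Sketch only: one possible shape for HandleBlock, assuming the helpers in
// this package behave as their doc comments describe.
func (bc *BlockChain) handleBlockSketch(b *block.Block) {
	// (1) validate the block's transactions against the coin database
	if !bc.CoinDB.ValidateBlock(b.Transactions) {
		return
	}
	appendsToActive := bc.appendsToActiveChain(b)

	// (2) + (3) store the coins, the block, its undo block, and the record
	bc.CoinDB.StoreBlock(b.Transactions, appendsToActive)
	ub := bc.makeUndoBlock(b.Transactions)
	br := bc.ChainWriter.StoreBlock(b, ub, bc.Length+1)
	hash := b.Hash()
	bc.BlockInfoDB.StoreBlockRecord(hash, br)

	// (4) on a fork, revert the unsafe blocks of the current branch
	// (the exact switch-over policy is an assumption)
	if !appendsToActive {
		forkedBlocks := bc.getForkedBlocks(b.Header.PreviousHash)
		blocks, undoBlocks := bc.getBlocksAndUndoBlocks(len(forkedBlocks))
		bc.CoinDB.UndoCoins(blocks, undoBlocks)
	}

	// (5) update the chain's own fields, keeping at most maxHashes unsafe hashes
	bc.UnsafeHashes = append(bc.UnsafeHashes, hash)
	if len(bc.UnsafeHashes) > bc.maxHashes {
		bc.UnsafeHashes = bc.UnsafeHashes[1:]
	}
	bc.Length++
	bc.LastBlock = b
	bc.LastHash = hash
}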
+// makeUndoBlock returns an UndoBlock given a slice of Transactions.
+func (bc *BlockChain) makeUndoBlock(txs []*block.Transaction) *chainwriter.UndoBlock {
+	var transactionHashes []string
+	var outputIndexes []uint32
+	var amounts []uint32
+	var lockingScripts []string
+	for _, tx := range txs {
+		for _, txi := range tx.Inputs {
+			cl := coindatabase.CoinLocator{
+				ReferenceTransactionHash: txi.ReferenceTransactionHash,
+				OutputIndex: txi.OutputIndex,
+			}
+			coin := bc.CoinDB.GetCoin(cl)
+			// if the coin is nil it means this isn't even a possible fork
+			if coin == nil {
+				return &chainwriter.UndoBlock{
+					TransactionInputHashes: nil,
+					OutputIndexes: nil,
+					Amounts: nil,
+					LockingScripts: nil,
+				}
+			}
+			transactionHashes = append(transactionHashes, txi.ReferenceTransactionHash)
+			outputIndexes = append(outputIndexes, txi.OutputIndex)
+			amounts = append(amounts, coin.TransactionOutput.Amount)
+			lockingScripts = append(lockingScripts, coin.TransactionOutput.LockingScript)
+		}
+	}
+	return &chainwriter.UndoBlock{
+		TransactionInputHashes: transactionHashes,
+		OutputIndexes: outputIndexes,
+		Amounts: amounts,
+		LockingScripts: lockingScripts,
+	}
+}
+
+// getBlock uses the ChainWriter to retrieve a Block from Disk,
+// given that Block's hash.
+func (bc *BlockChain) getBlock(blockHash string) *block.Block {
+	br := bc.BlockInfoDB.GetBlockRecord(blockHash)
+	fi := &chainwriter.FileInfo{
+		FileName: br.BlockFile,
+		StartOffset: br.BlockStartOffset,
+		EndOffset: br.BlockEndOffset,
+	}
+	return bc.ChainWriter.ReadBlock(fi)
+}
+
+// getUndoBlock uses the ChainWriter to retrieve an UndoBlock
+// from Disk, given the corresponding Block's hash.
+func (bc *BlockChain) getUndoBlock(blockHash string) *chainwriter.UndoBlock {
+	br := bc.BlockInfoDB.GetBlockRecord(blockHash)
+	fi := &chainwriter.FileInfo{
+		FileName: br.UndoFile,
+		StartOffset: br.UndoStartOffset,
+		EndOffset: br.UndoEndOffset,
+	}
+	return bc.ChainWriter.ReadUndoBlock(fi)
+}
+
+// GetBlocks retrieves a slice of blocks from the main chain given a
+// starting and ending height, inclusive. Given a chain of length 50,
+// GetBlocks(10, 20) returns blocks 10 through 20.
+func (bc *BlockChain) GetBlocks(start, end uint32) []*block.Block {
+	if start >= end || end <= 0 || start <= 0 || end > bc.Length {
+		utils.Debug.Printf("cannot get chain blocks with values start: %v end: %v", start, end)
+		return nil
+	}
+
+	var blocks []*block.Block
+	currentHeight := bc.Length
+	nextHash := bc.LastBlock.Hash()
+
+	for currentHeight >= start {
+		br := bc.BlockInfoDB.GetBlockRecord(nextHash)
+		fi := &chainwriter.FileInfo{
+			FileName: br.BlockFile,
+			StartOffset: br.BlockStartOffset,
+			EndOffset: br.BlockEndOffset,
+		}
+		if currentHeight <= end {
+			nextBlock := bc.ChainWriter.ReadBlock(fi)
+			blocks = append(blocks, nextBlock)
+		}
+		nextHash = br.Header.PreviousHash
+		currentHeight--
+	}
+	return reverseBlocks(blocks)
+}
+
+// GetHashes retrieves a slice of hashes from the main chain given a
+// starting and ending height, inclusive. Given a BlockChain of length
+// 50, GetHashes(10, 20) returns the hashes of Blocks 10 through 20.
+func (bc *BlockChain) GetHashes(start, end uint32) []string {
+	if start >= end || end <= 0 || start <= 0 || end > bc.Length {
+		utils.Debug.Printf("cannot get chain hashes with values start: %v end: %v", start, end)
+		return nil
+	}
+
+	var hashes []string
+	currentHeight := bc.Length
+	nextHash := bc.LastBlock.Hash()
+
+	for currentHeight >= start {
+		br := bc.BlockInfoDB.GetBlockRecord(nextHash)
+		if currentHeight <= end {
+			hashes = append(hashes, nextHash)
+		}
+		nextHash = br.Header.PreviousHash
+		currentHeight--
+	}
+	return reverseHashes(hashes)
+}
+
+// appendsToActiveChain returns whether a Block appends to the
+// BlockChain's active chain or not.
+func (bc *BlockChain) appendsToActiveChain(b *block.Block) bool {
+	return bc.LastBlock.Hash() == b.Header.PreviousHash
+}
+
+// getForkedBlocks returns a slice of Blocks given a starting hash.
+// It returns a maximum of maxHashes Blocks, where maxHashes is the
+// BlockChain's maximum number of unsafe hashes.
+func (bc *BlockChain) getForkedBlocks(startHash string) []*block.Block {
+	unsafeHashes := make(map[string]bool)
+	for _, h := range bc.UnsafeHashes {
+		unsafeHashes[h] = true
+	}
+	var forkedBlocks []*block.Block
+	nextHash := startHash
+	for i := 0; i < len(bc.UnsafeHashes); i++ {
+		forkedBlock := bc.getBlock(nextHash)
+		forkedBlocks = append(forkedBlocks, forkedBlock)
+		if _, ok := unsafeHashes[nextHash]; ok {
+			return forkedBlocks
+		}
+		nextHash = forkedBlock.Header.PreviousHash
+	}
+	return forkedBlocks
+}
+
+// getBlocksAndUndoBlocks returns a slice of n Blocks with a
+// corresponding slice of n UndoBlocks.
+func (bc *BlockChain) getBlocksAndUndoBlocks(n int) ([]*block.Block, []*chainwriter.UndoBlock) {
+	var blocks []*block.Block
+	var undoBlocks []*chainwriter.UndoBlock
+	nextHash := bc.LastHash
+	for i := 0; i < n; i++ {
+		b := bc.getBlock(nextHash)
+		ub := bc.getUndoBlock(nextHash)
+		blocks = append(blocks, b)
+		undoBlocks = append(undoBlocks, ub)
+		nextHash = b.Header.PreviousHash
+	}
+	return blocks, undoBlocks
+}
+
+// reverseBlocks returns a reversed slice of Blocks.
+func reverseBlocks(s []*block.Block) []*block.Block {
+	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+		s[i], s[j] = s[j], s[i]
+	}
+	return s
+}
+
+// reverseHashes returns a reversed slice of hashes.
+func reverseHashes(s []string) []string {
+	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+		s[i], s[j] = s[j], s[i]
+	}
+	return s
+}
diff --git a/pkg/blockchain/blockinfodatabase/blockinfodatabase.go b/pkg/blockchain/blockinfodatabase/blockinfodatabase.go
new file mode 100644
index 0000000..c49a625
--- /dev/null
+++ b/pkg/blockchain/blockinfodatabase/blockinfodatabase.go
@@ -0,0 +1,32 @@
+package blockinfodatabase
+
+import (
+	"Chain/pkg/utils"
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+// BlockInfoDatabase is a wrapper for a levelDB.
+type BlockInfoDatabase struct {
+	db *leveldb.DB
+}
+
+// New returns a BlockInfoDatabase given a Config.
+func New(config *Config) *BlockInfoDatabase {
+	db, err := leveldb.OpenFile(config.DatabasePath, nil)
+	if err != nil {
+		utils.Debug.Printf("Unable to initialize BlockInfoDatabase with path {%v}", config.DatabasePath)
+	}
+	return &BlockInfoDatabase{db: db}
+}
+
+// StoreBlockRecord stores a BlockRecord in the BlockInfoDatabase.
+func (blockInfoDB *BlockInfoDatabase) StoreBlockRecord(hash string, blockRecord *BlockRecord) {
+	//TODO
+}
+
+// GetBlockRecord returns a BlockRecord from the BlockInfoDatabase given
+// the relevant block's hash.
+func (blockInfoDB *BlockInfoDatabase) GetBlockRecord(hash string) *BlockRecord {
+	//TODO
+	return nil
+}
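Both LevelDB methods above are stubs. Since BlockRecords already have protobuf helpers (EncodeBlockRecord/DecodeBlockRecord in blockrecord.go, next in this diff), a minimal sketch of the store/retrieve round trip could look like the following; it assumes the file would additionally import Chain/pkg/pro and google.golang.org/protobuf/proto, and it is an illustration rather than the committed code.

// Sketch only: persist and fetch BlockRecords keyed by block hash.
func (blockInfoDB *BlockInfoDatabase) storeBlockRecordSketch(hash string, blockRecord *BlockRecord) {
	data, err := proto.Marshal(EncodeBlockRecord(blockRecord))
	if err != nil {
		utils.Debug.Printf("Failed to marshal block record for hash {%v}", hash)
		return
	}
	if err := blockInfoDB.db.Put([]byte(hash), data, nil); err != nil {
		utils.Debug.Printf("Unable to store block record for hash {%v}", hash)
	}
}

func (blockInfoDB *BlockInfoDatabase) getBlockRecordSketch(hash string) *BlockRecord {
	data, err := blockInfoDB.db.Get([]byte(hash), nil)
	if err != nil {
		utils.Debug.Printf("Block record for hash {%v} not in leveldb", hash)
		return nil
	}
	pbr := &pro.BlockRecord{}
	if err := proto.Unmarshal(data, pbr); err != nil {
		utils.Debug.Printf("Failed to unmarshal block record for hash {%v}", hash)
		return nil
	}
	return DecodeBlockRecord(pbr)
}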
diff --git a/pkg/blockchain/blockinfodatabase/blockrecord.go b/pkg/blockchain/blockinfodatabase/blockrecord.go
new file mode 100644
index 0000000..8f8846a
--- /dev/null
+++ b/pkg/blockchain/blockinfodatabase/blockrecord.go
@@ -0,0 +1,65 @@
+package blockinfodatabase
+
+import (
+	"Chain/pkg/block"
+	"Chain/pkg/pro"
+)
+
+// BlockRecord contains information about where a Block
+// and its UndoBlock are stored on Disk.
+// Header is the Block's Header.
+// Height is the height of the Block.
+// NumberOfTransactions is the number of Transactions in the Block.
+// BlockFile is the name of the file where the Block is stored.
+// BlockStartOffset is the starting offset of the Block within the
+// BlockFile.
+// BlockEndOffset is the ending offset of the Block within
+// the BlockFile.
+// UndoFile is the name of the file where the UndoBlock is stored.
+// UndoStartOffset is the starting offset of the UndoBlock within
+// the UndoFile.
+// UndoEndOffset is the ending offset of the UndoBlock within the
+// UndoFile.
+type BlockRecord struct {
+	Header *block.Header
+	Height uint32
+	NumberOfTransactions uint32
+
+	BlockFile string
+	BlockStartOffset uint32
+	BlockEndOffset uint32
+
+	UndoFile string
+	UndoStartOffset uint32
+	UndoEndOffset uint32
+}
+
+// EncodeBlockRecord returns a pro.BlockRecord given a BlockRecord.
+func EncodeBlockRecord(br *BlockRecord) *pro.BlockRecord {
+	return &pro.BlockRecord{
+		Header: block.EncodeHeader(br.Header),
+		Height: br.Height,
+		NumberOfTransactions: br.NumberOfTransactions,
+		BlockFile: br.BlockFile,
+		BlockStartOffset: br.BlockStartOffset,
+		BlockEndOffset: br.BlockEndOffset,
+		UndoFile: br.UndoFile,
+		UndoStartOffset: br.UndoStartOffset,
+		UndoEndOffset: br.UndoEndOffset,
+	}
+}
+
+// DecodeBlockRecord returns a BlockRecord given a pro.BlockRecord.
+func DecodeBlockRecord(pbr *pro.BlockRecord) *BlockRecord {
+	return &BlockRecord{
+		Header: block.DecodeHeader(pbr.GetHeader()),
+		Height: pbr.GetHeight(),
+		NumberOfTransactions: pbr.GetNumberOfTransactions(),
+		BlockFile: pbr.GetBlockFile(),
+		BlockStartOffset: pbr.GetBlockStartOffset(),
+		BlockEndOffset: pbr.GetBlockEndOffset(),
+		UndoFile: pbr.GetUndoFile(),
+		UndoStartOffset: pbr.GetUndoStartOffset(),
+		UndoEndOffset: pbr.GetUndoEndOffset(),
+	}
+}
diff --git a/pkg/blockchain/blockinfodatabase/config.go b/pkg/blockchain/blockinfodatabase/config.go
new file mode 100644
index 0000000..ec1990b
--- /dev/null
+++ b/pkg/blockchain/blockinfodatabase/config.go
@@ -0,0 +1,12 @@
+package blockinfodatabase
+
+// Config is the BlockInfoDatabase's configuration options.
+type Config struct {
+	DatabasePath string
+}
+
+// DefaultConfig returns the default configuration for the
+// BlockInfoDatabase.
+func DefaultConfig() *Config {
+	return &Config{DatabasePath: "./blockinfodata"}
+}
diff --git a/pkg/blockchain/chainwriter/chainwriter.go b/pkg/blockchain/chainwriter/chainwriter.go
new file mode 100644
index 0000000..67a7d49
--- /dev/null
+++ b/pkg/blockchain/chainwriter/chainwriter.go
@@ -0,0 +1,127 @@
+package chainwriter
+
+import (
+	"Chain/pkg/block"
+	"Chain/pkg/blockchain/blockinfodatabase"
+	"Chain/pkg/pro"
+	"Chain/pkg/utils"
+	"google.golang.org/protobuf/proto"
+	"log"
+	"os"
+)
+
+// ChainWriter handles all I/O for the BlockChain. It stores and retrieves
+// Blocks and UndoBlocks.
+// See config.go for more information on its fields.
+// Block files are of the format:
+// "DataDirectory/BlockFileName_CurrentBlockFileNumber.FileExtension"
+// Ex: "data/block_0.txt"
+// UndoBlock files are of the format:
+// "DataDirectory/UndoFileName_CurrentUndoFileNumber.FileExtension"
+// Ex: "data/undo_0.txt"
+type ChainWriter struct {
+	// data storage information
+	FileExtension string
+	DataDirectory string
+
+	// block information
+	BlockFileName string
+	CurrentBlockFileNumber uint32
+	CurrentBlockOffset uint32
+	MaxBlockFileSize uint32
+
+	// undo block information
+	UndoFileName string
+	CurrentUndoFileNumber uint32
+	CurrentUndoOffset uint32
+	MaxUndoFileSize uint32
+}
+
+// New returns a ChainWriter given a Config.
+func New(config *Config) *ChainWriter {
+	if err := os.MkdirAll(config.DataDirectory, 0700); err != nil {
+		log.Fatalf("Could not create ChainWriter's data directory")
+	}
+	return &ChainWriter{
+		FileExtension: config.FileExtension,
+		DataDirectory: config.DataDirectory,
+		BlockFileName: config.BlockFileName,
+		CurrentBlockFileNumber: 0,
+		CurrentBlockOffset: 0,
+		MaxBlockFileSize: config.MaxBlockFileSize,
+		UndoFileName: config.UndoFileName,
+		CurrentUndoFileNumber: 0,
+		CurrentUndoOffset: 0,
+		MaxUndoFileSize: config.MaxUndoFileSize,
+	}
+}
+
+// StoreBlock stores a Block and its corresponding UndoBlock to Disk,
+// returning a BlockRecord that contains information for later retrieval.
+func (cw *ChainWriter) StoreBlock(bl *block.Block, undoBlock *UndoBlock, height uint32) *blockinfodatabase.BlockRecord {
+	// serialize block
+	b := block.EncodeBlock(bl)
+	serializedBlock, err := proto.Marshal(b)
+	if err != nil {
+		utils.Debug.Printf("Failed to marshal block")
+	}
+	// serialize undo block
+	ub := EncodeUndoBlock(undoBlock)
+	serializedUndoBlock, err := proto.Marshal(ub)
+	if err != nil {
+		utils.Debug.Printf("Failed to marshal undo block")
+	}
+	// write block to disk
+	bfi := cw.WriteBlock(serializedBlock)
+	// create an empty file info, which we will update if the function is passed a non-empty undo block
+	ufi := &FileInfo{}
+	if undoBlock.Amounts != nil {
+		ufi = cw.WriteUndoBlock(serializedUndoBlock)
+	}
+
+	return &blockinfodatabase.BlockRecord{
+		Header: bl.Header,
+		Height: height,
+		NumberOfTransactions: uint32(len(bl.Transactions)),
+		BlockFile: bfi.FileName,
+		BlockStartOffset: bfi.StartOffset,
+		BlockEndOffset: bfi.EndOffset,
+		UndoFile: ufi.FileName,
+		UndoStartOffset: ufi.StartOffset,
+		UndoEndOffset: ufi.EndOffset,
+	}
+}
+
+// WriteBlock writes a serialized Block to Disk and returns
+// a FileInfo with its storage information.
+func (cw *ChainWriter) WriteBlock(serializedBlock []byte) *FileInfo {
+	//TODO
+	return nil
+}
+
+// WriteUndoBlock writes a serialized UndoBlock to Disk and returns
+// a FileInfo with its storage information.
+func (cw *ChainWriter) WriteUndoBlock(serializedUndoBlock []byte) *FileInfo {
+	//TODO
+	return nil
+}
+
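WriteBlock and WriteUndoBlock are stubs as well. Going by the file-naming scheme in the ChainWriter comment above and the writeToDisk helper in readwrite.go (later in this diff), the block path could be sketched as below, with WriteUndoBlock mirroring it using the undo fields; the rotation rule once a file exceeds MaxBlockFileSize and the extra fmt import are assumptions.

// Sketch only: append a serialized block to the current block file and
// rotate to a new file once MaxBlockFileSize would be exceeded.
func (cw *ChainWriter) writeBlockSketch(serializedBlock []byte) *FileInfo {
	if cw.CurrentBlockOffset+uint32(len(serializedBlock)) > cw.MaxBlockFileSize {
		cw.CurrentBlockFileNumber++
		cw.CurrentBlockOffset = 0
	}
	// "DataDirectory/BlockFileName_CurrentBlockFileNumber.FileExtension", e.g. "data/block_0.txt"
	fileName := fmt.Sprintf("%v/%v_%v%v", cw.DataDirectory, cw.BlockFileName, cw.CurrentBlockFileNumber, cw.FileExtension)
	writeToDisk(fileName, serializedBlock)

	startOffset := cw.CurrentBlockOffset
	cw.CurrentBlockOffset += uint32(len(serializedBlock))
	return &FileInfo{
		FileName: fileName,
		StartOffset: startOffset,
		EndOffset: cw.CurrentBlockOffset,
	}
}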
+// ReadBlock returns a Block given a FileInfo.
+func (cw *ChainWriter) ReadBlock(fi *FileInfo) *block.Block {
+	bytes := readFromDisk(fi)
+	pb := &pro.Block{}
+	if err := proto.Unmarshal(bytes, pb); err != nil {
+		utils.Debug.Printf("failed to unmarshal block from file info {%v}", fi)
+	}
+	return block.DecodeBlock(pb)
+}
+
+// ReadUndoBlock returns an UndoBlock given a FileInfo.
+func (cw *ChainWriter) ReadUndoBlock(fi *FileInfo) *UndoBlock {
+	bytes := readFromDisk(fi)
+	pub := &pro.UndoBlock{}
+	if err := proto.Unmarshal(bytes, pub); err != nil {
+		utils.Debug.Printf("failed to unmarshal undo block from file info {%v}", fi)
+	}
+	return DecodeUndoBlock(pub)
+}
diff --git a/pkg/blockchain/chainwriter/config.go b/pkg/blockchain/chainwriter/config.go
new file mode 100644
index 0000000..e217f7a
--- /dev/null
+++ b/pkg/blockchain/chainwriter/config.go
@@ -0,0 +1,23 @@
+package chainwriter
+
+// Config is the ChainWriter's configuration options.
+type Config struct {
+	FileExtension string
+	DataDirectory string
+	BlockFileName string
+	UndoFileName string
+	MaxBlockFileSize uint32
+	MaxUndoFileSize uint32
+}
+
+// DefaultConfig returns the default Config for the ChainWriter.
+func DefaultConfig() *Config {
+	return &Config{
+		FileExtension: ".txt",
+		DataDirectory: "data",
+		BlockFileName: "block",
+		UndoFileName: "undo",
+		MaxBlockFileSize: 1024,
+		MaxUndoFileSize: 1024,
+	}
+}
diff --git a/pkg/blockchain/chainwriter/fileinfo.go b/pkg/blockchain/chainwriter/fileinfo.go
new file mode 100644
index 0000000..95987b0
--- /dev/null
+++ b/pkg/blockchain/chainwriter/fileinfo.go
@@ -0,0 +1,8 @@
+package chainwriter
+
+// FileInfo determines where a Block or UndoBlock is stored.
+type FileInfo struct {
+	FileName string
+	StartOffset uint32
+	EndOffset uint32
+}
diff --git a/pkg/blockchain/chainwriter/readwrite.go b/pkg/blockchain/chainwriter/readwrite.go
new file mode 100644
index 0000000..32944a4
--- /dev/null
+++ b/pkg/blockchain/chainwriter/readwrite.go
@@ -0,0 +1,41 @@
+package chainwriter
+
+import (
+	"log"
+	"os"
+)
+
+// writeToDisk appends a slice of bytes to a file.
+func writeToDisk(fileName string, data []byte) {
+	file, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		log.Fatalf("Unable to open file {%v}", fileName)
+	}
+	if _, err := file.Write(data); err != nil {
+		file.Close() // ignore error; Write error takes precedence
+		log.Fatalf("Failed to write to file {%v}", fileName)
+	}
+	if err := file.Close(); err != nil {
+		log.Fatalf("Failed to close file {%v}", fileName)
+	}
+}
+
+// readFromDisk returns a slice of bytes from a file, given a FileInfo.
+func readFromDisk(info *FileInfo) []byte {
+	file, err := os.Open(info.FileName)
+	if err != nil {
+		log.Fatalf("Unable to open file {%v}", info.FileName)
+	}
+	if _, err2 := file.Seek(int64(info.StartOffset), 0); err2 != nil {
+		log.Fatalf("Failed to seek to {%v} in file {%v}", info.StartOffset, info.FileName)
+	}
+	numBytes := info.EndOffset - info.StartOffset
+	buf := make([]byte, numBytes)
+	if n, err3 := file.Read(buf); uint32(n) != numBytes || err3 != nil {
+		log.Fatalf("Failed to read {%v} bytes from file {%v}", numBytes, info.FileName)
+	}
+	if err4 := file.Close(); err4 != nil {
+		log.Fatalf("Failed to close file {%v}", info.FileName)
+	}
+	return buf
+}
diff --git a/pkg/blockchain/chainwriter/undoblock.go b/pkg/blockchain/chainwriter/undoblock.go
new file mode 100644
index 0000000..5827f57
--- /dev/null
+++ b/pkg/blockchain/chainwriter/undoblock.go
@@ -0,0 +1,60 @@
+package chainwriter
+
+import "Chain/pkg/pro"
+
+// UndoBlock is used to reverse the side effects caused by a Block.
+// When the chain reverts a Block's Transactions, it must both (1)
+// remove newly created TransactionOutputs and (2) convert
+// TransactionInputs back into available TransactionOutputs.
+// This struct helps with (2).
+// TransactionInputHashes are the hashes of the TransactionInputs that
+// the UndoBlock must revert.
+// OutputIndexes are the OutputIndexes of the TransactionInputs.
+// Amounts are the amounts of the parent TransactionOutputs.
+// LockingScripts are the locking scripts of the parent TransactionOutputs.
+type UndoBlock struct {
+	TransactionInputHashes []string
+	OutputIndexes []uint32
+	Amounts []uint32
+	LockingScripts []string
+}
+
+// EncodeUndoBlock returns a pro.UndoBlock given an UndoBlock.
+func EncodeUndoBlock(ub *UndoBlock) *pro.UndoBlock {
+	var transactionInputHashes []string
+	var outputIndexes []uint32
+	var amounts []uint32
+	var lockingScripts []string
+	for i := 0; i < len(ub.TransactionInputHashes); i++ {
+		transactionInputHashes = append(transactionInputHashes, ub.TransactionInputHashes[i])
+		outputIndexes = append(outputIndexes, ub.OutputIndexes[i])
+		amounts = append(amounts, ub.Amounts[i])
+		lockingScripts = append(lockingScripts, ub.LockingScripts[i])
+	}
+	return &pro.UndoBlock{
+		TransactionInputHashes: transactionInputHashes,
+		OutputIndexes: outputIndexes,
+		Amounts: amounts,
+		LockingScripts: lockingScripts,
+	}
+}
+
+// DecodeUndoBlock returns an UndoBlock given a pro.UndoBlock.
+func DecodeUndoBlock(pub *pro.UndoBlock) *UndoBlock {
+	var transactionInputHashes []string
+	var outputIndexes []uint32
+	var amounts []uint32
+	var lockingScripts []string
+	for i := 0; i < len(pub.GetTransactionInputHashes()); i++ {
+		transactionInputHashes = append(transactionInputHashes, pub.GetTransactionInputHashes()[i])
+		outputIndexes = append(outputIndexes, pub.GetOutputIndexes()[i])
+		amounts = append(amounts, pub.GetAmounts()[i])
+		lockingScripts = append(lockingScripts, pub.GetLockingScripts()[i])
+	}
+	return &UndoBlock{
+		TransactionInputHashes: transactionInputHashes,
+		OutputIndexes: outputIndexes,
+		Amounts: amounts,
+		LockingScripts: lockingScripts,
+	}
+}
diff --git a/pkg/blockchain/coindatabase/coin.go b/pkg/blockchain/coindatabase/coin.go
new file mode 100644
index 0000000..4281ac1
--- /dev/null
+++ b/pkg/blockchain/coindatabase/coin.go
@@ -0,0 +1,29 @@
+package coindatabase
+
+import "Chain/pkg/block"
+
+// Coin is used by the CoinDatabase to keep track of unspent
+// TransactionOutputs.
+// TransactionOutput is the underlying TransactionOutput.
+// IsSpent is whether that TransactionOutput has been spent.
+type Coin struct {
+	TransactionOutput *block.TransactionOutput
+	IsSpent bool
+}
+
+// CoinLocator is a dumbed down TransactionInput, used
+// as a key to Coins in the CoinDatabase's mainCache.
+type CoinLocator struct {
+	ReferenceTransactionHash string
+	OutputIndex uint32
+}
+
+// makeCoinLocator returns a CoinLocator given a TransactionInput.
+func makeCoinLocator(txi *block.TransactionInput) CoinLocator {
+	return CoinLocator{
+		ReferenceTransactionHash: txi.ReferenceTransactionHash,
+		OutputIndex: txi.OutputIndex,
+	}
+}
diff --git a/pkg/blockchain/coindatabase/coindatabase.go b/pkg/blockchain/coindatabase/coindatabase.go
new file mode 100644
index 0000000..83b3026
--- /dev/null
+++ b/pkg/blockchain/coindatabase/coindatabase.go
@@ -0,0 +1,265 @@
+package coindatabase
+
+import (
+	"Chain/pkg/block"
+	"Chain/pkg/blockchain/chainwriter"
+	"Chain/pkg/pro"
+	"Chain/pkg/utils"
+	"fmt"
+	"github.com/syndtr/goleveldb/leveldb"
+	"google.golang.org/protobuf/proto"
+)
+
+// CoinDatabase keeps track of Coins.
+// db is a levelDB for persistent storage.
+// mainCache stores as many Coins as possible for rapid validation.
+// mainCacheSize is how many Coins are currently in the mainCache.
+// mainCacheCapacity is the maximum number of Coins that the mainCache
+// can store before it must flush.
+type CoinDatabase struct {
+	db *leveldb.DB
+	mainCache map[CoinLocator]*Coin
+	mainCacheSize uint32
+	mainCacheCapacity uint32
+}
+
+// New returns a CoinDatabase given a Config.
+func New(config *Config) *CoinDatabase {
+	db, err := leveldb.OpenFile(config.DatabasePath, nil)
+	if err != nil {
+		utils.Debug.Printf("Unable to initialize CoinDatabase with path {%v}", config.DatabasePath)
+	}
+	return &CoinDatabase{
+		db: db,
+		mainCache: make(map[CoinLocator]*Coin),
+		mainCacheSize: 0,
+		mainCacheCapacity: config.MainCacheCapacity,
+	}
+}
+
+// ValidateBlock returns whether a Block's Transactions are valid.
+func (coinDB *CoinDatabase) ValidateBlock(transactions []*block.Transaction) bool {
+	for _, tx := range transactions {
+		if err := coinDB.validateTransaction(tx); err != nil {
+			utils.Debug.Printf("%v", err)
+			return false
+		}
+	}
+	return true
+}
+
+// validateTransaction checks whether a Transaction's inputs are valid Coins.
+// If the Coins have already been spent or do not exist, validateTransaction
+// returns an error.
+func (coinDB *CoinDatabase) validateTransaction(transaction *block.Transaction) error {
+	for _, txi := range transaction.Inputs {
+		key := makeCoinLocator(txi)
+		if coin, ok := coinDB.mainCache[key]; ok {
+			if coin.IsSpent {
+				return fmt.Errorf("[validateTransaction] coin already spent")
+			}
+			continue
+		}
+		if data, err := coinDB.db.Get([]byte(txi.ReferenceTransactionHash), nil); err != nil {
+			return fmt.Errorf("[validateTransaction] coin not in leveldb")
+		} else {
+			pcr := &pro.CoinRecord{}
+			if err2 := proto.Unmarshal(data, pcr); err2 != nil {
+				utils.Debug.Printf("Failed to unmarshal record from hash {%v}: %v", txi.ReferenceTransactionHash, err2)
+			}
+			cr := DecodeCoinRecord(pcr)
+			if !contains(cr.OutputIndexes, txi.OutputIndex) {
+				return fmt.Errorf("[validateTransaction] coin record no longer contains the output required for this transaction input")
+			}
+		}
+	}
+	return nil
+}
+
+// UndoCoins handles reverting a Block. It:
+// (1) erases the Coins created by a Block and
+// (2) marks the Coins used to create those Transactions as unspent.
+func (coinDB *CoinDatabase) UndoCoins(blocks []*block.Block, undoBlocks []*chainwriter.UndoBlock) {
+	//TODO
+}
+
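UndoCoins is stubbed out too. A sketch of the reversal described in its comment, reusing removeCoinFromDB, getCoinRecordFromDB, addCoinsToRecord, and putRecordInDB from later in this file, might look as follows; it assumes pkg/block exposes a Transaction.Hash() method (not shown in this diff) and that reverted coins may simply be evicted from the mainCache.

// Sketch only: delete the coins each block created, then restore the coins
// its inputs spent, using the block's UndoBlock.
func (coinDB *CoinDatabase) undoCoinsSketch(blocks []*block.Block, undoBlocks []*chainwriter.UndoBlock) {
	for i, b := range blocks {
		// (1) erase the coins created by this block's transactions
		for _, tx := range b.Transactions {
			txHash := tx.Hash() // assumed helper from pkg/block
			for j := range tx.Outputs {
				cl := CoinLocator{ReferenceTransactionHash: txHash, OutputIndex: uint32(j)}
				if _, ok := coinDB.mainCache[cl]; ok {
					delete(coinDB.mainCache, cl)
					coinDB.mainCacheSize--
				}
				coinDB.removeCoinFromDB(txHash, cl)
			}
		}
		// (2) mark the coins spent by this block as unspent again
		ub := undoBlocks[i]
		for k, txHash := range ub.TransactionInputHashes {
			cl := CoinLocator{ReferenceTransactionHash: txHash, OutputIndex: ub.OutputIndexes[k]}
			if coin, ok := coinDB.mainCache[cl]; ok {
				coin.IsSpent = false
				continue
			}
			cr := coinDB.getCoinRecordFromDB(txHash)
			if cr == nil {
				cr = &CoinRecord{Version: 0}
			}
			restored := &chainwriter.UndoBlock{
				TransactionInputHashes: []string{txHash},
				OutputIndexes: []uint32{ub.OutputIndexes[k]},
				Amounts: []uint32{ub.Amounts[k]},
				LockingScripts: []string{ub.LockingScripts[k]},
			}
			coinDB.putRecordInDB(txHash, coinDB.addCoinsToRecord(cr, restored))
		}
	}
}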
+// addCoinsToRecord adds coins to a CoinRecord given an UndoBlock and
+// returns the updated CoinRecord.
+func (coinDB *CoinDatabase) addCoinsToRecord(cr *CoinRecord, ub *chainwriter.UndoBlock) *CoinRecord {
+	cr.OutputIndexes = append(cr.OutputIndexes, ub.OutputIndexes...)
+	cr.Amounts = append(cr.Amounts, ub.Amounts...)
+	cr.LockingScripts = append(cr.LockingScripts, ub.LockingScripts...)
+	return cr
+}
+
+// FlushMainCache flushes the mainCache to the db.
+func (coinDB *CoinDatabase) FlushMainCache() {
+	// update coin records
+	updatedCoinRecords := make(map[string]*CoinRecord)
+	for cl := range coinDB.mainCache {
+		// check whether we already updated this record
+		var cr *CoinRecord
+
+		// (1) get our coin record
+		// first check our map, in case we already updated the coin record given
+		// a previous coin
+		if cr2, ok := updatedCoinRecords[cl.ReferenceTransactionHash]; ok {
+			cr = cr2
+		} else {
+			// if we haven't already updated this coin record, retrieve it from the db
+			data, err := coinDB.db.Get([]byte(cl.ReferenceTransactionHash), nil)
+			if err != nil {
+				utils.Debug.Printf("[FlushMainCache] coin record not in leveldb")
+			}
+			pcr := &pro.CoinRecord{}
+			if err = proto.Unmarshal(data, pcr); err != nil {
+				utils.Debug.Printf("Failed to unmarshal record from hash {%v}: %v", cl.ReferenceTransactionHash, err)
+			}
+			cr = DecodeCoinRecord(pcr)
+		}
+		// (2) remove the coin from the record if it's been spent
+		if coinDB.mainCache[cl].IsSpent {
+			cr = coinDB.removeCoinFromRecord(cr, cl.OutputIndex)
+		}
+		updatedCoinRecords[cl.ReferenceTransactionHash] = cr
+		delete(coinDB.mainCache, cl)
+	}
+	coinDB.mainCacheSize = 0
+	// write the new records
+	for key, cr := range updatedCoinRecords {
+		if len(cr.OutputIndexes) <= 1 {
+			err := coinDB.db.Delete([]byte(key), nil)
+			if err != nil {
+				utils.Debug.Printf("[FlushMainCache] failed to delete key {%v}", key)
+			}
+		} else {
+			coinDB.putRecordInDB(key, cr)
+		}
+	}
+}
+
+// StoreBlock handles storing a newly minted Block. It:
+// (1) removes spent TransactionOutputs (if active),
+// (2) stores new TransactionOutputs as Coins in the mainCache (if active), and
+// (3) stores CoinRecords for the Transactions in the db.
+func (coinDB *CoinDatabase) StoreBlock(transactions []*block.Transaction, active bool) {
+	//TODO
+}
+
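StoreBlock is the remaining stub in this file. A sketch that follows the three steps in its comment — spend the inputs, cache the new outputs, persist a CoinRecord per transaction — could look like this; as above, Transaction.Hash() is assumed from pkg/block, and flushing when the mainCache reaches capacity is an assumed policy.

// Sketch only: spend inputs, cache new coins (for active-chain blocks), and
// persist one CoinRecord per transaction.
func (coinDB *CoinDatabase) storeBlockSketch(transactions []*block.Transaction, active bool) {
	if coinDB.mainCacheSize >= coinDB.mainCacheCapacity {
		coinDB.FlushMainCache()
	}
	for _, tx := range transactions {
		txHash := tx.Hash() // assumed helper from pkg/block
		if active {
			// (1) mark the coins consumed by this transaction as spent
			for _, txi := range tx.Inputs {
				cl := makeCoinLocator(txi)
				if coin, ok := coinDB.mainCache[cl]; ok {
					coin.IsSpent = true
				} else {
					coinDB.removeCoinFromDB(txi.ReferenceTransactionHash, cl)
				}
			}
			// (2) cache the newly created coins
			for i, txo := range tx.Outputs {
				cl := CoinLocator{ReferenceTransactionHash: txHash, OutputIndex: uint32(i)}
				coinDB.mainCache[cl] = &Coin{TransactionOutput: txo, IsSpent: false}
				coinDB.mainCacheSize++
			}
		}
		// (3) persist a CoinRecord for the transaction's outputs
		coinDB.putRecordInDB(txHash, coinDB.createCoinRecord(tx))
	}
}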
+// removeCoinFromDB removes a Coin from a CoinRecord, deleting the CoinRecord
+// from the db entirely if it is the last remaining Coin in the CoinRecord.
+func (coinDB *CoinDatabase) removeCoinFromDB(txHash string, cl CoinLocator) {
+	// retrieve the record from the db; delete it outright if this is its last
+	// coin, otherwise remove the coin and write the record back
+	cr := coinDB.getCoinRecordFromDB(txHash)
+	switch {
+	case cr == nil:
+		return
+	case len(cr.Amounts) <= 1:
+		if err := coinDB.db.Delete([]byte(txHash), nil); err != nil {
+			utils.Debug.Printf("[removeCoinFromDB] failed to remove {%v} from db", txHash)
+		}
+	default:
+		cr = coinDB.removeCoinFromRecord(cr, cl.OutputIndex)
+		coinDB.putRecordInDB(txHash, cr)
+	}
+}
+
+// putRecordInDB puts a CoinRecord into the db.
+func (coinDB *CoinDatabase) putRecordInDB(txHash string, cr *CoinRecord) {
+	record := EncodeCoinRecord(cr)
+	data, err := proto.Marshal(record)
+	if err != nil {
+		utils.Debug.Printf("Failed to marshal coin record for key {%v}", txHash)
+	}
+	if err2 := coinDB.db.Put([]byte(txHash), data, nil); err2 != nil {
+		utils.Debug.Printf("Unable to store coin record for key {%v}", txHash)
+	}
+}
+
+// removeCoinFromRecord returns an updated CoinRecord. It removes the Coin
+// with the given outputIndex, if the Coin exists in the CoinRecord.
+func (coinDB *CoinDatabase) removeCoinFromRecord(cr *CoinRecord, outputIndex uint32) *CoinRecord {
+	index := indexOf(cr.OutputIndexes, outputIndex)
+	if index < 0 {
+		return cr
+	}
+	cr.OutputIndexes = append(cr.OutputIndexes[:index], cr.OutputIndexes[index+1:]...)
+	cr.Amounts = append(cr.Amounts[:index], cr.Amounts[index+1:]...)
+	cr.LockingScripts = append(cr.LockingScripts[:index], cr.LockingScripts[index+1:]...)
+	return cr
+}
+
+// createCoinRecord returns a CoinRecord for the provided Transaction.
+func (coinDB *CoinDatabase) createCoinRecord(tx *block.Transaction) *CoinRecord {
+	var outputIndexes []uint32
+	var amounts []uint32
+	var lockingScripts []string
+	for i, txo := range tx.Outputs {
+		outputIndexes = append(outputIndexes, uint32(i))
+		amounts = append(amounts, txo.Amount)
+		lockingScripts = append(lockingScripts, txo.LockingScript)
+	}
+	cr := &CoinRecord{
+		Version: 0,
+		OutputIndexes: outputIndexes,
+		Amounts: amounts,
+		LockingScripts: lockingScripts,
+	}
+	return cr
+}
+
+// getCoinRecordFromDB returns a CoinRecord from the db given a hash.
+func (coinDB *CoinDatabase) getCoinRecordFromDB(txHash string) *CoinRecord {
+	if data, err := coinDB.db.Get([]byte(txHash), nil); err != nil {
+		utils.Debug.Printf("[getCoinRecordFromDB] coin not in leveldb")
+		return nil
+	} else {
+		pcr := &pro.CoinRecord{}
+		if err := proto.Unmarshal(data, pcr); err != nil {
+			utils.Debug.Printf("Failed to unmarshal record from hash {%v}: %v", txHash, err)
+		}
+		cr := DecodeCoinRecord(pcr)
+		return cr
+	}
+}
+
+// GetCoin returns a Coin given a CoinLocator. It first checks the
+// mainCache, then checks the db. If the Coin doesn't exist, it returns nil.
+func (coinDB *CoinDatabase) GetCoin(cl CoinLocator) *Coin {
+	if coin, ok := coinDB.mainCache[cl]; ok {
+		return coin
+	}
+	cr := coinDB.getCoinRecordFromDB(cl.ReferenceTransactionHash)
+	if cr == nil {
+		return nil
+	}
+	index := indexOf(cr.OutputIndexes, cl.OutputIndex)
+	if index < 0 {
+		return nil
+	}
+	return &Coin{
+		TransactionOutput: &block.TransactionOutput{
+			Amount: cr.Amounts[index],
+			LockingScript: cr.LockingScripts[index],
+		},
+		IsSpent: false,
+	}
+}
+
+// contains returns true if the uint32 slice s contains element e, false if it does not.
+func contains(s []uint32, e uint32) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+	return false
+}
+
+// indexOf returns the index of element e in the uint32 slice s, or -1 if the element does not exist.
+func indexOf(s []uint32, e uint32) int {
+	for i, a := range s {
+		if a == e {
+			return i
+		}
+	}
+	return -1
+}
diff --git a/pkg/blockchain/coindatabase/coinrecord.go b/pkg/blockchain/coindatabase/coinrecord.go
new file mode 100644
index 0000000..6de45c3
--- /dev/null
+++ b/pkg/blockchain/coindatabase/coinrecord.go
@@ -0,0 +1,48 @@
+package coindatabase
+
+import "Chain/pkg/pro"
+
+// CoinRecord is a record of which coins created by a Transaction
+// have been spent. It is stored in the CoinDatabase's db.
+type CoinRecord struct {
+	Version uint32
+	OutputIndexes []uint32
+	Amounts []uint32
+	LockingScripts []string
+}
+
+// EncodeCoinRecord returns a pro.CoinRecord given a CoinRecord.
+func EncodeCoinRecord(cr *CoinRecord) *pro.CoinRecord {
+	var outputIndexes []uint32
+	var amounts []uint32
+	var lockingScripts []string
+	for i := 0; i < len(cr.OutputIndexes); i++ {
+		outputIndexes = append(outputIndexes, cr.OutputIndexes[i])
+		amounts = append(amounts, cr.Amounts[i])
+		lockingScripts = append(lockingScripts, cr.LockingScripts[i])
+	}
+	return &pro.CoinRecord{
+		Version: cr.Version,
+		OutputIndexes: outputIndexes,
+		Amounts: amounts,
+		LockingScripts: lockingScripts,
+	}
+}
+
+// DecodeCoinRecord returns a CoinRecord given a pro.CoinRecord.
+func DecodeCoinRecord(pcr *pro.CoinRecord) *CoinRecord {
+	var outputIndexes []uint32
+	var amounts []uint32
+	var lockingScripts []string
+	for i := 0; i < len(pcr.GetOutputIndexes()); i++ {
+		outputIndexes = append(outputIndexes, pcr.GetOutputIndexes()[i])
+		amounts = append(amounts, pcr.GetAmounts()[i])
+		lockingScripts = append(lockingScripts, pcr.GetLockingScripts()[i])
+	}
+	return &CoinRecord{
+		Version: pcr.GetVersion(),
+		OutputIndexes: outputIndexes,
+		Amounts: amounts,
+		LockingScripts: lockingScripts,
+	}
+}
diff --git a/pkg/blockchain/coindatabase/config.go b/pkg/blockchain/coindatabase/config.go
new file mode 100644
index 0000000..bfb372b
--- /dev/null
+++ b/pkg/blockchain/coindatabase/config.go
@@ -0,0 +1,15 @@
+package coindatabase
+
+// Config is the CoinDatabase's configuration options.
+type Config struct {
+	DatabasePath string
+	MainCacheCapacity uint32
+}
+
+// DefaultConfig returns the CoinDatabase's default Config.
+func DefaultConfig() *Config {
+	return &Config{
+		DatabasePath: "./coindata",
+		MainCacheCapacity: 30,
+	}
+}
diff --git a/pkg/blockchain/config.go b/pkg/blockchain/config.go
new file mode 100644
index 0000000..0ea3e7b
--- /dev/null
+++ b/pkg/blockchain/config.go
@@ -0,0 +1,39 @@
+package blockchain
+
+import (
+	"Chain/pkg/blockchain/blockinfodatabase"
+	"Chain/pkg/blockchain/chainwriter"
+	"Chain/pkg/blockchain/coindatabase"
+)
+
+// Config is the BlockChain's configuration options.
+type Config struct {
+	GenesisPublicKey string
+	InitialSubsidy uint32
+	HasChn bool
+	BlockInfoDBPath string
+	ChainWriterDBPath string
+	CoinDBPath string
+}
+
+// GENPK is the public key that was used
+// for the genesis transaction on the
+// genesis block.
+var GENPK = "3059301306072a8648ce3d020106082a8648ce3d030107034200042418a20458559ae13a0d4bb6ac284c66a5cebb5689563d4cf573473d8c6d5abfa9a21a65dbb3ba2f2d930be7f763f940f9864abaf199a0f0d8d14bedda2dcad9"
+
+// GENPVK is the private key that was used
+// for the genesis transaction on the
+// genesis block.
+var GENPVK = "307702010104202456b0e8bed5c27dcadb044df1af8eaf714084b61a23d17359fb09f3c3f5fff5a00a06082a8648ce3d030107a144034200042418a20458559ae13a0d4bb6ac284c66a5cebb5689563d4cf573473d8c6d5abfa9a21a65dbb3ba2f2d930be7f763f940f9864abaf199a0f0d8d14bedda2dcad9"
+
+// DefaultConfig returns the default configuration for the blockchain.
+func DefaultConfig() *Config {
+	return &Config{
+		GenesisPublicKey: GENPK,
+		InitialSubsidy: 0,
+		HasChn: true,
+		BlockInfoDBPath: blockinfodatabase.DefaultConfig().DatabasePath,
+		ChainWriterDBPath: chainwriter.DefaultConfig().DataDirectory,
+		CoinDBPath: coindatabase.DefaultConfig().DatabasePath,
+	}
+}
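Taken together, the package is driven through the BlockChain type. A minimal usage sketch (the miner/network code that would normally supply blocks is outside this diff, so the incoming block below is hypothetical):

package main

import "Chain/pkg/blockchain"

func main() {
	// build a chain backed by the default databases and the genesis block
	bc := blockchain.New(blockchain.DefaultConfig())

	// blocks arriving from a miner or peer would be fed to HandleBlock;
	// newBlock is a stand-in for such a block (hypothetical)
	// bc.HandleBlock(newBlock)

	// once the chain has grown, inclusive slices of it can be read back
	blocks := bc.GetBlocks(1, 10)
	hashes := bc.GetHashes(1, 10)
	_, _ = blocks, hashes
}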