// Package blake3 implements the BLAKE3 cryptographic hash function.
package blake3

import (
	"encoding/binary"
	"errors"
	"hash"
	"io"
	"math"
	"math/bits"
)

const (
	blockSize = 64
	chunkSize = 1024
)

// flags
const (
	flagChunkStart = 1 << iota
	flagChunkEnd
	flagParent
	flagRoot
	flagKeyedHash
	flagDeriveKeyContext
	flagDeriveKeyMaterial
)

var iv = [8]uint32{
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
}

// helper functions for converting between bytes and BLAKE3 "words"

func bytesToWords(bytes []byte, words []uint32) {
	for i := range words {
		words[i] = binary.LittleEndian.Uint32(bytes[i*4:])
	}
}

func wordsToBytes(words []uint32, bytes []byte) {
	for i, w := range words {
		binary.LittleEndian.PutUint32(bytes[i*4:], w)
	}
}
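
// Both helpers use little-endian order, per the BLAKE3 spec. As a quick
// sanity sketch (not part of the package API), a 64-byte block round-trips
// losslessly through 16 words:
//
//	var block [64]byte
//	var words [16]uint32
//	bytesToWords(block[:], words[:])
//	wordsToBytes(words[:], block[:]) // restores the original bytes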

// The g function, split into two parts so that the compiler will inline it.
func gx(state *[16]uint32, a, b, c, d int, mx uint32) {
	state[a] += state[b] + mx
	state[d] = bits.RotateLeft32(state[d]^state[a], -16)
	state[c] += state[d]
	state[b] = bits.RotateLeft32(state[b]^state[c], -12)
}

func gy(state *[16]uint32, a, b, c, d int, my uint32) {
	state[a] += state[b] + my
	state[d] = bits.RotateLeft32(state[d]^state[a], -8)
	state[c] += state[d]
	state[b] = bits.RotateLeft32(state[b]^state[c], -7)
}
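
// Together, gx followed by gy perform one full application of the BLAKE3
// quarter-round G: four additions, four XORs, and right-rotations by 16, 12,
// 8, and 7 bits, mixing two message words (mx, my) into the state.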

// A node represents a chunk or parent in the BLAKE3 Merkle tree. In BLAKE3
// terminology, the elements of the bottom layer (aka "leaves") of the tree are
// called chunk nodes, and the elements of upper layers (aka "interior nodes")
// are called parent nodes.
//
// Computing a BLAKE3 hash involves splitting the input into chunk nodes, then
// repeatedly merging these nodes into parent nodes, until only a single "root"
// node remains. The root node can then be used to generate up to 2^64 - 1 bytes
// of pseudorandom output.
type node struct {
	// the chaining value from the previous state
	cv [8]uint32
	// the current state
	block    [16]uint32
	counter  uint64
	blockLen uint32
	flags    uint32
}
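
// When a node is compressed, cv supplies the first 8 words of the initial
// state; iv and the counter/blockLen/flags fields supply the other 8; and
// block supplies the 16 message words.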

// compress is the core hash function, generating 16 pseudorandom words from a
// node. When nodes are being merged into parents, only the first 8 words are
// used. When the root node is being used to generate output, the full 16 words
// are used.
func (n node) compress() [16]uint32 {
	state := [16]uint32{
		n.cv[0], n.cv[1], n.cv[2], n.cv[3],
		n.cv[4], n.cv[5], n.cv[6], n.cv[7],
		iv[0], iv[1], iv[2], iv[3],
		uint32(n.counter), uint32(n.counter >> 32), n.blockLen, n.flags,
	}

	// round1

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[0])
	gy(&state, 0, 4, 8, 12, n.block[1])
	gx(&state, 1, 5, 9, 13, n.block[2])
	gy(&state, 1, 5, 9, 13, n.block[3])
	gx(&state, 2, 6, 10, 14, n.block[4])
	gy(&state, 2, 6, 10, 14, n.block[5])
	gx(&state, 3, 7, 11, 15, n.block[6])
	gy(&state, 3, 7, 11, 15, n.block[7])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[8])
	gy(&state, 0, 5, 10, 15, n.block[9])
	gx(&state, 1, 6, 11, 12, n.block[10])
	gy(&state, 1, 6, 11, 12, n.block[11])
	gx(&state, 2, 7, 8, 13, n.block[12])
	gy(&state, 2, 7, 8, 13, n.block[13])
	gx(&state, 3, 4, 9, 14, n.block[14])
	gy(&state, 3, 4, 9, 14, n.block[15])

	// round2

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[2])
	gy(&state, 0, 4, 8, 12, n.block[6])
	gx(&state, 1, 5, 9, 13, n.block[3])
	gy(&state, 1, 5, 9, 13, n.block[10])
	gx(&state, 2, 6, 10, 14, n.block[7])
	gy(&state, 2, 6, 10, 14, n.block[0])
	gx(&state, 3, 7, 11, 15, n.block[4])
	gy(&state, 3, 7, 11, 15, n.block[13])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[1])
	gy(&state, 0, 5, 10, 15, n.block[11])
	gx(&state, 1, 6, 11, 12, n.block[12])
	gy(&state, 1, 6, 11, 12, n.block[5])
	gx(&state, 2, 7, 8, 13, n.block[9])
	gy(&state, 2, 7, 8, 13, n.block[14])
	gx(&state, 3, 4, 9, 14, n.block[15])
	gy(&state, 3, 4, 9, 14, n.block[8])

	// round3

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[3])
	gy(&state, 0, 4, 8, 12, n.block[4])
	gx(&state, 1, 5, 9, 13, n.block[10])
	gy(&state, 1, 5, 9, 13, n.block[12])
	gx(&state, 2, 6, 10, 14, n.block[13])
	gy(&state, 2, 6, 10, 14, n.block[2])
	gx(&state, 3, 7, 11, 15, n.block[7])
	gy(&state, 3, 7, 11, 15, n.block[14])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[6])
	gy(&state, 0, 5, 10, 15, n.block[5])
	gx(&state, 1, 6, 11, 12, n.block[9])
	gy(&state, 1, 6, 11, 12, n.block[0])
	gx(&state, 2, 7, 8, 13, n.block[11])
	gy(&state, 2, 7, 8, 13, n.block[15])
	gx(&state, 3, 4, 9, 14, n.block[8])
	gy(&state, 3, 4, 9, 14, n.block[1])

	// round4

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[10])
	gy(&state, 0, 4, 8, 12, n.block[7])
	gx(&state, 1, 5, 9, 13, n.block[12])
	gy(&state, 1, 5, 9, 13, n.block[9])
	gx(&state, 2, 6, 10, 14, n.block[14])
	gy(&state, 2, 6, 10, 14, n.block[3])
	gx(&state, 3, 7, 11, 15, n.block[13])
	gy(&state, 3, 7, 11, 15, n.block[15])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[4])
	gy(&state, 0, 5, 10, 15, n.block[0])
	gx(&state, 1, 6, 11, 12, n.block[11])
	gy(&state, 1, 6, 11, 12, n.block[2])
	gx(&state, 2, 7, 8, 13, n.block[5])
	gy(&state, 2, 7, 8, 13, n.block[8])
	gx(&state, 3, 4, 9, 14, n.block[1])
	gy(&state, 3, 4, 9, 14, n.block[6])

	// round5

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[12])
	gy(&state, 0, 4, 8, 12, n.block[13])
	gx(&state, 1, 5, 9, 13, n.block[9])
	gy(&state, 1, 5, 9, 13, n.block[11])
	gx(&state, 2, 6, 10, 14, n.block[15])
	gy(&state, 2, 6, 10, 14, n.block[10])
	gx(&state, 3, 7, 11, 15, n.block[14])
	gy(&state, 3, 7, 11, 15, n.block[8])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[7])
	gy(&state, 0, 5, 10, 15, n.block[2])
	gx(&state, 1, 6, 11, 12, n.block[5])
	gy(&state, 1, 6, 11, 12, n.block[3])
	gx(&state, 2, 7, 8, 13, n.block[0])
	gy(&state, 2, 7, 8, 13, n.block[1])
	gx(&state, 3, 4, 9, 14, n.block[6])
	gy(&state, 3, 4, 9, 14, n.block[4])

	// round6

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[9])
	gy(&state, 0, 4, 8, 12, n.block[14])
	gx(&state, 1, 5, 9, 13, n.block[11])
	gy(&state, 1, 5, 9, 13, n.block[5])
	gx(&state, 2, 6, 10, 14, n.block[8])
	gy(&state, 2, 6, 10, 14, n.block[12])
	gx(&state, 3, 7, 11, 15, n.block[15])
	gy(&state, 3, 7, 11, 15, n.block[1])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[13])
	gy(&state, 0, 5, 10, 15, n.block[3])
	gx(&state, 1, 6, 11, 12, n.block[0])
	gy(&state, 1, 6, 11, 12, n.block[10])
	gx(&state, 2, 7, 8, 13, n.block[2])
	gy(&state, 2, 7, 8, 13, n.block[6])
	gx(&state, 3, 4, 9, 14, n.block[4])
	gy(&state, 3, 4, 9, 14, n.block[7])

	// round7

	// Mix the columns.
	gx(&state, 0, 4, 8, 12, n.block[11])
	gy(&state, 0, 4, 8, 12, n.block[15])
	gx(&state, 1, 5, 9, 13, n.block[5])
	gy(&state, 1, 5, 9, 13, n.block[0])
	gx(&state, 2, 6, 10, 14, n.block[1])
	gy(&state, 2, 6, 10, 14, n.block[9])
	gx(&state, 3, 7, 11, 15, n.block[8])
	gy(&state, 3, 7, 11, 15, n.block[6])

	// Mix the diagonals.
	gx(&state, 0, 5, 10, 15, n.block[14])
	gy(&state, 0, 5, 10, 15, n.block[10])
	gx(&state, 1, 6, 11, 12, n.block[2])
	gy(&state, 1, 6, 11, 12, n.block[12])
	gx(&state, 2, 7, 8, 13, n.block[3])
	gy(&state, 2, 7, 8, 13, n.block[4])
	gx(&state, 3, 4, 9, 14, n.block[7])
	gy(&state, 3, 4, 9, 14, n.block[13])
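
	// Feed the result forward: the first 8 output words are the XOR of the
	// two state halves, and the last 8 are the upper half XORed with the
	// input chaining value.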
	for i := range n.cv {
		state[i] ^= state[i+8]
		state[i+8] ^= n.cv[i]
	}
	return state
}

// chainingValue returns the first 8 words of the compressed node. This is used
// in two places. First, when a chunk node is being constructed, its cv is
// overwritten with this value after each block of input is processed. Second,
// when two nodes are merged into a parent, each of their chaining values
// supplies half of the new node's block.
func (n node) chainingValue() (cv [8]uint32) {
	full := n.compress()
	copy(cv[:], full[:8])
	return
}

// chunkState manages the state involved in hashing a single chunk of input.
type chunkState struct {
	n             node
	block         [blockSize]byte
	blockLen      int
	bytesConsumed int
}

// chunkCounter is the index of this chunk, i.e. the number of chunks that have
// been processed prior to this one.
func (cs *chunkState) chunkCounter() uint64 {
	return cs.n.counter
}

func (cs *chunkState) complete() bool {
	return cs.bytesConsumed == chunkSize
}

// update incorporates input into the chunkState.
func (cs *chunkState) update(input []byte) {
	for len(input) > 0 {
		// If the block buffer is full, compress it and clear it. More
		// input is coming, so this compression is not flagChunkEnd.
		if cs.blockLen == blockSize {
			// copy the chunk block (bytes) into the node block and chain it.
			bytesToWords(cs.block[:], cs.n.block[:])
			cs.n.cv = cs.n.chainingValue()
			// clear the start flag for all but the first block
			cs.n.flags &^= flagChunkStart
			cs.blockLen = 0
		}

		// Copy input bytes into the chunk block.
		n := copy(cs.block[cs.blockLen:], input)
		cs.blockLen += n
		cs.bytesConsumed += n
		input = input[n:]
	}
}
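
// Note that update compresses lazily: a full block buffer is not compressed
// until more input arrives. This ensures that the final block of a chunk is
// always still buffered when node is called, so that it can be compressed
// with flagChunkEnd set.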

// compiles to memclr
func clear(b []byte) {
	for i := range b {
		b[i] = 0
	}
}

// node returns a node containing the chunkState's current state, with the
// flagChunkEnd flag set.
func (cs *chunkState) node() node {
	n := cs.n
	// pad the remaining space in the block with zeros
	clear(cs.block[cs.blockLen:])
	bytesToWords(cs.block[:], n.block[:])
	n.blockLen = uint32(cs.blockLen)
	n.flags |= flagChunkEnd
	return n
}

func newChunkState(iv [8]uint32, chunkCounter uint64, flags uint32) chunkState {
	return chunkState{
		n: node{
			cv:       iv,
			counter:  chunkCounter,
			blockLen: blockSize,
			// compress the first block with the start flag set
			flags: flags | flagChunkStart,
		},
	}
}

// parentNode returns a node that incorporates the chaining values of two child
// nodes.
func parentNode(left, right [8]uint32, key [8]uint32, flags uint32) node {
	var blockWords [16]uint32
	copy(blockWords[:8], left[:])
	copy(blockWords[8:], right[:])
	return node{
		cv:       key,
		block:    blockWords,
		counter:  0,         // counter is reset for parents
		blockLen: blockSize, // block is full: 8 words from left, 8 from right
		flags:    flags | flagParent,
	}
}

// Hasher implements hash.Hash.
type Hasher struct {
	cs    chunkState
	key   [8]uint32
	flags uint32
	size  int // output size, for Sum

	// log(n) set of Merkle subtree roots, at most one per height.
	stack [54][8]uint32 // 2^54 * chunkSize = 2^64
	used  uint64        // bit vector indicating which stack elems are valid; also number of chunks added
}

func (h *Hasher) hasSubtreeAtHeight(i uint64) bool {
	return h.used&(1<<i) != 0
}

// addChunkChainingValue appends a chunk to the right edge of the Merkle tree.
func (h *Hasher) addChunkChainingValue(cv [8]uint32) {
	// seek to first open stack slot, merging subtrees as we go
	i := uint64(0)
	for ; h.hasSubtreeAtHeight(i); i++ {
		cv = parentNode(h.stack[i], cv, h.key, h.flags).chainingValue()
	}
	h.stack[i] = cv
	h.used++
}
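
// The stack behaves like a binary counter: incrementing used "carries"
// through its low set bits, and each carry merges two subtrees of equal
// height. For example, after 5 chunks, used is 0b101: a 4-chunk subtree root
// sits at height 2 and a 1-chunk root at height 0, with height 1 empty.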

// rootNode computes the root of the Merkle tree. It does not modify the
// stack.
func (h *Hasher) rootNode() node {
	n := h.cs.node()
	for i := uint64(bits.TrailingZeros64(h.used)); i < 64; i++ {
		if h.hasSubtreeAtHeight(i) {
			n = parentNode(h.stack[i], n.chainingValue(), h.key, h.flags)
		}
	}
	n.flags |= flagRoot
	return n
}

// Reset implements hash.Hash.
func (h *Hasher) Reset() {
	h.cs = newChunkState(h.key, 0, h.flags)
	h.used = 0
}

// BlockSize implements hash.Hash.
func (h *Hasher) BlockSize() int { return blockSize }

// Size implements hash.Hash.
func (h *Hasher) Size() int { return h.size }

// Write implements hash.Hash.
func (h *Hasher) Write(p []byte) (int, error) {
	lenp := len(p)
	for len(p) > 0 {
		// If the current chunk is complete, finalize it and add it to the tree,
		// then reset the chunk state (but keep incrementing the counter across
		// chunks).
		if h.cs.complete() {
			cv := h.cs.node().chainingValue()
			h.addChunkChainingValue(cv)
			h.cs = newChunkState(h.key, h.cs.chunkCounter()+1, h.flags)
		}

		// Compress input bytes into the current chunk state.
		n := chunkSize - h.cs.bytesConsumed
		if n > len(p) {
			n = len(p)
		}
		h.cs.update(p[:n])
		p = p[n:]
	}
	return lenp, nil
}
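
// As with chunkState.update, Write defers finalization: a complete chunk is
// only added to the tree once more input arrives for the next chunk, so the
// last chunk always remains in h.cs, where rootNode can finalize it.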

// Sum implements hash.Hash.
func (h *Hasher) Sum(b []byte) (sum []byte) {
	// We need to append h.Size() bytes to b. Reuse b's capacity if possible;
	// otherwise, allocate a new slice.
	if total := len(b) + h.Size(); cap(b) >= total {
		sum = b[:total]
	} else {
		sum = make([]byte, total)
		copy(sum, b)
	}
	// Read into the appended portion of sum
	h.XOF().Read(sum[len(b):])
	return
}

// XOF returns an OutputReader initialized with the current hash state.
func (h *Hasher) XOF() *OutputReader {
	return &OutputReader{
		n: h.rootNode(),
	}
}
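
// A typical XOF use streams an arbitrary amount of output (a sketch, assuming
// a caller in another package):
//
//	h := blake3.New(32, nil)
//	h.Write(data)
//	xof := h.XOF()
//	out := make([]byte, 64)
//	xof.Read(out)                // first 64 bytes of output
//	xof.Seek(1024, io.SeekStart) // jump ahead in the stream
//	xof.Read(out)                // bytes 1024-1087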

func newHasher(key [8]uint32, flags uint32, size int) *Hasher {
	return &Hasher{
		cs:    newChunkState(key, 0, flags),
		key:   key,
		flags: flags,
		size:  size,
	}
}

// New returns a Hasher for the specified size and key. If key is nil, the hash
// is unkeyed; otherwise, key must be 32 bytes.
func New(size int, key []byte) *Hasher {
	if key == nil {
		return newHasher(iv, 0, size)
	}
	var keyWords [8]uint32
	bytesToWords(key, keyWords[:])
	return newHasher(keyWords, flagKeyedHash, size)
}
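
// For example (a sketch, assuming a caller in another package):
//
//	h := blake3.New(32, nil)
//	h.Write([]byte("hello"))
//	sum := h.Sum(nil) // 32-byte unkeyed BLAKE3 hash of "hello"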

// Sum256 returns the unkeyed BLAKE3 hash of b, truncated to 256 bits.
func Sum256(b []byte) (out [32]byte) {
	h := newHasher(iv, 0, 0)
	h.Write(b)
	h.XOF().Read(out[:])
	return
}

// Sum512 returns the unkeyed BLAKE3 hash of b, truncated to 512 bits.
func Sum512(b []byte) (out [64]byte) {
	h := newHasher(iv, 0, 0)
	h.Write(b)
	h.XOF().Read(out[:])
	return
}

// DeriveKey derives a subkey from ctx and srcKey. ctx should be hardcoded,
// globally unique, and application-specific. A good format for ctx strings is:
//
//	[application] [commit timestamp] [purpose]
//
// e.g.:
//
//	example.com 2019-12-25 16:18:03 session tokens v1
//
// The purpose of these requirements is to ensure that an attacker cannot trick
// two different applications into using the same context string.
func DeriveKey(subKey []byte, ctx string, srcKey []byte) {
	// construct the derivation Hasher
	const derivationIVLen = 32
	h := newHasher(iv, flagDeriveKeyContext, derivationIVLen)
	h.Write([]byte(ctx))
	var derivationIV [8]uint32
	bytesToWords(h.Sum(make([]byte, 0, derivationIVLen)), derivationIV[:])
	h = newHasher(derivationIV, flagDeriveKeyMaterial, 0)
	// derive the subKey
	h.Write(srcKey)
	h.XOF().Read(subKey)
}
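
// For example (a sketch; the context string is illustrative, and masterKey is
// assumed to hold the caller's source key material):
//
//	subKey := make([]byte, 32)
//	blake3.DeriveKey(subKey, "example.com 2020-01-11 session tokens v1", masterKey)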

// An OutputReader produces a seekable stream of 2^64 - 1 pseudorandom output
// bytes.
type OutputReader struct {
	n     node
	block [blockSize]byte
	off   uint64
}

// Read implements io.Reader. Callers may assume that Read returns len(p), nil
// unless the read would extend beyond the end of the stream.
func (or *OutputReader) Read(p []byte) (int, error) {
	if or.off == math.MaxUint64 {
		return 0, io.EOF
	} else if rem := math.MaxUint64 - or.off; uint64(len(p)) > rem {
		p = p[:rem]
	}
	lenp := len(p)
	for len(p) > 0 {
		if or.off%blockSize == 0 {
			or.n.counter = or.off / blockSize
			words := or.n.compress()
			wordsToBytes(words[:], or.block[:])
		}

		n := copy(p, or.block[or.off%blockSize:])
		p = p[n:]
		or.off += uint64(n)
	}
	return lenp, nil
}

// Seek implements io.Seeker.
func (or *OutputReader) Seek(offset int64, whence int) (int64, error) {
	off := or.off
	switch whence {
	case io.SeekStart:
		if offset < 0 {
			return 0, errors.New("seek position cannot be negative")
		}
		off = uint64(offset)
	case io.SeekCurrent:
		if offset < 0 {
			if uint64(-offset) > off {
				return 0, errors.New("seek position cannot be negative")
			}
			off -= uint64(-offset)
		} else {
			off += uint64(offset)
		}
	case io.SeekEnd:
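		// The stream is 2^64 - 1 bytes long, so the end-relative position
		// is offset + 2^64 - 1, which wraps around to offset - 1 in uint64
		// arithmetic.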
		off = uint64(offset) - 1
	default:
		panic("invalid whence")
	}
	or.off = off
	or.n.counter = off / blockSize
	if or.off%blockSize != 0 {
		words := or.n.compress()
		wordsToBytes(words[:], or.block[:])
	}
	// NOTE: or.off >= 2^63 will result in a negative return value.
	// Nothing we can do about this.
	return int64(or.off), nil
}

// ensure that Hasher implements hash.Hash
var _ hash.Hash = (*Hasher)(nil)