blockLen -> blockSize

lukechampine 2020-01-10 16:01:33 -05:00
parent 4833fabd0e
commit 4c13e3a25c
1 changed file with 14 additions and 14 deletions

@@ -14,8 +14,8 @@ import (
 )
 
 const (
-	blockLen = 64
+	blockSize = 64
 	chunkLen = 1024
 )
 
 // flags
@@ -140,7 +140,7 @@ func wordsToBytes(words []uint32, bytes []byte) {
 
 // An OutputReader produces an seekable stream of 2^64 - 1 output bytes.
 type OutputReader struct {
 	n     node
-	block [blockLen]byte
+	block [blockSize]byte
 	off   uint64
 }
@@ -154,13 +154,13 @@ func (or *OutputReader) Read(p []byte) (int, error) {
 	}
 
 	lenp := len(p)
 	for len(p) > 0 {
-		if or.off%blockLen == 0 {
-			or.n.counter = or.off / blockLen
+		if or.off%blockSize == 0 {
+			or.n.counter = or.off / blockSize
 			words := or.n.compress()
 			wordsToBytes(words[:], or.block[:])
 		}
-		n := copy(p, or.block[or.off%blockLen:])
+		n := copy(p, or.block[or.off%blockSize:])
 		p = p[n:]
 		or.off += uint64(n)
 	}
@@ -191,8 +191,8 @@ func (or *OutputReader) Seek(offset int64, whence int) (int64, error) {
 		panic("invalid whence")
 	}
 	or.off = off
-	or.n.counter = uint64(off) / blockLen
-	if or.off%blockLen != 0 {
+	or.n.counter = uint64(off) / blockSize
+	if or.off%blockSize != 0 {
 		words := or.n.compress()
 		wordsToBytes(words[:], or.block[:])
 	}
@@ -203,7 +203,7 @@ func (or *OutputReader) Seek(offset int64, whence int) (int64, error) {
 
 type chunkState struct {
 	n             node
-	block         [blockLen]byte
+	block         [blockSize]byte
 	blockLen      int
 	bytesConsumed int
 }
@@ -216,10 +216,10 @@ func (cs *chunkState) update(input []byte) {
 	for len(input) > 0 {
 		// If the block buffer is full, compress it and clear it. More
 		// input is coming, so this compression is not flagChunkEnd.
-		if cs.blockLen == blockLen {
+		if cs.blockLen == blockSize {
 			bytesToWords(cs.block[:], cs.n.block[:])
 			cs.n.cv = cs.n.chainingValue()
-			cs.block = [blockLen]byte{}
+			cs.block = [blockSize]byte{}
 			cs.blockLen = 0
 			// After the first chunk has been compressed, clear the start flag.
 			cs.n.flags &^= flagChunkStart
@@ -246,7 +246,7 @@ func newChunkState(key [8]uint32, chunkCounter uint64, flags uint32) chunkState
 		n: node{
 			cv:       key,
 			counter:  chunkCounter,
-			blockLen: blockLen,
+			blockLen: blockSize,
 			// compress the first chunk with the start flag set
 			flags: flags | flagChunkStart,
 		},
@@ -260,8 +260,8 @@ func parentNode(left, right [8]uint32, key [8]uint32, flags uint32) node {
 	return node{
 		cv:       key,
 		block:    blockWords,
 		counter:  0, // Always 0 for parent nodes.
-		blockLen: blockLen, // Always blockLen (64) for parent nodes.
+		blockLen: blockSize, // Always blockSize (64) for parent nodes.
 		flags:    flags | flagParent,
 	}
 }
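
The renamed constant is the 64-byte compression block size that OutputReader steps through: Read and Seek compute or.n.counter = or.off / blockSize and index the buffered block with or.block[or.off%blockSize:]. The following is a minimal standalone sketch of that offset arithmetic; the blockPos helper and the example offsets are illustrative only and are not part of the package.

package main

import "fmt"

const blockSize = 64 // renamed from blockLen in this commit

// blockPos is a hypothetical helper mirroring the Read/Seek arithmetic:
// it maps a stream offset to the 64-byte output block to compress (the
// node counter) and to the position of the offset within that block.
func blockPos(off uint64) (counter, within uint64) {
	return off / blockSize, off % blockSize
}

func main() {
	for _, off := range []uint64{0, 63, 64, 130} {
		counter, within := blockPos(off)
		fmt.Printf("off=%3d -> block counter=%d, offset within block=%d\n", off, counter, within)
	}
}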