enhance readability, comments and errors

This commit is contained in:
Markus Kienast 2017-08-26 02:31:27 +02:00
parent 8831a98c34
commit 68e6bb8c41
1 changed file with 29 additions and 17 deletions


@@ -562,38 +562,50 @@ func isAwsError(err error, code string) bool {
}
func (store S3Store) CalcOptimalPartSize(size int64) (optimalPartSize int64, err error) {
	switch {
	// When the upload is smaller than or equal to MinPartSize, we upload it in just one part.
	case size <= store.MinPartSize:
		optimalPartSize = store.MinPartSize
	// Does the upload fit into MaxMultipartParts parts or fewer with MinPartSize?
	case size <= store.MinPartSize*store.MaxMultipartParts:
		optimalPartSize = store.MinPartSize
	// If our upload divides up exactly into MaxMultipartParts parts with
	// no bytes left over, we will not need a spare last part, so we can go
	// with a straight division. Otherwise we would exceed MaxPartSize in a
	// scenario where MaxObjectSize is equal to MaxPartSize * MaxMultipartParts
	// (which is not the case with the current AWS S3 API specification, but
	// might be in the future or with other S3-aware stores).
	// Prerequisite: be aware that the result of an integer division (x/y) is
	// ALWAYS rounded DOWN, as there are no digits after the decimal point.
	// To find out whether we have an exact result or a rounded-down one, we
	// can check whether the remainder of that division is 0 (x%y == 0).
	//
	// So if (size/MaxMultipartParts) leaves no remainder, we can use the
	// quotient directly as our optimalPartSize. But if the division produces
	// a remainder, we have to round the result up by adding +1; otherwise
	// the upload would not fit into MaxMultipartParts parts of that size and
	// we would need an additional part to upload everything.
	// In almost all cases we could skip the remainder check and just add +1
	// to every result, but there is one case where doing so would doom our
	// upload: when (MaxObjectSize == MaxPartSize * MaxMultipartParts), adding
	// +1 would leave us with an optimalPartSize > MaxPartSize. With the
	// current S3 API specification we will not run into this problem, but
	// these specs are subject to change, and other stores implementing the
	// S3 API (e.g. RIAK, Ceph RadosGW) might have different settings.
	case size%store.MaxMultipartParts == 0:
		optimalPartSize = size / store.MaxMultipartParts
	// In all other cases we need to round up to the next integer. Having a
	// remainder larger than 0 means the float result would have digits after
	// the decimal point (e.g. something like 10.9), so we can only squeeze
	// our upload into MaxMultipartParts parts if we round the division's
	// result UP. That is what is happening here: we round up by adding +1
	// because the prior test for (remainder == 0) did not succeed.
	default:
		optimalPartSize = size/store.MaxMultipartParts + 1
	}
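	// For illustration, assuming AWS S3's current limits (MinPartSize of
	// 5 MiB, MaxMultipartParts of 10,000, MaxPartSize of 5 GiB, MaxObjectSize
	// of 5 TiB), the cases above work out as follows:
	// - a 3 MiB upload  -> optimalPartSize = 5 MiB (a single part)
	// - a 40 GiB upload -> optimalPartSize = 5 MiB (exactly 8,192 parts)
	// - a 5 TiB upload  -> optimalPartSize = 5 TiB / 10,000 rounded up,
	//   roughly 524.3 MiB, which stays below MaxPartSize.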
	// An upload larger than MaxObjectSize must return an error.
	if size > store.MaxObjectSize {
		return optimalPartSize, fmt.Errorf("CalcOptimalPartSize: upload size of %v bytes exceeds MaxObjectSize of %v bytes", size, store.MaxObjectSize)
	}
	// The optimalPartSize must never exceed MaxPartSize.
	if optimalPartSize > store.MaxPartSize {
		return optimalPartSize, fmt.Errorf("CalcOptimalPartSize: to upload %v bytes, the required part size of %v bytes would exceed MaxPartSize of %v bytes", size, optimalPartSize, store.MaxPartSize)
	}
	return optimalPartSize, nil
}
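
As a side note, the rounding in the default case is the standard integer ceil-division idiom. A minimal, self-contained sketch of just that idiom (the ceilDiv helper is hypothetical and not part of this commit):

package main

import "fmt"

// ceilDiv demonstrates the idiom used in CalcOptimalPartSize: Go's integer
// division always rounds down, so a non-zero remainder means the quotient
// must be bumped by one for the parts to cover the whole upload.
func ceilDiv(size, parts int64) int64 {
	if size%parts == 0 {
		return size / parts
	}
	return size/parts + 1
}

func main() {
	fmt.Println(ceilDiv(100, 10)) // 10 -- divides evenly, no rounding needed
	fmt.Println(ceilDiv(109, 10)) // 11 -- remainder of 9 forces rounding up
}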