Store data on S3 even for io.ErrUnexpectedEOF

Previously, some data would be discarded if the user paused the upload.
Pausing causes the connection to be interrupted, which makes Go's
net/http return an io.ErrUnexpectedEOF.
Before https://github.com/tus/tusd/pull/219 from @acj this went
unnoticed since the data was discarded anyway. However, after this
PR, every byte can now be saved on S3 even if we don't have enough
data for a multipart upload part.
Acconut · 2019-01-28 21:41:46 +01:00
parent 5919636d6f · commit fa58f91bb4
2 changed files with 90 additions and 0 deletions
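
For illustration, here is a minimal, standalone sketch (not part of this
commit) of the failure mode described above. The io.Pipe stands in for the
request body that net/http hands to the handler: some bytes arrive, then
reads fail with io.ErrUnexpectedEOF instead of a clean io.EOF.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// The pipe plays the role of an interrupted HTTP request body:
	// some bytes are delivered, then reads fail with io.ErrUnexpectedEOF.
	reader, writer := io.Pipe()
	go func() {
		writer.Write([]byte("partial data"))
		writer.CloseWithError(io.ErrUnexpectedEOF)
	}()

	data, err := ioutil.ReadAll(reader)
	fmt.Printf("read %d bytes, err = %v\n", len(data), err)
	// Prints: read 12 bytes, err = unexpected EOF
}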


@@ -278,6 +278,16 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
	limitedReader := io.LimitReader(src, optimalPartSize)
	n, err := io.Copy(file, limitedReader)

	// If the HTTP PATCH request gets interrupted in the middle (e.g. because
	// the user wants to pause the upload), Go's net/http returns an
	// io.ErrUnexpectedEOF. However, for S3Store it's not important whether
	// the stream has ended on purpose or accidentally. Therefore, we ignore
	// this error to not prevent the remaining chunk from being stored on S3.
	if err == io.ErrUnexpectedEOF {
		err = nil
	}

	// io.Copy does not return io.EOF, so we do not have to handle it differently.
	if err != nil {
		return bytesUploaded, err
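
The distinction the new comment relies on can be seen in isolation. The
following standalone sketch (not from the repository) shows that io.Copy
reports a clean end of the source as err == nil and never as io.EOF, so
io.ErrUnexpectedEOF is the only way an interruption surfaces, and mapping
it to nil keeps the bytes that already arrived:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	reader, writer := io.Pipe()
	go func() {
		writer.Write([]byte("1234567890ABCD"))
		writer.CloseWithError(io.ErrUnexpectedEOF)
	}()

	// io.Copy passes through any read error except io.EOF, together with
	// the number of bytes copied before the failure.
	n, err := io.Copy(ioutil.Discard, reader)
	fmt.Println(n, err) // 14 unexpected EOF

	// Treating the interruption like a normal end of stream, as the commit
	// does, lets the 14 bytes that did arrive be stored.
	if err == io.ErrUnexpectedEOF {
		err = nil
	}
	fmt.Println(err) // <nil>
}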


@@ -3,6 +3,7 @@ package s3store
import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"testing"
@@ -555,6 +556,85 @@ func TestWriteChunk(t *testing.T) {
	assert.Equal(int64(14), bytesRead)
}

// TestWriteChunkWithUnexpectedEOF ensures that WriteChunk does not error out
// if the io.Reader returns an io.ErrUnexpectedEOF. This happens when an HTTP
// PATCH request gets interrupted.
func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	assert := assert.New(t)

	s3obj := NewMockS3API(mockCtrl)
	store := New("bucket", s3obj)
	store.MaxPartSize = 500
	store.MinPartSize = 100
	store.MaxMultipartParts = 10000
	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024

	gomock.InOrder(
		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
			Bucket: aws.String("bucket"),
			Key:    aws.String("uploadId.info"),
		}).Return(&s3.GetObjectOutput{
			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
		}, nil),
		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
			Bucket:           aws.String("bucket"),
			Key:              aws.String("uploadId"),
			UploadId:         aws.String("multipartId"),
			PartNumberMarker: aws.Int64(0),
		}).Return(&s3.ListPartsOutput{
			Parts: []*s3.Part{
				{
					Size: aws.Int64(100),
				},
				{
					Size: aws.Int64(200),
				},
			},
		}, nil),
		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
			Bucket: aws.String("bucket"),
			Key:    aws.String("uploadId.part"),
		}).Return(&s3.HeadObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
			Bucket:           aws.String("bucket"),
			Key:              aws.String("uploadId"),
			UploadId:         aws.String("multipartId"),
			PartNumberMarker: aws.Int64(0),
		}).Return(&s3.ListPartsOutput{
			Parts: []*s3.Part{
				{
					Size: aws.Int64(100),
				},
				{
					Size: aws.Int64(200),
				},
			},
		}, nil),
		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
			Bucket: aws.String("bucket"),
			Key:    aws.String("uploadId.part"),
		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
		s3obj.EXPECT().PutObject(NewPutObjectInputMatcher(&s3.PutObjectInput{
			Bucket: aws.String("bucket"),
			Key:    aws.String("uploadId.part"),
			Body:   bytes.NewReader([]byte("1234567890ABCD")),
		})).Return(nil, nil),
	)

	reader, writer := io.Pipe()

	go func() {
		writer.Write([]byte("1234567890ABCD"))
		writer.CloseWithError(io.ErrUnexpectedEOF)
	}()

	bytesRead, err := store.WriteChunk("uploadId+multipartId", 300, reader)
	assert.Nil(err)
	assert.Equal(int64(14), bytesRead)
}

func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()