diff --git a/s3store/s3store.go b/s3store/s3store.go
index 0f92f00..f8598f8 100644
--- a/s3store/s3store.go
+++ b/s3store/s3store.go
@@ -278,6 +278,16 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
 		limitedReader := io.LimitReader(src, optimalPartSize)
 		n, err := io.Copy(file, limitedReader)
+
+		// If the HTTP PATCH request gets interrupted in the middle (e.g. because
+		// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
+		// However, for S3Store it's not important whether the stream has ended
+		// on purpose or accidentally. Therefore, we ignore this error so that it
+		// does not prevent the remaining chunk from being stored on S3.
+		if err == io.ErrUnexpectedEOF {
+			err = nil
+		}
+
 		// io.Copy does not return io.EOF, so we not have to handle it differently.
 		if err != nil {
 			return bytesUploaded, err
 		}
diff --git a/s3store/s3store_test.go b/s3store/s3store_test.go
index fee6ecd..2582d33 100644
--- a/s3store/s3store_test.go
+++ b/s3store/s3store_test.go
@@ -3,6 +3,7 @@ package s3store
 import (
 	"bytes"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"testing"
 
@@ -555,6 +556,85 @@ func TestWriteChunk(t *testing.T) {
 	assert.Equal(int64(14), bytesRead)
 }
 
+// TestWriteChunkWithUnexpectedEOF ensures that WriteChunk does not error out
+// if the io.Reader returns an io.ErrUnexpectedEOF. This happens when an HTTP
+// PATCH request gets interrupted.
+func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	s3obj := NewMockS3API(mockCtrl)
+	store := New("bucket", s3obj)
+	store.MaxPartSize = 500
+	store.MinPartSize = 100
+	store.MaxMultipartParts = 10000
+	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
+
+	gomock.InOrder(
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key:    aws.String("uploadId.info"),
+		}).Return(&s3.GetObjectOutput{
+			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`))),
+		}, nil),
+		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
+			Bucket:           aws.String("bucket"),
+			Key:              aws.String("uploadId"),
+			UploadId:         aws.String("multipartId"),
+			PartNumberMarker: aws.Int64(0),
+		}).Return(&s3.ListPartsOutput{
+			Parts: []*s3.Part{
+				{
+					Size: aws.Int64(100),
+				},
+				{
+					Size: aws.Int64(200),
+				},
+			},
+		}, nil),
+		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+			Bucket: aws.String("bucket"),
+			Key:    aws.String("uploadId.part"),
+		}).Return(&s3.HeadObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
+		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
+			Bucket:           aws.String("bucket"),
+			Key:              aws.String("uploadId"),
+			UploadId:         aws.String("multipartId"),
+			PartNumberMarker: aws.Int64(0),
+		}).Return(&s3.ListPartsOutput{
+			Parts: []*s3.Part{
+				{
+					Size: aws.Int64(100),
+				},
+				{
+					Size: aws.Int64(200),
+				},
+			},
+		}, nil),
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key:    aws.String("uploadId.part"),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
+		s3obj.EXPECT().PutObject(NewPutObjectInputMatcher(&s3.PutObjectInput{
+			Bucket: aws.String("bucket"),
+			Key:    aws.String("uploadId.part"),
+			Body:   bytes.NewReader([]byte("1234567890ABCD")),
+		})).Return(nil, nil),
+	)
+
+	reader, writer := io.Pipe()
+
+	go func() {
+		writer.Write([]byte("1234567890ABCD"))
+		writer.CloseWithError(io.ErrUnexpectedEOF)
+	}()
+
+	bytesRead, err := store.WriteChunk("uploadId+multipartId", 300, reader)
+	assert.Nil(err)
+	assert.Equal(int64(14), bytesRead)
+}
+
 func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
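
Side note for reviewers (not part of the patch): the test can inject an io.ErrUnexpectedEOF because io.Pipe delivers the error passed to CloseWithError to the reading side, and io.Copy returns both the byte count copied so far and that error. A minimal, self-contained sketch of this behavior:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	reader, writer := io.Pipe()

	go func() {
		// Deliver a partial body, then abort the stream the same way an
		// interrupted net/http request body does.
		writer.Write([]byte("1234567890ABCD"))
		writer.CloseWithError(io.ErrUnexpectedEOF)
	}()

	var buf bytes.Buffer
	n, err := io.Copy(&buf, reader)

	// io.Copy still reports the bytes copied before the error, which is
	// why WriteChunk can clear the error and store the remaining chunk.
	fmt.Println(n, err == io.ErrUnexpectedEOF) // prints: 14 true
}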