s3store: Use DeleteObject and GetObject to fix IAM issues for incomplete parts (#233)

* Use GetObject instead of HeadObject to locate incomplete part

This avoids confusion around the errors returned by HeadObject, especially when the request has only limited permissions for the bucket (see the short sketch just before the diff).

* Remove unused HeadObject function

* Add DeleteObject to S3API interface

* Use DeleteObject to remove .part objects

* Update tests
Authored by Adam Jensen on 2019-02-23 15:24:38 -05:00, committed by Marius
parent 0baa9eaf48
commit 5e06fc54b0
3 changed files with 50 additions and 71 deletions
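
To make the reasoning above concrete, here is a minimal standalone sketch of the probing logic this commit moves to. The package name, the helper name getIncompletePart, and the s3iface.S3API parameter are illustrative only, not the exact tusd code. Roughly: a HEAD response carries no error body, so the Go SDK can typically report only generic, status-code-derived codes when the ".part" object is missing or unreadable, whereas GetObject surfaces the specific "NoSuchKey" and "AccessDenied" codes that the store can safely treat as "there is no incomplete part to account for".

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// getIncompletePart probes for the ".part" object of an upload. It returns
// (nil, nil) when the object does not exist or cannot be read with the
// current credentials, so callers can treat both cases as "no incomplete
// part" instead of failing the whole request. (Illustrative sketch, not the
// tusd source.)
func getIncompletePart(svc s3iface.S3API, bucket, uploadId string) (*s3.GetObjectOutput, error) {
	obj, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(uploadId + ".part"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case s3.ErrCodeNoSuchKey, "NotFound", "AccessDenied":
				return nil, nil // expected: nothing to add to the offset
			}
		}
		return nil, err // anything else is a genuine failure
	}
	return obj, nil // caller is responsible for closing obj.Body
}

With a probe like this, GetInfo only has to add the returned ContentLength to the upload offset when the result is non-nil (and close the body), which is the shape of the change in the GetInfo hunk below.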


@@ -134,10 +134,10 @@ type S3API interface {
 	PutObject(input *s3.PutObjectInput) (*s3.PutObjectOutput, error)
 	ListParts(input *s3.ListPartsInput) (*s3.ListPartsOutput, error)
 	UploadPart(input *s3.UploadPartInput) (*s3.UploadPartOutput, error)
-	HeadObject(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
 	GetObject(input *s3.GetObjectInput) (*s3.GetObjectOutput, error)
 	CreateMultipartUpload(input *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
 	AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+	DeleteObject(input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
 	DeleteObjects(input *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
 	CompleteMultipartUpload(input *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
 	UploadPartCopy(input *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
@@ -369,19 +369,13 @@ func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
 		offset += *part.Size
 	}
 
-	headResult, err := store.Service.HeadObject(&s3.HeadObjectInput{
-		Bucket: aws.String(store.Bucket),
-		Key: store.keyWithPrefix(uploadId + ".part"),
-	})
+	incompletePartObject, err := store.getIncompletePartForUpload(uploadId)
 	if err != nil {
-		if !isAwsError(err, s3.ErrCodeNoSuchKey) && !isAwsError(err, "NotFound") && !isAwsError(err, "AccessDenied") {
 		return info, err
 	}
-		err = nil
-	}
-
-	if headResult != nil && headResult.ContentLength != nil {
-		offset += *headResult.ContentLength
+	if incompletePartObject != nil {
+		defer incompletePartObject.Body.Close()
+		offset += *incompletePartObject.ContentLength
 	}
 
 	info.Offset = offset
@@ -642,7 +636,7 @@ func (store S3Store) getIncompletePartForUpload(uploadId string) (*s3.GetObjectOutput, error) {
 		Key: store.keyWithPrefix(uploadId + ".part"),
 	})
-	if err != nil && (isAwsError(err, s3.ErrCodeNoSuchKey) || isAwsError(err, "AccessDenied")) {
+	if err != nil && (isAwsError(err, s3.ErrCodeNoSuchKey) || isAwsError(err, "NotFound") || isAwsError(err, "AccessDenied")) {
 		return nil, nil
 	}
@@ -659,15 +653,9 @@ func (store S3Store) putIncompletePartForUpload(uploadId string, r io.ReadSeeker) error {
 }
 
 func (store S3Store) deleteIncompletePartForUpload(uploadId string) error {
-	_, err := store.Service.DeleteObjects(&s3.DeleteObjectsInput{
+	_, err := store.Service.DeleteObject(&s3.DeleteObjectInput{
 		Bucket: aws.String(store.Bucket),
-		Delete: &s3.Delete{
-			Objects: []*s3.ObjectIdentifier{
-				{
 		Key: store.keyWithPrefix(uploadId + ".part"),
-				},
-			},
-		},
 	})
 	return err
 }


@@ -62,6 +62,17 @@ func (_mr *_MockS3APIRecorder) CreateMultipartUpload(arg0 interface{}) *gomock.Call {
 	return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateMultipartUpload", arg0)
 }
 
+func (_m *MockS3API) DeleteObject(_param0 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) {
+	ret := _m.ctrl.Call(_m, "DeleteObject", _param0)
+	ret0, _ := ret[0].(*s3.DeleteObjectOutput)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+func (_mr *_MockS3APIRecorder) DeleteObject(arg0 interface{}) *gomock.Call {
+	return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteObject", arg0)
+}
+
 func (_m *MockS3API) DeleteObjects(_param0 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) {
 	ret := _m.ctrl.Call(_m, "DeleteObjects", _param0)
 	ret0, _ := ret[0].(*s3.DeleteObjectsOutput)
@@ -84,17 +95,6 @@ func (_mr *_MockS3APIRecorder) GetObject(arg0 interface{}) *gomock.Call {
 	return _mr.mock.ctrl.RecordCall(_mr.mock, "GetObject", arg0)
 }
 
-func (_m *MockS3API) HeadObject(_param0 *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
-	ret := _m.ctrl.Call(_m, "HeadObject", _param0)
-	ret0, _ := ret[0].(*s3.HeadObjectOutput)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockS3APIRecorder) HeadObject(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "HeadObject", arg0)
-}
-
 func (_m *MockS3API) ListParts(_param0 *s3.ListPartsInput) (*s3.ListPartsOutput, error) {
 	ret := _m.ctrl.Call(_m, "ListParts", _param0)
 	ret0, _ := ret[0].(*s3.ListPartsOutput)


@@ -203,10 +203,10 @@ func TestGetInfo(t *testing.T) {
 				},
 			},
 		}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
 	)
 
 	info, err := store.GetInfo("uploadId+multipartId")
@@ -239,11 +239,12 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
 			UploadId: aws.String("multipartId"),
 			PartNumberMarker: aws.Int64(0),
 		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{
+		}).Return(&s3.GetObjectOutput{
 			ContentLength: aws.Int64(10),
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("0123456789"))),
 		}, nil),
 	)
@@ -379,10 +380,10 @@ func TestDeclareLength(t *testing.T) {
 		}).Return(&s3.ListPartsOutput{
 			Parts: []*s3.Part{},
 		}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{}, awserr.New("NotFound", "Not Found", nil)),
+		}).Return(nil, awserr.New("NotFound", "Not Found", nil)),
 		s3obj.EXPECT().PutObject(&s3.PutObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.info"),
@@ -500,10 +501,10 @@ func TestWriteChunk(t *testing.T) {
 				},
 			},
 		}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -593,10 +594,10 @@ func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
 				},
 			},
 		}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -665,10 +666,10 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
 				},
 			},
 		}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil)),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil)),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -725,11 +726,12 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
 			UploadId: aws.String("multipartId"),
 			PartNumberMarker: aws.Int64(0),
 		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{
+		}).Return(&s3.GetObjectOutput{
 			ContentLength: aws.Int64(3),
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
 		}, nil),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),
@@ -741,19 +743,13 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
 		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
 			ContentLength: aws.Int64(3),
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
 		}, nil),
-		s3obj.EXPECT().DeleteObjects(&s3.DeleteObjectsInput{
+		s3obj.EXPECT().DeleteObject(&s3.DeleteObjectInput{
 			Bucket: aws.String(store.Bucket),
-			Delete: &s3.Delete{
-				Objects: []*s3.ObjectIdentifier{
-					{
 			Key: aws.String("uploadId.part"),
-					},
-				},
-			},
-		}).Return(nil, nil),
+		}).Return(&s3.DeleteObjectOutput{}, nil),
 		s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -800,11 +796,12 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.T) {
 			UploadId: aws.String("multipartId"),
 			PartNumberMarker: aws.Int64(0),
 		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{
+		}).Return(&s3.GetObjectOutput{
 			ContentLength: aws.Int64(3),
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
 		}, nil),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),
@@ -819,16 +816,10 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.T) {
 			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
 			ContentLength: aws.Int64(3),
 		}, nil),
-		s3obj.EXPECT().DeleteObjects(&s3.DeleteObjectsInput{
+		s3obj.EXPECT().DeleteObject(&s3.DeleteObjectInput{
 			Bucket: aws.String(store.Bucket),
-			Delete: &s3.Delete{
-				Objects: []*s3.ObjectIdentifier{
-					{
 			Key: aws.String("uploadId.part"),
-					},
-				},
-			},
-		}).Return(nil, nil),
+		}).Return(&s3.DeleteObjectOutput{}, nil),
 		s3obj.EXPECT().UploadPart(NewUploadPartInputMatcher(&s3.UploadPartInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -879,10 +870,10 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
 				},
 			},
 		}, nil),
-		s3obj.EXPECT().HeadObject(&s3.HeadObjectInput{
+		s3obj.EXPECT().GetObject(&s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
-		}).Return(&s3.HeadObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil)),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil)),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),