s3store: Fix bug when completing empty upload
parent 8ef7648713
commit 973a4fe066
@@ -406,7 +406,10 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 		// Directly finish the upload if the upload is empty (i.e. has a size of 0).
 		// This statement is in an else-if block to avoid causing duplicate calls
 		// to finishUploadIfComplete if an upload is empty and contains a chunk.
-		handler.finishUploadIfComplete(ctx, upload, info, r)
+		if err := handler.finishUploadIfComplete(ctx, upload, info, r); err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
 	}
 
 	handler.sendResp(w, r, http.StatusCreated)
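PostFile previously discarded the error returned by finishUploadIfComplete, so a failed completion still fell through to the 201 Created response. A minimal sketch of the corrected pattern, with a hypothetical finishUpload stub standing in for tusd's real method:

package main

import (
	"context"
	"errors"
	"net/http"
)

// finishUpload stands in for handler.finishUploadIfComplete; the name and
// signature are assumptions for this sketch, not tusd's actual code.
func finishUpload(ctx context.Context) error {
	return errors.New("completing multipart upload failed")
}

func postFile(w http.ResponseWriter, r *http.Request) {
	// Propagate the completion error instead of dropping it. Without the
	// early return, the client would receive 201 Created even though the
	// backend call failed.
	if err := finishUpload(r.Context()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusCreated)
}

func main() {
	http.HandleFunc("/files", postFile)
	http.ListenAndServe(":8080", nil)
}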
@@ -566,6 +566,30 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
 		return err
 	}
 
+	if len(parts) == 0 {
+		// AWS expects at least one part to be present when completing the multipart
+		// upload. So if the tus upload has a size of 0, we create an empty part
+		// and use that for completing the multipart upload.
+		res, err := store.Service.UploadPartWithContext(ctx, &s3.UploadPartInput{
+			Bucket:     aws.String(store.Bucket),
+			Key:        store.keyWithPrefix(uploadId),
+			UploadId:   aws.String(multipartId),
+			PartNumber: aws.Int64(1),
+			Body:       bytes.NewReader([]byte{}),
+		})
+		if err != nil {
+			return err
+		}
+
+		parts = []*s3.Part{
+			&s3.Part{
+				ETag:       res.ETag,
+				PartNumber: aws.Int64(1),
+			},
+		}
+
+	}
+
 	// Transform the []*s3.Part slice to a []*s3.CompletedPart slice for the next
 	// request.
 	completedParts := make([]*s3.CompletedPart, len(parts))
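The same call sequence can be exercised directly against aws-sdk-go v1, outside of tusd. The sketch below is illustrative only: the bucket and key are placeholders, and credentials and region are assumed to come from the default session chain.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)
	ctx := context.Background()

	// Begin a multipart upload for a zero-byte object.
	create, err := svc.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("empty-object"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// CompleteMultipartUpload rejects an empty part list, so upload a
	// single zero-byte part and reference it when completing.
	part, err := svc.UploadPartWithContext(ctx, &s3.UploadPartInput{
		Bucket:     aws.String("my-bucket"),
		Key:        aws.String("empty-object"),
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte{}),
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("empty-object"),
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: part.ETag, PartNumber: aws.Int64(1)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}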
@@ -169,6 +169,72 @@ func TestNewUploadWithMetadataObjectPrefix(t *testing.T) {
 	assert.NotNil(upload)
 }
 
+func TestEmptyUpload(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	s3obj := NewMockS3API(mockCtrl)
+	store := New("bucket", s3obj)
+
+	gomock.InOrder(
+		s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
+			Bucket:   aws.String("bucket"),
+			Key:      aws.String("uploadId"),
+			Metadata: map[string]*string{},
+		}).Return(&s3.CreateMultipartUploadOutput{
+			UploadId: aws.String("multipartId"),
+		}, nil),
+		s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
+			Bucket:        aws.String("bucket"),
+			Key:           aws.String("uploadId.info"),
+			Body:          bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
+			ContentLength: aws.Int64(int64(208)),
+		}),
+		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+			Bucket:           aws.String("bucket"),
+			Key:              aws.String("uploadId"),
+			UploadId:         aws.String("multipartId"),
+			PartNumberMarker: aws.Int64(0),
+		}).Return(&s3.ListPartsOutput{
+			Parts: []*s3.Part{},
+		}, nil),
+		s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
+			Bucket:     aws.String("bucket"),
+			Key:        aws.String("uploadId"),
+			UploadId:   aws.String("multipartId"),
+			PartNumber: aws.Int64(1),
+			Body:       bytes.NewReader([]byte("")),
+		})).Return(&s3.UploadPartOutput{
+			ETag: aws.String("etag"),
+		}, nil),
+		s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
+			Bucket:   aws.String("bucket"),
+			Key:      aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+			MultipartUpload: &s3.CompletedMultipartUpload{
+				Parts: []*s3.CompletedPart{
+					{
+						ETag:       aws.String("etag"),
+						PartNumber: aws.Int64(1),
+					},
+				},
+			},
+		}).Return(nil, nil),
+	)
+
+	info := handler.FileInfo{
+		ID:   "uploadId",
+		Size: 0,
+	}
+
+	upload, err := store.NewUpload(context.Background(), info)
+	assert.Nil(err)
+	assert.NotNil(upload)
+	err = upload.FinishUpload(context.Background())
+	assert.Nil(err)
+}
+
 func TestNewUploadLargerMaxObjectSize(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
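The test above compares the UploadPartWithContext call through NewUploadPartInputMatcher rather than by plain equality, because UploadPartInput.Body is an io.ReadSeeker that reflect-based matching cannot compare. tusd defines that helper elsewhere in the test file; the sketch below is an assumed reconstruction of what such a gomock matcher can look like, not the project's exact code.

package s3store

import (
	"bytes"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/golang/mock/gomock"
)

// uploadPartInputMatcher is a hypothetical reconstruction of tusd's
// NewUploadPartInputMatcher helper, shown for illustration only.
type uploadPartInputMatcher struct {
	expect *s3.UploadPartInput
}

func NewUploadPartInputMatcher(expect *s3.UploadPartInput) gomock.Matcher {
	return uploadPartInputMatcher{expect: expect}
}

func (m uploadPartInputMatcher) Matches(x interface{}) bool {
	input, ok := x.(*s3.UploadPartInput)
	if !ok {
		return false
	}

	// Compare the streamed bodies byte-for-byte, then rewind both readers
	// so the matcher does not consume the request payload.
	got, err := io.ReadAll(input.Body)
	if err != nil {
		return false
	}
	want, err := io.ReadAll(m.expect.Body)
	if err != nil {
		return false
	}
	input.Body.Seek(0, io.SeekStart)
	m.expect.Body.Seek(0, io.SeekStart)
	if !bytes.Equal(got, want) {
		return false
	}

	// Compare the scalar fields by value rather than by pointer.
	return aws.StringValue(input.Bucket) == aws.StringValue(m.expect.Bucket) &&
		aws.StringValue(input.Key) == aws.StringValue(m.expect.Key) &&
		aws.StringValue(input.UploadId) == aws.StringValue(m.expect.UploadId) &&
		aws.Int64Value(input.PartNumber) == aws.Int64Value(m.expect.PartNumber)
}

func (m uploadPartInputMatcher) String() string {
	return fmt.Sprintf("matches %v", m.expect)
}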