Add support for meta data in S3Store
parent 1e40295e88
commit 29047eddc2
@@ -29,6 +29,10 @@
 // created. Whenever a new chunk is uploaded to tusd using a PATCH request, a
 // new part is pushed to the multipart upload on S3.
 //
+// If meta data is associated with the upload during creation, it will be added
+// to the multipart upload and, after finishing it, the meta data will be passed
+// to the final object.
+//
 // Once the upload is finished, the multipart upload is completed, resulting in
 // the entire file being stored in the bucket. The info object, containing
 // meta data is not deleted. It is recommended to copy the finished upload to
@@ -151,10 +155,20 @@ func (store S3Store) NewUpload(info tusd.FileInfo) (id string, err error) {
 		return "", err
 	}
 
+	// Convert meta data into a map of pointers for the AWS Go SDK, sigh.
+	metadata := make(map[string]*string, len(info.MetaData))
+	for key, value := range info.MetaData {
+		// Copying the value is required in order to prevent it from being
+		// overwritten by the next iteration.
+		v := value
+		metadata[key] = &v
+	}
+
 	// Create the actual multipart upload
 	res, err := store.Service.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
 		Bucket:   aws.String(store.Bucket),
 		Key:      aws.String(uploadId),
+		Metadata: metadata,
 	})
 	if err != nil {
 		return "", err
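The copy inside the loop matters: taking the address of the range variable directly would make every map entry point at the same variable, since Go reuses the loop variable across iterations in versions prior to 1.22. A standalone sketch of the same conversion, separate from the commit:

package main

import "fmt"

func main() {
	src := map[string]string{"foo": "hello", "bar": "world"}

	// Same conversion as in NewUpload above: the AWS SDK wants
	// map[string]*string, and each value is copied so that the pointer
	// does not end up referring to the shared loop variable.
	dst := make(map[string]*string, len(src))
	for key, value := range src {
		v := value
		dst[key] = &v
	}

	for key, ptr := range dst {
		fmt.Println(key, "=", *ptr)
	}
}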
@@ -28,16 +28,23 @@ func TestNewUpload(t *testing.T) {
 	assert.Equal(store.Bucket, "bucket")
 	assert.Equal(store.Service, s3obj)
 
+	s1 := "hello"
+	s2 := "world"
+
 	gomock.InOrder(
 		s3obj.EXPECT().PutObject(&s3.PutObjectInput{
 			Bucket:        aws.String("bucket"),
 			Key:           aws.String("uploadId.info"),
-			Body:          bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
-			ContentLength: aws.Int64(int64(111)),
+			Body:          bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":{"bar":"world","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
+			ContentLength: aws.Int64(int64(136)),
 		}),
 		s3obj.EXPECT().CreateMultipartUpload(&s3.CreateMultipartUploadInput{
 			Bucket: aws.String("bucket"),
 			Key:    aws.String("uploadId"),
+			Metadata: map[string]*string{
+				"foo": &s1,
+				"bar": &s2,
+			},
 		}).Return(&s3.CreateMultipartUploadOutput{
			UploadId: aws.String("multipartId"),
 		}, nil),
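A side note on the s1 and s2 variables above: Go does not allow taking the address of a string literal, so the test declares local variables whose addresses can populate the expected Metadata map. The SDK's aws.String helper is an equivalent way to obtain a *string; a small sketch of both, not part of the commit:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// &"hello" would not compile; both of the following yield a *string.
	s1 := "hello"
	viaLocal := &s1
	viaHelper := aws.String("hello")

	fmt.Println(*viaLocal, *viaHelper)
}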
@@ -46,6 +53,10 @@ func TestNewUpload(t *testing.T) {
 	info := tusd.FileInfo{
 		ID:   "uploadId",
 		Size: 500,
+		MetaData: map[string]string{
+			"foo": "hello",
+			"bar": "world",
+		},
 	}
 
 	id, err := store.NewUpload(info)
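Once the multipart upload completes, the meta data should be visible on the final object. A sketch of how that could be checked with the same SDK, assuming an already configured *s3.S3 client and the bucket and key used above; the printObjectMetadata helper is an assumption and not part of the commit:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printObjectMetadata is a hypothetical helper that fetches the metadata of
// the finished object and prints it. HeadObject returns the user-defined
// metadata as map[string]*string, mirroring what CreateMultipartUpload
// received above.
func printObjectMetadata(svc *s3.S3, bucket, key string) error {
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}

	for name, value := range out.Metadata {
		fmt.Println(name, "=", *value)
	}
	return nil
}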