s3store: Filter newlines out of metadata

Marius 2019-07-21 22:40:18 +02:00
parent 30811b6579
commit 85a26150a3
3 changed files with 12 additions and 10 deletions

View File

@@ -47,7 +47,7 @@ type Config struct {
 func (config *Config) validate() error {
 	if config.Logger == nil {
-		config.Logger = log.New(os.Stdout, "[tusd] ", log.Ldate | log.Ltime)
+		config.Logger = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Ltime)
 	}
 	base := config.BasePath
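This hunk only normalizes the spacing around the | operator; there is no functional change.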

View File

@@ -90,7 +90,9 @@ import (
 // This regular expression matches every character which is not defined in the
 // ASCII tables which range from 00 to 7F, inclusive.
-var nonASCIIRegexp = regexp.MustCompile(`([^\x00-\x7F])`)
+// It also matches the \r and \n characters which are not allowed in values
+// for HTTP headers.
+var nonASCIIRegexp = regexp.MustCompile(`([^\x00-\x7F]|[\r\n])`)
 
 // See the tusd.DataStore interface for documentation about the different
 // methods.

View File

@ -37,7 +37,7 @@ func TestNewUpload(t *testing.T) {
assert.Equal(s3obj, store.Service) assert.Equal(s3obj, store.Service)
s1 := "hello" s1 := "hello"
s2 := "men?" s2 := "men???hi"
gomock.InOrder( gomock.InOrder(
s3obj.EXPECT().CreateMultipartUpload(&s3.CreateMultipartUploadInput{ s3obj.EXPECT().CreateMultipartUpload(&s3.CreateMultipartUploadInput{
@@ -53,8 +53,8 @@ func TestNewUpload(t *testing.T) {
 		s3obj.EXPECT().PutObject(&s3.PutObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.info"),
-			Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
-			ContentLength: aws.Int64(int64(171)),
+			Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü\r\nhi","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null}`)),
+			ContentLength: aws.Int64(int64(177)),
 		}),
 	)
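The expected ContentLength grows from 171 to 177 because the JSON-encoded metadata now carries six extra bytes in the bar value: the escape sequence \r\n (four bytes, written as backslash-r backslash-n) followed by "hi" (two bytes).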
@@ -63,7 +63,7 @@ func TestNewUpload(t *testing.T) {
 		Size: 500,
 		MetaData: map[string]string{
 			"foo": "hello",
-			"bar": "menü",
+			"bar": "menü\r\nhi",
 		},
 	}
@@ -243,8 +243,8 @@ func TestGetInfoWithIncompletePart(t *testing.T) {
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
 		}).Return(&s3.GetObjectOutput{
-			ContentLength: aws.Int64(10),
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("0123456789"))),
+			ContentLength: aws.Int64(10),
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("0123456789"))),
 		}, nil),
 	)
@@ -730,8 +730,8 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
 		}).Return(&s3.GetObjectOutput{
-			ContentLength: aws.Int64(3),
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
+			ContentLength: aws.Int64(3),
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
 		}, nil),
 		s3obj.EXPECT().ListParts(&s3.ListPartsInput{
 			Bucket: aws.String("bucket"),