Merge branch 'master' into vendor_govendor

This commit is contained in:
Marius 2016-09-30 00:13:44 +02:00
commit c61a300693
21 changed files with 214 additions and 90 deletions

View File

@ -33,15 +33,9 @@ infra variable {
} }
infra output { infra output {
public_address { public_address { value = "${aws_instance.tusd.0.public_dns}" }
value = "${aws_instance.tusd.0.public_dns}" public_addresses { value = "${join("\n", aws_instance.tusd.*.public_dns)}" }
} endpoint { value = "http://${aws_route53_record.www.name}:80/" }
public_addresses {
value = "${join("\n", aws_instance.tusd.*.public_dns)}"
}
endpoint {
value = "http://${aws_route53_record.www.name}:80/"
}
} }
infra resource aws_instance tusd { infra resource aws_instance tusd {
@ -53,13 +47,11 @@ infra resource aws_instance tusd {
key_file = "{{{config.global.ssh.privatekey_file}}}" key_file = "{{{config.global.ssh.privatekey_file}}}"
user = "{{{config.global.ssh.user}}}" user = "{{{config.global.ssh.user}}}"
} }
tags { tags { Name = "master.tus.io" }
"Name" = "${var.FREY_DOMAIN}"
}
} }
infra resource "aws_route53_record" www { infra resource "aws_route53_record" www {
name = "${var.FREY_DOMAIN}" name = "master.tus.io"
records = ["${aws_instance.tusd.public_dns}"] records = ["${aws_instance.tusd.public_dns}"]
ttl = "300" ttl = "300"
type = "CNAME" type = "CNAME"
@ -110,7 +102,7 @@ install {
} }
tasks { tasks {
name = "Common | Set motd" name = "Common | Set motd"
copy = "content='Welcome to {{lookup('env', 'FREY_DOMAIN')}}' dest=/etc/motd owner=root group=root mode=0644 backup=yes" copy = "content='Welcome to master.tus.io' dest=/etc/motd owner=root group=root mode=0644 backup=yes"
} }
tasks { tasks {
name = "Common | Set timezone variables" name = "Common | Set timezone variables"
@ -151,12 +143,20 @@ setup {
} }
roles { roles {
role = "{{{init.paths.roles_dir}}}/fqdn/v1.0.0" role = "{{{init.paths.roles_dir}}}/fqdn/v1.0.0"
fqdn = "{{lookup('env', 'FREY_DOMAIN')}}" fqdn = "master.tus.io"
} }
tasks { tasks {
file = "path=/mnt/tusd-data state=directory owner=www-data group=www-data mode=0755 recurse=yes" file = "path=/mnt/tusd-data state=directory owner=www-data group=ubuntu mode=ug+rwX,o= recurse=yes"
name = "tusd | Create tusd data dir" name = "tusd | Create tusd data dir"
} }
tasks {
name = "tusd | Create purger crontab (clean up >24h (1440 minutes) files)"
cron {
name = "purger"
special_time = "hourly"
job = "find /mnt/tusd-data -type f -mmin +1440 -print0 | xargs -n 200 -r -0 rm || true"
}
}
} }
} }

View File

@ -2,9 +2,7 @@
# So suitable for adding secret keys and such # So suitable for adding secret keys and such
# export DEBUG="frey:*" # export DEBUG="frey:*"
# export FREY_DOMAIN="master.tus.io"
# export FREY_ENCRYPTION_SECRET="***" # export FREY_ENCRYPTION_SECRET="***"
# source env.sh # source env.sh
# travis encrypt --add env.global "FREY_DOMAIN=${FREY_DOMAIN}"
# travis encrypt --add env.global "FREY_ENCRYPTION_SECRET=${FREY_ENCRYPTION_SECRET}" # travis encrypt --add env.global "FREY_ENCRYPTION_SECRET=${FREY_ENCRYPTION_SECRET}"

84
.scripts/build_all.sh Executable file
View File

@ -0,0 +1,84 @@
#!/usr/bin/env bash
# Cross-compiles tusd for all release platforms and packages the results
# as zip / tar.gz / deb artifacts. Intended to run during the CI deploy step.

# Abort on the first failing command.
set -e

# Release metadata supplied by Travis CI; empty when run outside CI.
version=$TRAVIS_TAG
commit=$TRAVIS_COMMIT
# compile cross-compiles tusd for one os/arch pair into tusd_<os>_<arch>/tusd[<ext>],
# embedding the release version, git commit and build date via -ldflags.
function compile {
	local os="$1" arch="$2" ext="$3"
	local dir="tusd_${os}_${arch}"

	echo "Compiling for $os/$arch..."

	rm -rf "$dir"
	mkdir -p "$dir"

	# Assemble the linker flags piecewise for readability; content is unchanged.
	local flags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version}"
	flags+=" -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit}"
	flags+=" -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'"

	GOOS="$os" GOARCH="$arch" go build \
		-ldflags="$flags" \
		-o "$dir/tusd$ext" ./cmd/tusd/main.go
}
# makezip bundles the compiled binary plus license and readme into
# tusd_<os>_<arch>.zip (used for the darwin and windows targets).
function makezip {
	local os="$1"
	local arch="$2"
	local ext="$3"
	local dir="tusd_${os}_${arch}"

	echo "Zipping for $os/$arch..."
	zip "$dir.zip" "$dir/tusd$ext" LICENSE.txt README.md
}
# maketar bundles the compiled binary plus license and readme into
# tusd_<os>_<arch>.tar.gz (used for the linux targets).
function maketar {
	local os="$1" arch="$2"
	local dir="tusd_${os}_${arch}"

	echo "Tarring for $os/$arch..."
	tar -czf "$dir.tar.gz" "$dir/tusd" LICENSE.txt README.md
}
# makedep builds a Debian package for the given architecture from the
# previously compiled linux binary (expects ./tusd_linux_<arch>/tusd to exist).
function makedep {
	local arch=$1

	echo "Debbing for $arch..."

	local dir="tusd_snapshot_${arch}"
	rm -rf "$dir"
	mkdir -p "$dir"
	mkdir -p "$dir/DEBIAN"
	mkdir -p "$dir/usr/bin"

	cp "./tusd_linux_${arch}/tusd" "./$dir/usr/bin/tusd"

	# Write the control file in a single truncating pass; the original nine
	# `echo … >>` appends were verbose and would duplicate fields if the file
	# ever pre-existed. $(go version), ${version} and ${arch} still expand.
	cat > "./$dir/DEBIAN/control" <<EOF
Package: tusd
Maintainer: Marius <maerious@gmail.com>
Section: devel
Priority: optional
Version: ${version}
Architecture: ${arch}
Homepage: https://github.com/tus/tusd
Built-Using: $(go version)
Description: The official server implementation of the tus resumable upload protocol.
EOF

	dpkg-deb --build "$dir"
}
# Build every target, then package each platform in its conventional format:
# tarballs for linux, zips for darwin/windows, and a deb for linux/amd64.
for arch in 386 amd64 arm; do compile linux "$arch"; done
for arch in 386 amd64; do compile darwin "$arch"; done
for arch in 386 amd64; do compile windows "$arch" .exe; done

for arch in 386 amd64 arm; do maketar linux "$arch"; done
for arch in 386 amd64; do makezip darwin "$arch"; done
for arch in 386 amd64; do makezip windows "$arch" .exe; done

makedep amd64

View File

@ -28,29 +28,18 @@ install:
script: script:
- go test $PACKAGES - go test $PACKAGES
before_deploy: before_deploy:
- export GOROOT_BOOTSTRAP=$GOROOT - ./.scripts/build_all.sh
- go get github.com/laher/goxc
- goxc -t -bc="linux darwin windows"
- goxc -d=./ -wd=./cmd/tusd -bc="linux darwin windows" -build-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=$TRAVIS_TAG -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=$TRAVIS_COMMIT -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'"
deploy: deploy:
provider: releases provider: releases
api_key: api_key:
secure: dV3wr9ebEps3YrzIoqmkYc7fw0IECz7QLPRENPSxTJyd5TTYXGsnTS26cMe2LdGwYrXw0njt2GGovMyBZFTtxyYI3mMO4AZRwvZfx/yGzPWJBbVi6NjZVRg/bpyK+mQJ5BUlkPAYJmRpdc6qD+nvCGakBOxoByC5XDK+yM+bKFs= secure: dV3wr9ebEps3YrzIoqmkYc7fw0IECz7QLPRENPSxTJyd5TTYXGsnTS26cMe2LdGwYrXw0njt2GGovMyBZFTtxyYI3mMO4AZRwvZfx/yGzPWJBbVi6NjZVRg/bpyK+mQJ5BUlkPAYJmRpdc6qD+nvCGakBOxoByC5XDK+yM+bKFs=
file: file_glob: true
- snapshot/tusd_darwin_386.zip file: tusd_*.*
- snapshot/tusd_darwin_amd64.zip
- snapshot/tusd_linux_386.tar.gz
- snapshot/tusd_linux_amd64.tar.gz
- snapshot/tusd_linux_arm.tar.gz
- snapshot/tusd_snapshot_amd64.deb
- snapshot/tusd_snapshot_armhf.deb
- snapshot/tusd_snapshot_i386.deb
- snapshot/tusd_windows_386.zip
- snapshot/tusd_windows_amd64.zip
skip_cleanup: true skip_cleanup: true
on: on:
all_branches: true
tags: true tags: true
go: 1.5 go: 1.7
repo: tus/tusd repo: tus/tusd
after_deploy: after_deploy:
- make frey && frey setup --force-yes --projectDir .infra - make frey && frey setup --force-yes --projectDir .infra

View File

@ -10,10 +10,13 @@ install:
build_script: build_script:
- go env - go env
- go version - go version
- go get ./s3store
- go get ./consullocker
test_script: test_script:
- go test . - go test .
- go test ./filestore - go test ./filestore
- go test ./limitedstore - go test ./limitedstore
- go test ./memorylocker - go test ./memorylocker
- go test ./consullocker
- go test ./s3store - go test ./s3store

View File

@ -51,19 +51,17 @@ func SetupPostHooks(handler *tusd.Handler) {
func invokeHook(typ HookType, info tusd.FileInfo) { func invokeHook(typ HookType, info tusd.FileInfo) {
go func() { go func() {
_, err := invokeHookSync(typ, info, false) // Error handling is token care of by the function.
if err != nil { _, _ = invokeHookSync(typ, info, false)
stderr.Printf("Error running %s hook for %s: %s", string(typ), info.ID, err)
}
}() }()
} }
func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, error) { func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, error) {
switch typ { switch typ {
case HookPostFinish: case HookPostFinish:
stdout.Printf("Upload %s (%d bytes) finished\n", info.ID, info.Size) logEv("UploadFinished", "id", info.ID, "size", strconv.FormatInt(info.Size, 10))
case HookPostTerminate: case HookPostTerminate:
stdout.Printf("Upload %s terminated\n", info.ID) logEv("UploadTerminated", "id", info.ID)
} }
if !Flags.HooksInstalled { if !Flags.HooksInstalled {
@ -71,7 +69,7 @@ func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byt
} }
name := string(typ) name := string(typ)
stdout.Printf("Invoking %s hook…\n", name) logEv("HookInvocationStart", "type", name, "id", info.ID)
cmd := exec.Command(Flags.HooksDir + "/" + name) cmd := exec.Command(Flags.HooksDir + "/" + name)
env := os.Environ() env := os.Environ()
@ -100,10 +98,15 @@ func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byt
output, err = cmd.Output() output, err = cmd.Output()
} }
if err != nil {
logEv("HookInvocationError", "type", string(typ), "id", info.ID, "error", err.Error())
} else {
logEv("HookInvocationFinish", "type", string(typ), "id", info.ID)
}
// Ignore the error, only, if the hook's file could not be found. This usually // Ignore the error, only, if the hook's file could not be found. This usually
// means that the user is only using a subset of the available hooks. // means that the user is only using a subset of the available hooks.
if os.IsNotExist(err) { if os.IsNotExist(err) {
stdout.Printf("Unable to invoke %s hook: %s\n", name, err)
err = nil err = nil
} }

View File

@ -3,7 +3,13 @@ package cli
import ( import (
"log" "log"
"os" "os"
"github.com/tus/tusd"
) )
var stdout = log.New(os.Stdout, "[tusd] ", 0) var stdout = log.New(os.Stdout, "[tusd] ", 0)
var stderr = log.New(os.Stderr, "[tusd] ", 0) var stderr = log.New(os.Stderr, "[tusd] ", 0)
func logEv(eventName string, details ...string) {
tusd.LogEvent(stderr, eventName, details...)
}

View File

@ -35,7 +35,7 @@ type Config struct {
Logger *log.Logger Logger *log.Logger
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers // Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
// potentially set by proxies when generating an absolute URL in the // potentially set by proxies when generating an absolute URL in the
// reponse to POST requests. // response to POST requests.
RespectForwardedHeaders bool RespectForwardedHeaders bool
} }

View File

@ -22,9 +22,9 @@ func TestCORS(t *testing.T) {
}, },
Code: http.StatusOK, Code: http.StatusOK,
ResHeader: map[string]string{ ResHeader: map[string]string{
"Access-Control-Allow-Headers": "", "Access-Control-Allow-Headers": "Origin, X-Requested-With, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata",
"Access-Control-Allow-Methods": "", "Access-Control-Allow-Methods": "POST, GET, HEAD, PATCH, DELETE, OPTIONS",
"Access-Control-Max-Age": "", "Access-Control-Max-Age": "86400",
"Access-Control-Allow-Origin": "tus.io", "Access-Control-Allow-Origin": "tus.io",
}, },
}).Run(handler, t) }).Run(handler, t)
@ -37,7 +37,7 @@ func TestCORS(t *testing.T) {
}, },
Code: http.StatusMethodNotAllowed, Code: http.StatusMethodNotAllowed,
ResHeader: map[string]string{ ResHeader: map[string]string{
"Access-Control-Expose-Headers": "", "Access-Control-Expose-Headers": "Upload-Offset, Location, Upload-Length, Tus-Version, Tus-Resumable, Tus-Max-Size, Tus-Extension, Upload-Metadata",
"Access-Control-Allow-Origin": "tus.io", "Access-Control-Allow-Origin": "tus.io",
}, },
}).Run(handler, t) }).Run(handler, t)

View File

@ -69,7 +69,7 @@ type FinisherDataStore interface {
// Common ways to store this information is in memory, on disk or using an // Common ways to store this information is in memory, on disk or using an
// external service, such as ZooKeeper. // external service, such as ZooKeeper.
// When multiple processes are attempting to access an upload, whether it be // When multiple processes are attempting to access an upload, whether it be
// by reading or writing, a syncronization mechanism is required to prevent // by reading or writing, a synchronization mechanism is required to prevent
// data corruption, especially to ensure correct offset values and the proper // data corruption, especially to ensure correct offset values and the proper
// order of chunks inside a single upload. // order of chunks inside a single upload.
type LockerDataStore interface { type LockerDataStore interface {

View File

@ -7,9 +7,9 @@
// No cleanup is performed so you may want to run a cronjob to ensure your disk // No cleanup is performed so you may want to run a cronjob to ensure your disk
// is not filled up with old and finished uploads. // is not filled up with old and finished uploads.
// //
// In addition, it provides an exclusive upload locking mechansim using lock files // In addition, it provides an exclusive upload locking mechanism using lock files
// which are stored on disk. Each of them stores the PID of the process which // which are stored on disk. Each of them stores the PID of the process which
// aquired the lock. This allows locks to be automatically freed when a process // acquired the lock. This allows locks to be automatically freed when a process
// is unable to release it on its own because the process is not alive anymore. // is unable to release it on its own because the process is not alive anymore.
// For more information, consult the documentation for tusd.LockerDataStore // For more information, consult the documentation for tusd.LockerDataStore
// interface, which is implemented by FileStore // interface, which is implemented by FileStore
@ -161,12 +161,12 @@ func (store FileStore) UnlockUpload(id string) error {
// A "no such file or directory" will be returned if no lockfile was found. // A "no such file or directory" will be returned if no lockfile was found.
// Since this means that the file has never been locked, we drop the error // Since this means that the file has never been locked, we drop the error
// and continue as if nothing happend. // and continue as if nothing happened.
if os.IsNotExist(err) { if os.IsNotExist(err) {
err = nil err = nil
} }
return nil return err
} }
// newLock constructs a new Lockfile instance. // newLock constructs a new Lockfile instance.

View File

@ -61,11 +61,7 @@ func (test *httpTest) Run(handler http.Handler, t *testing.T) *httptest.Response
for key, value := range test.ResHeader { for key, value := range test.ResHeader {
header := w.HeaderMap.Get(key) header := w.HeaderMap.Get(key)
if value == "" && header == "" { if value != header {
t.Errorf("Expected '%s' in response", key)
}
if value != "" && value != header {
t.Errorf("Expected '%s' as '%s' (got '%s')", value, key, header) t.Errorf("Expected '%s' as '%s' (got '%s')", value, key, header)
} }
} }

27
log.go Normal file
View File

@ -0,0 +1,27 @@
package tusd
import (
"log"
)
// log emits a structured event line through the handler's configured logger.
// It is a thin delegate to LogEvent; details are key/value string pairs.
func (h *UnroutedHandler) log(eventName string, details ...string) {
	LogEvent(h.logger, eventName, details...)
}
func LogEvent(logger *log.Logger, eventName string, details ...string) {
result := make([]byte, 0, 100)
result = append(result, `event="`...)
result = append(result, eventName...)
result = append(result, `" `...)
for i := 0; i < len(details); i += 2 {
result = append(result, details[i]...)
result = append(result, `="`...)
result = append(result, details[i+1]...)
result = append(result, `" `...)
}
result = append(result, "\n"...)
logger.Output(2, string(result))
}

View File

@ -1,12 +1,12 @@
// Package memorylocker provides an in-memory locking mechanism. // Package memorylocker provides an in-memory locking mechanism.
// //
// When multiple processes are attempting to access an upload, whether it be // When multiple processes are attempting to access an upload, whether it be
// by reading or writing, a syncronization mechanism is required to prevent // by reading or writing, a synchronization mechanism is required to prevent
// data corruption, especially to ensure correct offset values and the proper // data corruption, especially to ensure correct offset values and the proper
// order of chunks inside a single upload. // order of chunks inside a single upload.
// //
// MemoryLocker persists locks using memory and therefore allowing a simple and // MemoryLocker persists locks using memory and therefore allowing a simple and
// cheap mechansim. Locks will only exist as long as this object is kept in // cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits. // reference and will be erased if the program exits.
package memorylocker package memorylocker
@ -17,7 +17,7 @@ import (
) )
// MemoryLocker persists locks using memory and therefore allowing a simple and // MemoryLocker persists locks using memory and therefore allowing a simple and
// cheap mechansim. Locks will only exist as long as this object is kept in // cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits. // reference and will be erased if the program exits.
type MemoryLocker struct { type MemoryLocker struct {
locks map[string]bool locks map[string]bool

View File

@ -80,7 +80,7 @@ func newMetrics() Metrics {
func newErrorsTotalMap() map[string]*uint64 { func newErrorsTotalMap() map[string]*uint64 {
m := make(map[string]*uint64, len(ErrStatusCodes)+1) m := make(map[string]*uint64, len(ErrStatusCodes)+1)
for err, _ := range ErrStatusCodes { for err := range ErrStatusCodes {
m[err.Error()] = new(uint64) m[err.Error()] = new(uint64)
} }

View File

@ -201,10 +201,16 @@ func TestPostWithUpload(t *testing.T) {
Method: "POST", Method: "POST",
ReqHeader: map[string]string{ ReqHeader: map[string]string{
"Tus-Resumable": "1.0.0", "Tus-Resumable": "1.0.0",
"Upload-Length": "300",
"Upload-Metadata": "foo aGVsbG8=, bar d29ybGQ=",
"Content-Type": "application/false", "Content-Type": "application/false",
}, },
ReqBody: strings.NewReader("hello"), ReqBody: strings.NewReader("hello"),
Code: http.StatusBadRequest, Code: http.StatusCreated,
ResHeader: map[string]string{
"Location": "http://tus.io/files/foo",
"Upload-Offset": "",
},
}).Run(handler, t) }).Run(handler, t)
(&httpTest{ (&httpTest{

View File

@ -5,7 +5,7 @@ import (
) )
func newMultiError(errs []error) error { func newMultiError(errs []error) error {
message := "Multiple errors occured:\n" message := "Multiple errors occurred:\n"
for _, err := range errs { for _, err := range errs {
message += "\t" + err.Error() + "\n" message += "\t" + err.Error() + "\n"
} }

View File

@ -272,7 +272,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) { func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
uploadId, multipartId := splitIds(id) uploadId, multipartId := splitIds(id)
// Get file info stored in seperate object // Get file info stored in separate object
res, err := store.Service.GetObject(&s3.GetObjectInput{ res, err := store.Service.GetObject(&s3.GetObjectInput{
Bucket: aws.String(store.Bucket), Bucket: aws.String(store.Bucket),
Key: aws.String(uploadId + ".info"), Key: aws.String(uploadId + ".info"),
@ -335,7 +335,7 @@ func (store S3Store) GetReader(id string) (io.Reader, error) {
Key: aws.String(uploadId), Key: aws.String(uploadId),
}) })
if err == nil { if err == nil {
// No error occured, and we are able to stream the object // No error occurred, and we are able to stream the object
return res.Body, nil return res.Body, nil
} }

View File

@ -538,7 +538,7 @@ func TestTerminateWithErrors(t *testing.T) {
}, nil) }, nil)
err := store.Terminate("uploadId+multipartId") err := store.Terminate("uploadId+multipartId")
assert.Equal("Multiple errors occured:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error()) assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
} }
func TestConcatUploads(t *testing.T) { func TestConcatUploads(t *testing.T) {

View File

@ -15,7 +15,7 @@ func Uid() string {
id := make([]byte, 16) id := make([]byte, 16)
_, err := io.ReadFull(rand.Reader, id) _, err := io.ReadFull(rand.Reader, id)
if err != nil { if err != nil {
// This is probably an appropiate way to handle errors from our source // This is probably an appropriate way to handle errors from our source
// for random bits. // for random bits.
panic(err) panic(err)
} }

View File

@ -126,10 +126,9 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
r.Method = newMethod r.Method = newMethod
} }
go func() { handler.log("RequestIncoming", "method", r.Method, "path", r.URL.Path)
handler.logger.Println(r.Method, r.URL.Path)
handler.Metrics.incRequestsTotal(r.Method) go handler.Metrics.incRequestsTotal(r.Method)
}()
header := w.Header() header := w.Header()
@ -171,7 +170,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// will be ignored or interpreted as a rejection. // will be ignored or interpreted as a rejection.
// For example, the Presto engine, which is used in older versions of // For example, the Presto engine, which is used in older versions of
// Opera, Opera Mobile and Opera Mini, handles CORS this way. // Opera, Opera Mobile and Opera Mini, handles CORS this way.
w.WriteHeader(http.StatusOK) handler.sendResp(w, r, http.StatusOK)
return return
} }
@ -191,15 +190,10 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// PostFile creates a new file upload using the datastore after validating the // PostFile creates a new file upload using the datastore after validating the
// length and parsing the metadata. // length and parsing the metadata.
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) { func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
// Check for presence of application/offset+octet-stream // Check for presence of application/offset+octet-stream. If another content
containsChunk := false // type is defined, it will be ignored and treated as none was set because
if contentType := r.Header.Get("Content-Type"); contentType != "" { // some HTTP clients may enforce a default value for this header.
if contentType != "application/offset+octet-stream" { containsChunk := r.Header.Get("Content-Type") == "application/offset+octet-stream"
handler.sendError(w, r, ErrInvalidContentType)
return
}
containsChunk = true
}
// Only use the proper Upload-Concat header if the concatenation extension // Only use the proper Upload-Concat header if the concatenation extension
// is even supported by the data store. // is even supported by the data store.
@ -268,6 +262,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
w.Header().Set("Location", url) w.Header().Set("Location", url)
go handler.Metrics.incUploadsCreated() go handler.Metrics.incUploadsCreated()
handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)
if isFinal { if isFinal {
if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil { if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {
@ -299,7 +294,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
} }
} }
w.WriteHeader(http.StatusCreated) handler.sendResp(w, r, http.StatusCreated)
} }
// HeadFile returns the length and offset for the HEAD request // HeadFile returns the length and offset for the HEAD request
@ -347,7 +342,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
w.Header().Set("Cache-Control", "no-store") w.Header().Set("Cache-Control", "no-store")
w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10)) w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10)) w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10))
w.WriteHeader(http.StatusOK) handler.sendResp(w, r, http.StatusOK)
} }
// PatchFile adds a chunk to an upload. Only allowed enough space is left. // PatchFile adds a chunk to an upload. Only allowed enough space is left.
@ -402,7 +397,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// Do not proxy the call to the data store if the upload is already completed // Do not proxy the call to the data store if the upload is already completed
if info.Offset == info.Size { if info.Offset == info.Size {
w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10)) w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
w.WriteHeader(http.StatusNoContent) handler.sendResp(w, r, http.StatusNoContent)
return return
} }
@ -411,7 +406,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
return return
} }
w.WriteHeader(http.StatusNoContent) handler.sendResp(w, r, http.StatusNoContent)
} }
// PatchFile adds a chunk to an upload. Only allowed enough space is left. // PatchFile adds a chunk to an upload. Only allowed enough space is left.
@ -430,6 +425,8 @@ func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.Resp
maxSize = length maxSize = length
} }
handler.log("ChunkWriteStart", "id", id, "maxSize", i64toa(maxSize), "offset", i64toa(offset))
var bytesWritten int64 var bytesWritten int64
// Prevent a nil pointer derefernce when accessing the body which may not be // Prevent a nil pointer derefernce when accessing the body which may not be
// available in the case of a malicious request. // available in the case of a malicious request.
@ -444,6 +441,8 @@ func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.Resp
} }
} }
handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))
// Send new offset to client // Send new offset to client
newOffset := offset + bytesWritten newOffset := offset + bytesWritten
w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10)) w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10))
@ -502,7 +501,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
// Do not do anything if no data is stored yet. // Do not do anything if no data is stored yet.
if info.Offset == 0 { if info.Offset == 0 {
w.WriteHeader(http.StatusNoContent) handler.sendResp(w, r, http.StatusNoContent)
return return
} }
@ -518,7 +517,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
} }
w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10)) w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10))
w.WriteHeader(http.StatusOK) handler.sendResp(w, r, http.StatusOK)
io.Copy(w, src) io.Copy(w, src)
// Try to close the reader if the io.Closer interface is implemented // Try to close the reader if the io.Closer interface is implemented
@ -566,7 +565,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
return return
} }
w.WriteHeader(http.StatusNoContent) handler.sendResp(w, r, http.StatusNoContent)
if handler.config.NotifyTerminatedUploads { if handler.config.NotifyTerminatedUploads {
handler.TerminatedUploads <- info handler.TerminatedUploads <- info
@ -598,9 +597,18 @@ func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request
w.WriteHeader(status) w.WriteHeader(status)
w.Write([]byte(reason)) w.Write([]byte(reason))
handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path, "error", err.Error())
go handler.Metrics.incErrorsTotal(err) go handler.Metrics.incErrorsTotal(err)
} }
// sendResp writes the header to w with the specified status code.
func (handler *UnroutedHandler) sendResp(w http.ResponseWriter, r *http.Request, status int) {
w.WriteHeader(status)
handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path)
}
// Make an absolute URLs to the given upload id. If the base path is absolute // Make an absolute URLs to the given upload id. If the base path is absolute
// it will be prepended else the host and protocol from the request is used. // it will be prepended else the host and protocol from the request is used.
func (handler *UnroutedHandler) absFileURL(r *http.Request, id string) string { func (handler *UnroutedHandler) absFileURL(r *http.Request, id string) string {
@ -772,3 +780,7 @@ func extractIDFromPath(url string) (string, error) {
} }
return result[1], nil return result[1], nil
} }
func i64toa(num int64) string {
return strconv.FormatInt(num, 10)
}