Merge branch 'master' into vendor_govendor
commit c61a300693

@@ -33,15 +33,9 @@ infra variable {
}

infra output {
public_address {
value = "${aws_instance.tusd.0.public_dns}"
}
public_addresses {
value = "${join("\n", aws_instance.tusd.*.public_dns)}"
}
endpoint {
value = "http://${aws_route53_record.www.name}:80/"
}
public_address { value = "${aws_instance.tusd.0.public_dns}" }
public_addresses { value = "${join("\n", aws_instance.tusd.*.public_dns)}" }
endpoint { value = "http://${aws_route53_record.www.name}:80/" }
}

infra resource aws_instance tusd {

@@ -53,13 +47,11 @@ infra resource aws_instance tusd {
key_file = "{{{config.global.ssh.privatekey_file}}}"
user = "{{{config.global.ssh.user}}}"
}
tags {
"Name" = "${var.FREY_DOMAIN}"
}
tags { Name = "master.tus.io" }
}

infra resource "aws_route53_record" www {
name = "${var.FREY_DOMAIN}"
name = "master.tus.io"
records = ["${aws_instance.tusd.public_dns}"]
ttl = "300"
type = "CNAME"

@@ -110,7 +102,7 @@ install {
}
tasks {
name = "Common | Set motd"
copy = "content='Welcome to {{lookup('env', 'FREY_DOMAIN')}}' dest=/etc/motd owner=root group=root mode=0644 backup=yes"
copy = "content='Welcome to master.tus.io' dest=/etc/motd owner=root group=root mode=0644 backup=yes"
}
tasks {
name = "Common | Set timezone variables"

@@ -151,12 +143,20 @@ setup {
}
roles {
role = "{{{init.paths.roles_dir}}}/fqdn/v1.0.0"
fqdn = "{{lookup('env', 'FREY_DOMAIN')}}"
fqdn = "master.tus.io"
}
tasks {
file = "path=/mnt/tusd-data state=directory owner=www-data group=www-data mode=0755 recurse=yes"
file = "path=/mnt/tusd-data state=directory owner=www-data group=ubuntu mode=ug+rwX,o= recurse=yes"
name = "tusd | Create tusd data dir"
}
tasks {
name = "tusd | Create purger crontab (clean up >24h (1400minutes) files)"
cron {
name = "purger"
special_time = "hourly"
job = "find /mnt/tusd-data -type f -mmin +1440 -print0 | xargs -n 200 -r -0 rm || true"
}
}
}
}

@@ -2,9 +2,7 @@
# So suitable for adding secret keys and such

# export DEBUG="frey:*"
# export FREY_DOMAIN="master.tus.io"
# export FREY_ENCRYPTION_SECRET="***"

# source env.sh
# travis encrypt --add env.global "FREY_DOMAIN=${FREY_DOMAIN}"
# travis encrypt --add env.global "FREY_ENCRYPTION_SECRET=${FREY_ENCRYPTION_SECRET}"

@@ -0,0 +1,84 @@
#!/usr/bin/env bash

set -e

version=$TRAVIS_TAG
commit=$TRAVIS_COMMIT

function compile {
local os=$1
local arch=$2
local ext=$3

echo "Compiling for $os/$arch..."

local dir="tusd_${os}_${arch}"
rm -rf "$dir"
mkdir -p "$dir"
GOOS=$os GOARCH=$arch go build \
-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
-o "$dir/tusd$ext" ./cmd/tusd/main.go
}

function makezip {
local os=$1
local arch=$2
local ext=$3

echo "Zipping for $os/$arch..."

local dir="tusd_${os}_${arch}"
zip "$dir.zip" "$dir/tusd$ext" LICENSE.txt README.md
}

function maketar {
local os=$1
local arch=$2

echo "Tarring for $os/$arch..."

local dir="tusd_${os}_${arch}"
tar -czf "$dir.tar.gz" "$dir/tusd" LICENSE.txt README.md
}

function makedep {
local arch=$1

echo "Debbing for $arch..."

local dir="tusd_snapshot_${arch}"
rm -rf "$dir"
mkdir -p "$dir"
mkdir -p "$dir/DEBIAN"
mkdir -p "$dir/usr/bin"
cp "./tusd_linux_${arch}/tusd" "./$dir/usr/bin/tusd"

echo "Package: tusd" >> "./$dir/DEBIAN/control"
echo "Maintainer: Marius <maerious@gmail.com>" >> "./$dir/DEBIAN/control"
echo "Section: devel" >> "./$dir/DEBIAN/control"
echo "Priority: optional" >> "./$dir/DEBIAN/control"
echo "Version: ${version}" >> "./$dir/DEBIAN/control"
echo "Architecture: ${arch}" >> "./$dir/DEBIAN/control"
echo "Homepage: https://github.com/tus/tusd" >> "./$dir/DEBIAN/control"
echo "Built-Using: $(go version)" >> "./$dir/DEBIAN/control"
echo "Description: The official server implementation of the tus resumable upload protocol." >> "./$dir/DEBIAN/control"

dpkg-deb --build "$dir"
}

compile linux 386
compile linux amd64
compile linux arm
compile darwin 386
compile darwin amd64
compile windows 386 .exe
compile windows amd64 .exe

maketar linux 386
maketar linux amd64
maketar linux arm
makezip darwin 386
makezip darwin amd64
makezip windows 386 .exe
makezip windows amd64 .exe
makedep amd64

.travis.yml
@@ -28,29 +28,18 @@ install:
script:
- go test $PACKAGES
before_deploy:
- export GOROOT_BOOTSTRAP=$GOROOT
- go get github.com/laher/goxc
- goxc -t -bc="linux darwin windows"
- goxc -d=./ -wd=./cmd/tusd -bc="linux darwin windows" -build-ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=$TRAVIS_TAG -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=$TRAVIS_COMMIT -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'"
- ./.scripts/build_all.sh
deploy:
provider: releases
api_key:
secure: dV3wr9ebEps3YrzIoqmkYc7fw0IECz7QLPRENPSxTJyd5TTYXGsnTS26cMe2LdGwYrXw0njt2GGovMyBZFTtxyYI3mMO4AZRwvZfx/yGzPWJBbVi6NjZVRg/bpyK+mQJ5BUlkPAYJmRpdc6qD+nvCGakBOxoByC5XDK+yM+bKFs=
file:
- snapshot/tusd_darwin_386.zip
- snapshot/tusd_darwin_amd64.zip
- snapshot/tusd_linux_386.tar.gz
- snapshot/tusd_linux_amd64.tar.gz
- snapshot/tusd_linux_arm.tar.gz
- snapshot/tusd_snapshot_amd64.deb
- snapshot/tusd_snapshot_armhf.deb
- snapshot/tusd_snapshot_i386.deb
- snapshot/tusd_windows_386.zip
- snapshot/tusd_windows_amd64.zip
file_glob: true
file: tusd_*.*
skip_cleanup: true
on:
all_branches: true
tags: true
go: 1.5
go: 1.7
repo: tus/tusd
after_deploy:
- make frey && frey setup --force-yes --projectDir .infra

@@ -10,10 +10,13 @@ install:
build_script:
- go env
- go version
- go get ./s3store
- go get ./consullocker

test_script:
- go test .
- go test ./filestore
- go test ./limitedstore
- go test ./memorylocker
- go test ./consullocker
- go test ./s3store

@@ -51,19 +51,17 @@ func SetupPostHooks(handler *tusd.Handler) {

func invokeHook(typ HookType, info tusd.FileInfo) {
go func() {
_, err := invokeHookSync(typ, info, false)
if err != nil {
stderr.Printf("Error running %s hook for %s: %s", string(typ), info.ID, err)
}
// Error handling is token care of by the function.
_, _ = invokeHookSync(typ, info, false)
}()
}

func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byte, error) {
switch typ {
case HookPostFinish:
stdout.Printf("Upload %s (%d bytes) finished\n", info.ID, info.Size)
logEv("UploadFinished", "id", info.ID, "size", strconv.FormatInt(info.Size, 10))
case HookPostTerminate:
stdout.Printf("Upload %s terminated\n", info.ID)
logEv("UploadTerminated", "id", info.ID)
}

if !Flags.HooksInstalled {

@@ -71,7 +69,7 @@ func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byt
}

name := string(typ)
stdout.Printf("Invoking %s hook…\n", name)
logEv("HookInvocationStart", "type", name, "id", info.ID)

cmd := exec.Command(Flags.HooksDir + "/" + name)
env := os.Environ()

@@ -100,10 +98,15 @@ func invokeHookSync(typ HookType, info tusd.FileInfo, captureOutput bool) ([]byt
output, err = cmd.Output()
}

if err != nil {
logEv("HookInvocationError", "type", string(typ), "id", info.ID, "error", err.Error())
} else {
logEv("HookInvocationFinish", "type", string(typ), "id", info.ID)
}

// Ignore the error, only, if the hook's file could not be found. This usually
// means that the user is only using a subset of the available hooks.
if os.IsNotExist(err) {
stdout.Printf("Unable to invoke %s hook: %s\n", name, err)
err = nil
}

@@ -3,7 +3,13 @@ package cli
import (
"log"
"os"

"github.com/tus/tusd"
)

var stdout = log.New(os.Stdout, "[tusd] ", 0)
var stderr = log.New(os.Stderr, "[tusd] ", 0)

func logEv(eventName string, details ...string) {
tusd.LogEvent(stderr, eventName, details...)
}

@@ -35,7 +35,7 @@ type Config struct {
Logger *log.Logger
// Respect the X-Forwarded-Host, X-Forwarded-Proto and Forwarded headers
// potentially set by proxies when generating an absolute URL in the
// reponse to POST requests.
// response to POST requests.
RespectForwardedHeaders bool
}

@@ -22,9 +22,9 @@ func TestCORS(t *testing.T) {
},
Code: http.StatusOK,
ResHeader: map[string]string{
"Access-Control-Allow-Headers": "",
"Access-Control-Allow-Methods": "",
"Access-Control-Max-Age": "",
"Access-Control-Allow-Headers": "Origin, X-Requested-With, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata",
"Access-Control-Allow-Methods": "POST, GET, HEAD, PATCH, DELETE, OPTIONS",
"Access-Control-Max-Age": "86400",
"Access-Control-Allow-Origin": "tus.io",
},
}).Run(handler, t)

@@ -37,7 +37,7 @@ func TestCORS(t *testing.T) {
},
Code: http.StatusMethodNotAllowed,
ResHeader: map[string]string{
"Access-Control-Expose-Headers": "",
"Access-Control-Expose-Headers": "Upload-Offset, Location, Upload-Length, Tus-Version, Tus-Resumable, Tus-Max-Size, Tus-Extension, Upload-Metadata",
"Access-Control-Allow-Origin": "tus.io",
},
}).Run(handler, t)

@@ -69,7 +69,7 @@ type FinisherDataStore interface {
// Common ways to store this information is in memory, on disk or using an
// external service, such as ZooKeeper.
// When multiple processes are attempting to access an upload, whether it be
// by reading or writing, a syncronization mechanism is required to prevent
// by reading or writing, a synchronization mechanism is required to prevent
// data corruption, especially to ensure correct offset values and the proper
// order of chunks inside a single upload.
type LockerDataStore interface {

@@ -7,9 +7,9 @@
// No cleanup is performed so you may want to run a cronjob to ensure your disk
// is not filled up with old and finished uploads.
//
// In addition, it provides an exclusive upload locking mechansim using lock files
// In addition, it provides an exclusive upload locking mechanism using lock files
// which are stored on disk. Each of them stores the PID of the process which
// aquired the lock. This allows locks to be automatically freed when a process
// acquired the lock. This allows locks to be automatically freed when a process
// is unable to release it on its own because the process is not alive anymore.
// For more information, consult the documentation for tusd.LockerDataStore
// interface, which is implemented by FileStore

@@ -161,12 +161,12 @@ func (store FileStore) UnlockUpload(id string) error {

// A "no such file or directory" will be returned if no lockfile was found.
// Since this means that the file has never been locked, we drop the error
// and continue as if nothing happend.
// and continue as if nothing happened.
if os.IsNotExist(err) {
err = nil
}

return nil
return err
}

// newLock contructs a new Lockfile instance.

@@ -61,11 +61,7 @@ func (test *httpTest) Run(handler http.Handler, t *testing.T) *httptest.Response
for key, value := range test.ResHeader {
header := w.HeaderMap.Get(key)

if value == "" && header == "" {
t.Errorf("Expected '%s' in response", key)
}

if value != "" && value != header {
if value != header {
t.Errorf("Expected '%s' as '%s' (got '%s')", value, key, header)
}
}

@@ -0,0 +1,27 @@
package tusd

import (
"log"
)

func (h *UnroutedHandler) log(eventName string, details ...string) {
LogEvent(h.logger, eventName, details...)
}

func LogEvent(logger *log.Logger, eventName string, details ...string) {
result := make([]byte, 0, 100)

result = append(result, `event="`...)
result = append(result, eventName...)
result = append(result, `" `...)

for i := 0; i < len(details); i += 2 {
result = append(result, details[i]...)
result = append(result, `="`...)
result = append(result, details[i+1]...)
result = append(result, `" `...)
}

result = append(result, "\n"...)
logger.Output(2, string(result))
}

@@ -1,12 +1,12 @@
// Package memorylocker provides an in-memory locking mechanism.
//
// When multiple processes are attempting to access an upload, whether it be
// by reading or writing, a syncronization mechanism is required to prevent
// by reading or writing, a synchronization mechanism is required to prevent
// data corruption, especially to ensure correct offset values and the proper
// order of chunks inside a single upload.
//
// MemoryLocker persists locks using memory and therefore allowing a simple and
// cheap mechansim. Locks will only exist as long as this object is kept in
// cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits.
package memorylocker

@@ -17,7 +17,7 @@ import (
)

// MemoryLocker persists locks using memory and therefore allowing a simple and
// cheap mechansim. Locks will only exist as long as this object is kept in
// cheap mechanism. Locks will only exist as long as this object is kept in
// reference and will be erased if the program exits.
type MemoryLocker struct {
locks map[string]bool

@@ -80,7 +80,7 @@ func newMetrics() Metrics {
func newErrorsTotalMap() map[string]*uint64 {
m := make(map[string]*uint64, len(ErrStatusCodes)+1)

for err, _ := range ErrStatusCodes {
for err := range ErrStatusCodes {
m[err.Error()] = new(uint64)
}

@@ -201,10 +201,16 @@ func TestPostWithUpload(t *testing.T) {
Method: "POST",
ReqHeader: map[string]string{
"Tus-Resumable": "1.0.0",
"Upload-Length": "300",
"Upload-Metadata": "foo aGVsbG8=, bar d29ybGQ=",
"Content-Type": "application/false",
},
ReqBody: strings.NewReader("hello"),
Code: http.StatusBadRequest,
Code: http.StatusCreated,
ResHeader: map[string]string{
"Location": "http://tus.io/files/foo",
"Upload-Offset": "",
},
}).Run(handler, t)

(&httpTest{

@@ -5,7 +5,7 @@ import (
)

func newMultiError(errs []error) error {
message := "Multiple errors occured:\n"
message := "Multiple errors occurred:\n"
for _, err := range errs {
message += "\t" + err.Error() + "\n"
}

@@ -272,7 +272,7 @@ func (store S3Store) WriteChunk(id string, offset int64, src io.Reader) (int64,
func (store S3Store) GetInfo(id string) (info tusd.FileInfo, err error) {
uploadId, multipartId := splitIds(id)

// Get file info stored in seperate object
// Get file info stored in separate object
res, err := store.Service.GetObject(&s3.GetObjectInput{
Bucket: aws.String(store.Bucket),
Key: aws.String(uploadId + ".info"),

@@ -335,7 +335,7 @@ func (store S3Store) GetReader(id string) (io.Reader, error) {
Key: aws.String(uploadId),
})
if err == nil {
// No error occured, and we are able to stream the object
// No error occurred, and we are able to stream the object
return res.Body, nil
}

@@ -538,7 +538,7 @@ func TestTerminateWithErrors(t *testing.T) {
}, nil)

err := store.Terminate("uploadId+multipartId")
assert.Equal("Multiple errors occured:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
}

func TestConcatUploads(t *testing.T) {

@@ -15,7 +15,7 @@ func Uid() string {
id := make([]byte, 16)
_, err := io.ReadFull(rand.Reader, id)
if err != nil {
// This is probably an appropiate way to handle errors from our source
// This is probably an appropriate way to handle errors from our source
// for random bits.
panic(err)
}

@@ -126,10 +126,9 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
r.Method = newMethod
}

go func() {
handler.logger.Println(r.Method, r.URL.Path)
handler.Metrics.incRequestsTotal(r.Method)
}()
handler.log("RequestIncoming", "method", r.Method, "path", r.URL.Path)

go handler.Metrics.incRequestsTotal(r.Method)

header := w.Header()

@@ -171,7 +170,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// will be ignored or interpreted as a rejection.
// For example, the Presto engine, which is used in older versions of
// Opera, Opera Mobile and Opera Mini, handles CORS this way.
w.WriteHeader(http.StatusOK)
handler.sendResp(w, r, http.StatusOK)
return
}

@@ -191,15 +190,10 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
// PostFile creates a new file upload using the datastore after validating the
// length and parsing the metadata.
func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
// Check for presence of application/offset+octet-stream
containsChunk := false
if contentType := r.Header.Get("Content-Type"); contentType != "" {
if contentType != "application/offset+octet-stream" {
handler.sendError(w, r, ErrInvalidContentType)
return
}
containsChunk = true
}
// Check for presence of application/offset+octet-stream. If another content
// type is defined, it will be ignored and treated as none was set because
// some HTTP clients may enforce a default value for this header.
containsChunk := r.Header.Get("Content-Type") == "application/offset+octet-stream"

// Only use the proper Upload-Concat header if the concatenation extension
// is even supported by the data store.

@@ -268,6 +262,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
w.Header().Set("Location", url)

go handler.Metrics.incUploadsCreated()
handler.log("UploadCreated", "id", id, "size", i64toa(size), "url", url)

if isFinal {
if err := handler.composer.Concater.ConcatUploads(id, partialUploads); err != nil {

@@ -299,7 +294,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
}
}

w.WriteHeader(http.StatusCreated)
handler.sendResp(w, r, http.StatusCreated)
}

// HeadFile returns the length and offset for the HEAD request

@@ -347,7 +342,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
w.Header().Set("Cache-Control", "no-store")
w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
w.Header().Set("Upload-Offset", strconv.FormatInt(info.Offset, 10))
w.WriteHeader(http.StatusOK)
handler.sendResp(w, r, http.StatusOK)
}

// PatchFile adds a chunk to an upload. Only allowed enough space is left.

@@ -402,7 +397,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
// Do not proxy the call to the data store if the upload is already completed
if info.Offset == info.Size {
w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
w.WriteHeader(http.StatusNoContent)
handler.sendResp(w, r, http.StatusNoContent)
return
}

@@ -411,7 +406,7 @@ func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request
return
}

w.WriteHeader(http.StatusNoContent)
handler.sendResp(w, r, http.StatusNoContent)
}

// PatchFile adds a chunk to an upload. Only allowed enough space is left.

@@ -430,6 +425,8 @@ func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.Resp
maxSize = length
}

handler.log("ChunkWriteStart", "id", id, "maxSize", i64toa(maxSize), "offset", i64toa(offset))

var bytesWritten int64
// Prevent a nil pointer derefernce when accessing the body which may not be
// available in the case of a malicious request.

@@ -444,6 +441,8 @@ func (handler *UnroutedHandler) writeChunk(id string, info FileInfo, w http.Resp
}
}

handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))

// Send new offset to client
newOffset := offset + bytesWritten
w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10))

@@ -502,7 +501,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)

// Do not do anything if no data is stored yet.
if info.Offset == 0 {
w.WriteHeader(http.StatusNoContent)
handler.sendResp(w, r, http.StatusNoContent)
return
}

@@ -518,7 +517,7 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)
}

w.Header().Set("Content-Length", strconv.FormatInt(info.Offset, 10))
w.WriteHeader(http.StatusOK)
handler.sendResp(w, r, http.StatusOK)
io.Copy(w, src)

// Try to close the reader if the io.Closer interface is implemented

@@ -566,7 +565,7 @@ func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request)
return
}

w.WriteHeader(http.StatusNoContent)
handler.sendResp(w, r, http.StatusNoContent)

if handler.config.NotifyTerminatedUploads {
handler.TerminatedUploads <- info

@@ -598,9 +597,18 @@ func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request
w.WriteHeader(status)
w.Write([]byte(reason))

handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path, "error", err.Error())

go handler.Metrics.incErrorsTotal(err)
}

// sendResp writes the header to w with the specified status code.
func (handler *UnroutedHandler) sendResp(w http.ResponseWriter, r *http.Request, status int) {
w.WriteHeader(status)

handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path)
}

// Make an absolute URLs to the given upload id. If the base path is absolute
// it will be prepended else the host and protocol from the request is used.
func (handler *UnroutedHandler) absFileURL(r *http.Request, id string) string {

@@ -772,3 +780,7 @@ func extractIDFromPath(url string) (string, error) {
}
return result[1], nil
}

func i64toa(num int64) string {
return strconv.FormatInt(num, 10)
}