azurestore: Add implementation

* Add azure-storage-blob-go dependency

* Implement Azure BlobStorage store

* Add AzureStore Mock test

* Refactor Blob interfaces to use uppercase fields

* Refactor and remove the Create function
When getting the offset and the service returns the status code BlobNotFound, we can treat the offset as 0 and start from the beginning

* Update the mock

* Refactor error checking of GetOffset to actually check the service code

* Begin testing azurestore

* Write more tests

* New feature: allow setting the access type on new containers and the blob access tier

* Write more docs

* Upgrade azure-storage-blob-go to v0.13.0

* Remove AzError, not needed

* Update link to container access type information

* Remove ?toc from link in comments

* Remove trailing spaces from workflow

* Run tests with go1.15 and 1.16

* Don't fail fast
This lets all other tests complete and makes it easier to see whether a failure is a one-off or occurs across different OSes and versions

* Remove darwin 386 from `build_all.sh` script
darwin/386 support was removed in Go 1.15 (https://github.com/golang/go/issues/37610)

* Update go version in `Dockerfile`

* Compile for Apple Silicon (darwin arm64)
Only go1.16 supports it
Ole-Martin Bratteng, 2021-07-29 01:14:50 +02:00, committed by GitHub
parent 5be2afa2f8
commit 1b11885823
11 changed files with 1209 additions and 9 deletions


@@ -5,8 +5,9 @@ name: Test
 jobs:
   test:
     strategy:
+      fail-fast: false
       matrix:
-        go-version: [1.12.x, 1.13.x]
+        go-version: [1.15.x, 1.16.x]
         platform: [ubuntu-latest, macos-latest, windows-latest]
     runs-on: ${{ matrix.platform }}
     steps:


@@ -1,4 +1,4 @@
-FROM golang:1.13-alpine AS builder
+FROM golang:1.16-alpine AS builder
 
 # Copy in the git repo from the build context
 COPY . /go/src/github.com/tus/tusd/


@@ -1,10 +1,12 @@
 package cli
 
 import (
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
 
+	"github.com/tus/tusd/pkg/azurestore"
 	"github.com/tus/tusd/pkg/filelocker"
 	"github.com/tus/tusd/pkg/filestore"
 	"github.com/tus/tusd/pkg/gcsstore"
@@ -91,6 +93,49 @@ func CreateComposer() {
 		locker := memorylocker.New()
 		locker.UseIn(Composer)
+	} else if Flags.AzStorage != "" {
+
+		accountName := os.Getenv("AZURE_STORAGE_ACCOUNT")
+		if accountName == "" {
+			stderr.Fatalf("No service account name for Azure BlockBlob Storage using the AZURE_STORAGE_ACCOUNT environment variable.\n")
+		}
+
+		accountKey := os.Getenv("AZURE_STORAGE_KEY")
+		if accountKey == "" {
+			stderr.Fatalf("No service account key for Azure BlockBlob Storage using the AZURE_STORAGE_KEY environment variable.\n")
+		}
+
+		azureEndpoint := Flags.AzEndpoint
+
+		// Enables support for using Azurite as a storage emulator without messing with proxies and stuff
+		// e.g. http://127.0.0.1:10000/devstoreaccount1
+		if azureEndpoint == "" {
+			azureEndpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
+			stdout.Printf("Custom Azure Endpoint not specified in flag variable azure-endpoint.\n"+
+				"Using endpoint %s\n", azureEndpoint)
+		} else {
+			stdout.Printf("Using Azure endpoint %s\n", azureEndpoint)
+		}
+
+		azConfig := &azurestore.AzConfig{
+			AccountName:         accountName,
+			AccountKey:          accountKey,
+			ContainerName:       Flags.AzStorage,
+			ContainerAccessType: Flags.AzContainerAccessType,
+			BlobAccessTier:      Flags.AzBlobAccessTier,
+			Endpoint:            azureEndpoint,
+		}
+
+		azService, err := azurestore.NewAzureService(azConfig)
+		if err != nil {
+			stderr.Fatalf(err.Error())
+		}
+
+		store := azurestore.New(azService)
+		store.ObjectPrefix = Flags.AzObjectPrefix
+		store.Container = Flags.AzStorage
+		store.UseIn(Composer)
 	} else {
 		dir, err := filepath.Abs(Flags.UploadDir)
 		if err != nil {
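The same wiring can be reproduced when embedding tusd as a library instead of using the CLI. Below is a minimal, hedged sketch: it assumes the usual pkg/handler setup (handler.NewStoreComposer and handler.NewHandler, as used in tusd's other examples) and uses the well-known Azurite development account purely for illustration.

package main

import (
	"net/http"

	"github.com/tus/tusd/pkg/azurestore"
	"github.com/tus/tusd/pkg/handler"
)

func main() {
	// Illustrative values only: the account name/key below are the publicly
	// documented Azurite/storage-emulator development credentials, and the
	// endpoint points at a local Azurite instance.
	azConfig := &azurestore.AzConfig{
		AccountName:         "devstoreaccount1",
		AccountKey:          "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
		ContainerName:       "uploads",
		ContainerAccessType: "", // inherit the access type from the storage account
		BlobAccessTier:      "", // use the account's default access tier
		Endpoint:            "http://127.0.0.1:10000/devstoreaccount1",
	}

	azService, err := azurestore.NewAzureService(azConfig)
	if err != nil {
		panic(err)
	}

	// Build the store and register it as the core data store of a composer.
	store := azurestore.New(azService)
	store.Container = azConfig.ContainerName
	composer := handler.NewStoreComposer()
	store.UseIn(composer)

	h, err := handler.NewHandler(handler.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
	})
	if err != nil {
		panic(err)
	}

	http.Handle("/files/", http.StripPrefix("/files/", h))
	if err := http.ListenAndServe(":1080", nil); err != nil {
		panic(err)
	}
}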


@@ -29,6 +29,11 @@ var Flags struct {
 	S3DisableSSL          bool
 	GCSBucket             string
 	GCSObjectPrefix       string
+	AzStorage             string
+	AzContainerAccessType string
+	AzBlobAccessTier      string
+	AzObjectPrefix        string
+	AzEndpoint            string
 	EnabledHooksString    string
 	FileHooksDir          string
 	HttpHooksEndpoint     string
@@ -70,6 +75,11 @@ func ParseFlags() {
 	flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)")
 	flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)")
 	flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names (can't contain underscore character)")
+	flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variables to be set)")
+	flag.StringVar(&Flags.AzContainerAccessType, "azure-container-access-type", "", "Access type when creating a new container if it does not exist (possible values: blob, container, '')")
+	flag.StringVar(&Flags.AzBlobAccessTier, "azure-blob-access-tier", "", "Blob access tier when uploading new files (possible values: archive, cool, hot, '')")
+	flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names")
+	flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom endpoint to use for Azure BlockBlob Storage (requires azure-storage to be passed)")
 	flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events")
 	flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
 	flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent to")
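
With these flags in place, the Azure backend can be selected from the command line. For example (hypothetical values), after exporting AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY, running `tusd -azure-storage=my-container -azure-blob-access-tier=cool -azure-endpoint=http://127.0.0.1:10000/devstoreaccount1` would store uploads in the container `my-container`, here pointed at a local Azurite emulator via the custom endpoint.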

go.mod

@@ -4,6 +4,7 @@ go 1.12
 
 require (
 	cloud.google.com/go v0.40.0
+	github.com/Azure/azure-storage-blob-go v0.13.0
 	github.com/aws/aws-sdk-go v1.20.1
 	github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
 	github.com/golang/mock v1.3.1

go.sum

@@ -3,6 +3,17 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk=
 cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-storage-blob-go v0.12.0 h1:7bFXA1QB+lOK2/ASWHhp6/vnxjaeeZq6t8w1Jyp0Iaw=
+github.com/Azure/azure-storage-blob-go v0.12.0/go.mod h1:A0u4VjtpgZJ7Y7um/+ix2DHBuEKFC6sEIlj0xc13a4Q=
+github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc=
+github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -20,6 +31,7 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
@@ -51,6 +63,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
@@ -71,6 +85,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -80,10 +98,12 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
 github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -122,6 +142,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -134,8 +155,11 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -149,9 +173,13 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4 h1:kCCpuwSAoYJPkNc6x0xT9yTtV4oKtARo4RGBQWOfg9E=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -196,6 +224,7 @@ gopkg.in/Acconut/lockfile.v1 v1.1.0/go.mod h1:6UCz3wJ8tSFUsPR6uP/j8uegEtDuEEqFxl
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
 gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
 gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=


@@ -0,0 +1,310 @@
// Package azurestore provides an Azure Blob Storage based backend.
// AzureStore is a storage backend that uses the AzService interface in order to store uploads in Azure Blob Storage.
// It stores the uploads in a container using two different kinds of BlockBlob: the `[id].info` blobs store the fileinfo in JSON format, and the `[id]` blobs without an extension contain the raw binary data uploaded.
// If the upload is not finished within a week, the uncommitted blocks will be discarded.
// Support for setting the default container access type and blob access tier depends on your Azure Storage Account and its limits.
// More information about container access types and limits:
// https://docs.microsoft.com/en-us/azure/storage/blobs/anonymous-read-access-configure?tabs=portal
// More information about blob access tiers and limits:
// https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-performance-tiers
// https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#access-tiers-for-block-blob-data
package azurestore
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"fmt"
"io"
"net/url"
"sort"
"strings"
"github.com/Azure/azure-storage-blob-go/azblob"
)
const (
InfoBlobSuffix string = ".info"
MaxBlockBlobSize int64 = azblob.BlockBlobMaxBlocks * azblob.BlockBlobMaxStageBlockBytes
MaxBlockBlobChunkSize int64 = azblob.BlockBlobMaxStageBlockBytes
)
type azService struct {
BlobAccessTier azblob.AccessTierType
ContainerURL *azblob.ContainerURL
ContainerName string
}
type AzService interface {
NewBlob(ctx context.Context, name string) (AzBlob, error)
}
type AzConfig struct {
AccountName string
AccountKey string
BlobAccessTier string
ContainerName string
ContainerAccessType string
Endpoint string
}
type AzBlob interface {
// Delete the blob
Delete(ctx context.Context) error
// Upload the blob
Upload(ctx context.Context, body io.ReadSeeker) error
// Download the contents of the blob
Download(ctx context.Context) ([]byte, error)
// Get the offset of the blob and its indexes
GetOffset(ctx context.Context) (int64, error)
// Commit the uploaded blocks to the BlockBlob
Commit(ctx context.Context) error
}
type BlockBlob struct {
Blob *azblob.BlockBlobURL
AccessTier azblob.AccessTierType
Indexes []int
}
type InfoBlob struct {
Blob *azblob.BlockBlobURL
}
// NewAzureService creates a new Azure service for communication with the Azure BlockBlob Storage API
func NewAzureService(config *AzConfig) (AzService, error) {
// Create a credential object from the account name and key.
credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
if err != nil {
return nil, err
}
// Might be limited by the storage account
// "" or default inherits the access type from the Storage Account
var containerAccessType azblob.PublicAccessType
switch config.ContainerAccessType {
case "container":
containerAccessType = azblob.PublicAccessContainer
case "blob":
containerAccessType = azblob.PublicAccessBlob
case "":
default:
containerAccessType = azblob.PublicAccessNone
}
// Does not support the premium access tiers
var blobAccessTierType azblob.AccessTierType
switch config.BlobAccessTier {
case "archive":
blobAccessTierType = azblob.AccessTierArchive
case "cool":
blobAccessTierType = azblob.AccessTierCool
case "hot":
blobAccessTierType = azblob.AccessTierHot
case "":
default:
blobAccessTierType = azblob.DefaultAccessTier
}
// The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
cURL, _ := url.Parse(fmt.Sprintf("%s/%s", config.Endpoint, config.ContainerName))
// Get the ContainerURL URL
containerURL := azblob.NewContainerURL(*cURL, p)
// The response is ignored: the call fails if the container already exists and creates it if it does not.
_, _ = containerURL.Create(context.Background(), azblob.Metadata{}, containerAccessType)
return &azService{
BlobAccessTier: blobAccessTierType,
ContainerURL: &containerURL,
ContainerName: config.ContainerName,
}, nil
}
// Determine whether we return an InfoBlob or a BlockBlob, based on the name
func (service *azService) NewBlob(ctx context.Context, name string) (AzBlob, error) {
var fileBlob AzBlob
bb := service.ContainerURL.NewBlockBlobURL(name)
if strings.HasSuffix(name, InfoBlobSuffix) {
fileBlob = &InfoBlob{
Blob: &bb,
}
} else {
fileBlob = &BlockBlob{
Blob: &bb,
Indexes: []int{},
AccessTier: service.BlobAccessTier,
}
}
return fileBlob, nil
}
// Delete the blockBlob from Azure Blob Storage
func (blockBlob *BlockBlob) Delete(ctx context.Context) error {
_, err := blockBlob.Blob.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
return err
}
// Upload a block to Azure Blob Storage and add it to the indexes to be committed after the upload is finished
func (blockBlob *BlockBlob) Upload(ctx context.Context, body io.ReadSeeker) error {
// Keep track of the indexes
var index int
if len(blockBlob.Indexes) == 0 {
index = 0
} else {
index = blockBlob.Indexes[len(blockBlob.Indexes)-1] + 1
}
blockBlob.Indexes = append(blockBlob.Indexes, index)
_, err := blockBlob.Blob.StageBlock(ctx, blockIDIntToBase64(index), body, azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
if err != nil {
return err
}
return nil
}
// Download the blockBlob from Azure Blob Storage
func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err error) {
downloadResponse, err := blockBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
// If the file does not exist, it will not return an error, but a 404 status and body
if downloadResponse != nil && downloadResponse.StatusCode() == 404 {
return nil, fmt.Errorf("File %s does not exist", blockBlob.Blob.ToBlockBlobURL())
}
if err != nil {
return nil, err
}
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
downloadedData := bytes.Buffer{}
_, err = downloadedData.ReadFrom(bodyStream)
if err != nil {
return nil, err
}
return downloadedData.Bytes(), nil
}
func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) {
// Get the offset of the file from azure storage
// For the blob, show each block (ID and size) that is a committed part of it.
var indexes []int
var offset int64
getBlock, err := blockBlob.Blob.GetBlockList(ctx, azblob.BlockListAll, azblob.LeaseAccessConditions{})
if err != nil {
if err.(azblob.StorageError).ServiceCode() == azblob.ServiceCodeBlobNotFound {
return 0, nil
}
return 0, err
}
// Need committed blocks to be added to offset to know how big the file really is
for _, block := range getBlock.CommittedBlocks {
offset += block.Size
indexes = append(indexes, blockIDBase64ToInt(block.Name))
}
// Need to get the uncommitted blocks so that we can commit them
for _, block := range getBlock.UncommittedBlocks {
offset += block.Size
indexes = append(indexes, blockIDBase64ToInt(block.Name))
}
// Sort the block IDs in ascending order. This is required as Azure returns the block lists alphabetically
// and we store the indexes as base64 encoded ints.
sort.Ints(indexes)
blockBlob.Indexes = indexes
return offset, nil
}
// After all the blocks have been uploaded, we commit the staged blocks by sending a Block List
func (blockBlob *BlockBlob) Commit(ctx context.Context) error {
base64BlockIDs := make([]string, len(blockBlob.Indexes))
for index, id := range blockBlob.Indexes {
base64BlockIDs[index] = blockIDIntToBase64(id)
}
_, err := blockBlob.Blob.CommitBlockList(ctx, base64BlockIDs, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, blockBlob.AccessTier, nil, azblob.ClientProvidedKeyOptions{})
return err
}
// Delete the infoBlob from Azure Blob Storage
func (infoBlob *InfoBlob) Delete(ctx context.Context) error {
_, err := infoBlob.Blob.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
return err
}
// Upload the infoBlob to Azure Blob Storage
// Because the info file is presumed to be smaller than azblob.BlockBlobMaxUploadBlobBytes (256MiB), we can upload it all in one go
// Newly uploaded data will create a new block blob or overwrite the existing one
func (infoBlob *InfoBlob) Upload(ctx context.Context, body io.ReadSeeker) error {
_, err := infoBlob.Blob.Upload(ctx, body, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{})
return err
}
// Download the infoBlob from Azure Blob Storage
func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
downloadResponse, err := infoBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
// If the file does not exist, it will not return an error, but a 404 status and body
if downloadResponse != nil && downloadResponse.StatusCode() == 404 {
return nil, fmt.Errorf("File %s does not exist", infoBlob.Blob.ToBlockBlobURL())
}
if err != nil {
return nil, err
}
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
downloadedData := bytes.Buffer{}
_, err = downloadedData.ReadFrom(bodyStream)
if err != nil {
return nil, err
}
return downloadedData.Bytes(), nil
}
// infoBlob does not utilise offset, so just return 0, nil
func (infoBlob *InfoBlob) GetOffset(ctx context.Context) (int64, error) {
return 0, nil
}
// infoBlob does not have uncommitted blocks, so just return nil
func (infoBlob *InfoBlob) Commit(ctx context.Context) error {
return nil
}
// === Helper Functions ===
// These helper functions convert a binary block ID to a base-64 string and vice versa
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the blob must be the same length
func blockIDBinaryToBase64(blockID []byte) string {
return base64.StdEncoding.EncodeToString(blockID)
}
func blockIDBase64ToBinary(blockID string) []byte {
binary, _ := base64.StdEncoding.DecodeString(blockID)
return binary
}
// These helper functions convert an int block ID to a base-64 string and vice versa
func blockIDIntToBase64(blockID int) string {
binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long
binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
return blockIDBinaryToBase64(binaryBlockID)
}
func blockIDBase64ToInt(blockID string) int {
blockIDBase64ToBinary(blockID)
return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID)))
}
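The helpers above fix every block ID to a 4-byte little-endian integer before base64-encoding it, which keeps all IDs the same length (an Azure requirement) and lets GetOffset sort decoded IDs back into upload order. A small standalone sketch of the same scheme (the real helpers are unexported, so the code below merely mirrors them for illustration):

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// encodeBlockID mirrors blockIDIntToBase64: a fixed-width, base64-encoded
// 4-byte little-endian integer.
func encodeBlockID(blockID int) string {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, uint32(blockID))
	return base64.StdEncoding.EncodeToString(buf)
}

// decodeBlockID mirrors blockIDBase64ToInt: decode the base64 string back to an int.
func decodeBlockID(blockID string) int {
	buf, _ := base64.StdEncoding.DecodeString(blockID)
	return int(binary.LittleEndian.Uint32(buf))
}

func main() {
	for _, id := range []int{0, 1, 25, 256} {
		enc := encodeBlockID(id)
		fmt.Printf("%d -> %q -> %d\n", id, enc, decodeBlockID(enc))
	}
}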


@@ -0,0 +1,232 @@
package azurestore
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"strings"
"github.com/tus/tusd/internal/uid"
"github.com/tus/tusd/pkg/handler"
)
type AzureStore struct {
Service AzService
ObjectPrefix string
Container string
}
type AzUpload struct {
ID string
InfoBlob AzBlob
BlockBlob AzBlob
InfoHandler *handler.FileInfo
}
func New(service AzService) *AzureStore {
return &AzureStore{
Service: service,
}
}
// UseIn sets this store as the core data store in the passed composer and adds
// all possible extensions to it.
func (store AzureStore) UseIn(composer *handler.StoreComposer) {
composer.UseCore(store)
composer.UseTerminater(store)
composer.UseLengthDeferrer(store)
}
func (store AzureStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
if info.ID == "" {
info.ID = uid.Uid()
}
if info.Size > MaxBlockBlobSize {
return nil, fmt.Errorf("azurestore: max upload of %v bytes exceeded MaxBlockBlobSize of %v bytes",
info.Size, MaxBlockBlobSize)
}
blockBlob, err := store.Service.NewBlob(ctx, store.keyWithPrefix(info.ID))
if err != nil {
return nil, err
}
infoFile := store.keyWithPrefix(store.infoPath(info.ID))
infoBlob, err := store.Service.NewBlob(ctx, infoFile)
if err != nil {
return nil, err
}
info.Storage = map[string]string{
"Type": "azurestore",
"Container": store.Container,
"Key": store.keyWithPrefix(info.ID),
}
azUpload := &AzUpload{
ID: info.ID,
InfoHandler: &info,
InfoBlob: infoBlob,
BlockBlob: blockBlob,
}
err = azUpload.writeInfo(ctx)
if err != nil {
return nil, fmt.Errorf("azurestore: unable to create InfoHandler file:\n%s", err)
}
return azUpload, nil
}
func (store AzureStore) GetUpload(ctx context.Context, id string) (handle handler.Upload, err error) {
info := handler.FileInfo{}
infoFile := store.keyWithPrefix(store.infoPath(id))
infoBlob, err := store.Service.NewBlob(ctx, infoFile)
if err != nil {
return nil, err
}
// Download the info file from Azure Storage
data, err := infoBlob.Download(ctx)
if err != nil {
return nil, err
}
if err := json.Unmarshal(data, &info); err != nil {
return nil, err
}
if info.Size > MaxBlockBlobSize {
return nil, fmt.Errorf("azurestore: max upload of %v bytes exceeded MaxBlockBlobSize of %v bytes",
info.Size, MaxBlockBlobSize)
}
blockBlob, err := store.Service.NewBlob(ctx, store.keyWithPrefix(info.ID))
if err != nil {
return nil, err
}
offset, err := blockBlob.GetOffset(ctx)
if err != nil {
return nil, err
}
info.Offset = offset
return &AzUpload{
ID: id,
InfoHandler: &info,
InfoBlob: infoBlob,
BlockBlob: blockBlob,
}, nil
}
func (store AzureStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
return upload.(*AzUpload)
}
func (store AzureStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
return upload.(*AzUpload)
}
func (upload *AzUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
r := bufio.NewReader(src)
buf := new(bytes.Buffer)
n, err := r.WriteTo(buf)
if err != nil {
return 0, err
}
chunkSize := int64(binary.Size(buf.Bytes()))
if chunkSize > MaxBlockBlobChunkSize {
return 0, fmt.Errorf("azurestore: Chunk of size %v too large. Max chunk size is %v", chunkSize, MaxBlockBlobChunkSize)
}
re := bytes.NewReader(buf.Bytes())
err = upload.BlockBlob.Upload(ctx, re)
if err != nil {
return 0, err
}
upload.InfoHandler.Offset += n
return n, nil
}
func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
info := handler.FileInfo{}
if upload.InfoHandler != nil {
return *upload.InfoHandler, nil
}
data, err := upload.InfoBlob.Download(ctx)
if err != nil {
return info, err
}
if err := json.Unmarshal(data, &info); err != nil {
return info, err
}
upload.InfoHandler = &info
return info, nil
}
// Get the uploaded file from the Azure storage
func (upload *AzUpload) GetReader(ctx context.Context) (io.Reader, error) {
b, err := upload.BlockBlob.Download(ctx)
if err != nil {
return nil, err
}
return bytes.NewReader(b), nil
}
// Finish the file upload and commit the block list
func (upload *AzUpload) FinishUpload(ctx context.Context) error {
return upload.BlockBlob.Commit(ctx)
}
func (upload *AzUpload) Terminate(ctx context.Context) error {
// Delete info file
err := upload.InfoBlob.Delete(ctx)
if err != nil {
return err
}
// Delete file
return upload.BlockBlob.Delete(ctx)
}
func (upload *AzUpload) DeclareLength(ctx context.Context, length int64) error {
upload.InfoHandler.Size = length
upload.InfoHandler.SizeIsDeferred = false
return upload.writeInfo(ctx)
}
func (store AzureStore) infoPath(id string) string {
return id + InfoBlobSuffix
}
func (upload *AzUpload) writeInfo(ctx context.Context) error {
data, err := json.Marshal(upload.InfoHandler)
if err != nil {
return err
}
reader := bytes.NewReader(data)
return upload.InfoBlob.Upload(ctx, reader)
}
func (store *AzureStore) keyWithPrefix(key string) string {
prefix := store.ObjectPrefix
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
return prefix + key
}
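For context, this is roughly the lifecycle the tusd handler drives through the store above: NewUpload writes the `.info` blob, each WriteChunk stages an uncommitted block, and FinishUpload commits the block list. A hedged sketch (error handling trimmed, `store` assumed to be wired up as in the composer example earlier):

package azuresketch

import (
	"bytes"
	"context"

	"github.com/tus/tusd/pkg/azurestore"
	"github.com/tus/tusd/pkg/handler"
)

// uploadSketch pushes a single in-memory payload through the store the same
// way the handler would: create the upload, write one chunk, then commit.
func uploadSketch(ctx context.Context, store *azurestore.AzureStore, payload []byte) error {
	// An empty ID lets NewUpload generate one; the .info blob is written here.
	upload, err := store.NewUpload(ctx, handler.FileInfo{Size: int64(len(payload))})
	if err != nil {
		return err
	}
	// Stage the payload as one uncommitted block at offset 0.
	if _, err := upload.WriteChunk(ctx, 0, bytes.NewReader(payload)); err != nil {
		return err
	}
	// Commit the block list so the blob becomes readable.
	return upload.FinishUpload(ctx)
}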


@@ -0,0 +1,146 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/tus/tusd/pkg/azurestore (interfaces: AzService,AzBlob)
// Package azurestore_test is a generated GoMock package.
package azurestore_test
import (
context "context"
gomock "github.com/golang/mock/gomock"
azurestore "github.com/tus/tusd/pkg/azurestore"
io "io"
reflect "reflect"
)
// MockAzService is a mock of AzService interface
type MockAzService struct {
ctrl *gomock.Controller
recorder *MockAzServiceMockRecorder
}
// MockAzServiceMockRecorder is the mock recorder for MockAzService
type MockAzServiceMockRecorder struct {
mock *MockAzService
}
// NewMockAzService creates a new mock instance
func NewMockAzService(ctrl *gomock.Controller) *MockAzService {
mock := &MockAzService{ctrl: ctrl}
mock.recorder = &MockAzServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockAzService) EXPECT() *MockAzServiceMockRecorder {
return m.recorder
}
// NewBlob mocks base method
func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.AzBlob, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewBlob", arg0, arg1)
ret0, _ := ret[0].(azurestore.AzBlob)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NewBlob indicates an expected call of NewBlob
func (mr *MockAzServiceMockRecorder) NewBlob(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlob", reflect.TypeOf((*MockAzService)(nil).NewBlob), arg0, arg1)
}
// MockAzBlob is a mock of AzBlob interface
type MockAzBlob struct {
ctrl *gomock.Controller
recorder *MockAzBlobMockRecorder
}
// MockAzBlobMockRecorder is the mock recorder for MockAzBlob
type MockAzBlobMockRecorder struct {
mock *MockAzBlob
}
// NewMockAzBlob creates a new mock instance
func NewMockAzBlob(ctrl *gomock.Controller) *MockAzBlob {
mock := &MockAzBlob{ctrl: ctrl}
mock.recorder = &MockAzBlobMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockAzBlob) EXPECT() *MockAzBlobMockRecorder {
return m.recorder
}
// Commit mocks base method
func (m *MockAzBlob) Commit(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Commit indicates an expected call of Commit
func (mr *MockAzBlobMockRecorder) Commit(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockAzBlob)(nil).Commit), arg0)
}
// Delete mocks base method
func (m *MockAzBlob) Delete(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete
func (mr *MockAzBlobMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAzBlob)(nil).Delete), arg0)
}
// Download mocks base method
func (m *MockAzBlob) Download(arg0 context.Context) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Download", arg0)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Download indicates an expected call of Download
func (mr *MockAzBlobMockRecorder) Download(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockAzBlob)(nil).Download), arg0)
}
// GetOffset mocks base method
func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetOffset", arg0)
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetOffset indicates an expected call of GetOffset
func (mr *MockAzBlobMockRecorder) GetOffset(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOffset", reflect.TypeOf((*MockAzBlob)(nil).GetOffset), arg0)
}
// Upload mocks base method
func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Upload", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Upload indicates an expected call of Upload
func (mr *MockAzBlobMockRecorder) Upload(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockAzBlob)(nil).Upload), arg0, arg1)
}


@@ -0,0 +1,426 @@
package azurestore_test
import (
"bytes"
"context"
"encoding/json"
"errors"
"testing"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/tus/tusd/pkg/azurestore"
"github.com/tus/tusd/pkg/handler"
)
//go:generate mockgen -destination=./azurestore_mock_test.go -package=azurestore_test github.com/tus/tusd/pkg/azurestore AzService,AzBlob
// Test interface implementations
var _ handler.DataStore = azurestore.AzureStore{}
var _ handler.TerminaterDataStore = azurestore.AzureStore{}
var _ handler.LengthDeferrerDataStore = azurestore.AzureStore{}
const mockID = "123456789abcdefghijklmnopqrstuvwxyz"
const mockContainer = "tusd"
const mockSize int64 = 4096
const mockReaderData = "Hello World"
var mockTusdInfo = handler.FileInfo{
ID: mockID,
Size: mockSize,
MetaData: map[string]string{
"foo": "bar",
},
Storage: map[string]string{
"Type": "azurestore",
"Container": mockContainer,
"Key": mockID,
},
}
func TestNewUpload(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
data, err := json.Marshal(mockTusdInfo)
assert.Nil(err)
r := bytes.NewReader(data)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID).Return(NewMockAzBlob(mockCtrl), nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
)
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
assert.Nil(err)
assert.NotNil(upload)
}
func TestNewUploadWithPrefix(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
objectPrefix := "/path/to/file/"
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
store.ObjectPrefix = objectPrefix
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
info := mockTusdInfo
info.Storage = map[string]string{
"Type": "azurestore",
"Container": mockContainer,
"Key": objectPrefix + mockID,
}
data, err := json.Marshal(info)
assert.Nil(err)
r := bytes.NewReader(data)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, objectPrefix+mockID).Return(NewMockAzBlob(mockCtrl), nil).Times(1),
service.EXPECT().NewBlob(ctx, objectPrefix+mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
)
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
assert.Nil(err)
assert.NotNil(upload)
}
func TestNewUploadTooLargeBlob(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
info := mockTusdInfo
info.Size = azurestore.MaxBlockBlobSize + 1
upload, err := store.NewUpload(ctx, info)
assert.Nil(upload)
assert.NotNil(err)
assert.Contains(err.Error(), "exceeded MaxBlockBlobSize")
assert.Contains(err.Error(), "209715200000001")
}
func TestGetUpload(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
blockBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(blockBlob)
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
data, err := json.Marshal(mockTusdInfo)
assert.Nil(err)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(err)
info, err := upload.GetInfo(ctx)
assert.Nil(err)
assert.NotNil(info)
cancel()
}
func TestGetUploadTooLargeBlob(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
info := mockTusdInfo
info.Size = azurestore.MaxBlockBlobSize + 1
data, err := json.Marshal(info)
assert.Nil(err)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(upload)
assert.NotNil(err)
assert.Contains(err.Error(), "exceeded MaxBlockBlobSize")
assert.Contains(err.Error(), "209715200000001")
cancel()
}
func TestGetUploadNotFound(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
ctx := context.Background()
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(nil, errors.New(string(azblob.StorageErrorCodeBlobNotFound))).Times(1),
)
_, err := store.GetUpload(context.Background(), mockID)
assert.NotNil(err)
assert.Equal(err.Error(), "BlobNotFound")
}
func TestGetReader(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
blockBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(blockBlob)
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
data, err := json.Marshal(mockTusdInfo)
assert.Nil(err)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
blockBlob.EXPECT().Download(ctx).Return([]byte(mockReaderData), nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(err)
reader, err := upload.GetReader(ctx)
assert.Nil(err)
assert.NotNil(reader)
cancel()
}
func TestWriteChunk(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
blockBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(blockBlob)
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
data, err := json.Marshal(mockTusdInfo)
assert.Nil(err)
var offset int64 = mockSize / 2
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
blockBlob.EXPECT().Upload(ctx, bytes.NewReader([]byte(mockReaderData))).Return(nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(err)
_, err = upload.WriteChunk(ctx, offset, bytes.NewReader([]byte(mockReaderData)))
assert.Nil(err)
cancel()
}
func TestFinishUpload(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
blockBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(blockBlob)
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
data, err := json.Marshal(mockTusdInfo)
assert.Nil(err)
var offset int64 = mockSize / 2
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
blockBlob.EXPECT().Commit(ctx).Return(nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(err)
err = upload.FinishUpload(ctx)
assert.Nil(err)
cancel()
}
func TestTerminate(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
blockBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(blockBlob)
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
data, err := json.Marshal(mockTusdInfo)
assert.Nil(err)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
infoBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
blockBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(err)
err = store.AsTerminatableUpload(upload).Terminate(ctx)
assert.Nil(err)
cancel()
}
func TestDeclareLength(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
assert := assert.New(t)
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
service := NewMockAzService(mockCtrl)
store := azurestore.New(service)
store.Container = mockContainer
blockBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(blockBlob)
infoBlob := NewMockAzBlob(mockCtrl)
assert.NotNil(infoBlob)
info := mockTusdInfo
info.Size = mockSize * 2
data, err := json.Marshal(info)
assert.Nil(err)
r := bytes.NewReader(data)
gomock.InOrder(
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
)
upload, err := store.GetUpload(ctx, mockID)
assert.Nil(err)
err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, mockSize*2)
assert.Nil(err)
info, err = upload.GetInfo(ctx)
assert.Nil(err)
assert.NotNil(info)
assert.Equal(info.Size, mockSize*2)
cancel()
}
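The mock used by these tests is regenerated from the go:generate directive at the top of the file (mockgen writes azurestore_mock_test.go for the AzService and AzBlob interfaces), so changes to those interfaces only require rerunning go generate for the package before go test.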


@@ -10,8 +10,8 @@ compile linux 386
 compile linux amd64
 compile linux arm
 compile linux arm64
-compile darwin 386
 compile darwin amd64
+compile darwin arm64
 compile windows 386 .exe
 compile windows amd64 .exe
@@ -19,8 +19,8 @@ maketar linux 386
 maketar linux amd64
 maketar linux arm
 maketar linux arm64
-makezip darwin 386
 makezip darwin amd64
+makezip darwin arm64
 makezip windows 386 .exe
 makezip windows amd64 .exe
 makedep amd64