Compare commits
389 Commits
Author | SHA1 | Date |
---|---|---|
Derrick Hammer | 7b20ce6a9a | |
dependabot[bot] | 2a401bc274 | |
dependabot[bot] | 596742fd98 | |
github-actions[bot] | fb0284b993 | |
Marius | e475df9895 | |
dependabot[bot] | 318aab451b | |
dependabot[bot] | 76fc247408 | |
dependabot[bot] | 912ff3df05 | |
dependabot[bot] | cfa859f866 | |
dependabot[bot] | d2bf62776b | |
Jonas Thelemann | db2b4918e2 | |
Marius | ee9f40158e | |
Marius | dcec3df83f | |
dependabot[bot] | a16d9a2ac6 | |
dependabot[bot] | 59afa4c213 | |
dependabot[bot] | 86518148b2 | |
dependabot[bot] | ae120982ae | |
dependabot[bot] | ec843312e5 | |
dependabot[bot] | 007e05618b | |
dependabot[bot] | c52a482720 | |
dependabot[bot] | d7b990e3c2 | |
dependabot[bot] | 3053ade119 | |
Christian Kaps | 7225439860 | |
Marius | 9cf626bf0c | |
Amir Noorani | 932bb30ad1 | |
dependabot[bot] | 4a46cf667e | |
dependabot[bot] | e48471d774 | |
dependabot[bot] | e8282eaa85 | |
dependabot[bot] | f4d0c2a0f0 | |
dependabot[bot] | b6a368c041 | |
dependabot[bot] | 969a6d3258 | |
dependabot[bot] | f8f13d65ef | |
Marius | 93f0cb2670 | |
dependabot[bot] | f1abfc2ec2 | |
dependabot[bot] | 5456d64d24 | |
dependabot[bot] | 3fa99b126f | |
dependabot[bot] | 75d8355990 | |
Ole-Martin Bratteng | f496cc14b3 | |
dependabot[bot] | 9ff2d54f0a | |
dependabot[bot] | 0e1169bdf1 | |
dependabot[bot] | 7b77b3e1ab | |
dependabot[bot] | ed5d1463c0 | |
dependabot[bot] | b3088981a0 | |
dependabot[bot] | a993a4c851 | |
dependabot[bot] | 3cc609b469 | |
dependabot[bot] | aa2fc1ea6e | |
dependabot[bot] | 53e7f7c64c | |
dependabot[bot] | d8090eb7c5 | |
zain naveed | bc2eb4763c | |
dependabot[bot] | 74d6904500 | |
Marius | 4be9e5f28f | |
dependabot[bot] | e4566bb289 | |
dependabot[bot] | 4f6ef0717e | |
dependabot[bot] | 02af377298 | |
dependabot[bot] | aedfe5536e | |
dependabot[bot] | 9d27c6a24e | |
dependabot[bot] | 3eea530d51 | |
dependabot[bot] | a1a5fd2121 | |
dependabot[bot] | 8512216524 | |
Marius | 537bdf02c5 | |
dependabot[bot] | 54c975fb1f | |
dependabot[bot] | f4a2f67a6e | |
dependabot[bot] | 0b7817d27e | |
Jonas Thelemann | eeffabf14a | |
dependabot[bot] | f6ab32c617 | |
dependabot[bot] | 57b66a43ac | |
dependabot[bot] | 7249a0a784 | |
dependabot[bot] | 88ad56af60 | |
dependabot[bot] | 8585f4da33 | |
dependabot[bot] | ea313da3ed | |
dependabot[bot] | 090f08b021 | |
dependabot[bot] | 074d583007 | |
dependabot[bot] | 8889869811 | |
dependabot[bot] | e2fb5aee2a | |
dependabot[bot] | bc55cd014b | |
dependabot[bot] | 44b6ad5b6c | |
dependabot[bot] | 9060d884d9 | |
dependabot[bot] | 07b48e3411 | |
dependabot[bot] | 7d88427703 | |
dependabot[bot] | 8664d8c484 | |
dependabot[bot] | c25751e310 | |
dependabot[bot] | 290286c88d | |
dependabot[bot] | 6a3af6203b | |
dependabot[bot] | 7fbc2624d3 | |
dependabot[bot] | d613af66bd | |
dependabot[bot] | 19b1bccaa7 | |
dependabot[bot] | c9668b001f | |
dependabot[bot] | c48ce10fb3 | |
Marius | a349a9296a | |
dependabot[bot] | e0a7b6035b | |
genofire | 306a031953 | |
dependabot[bot] | f0b63f4371 | |
dependabot[bot] | 39b16f2582 | |
dependabot[bot] | 2c1374f932 | |
Christoph, René Pardon | 6ca6ef69a2 | |
dependabot[bot] | 0ded7b624d | |
dependabot[bot] | 5620e5e91d | |
dependabot[bot] | 612567d14e | |
dependabot[bot] | 6b1ce5b35e | |
Shun | 7e6cd89114 | |
Marius | f898cd2b25 | |
dependabot[bot] | 58c2b65e43 | |
dependabot[bot] | 4fd3c5ebcb | |
dependabot[bot] | 9468d7f2d3 | |
dependabot[bot] | 512c824588 | |
dependabot[bot] | d5e8a3c417 | |
dependabot[bot] | ee0afcb366 | |
dependabot[bot] | 9ffd89aeee | |
dependabot[bot] | 69c567b4fa | |
dependabot[bot] | c4c57692b1 | |
dependabot[bot] | 3c2e430a67 | |
dependabot[bot] | 0e38b07fa6 | |
dependabot[bot] | 1ead453a97 | |
dependabot[bot] | 8efaf4be4c | |
Marius | 83015bfdf6 | |
dependabot[bot] | 884591d1c9 | |
dependabot[bot] | 43fde218c5 | |
dependabot[bot] | 160e4d7439 | |
dependabot[bot] | ecf62b809d | |
dependabot[bot] | 569d7a93a6 | |
dependabot[bot] | 527cc4b6ca | |
Abdelhadi Khiati | e0189d1bac | |
dependabot[bot] | 397fff4baa | |
dependabot[bot] | 3f2d51522d | |
dependabot[bot] | e51ad62a32 | |
dependabot[bot] | e0261bcfd5 | |
Roberto Villalba | e423e30135 | |
dependabot[bot] | 15e745d5de | |
dependabot[bot] | 161c47f392 | |
dependabot[bot] | 2ed94427cb | |
dependabot[bot] | 8bd570222a | |
Walter Pesce | 884cf6ecc1 | |
dependabot[bot] | 31469fcfed | |
dependabot[bot] | 9809a1e73c | |
dependabot[bot] | 8e1ad18503 | |
dependabot[bot] | f173d495aa | |
dependabot[bot] | 78836e3c8e | |
André Fontenele | b5cbafdbd5 | |
Marius | 63830b35d1 | |
dependabot[bot] | 5c7eddb659 | |
dependabot[bot] | 97944e46b4 | |
dependabot[bot] | e6a2995d2c | |
dependabot[bot] | 50c71431b0 | |
dependabot[bot] | 97c4a01f05 | |
dependabot[bot] | a2813be5f4 | |
dependabot[bot] | 4d5b02723e | |
dependabot[bot] | 9f0417480a | |
dependabot[bot] | b6a7dda4d9 | |
dependabot[bot] | 1b4d040cb4 | |
dependabot[bot] | 457ca870ea | |
dependabot[bot] | 492a75c938 | |
dependabot[bot] | 14b705ee24 | |
dependabot[bot] | 92aacdbce6 | |
dependabot[bot] | 671e6dd5b2 | |
dependabot[bot] | 4d48033e4f | |
dependabot[bot] | 9272bd238e | |
dependabot[bot] | d72854fa04 | |
dependabot[bot] | 4531cec078 | |
dependabot[bot] | 53e35a87e5 | |
dependabot[bot] | ea182008f7 | |
dependabot[bot] | a4bf8339f6 | |
dependabot[bot] | d441581f6c | |
Tom Berger | ebf767b2e9 | |
dependabot[bot] | 0ef0e6f9e0 | |
dependabot[bot] | ebb1669e9e | |
dependabot[bot] | 5c13de2a23 | |
dependabot[bot] | bb7d698fcf | |
dependabot[bot] | b3143e12c3 | |
dependabot[bot] | e8f0949cf0 | |
dependabot[bot] | f8dc7e0551 | |
dependabot[bot] | 7eda9a5fa3 | |
dependabot[bot] | 3a3916af46 | |
dependabot[bot] | 0c7cbfc976 | |
dependabot[bot] | df3089f52d | |
Ole-Martin Bratteng | 3feef174fd | |
dependabot[bot] | 374404c26c | |
dependabot[bot] | 5117253175 | |
Veniamin Krol | fbc83f508e | |
dependabot[bot] | ff35768afd | |
dependabot[bot] | 4a19bfa082 | |
dependabot[bot] | 8b7ea0e57d | |
dependabot[bot] | c3c667c430 | |
dependabot[bot] | 603d021adf | |
Ole-Martin Bratteng | 35dbca86f1 | |
dependabot[bot] | bcbe1c2f98 | |
dependabot[bot] | e1e4d094c8 | |
dependabot[bot] | bcae0b5a5b | |
dependabot[bot] | 8b799f1774 | |
dependabot[bot] | e925ba362e | |
dependabot[bot] | 2fa910fe98 | |
dependabot[bot] | 4791636bd3 | |
dependabot[bot] | 894670e547 | |
dependabot[bot] | cdc9edbdc6 | |
dependabot[bot] | 8c5a2db945 | |
Ole-Martin Bratteng | 0ccbcc829f | |
Marius | b47294267a | |
Marius | 965a011b1b | |
dependabot[bot] | d6e0abee28 | |
dependabot[bot] | 85e2f17520 | |
dependabot[bot] | 97c339b337 | |
dependabot[bot] | 2c8fe24b0c | |
dependabot[bot] | ec205e0743 | |
dependabot[bot] | 76c6bdac4a | |
dependabot[bot] | 6b7fb05022 | |
dependabot[bot] | 0d781500f9 | |
dependabot[bot] | 55d686f6a7 | |
dependabot[bot] | 6136e92d48 | |
dependabot[bot] | 0302a6b449 | |
dependabot[bot] | 10ae43a360 | |
dependabot[bot] | 373a888202 | |
dependabot[bot] | 596aae1b82 | |
dependabot[bot] | 53613ca7da | |
dependabot[bot] | 07b8638c9e | |
dependabot[bot] | 759b597680 | |
dependabot[bot] | 428f4b375b | |
dependabot[bot] | cbfa474464 | |
dependabot[bot] | 7ceababa15 | |
dependabot[bot] | fb67ad125c | |
dependabot[bot] | 7660a81dc6 | |
dependabot[bot] | 5c73a038ba | |
dependabot[bot] | aa4d0f582e | |
dependabot[bot] | 7d318a2197 | |
dependabot[bot] | cee9d929c2 | |
dependabot[bot] | ce7613fa46 | |
dependabot[bot] | b6c9577b58 | |
dependabot[bot] | e77a0a108f | |
dependabot[bot] | 26d7c76330 | |
dependabot[bot] | 8a4de8144f | |
dependabot[bot] | 3655950ad4 | |
dependabot[bot] | df40880c96 | |
dependabot[bot] | 3975a5a059 | |
dependabot[bot] | f188182d9e | |
dependabot[bot] | 512011ced1 | |
dependabot[bot] | 2068a0f0b4 | |
Anatoly | 6828cbdce1 | |
dependabot[bot] | 880c71fa82 | |
dependabot[bot] | 5819efb0dd | |
dependabot[bot] | c55989f721 | |
dependabot[bot] | 795ee96a46 | |
dependabot[bot] | 91118fc1f7 | |
dependabot[bot] | c929b9adf7 | |
dependabot[bot] | f4bcc50d1e | |
dependabot[bot] | 119023f1f7 | |
dependabot[bot] | d8e3401651 | |
dependabot[bot] | a7f29fd88a | |
dependabot[bot] | d460d45414 | |
dependabot[bot] | f2272fee3f | |
dependabot[bot] | b1c0f015d8 | |
dependabot[bot] | 2fde81b447 | |
dependabot[bot] | f32ccf86dd | |
dependabot[bot] | a2dcb9d956 | |
dependabot[bot] | ff979df260 | |
dependabot[bot] | 0a538cd2b3 | |
dependabot[bot] | 9ad763d12b | |
dependabot[bot] | 3785119fc3 | |
dependabot[bot] | c69dfffdbf | |
dependabot[bot] | f5ae9886ba | |
dependabot[bot] | 7cc595ee3c | |
dependabot[bot] | d900815e25 | |
dependabot[bot] | 91bcb1ba5b | |
dependabot[bot] | f394c87d81 | |
dependabot[bot] | 94c1564b7c | |
dependabot[bot] | 9c9cfc534e | |
dependabot[bot] | 1d7fc224a8 | |
dependabot[bot] | 2e229f85c1 | |
dependabot[bot] | 112742c433 | |
dependabot[bot] | 79bb52b50f | |
dependabot[bot] | 774b7bec6e | |
dependabot[bot] | 8833ca36a7 | |
dependabot[bot] | 981b8ba910 | |
dependabot[bot] | fe8065dd59 | |
dependabot[bot] | ce5126d8e1 | |
dependabot[bot] | b501f02496 | |
dependabot[bot] | 85178f6ddd | |
dependabot[bot] | 7666298f81 | |
Mikael Finstad | 15e9db67a4 | |
dependabot[bot] | 08ea0db865 | |
Marius | 48ffebec56 | |
dependabot[bot] | 38207e3dcd | |
dependabot[bot] | 2bf6a5598f | |
Ole-Martin Bratteng | da47c1229e | |
dependabot[bot] | 85c3815ddf | |
dependabot[bot] | 1a84f7069f | |
dependabot[bot] | 50463e6aa5 | |
dependabot[bot] | 8627dfde18 | |
dependabot[bot] | 9e91baf19d | |
dependabot[bot] | c153ad4f26 | |
Marius | fa09e71262 | |
Ole-Martin Bratteng | 0f4f81a1fc | |
Ole-Martin Bratteng | a8d8b0fea6 | |
dependabot[bot] | 968bb1ddcc | |
dependabot[bot] | 75f2e5a439 | |
Ole-Martin Bratteng | 511ebaed0e | |
dependabot[bot] | d1e710d065 | |
dependabot[bot] | 13ae6d2a3a | |
dependabot[bot] | 2b794e4b9b | |
dependabot[bot] | 7e93a8abaa | |
Ole-Martin Bratteng | c758437a8a | |
Ole-Martin Bratteng | 2d27132938 | |
dependabot[bot] | 025ebe47c8 | |
dependabot[bot] | 00bdee0835 | |
Ole-Martin Bratteng | 1401222528 | |
Marius | 21b3de4c71 | |
Marius | cd717e580c | |
Marius | 84faa14987 | |
Tinco Andringa | bae0ffb5e5 | |
benito | 16a3747ec4 | |
Marius | dd44267c08 | |
Marius | 5614cecb60 | |
Marius | d973f9a81a | |
CharlyBr | 484e506cd3 | |
Marius | 0ad435b4c8 | |
Ole-Martin Bratteng | 1b11885823 | |
Marius | 5be2afa2f8 | |
Marius | 781324986d | |
Juanjo Rodriguez | b50c2936a1 | |
Marius | d560c4e753 | |
Márk Sági-Kazár | f1b183022b | |
Marius | 8c5192c254 | |
Marius | b3bf854712 | |
Marius | 27957bd22b | |
Marius | 6d987aa226 | |
Marius | 97602c3d62 | |
Márk Sági-Kazár | 36ecc269ae | |
Marius | 8031aabb7e | |
Ole-Martin Bratteng | 56723a5ea6 | |
Adam Jensen | e85d630748 | |
Marius | d152c5bbf8 | |
Marius | fdb19a7904 | |
Abdelhadi Khiati | 734c630069 | |
Abdelhadi Khiati | 8b81859a80 | |
Marius | 67fd74e129 | |
Ahmed J | 0822c0ac43 | |
Ruben Garcia | f863189009 | |
Marius | 9b934ebca7 | |
Marius | e5a79650cf | |
Joey Coleman | f4d1b7c443 | |
Ifedapo .A. Olarewaju | ed85c526cb | |
Ifedapo .A. Olarewaju | 2c7735e697 | |
Kevin McGee | 90e208a286 | |
Márk Sági-Kazár | 3a344c5e81 | |
Márk Sági-Kazár | ec9c56bb03 | |
Marius | 9831c8b4da | |
Adam Jensen | 6662f43d01 | |
Marius | 26b84bcb1c | |
Marius | 59c3d42f8f | |
Marius | ec5f50090a | |
Jens Steinhauser | 87045a8fbf | |
Vincenzo Demasi | c822a3afb1 | |
Marius | 52181920c2 | |
Hamish Forbes | fdf168fbb6 | |
Hamish Forbes | a4a733fb39 | |
josh-marshall-jax | c9dc9e6c06 | |
Marius | 5692b8f34a | |
Marius | 9210fbe0fc | |
kiloreux | 9deabf9d80 | |
Abdelhadi Khiati | 5604bbc8d4 | |
Abdelhadi Khiati | 3034947e30 | |
Abdelhadi Khiati | 25eec49577 | |
Marius | cfebf1778e | |
Marius | 936f465cc3 | |
Marius | 286d2d1e23 | |
Hamish Forbes | 15a89ec676 | |
Clement Peyrabere | acfa974c10 | |
Ibrahim Almuqrin | e5e9a74c66 | |
Iñigo | 1f0b0f515d | |
Rafael Cossovan | c7f6428884 | |
dmmakita | 43bd4b0ccb | |
Marius | 973a4fe066 | |
Iñigo | 8ef7648713 | |
Adam Jensen | 9c0e0c8f11 | |
Marius | b4db495cc6 | |
Abdelhadi Khiati | 4b7cda81a4 | |
Marius | 39b90f9148 | |
Marius | 790d6ea849 | |
Marius | 14aebe2985 | |
oliverpool | e138fc3e9e | |
Marius | d6ac521667 | |
Marius | 1e5ff7fe24 | |
Marius | f47bee870c | |
Ankit Pokhrel | 898f3fe72a | |
Ankit Pokhrel | 321acd1821 | |
Marius | 5414e88fbb | |
Marius | a045b8c4fc | |
kiloreux | a32ce3e9ab | |
kiloreux | f5e8541a06 | |
kiloreux | 232236f167 | |
kiloreux | 10175799e1 | |
kiloreux | ee5d881ad3 |

```diff
@@ -0,0 +1,16 @@
+version: 2
+updates:
+  - package-ecosystem: github-actions
+    directory: /
+    schedule:
+      interval: monthly
+
+  - package-ecosystem: docker
+    directory: /
+    schedule:
+      interval: monthly
+
+  - package-ecosystem: gomod
+    directory: /
+    schedule:
+      interval: monthly
```
```diff
@@ -0,0 +1,156 @@
+# Taken from https://github.com/hrvey/combine-prs-workflow
+# This action can be triggered manually to combine multiple PRs for
+# dependency upgrades into a single PR. See the above links for
+# more details.
+name: 'Combine PRs'
+
+# Controls when the action will run - in this case triggered manually
+on:
+  workflow_dispatch:
+    inputs:
+      branchPrefix:
+        description: 'Branch prefix to find combinable PRs based on'
+        required: true
+        default: 'dependabot'
+      mustBeGreen:
+        description: 'Only combine PRs that are green (status is success). Set to false if repo does not run checks'
+        type: boolean
+        required: true
+        default: true
+      combineBranchName:
+        description: 'Name of the branch to combine PRs into'
+        required: true
+        default: 'combine-prs-branch'
+      ignoreLabel:
+        description: 'Exclude PRs with this label'
+        required: true
+        default: 'nocombine'
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # This workflow contains a single job called "combine-prs"
+  combine-prs:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      - uses: actions/github-script@v6
+        id: create-combined-pr
+        name: Create Combined PR
+        with:
+          github-token: ${{secrets.GITHUB_TOKEN}}
+          script: |
+            const pulls = await github.paginate('GET /repos/:owner/:repo/pulls', {
+              owner: context.repo.owner,
+              repo: context.repo.repo
+            });
+            let branchesAndPRStrings = [];
+            let baseBranch = null;
+            let baseBranchSHA = null;
+            for (const pull of pulls) {
+              const branch = pull['head']['ref'];
+              console.log('Pull for branch: ' + branch);
+              if (branch.startsWith('${{ github.event.inputs.branchPrefix }}')) {
+                console.log('Branch matched prefix: ' + branch);
+                let statusOK = true;
+                if(${{ github.event.inputs.mustBeGreen }}) {
+                  console.log('Checking green status: ' + branch);
+                  const stateQuery = `query($owner: String!, $repo: String!, $pull_number: Int!) {
+                    repository(owner: $owner, name: $repo) {
+                      pullRequest(number:$pull_number) {
+                        commits(last: 1) {
+                          nodes {
+                            commit {
+                              statusCheckRollup {
+                                state
+                              }
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }`
+                  const vars = {
+                    owner: context.repo.owner,
+                    repo: context.repo.repo,
+                    pull_number: pull['number']
+                  };
+                  const result = await github.graphql(stateQuery, vars);
+                  const [{ commit }] = result.repository.pullRequest.commits.nodes;
+                  const state = commit.statusCheckRollup.state
+                  console.log('Validating status: ' + state);
+                  if(state != 'SUCCESS') {
+                    console.log('Discarding ' + branch + ' with status ' + state);
+                    statusOK = false;
+                  }
+                }
+                console.log('Checking labels: ' + branch);
+                const labels = pull['labels'];
+                for(const label of labels) {
+                  const labelName = label['name'];
+                  console.log('Checking label: ' + labelName);
+                  if(labelName == '${{ github.event.inputs.ignoreLabel }}') {
+                    console.log('Discarding ' + branch + ' with label ' + labelName);
+                    statusOK = false;
+                  }
+                }
+                if (statusOK) {
+                  console.log('Adding branch to array: ' + branch);
+                  const prString = '#' + pull['number'] + ' ' + pull['title'];
+                  branchesAndPRStrings.push({ branch, prString });
+                  baseBranch = pull['base']['ref'];
+                  baseBranchSHA = pull['base']['sha'];
+                }
+              }
+            }
+            if (branchesAndPRStrings.length == 0) {
+              core.setFailed('No PRs/branches matched criteria');
+              return;
+            }
+            try {
+              await github.rest.git.createRef({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                ref: 'refs/heads/' + '${{ github.event.inputs.combineBranchName }}',
+                sha: baseBranchSHA
+              });
+            } catch (error) {
+              console.log(error);
+              core.setFailed('Failed to create combined branch - maybe a branch by that name already exists?');
+              return;
+            }
+
+            let combinedPRs = [];
+            let mergeFailedPRs = [];
+            for(const { branch, prString } of branchesAndPRStrings) {
+              try {
+                await github.rest.repos.merge({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  base: '${{ github.event.inputs.combineBranchName }}',
+                  head: branch,
+                });
+                console.log('Merged branch ' + branch);
+                combinedPRs.push(prString);
+              } catch (error) {
+                console.log('Failed to merge branch ' + branch);
+                mergeFailedPRs.push(prString);
+              }
+            }
+
+            console.log('Creating combined PR');
+            const combinedPRsString = combinedPRs.join('\n');
+            let body = '✅ This PR was created by the Combine PRs action by combining the following PRs:\n' + combinedPRsString;
+            if(mergeFailedPRs.length > 0) {
+              const mergeFailedPRsString = mergeFailedPRs.join('\n');
+              body += '\n\n⚠️ The following PRs were left out due to merge conflicts:\n' + mergeFailedPRsString
+            }
+            await github.rest.pulls.create({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              title: 'Combined PR',
+              head: '${{ github.event.inputs.combineBranchName }}',
+              base: baseBranch,
+              body: body
+            });
```
```diff
@@ -0,0 +1,36 @@
+name: continuous-integration
+
+on:
+  push:
+  pull_request:
+
+jobs:
+  test:
+    strategy:
+      fail-fast: false
+      matrix:
+        go-version: [stable, oldstable]
+        platform: [ubuntu-latest, macos-latest, windows-latest]
+    runs-on: ${{ matrix.platform }}
+    env:
+      GO111MODULE: on
+    steps:
+      -
+        name: Checkout code
+        uses: actions/checkout@v3
+
+      -
+        name: Install Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ matrix.go-version }}
+
+      -
+        name: Test code
+        run: |
+          go test ./pkg/...
+
+      -
+        name: Vet code
+        run: |
+          go vet ./pkg/...
```
```diff
@@ -0,0 +1,113 @@
+name: release
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - "v*"
+
+jobs:
+  build-docker:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout code
+        uses: actions/checkout@v3
+
+      - run: |
+          echo "GIT_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
+
+      -
+        name: Docker meta
+        id: docker_meta
+        uses: docker/metadata-action@v4.4.0
+        with:
+          images: |
+            ghcr.io/tus/tusd
+            tusproject/tusd
+          tags: |
+            type=sha
+            type=semver,pattern=v{{version}}
+            type=semver,pattern=v{{major}}.{{minor}}
+            type=semver,pattern=v{{major}}
+
+      -
+        name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v2.5.0
+        with:
+          install: true
+
+      -
+        name: Login to GitHub Container Registry
+        uses: docker/login-action@v2.1.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ github.token }}
+
+      -
+        name: Login to Docker Container Registry
+        uses: docker/login-action@v2.1.0
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      -
+        name: Build and push
+        id: build
+        uses: docker/build-push-action@v4
+        with:
+          push: true
+          builder: ${{ steps.buildx.outputs.name }}
+          tags: ${{ steps.docker_meta.outputs.tags }}
+          labels: ${{ steps.docker_meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha
+          build-args: |
+            GIT_VERSION=${{ env.GIT_VERSION }}
+            GIT_COMMIT=${{ github.sha }}
+          platforms: linux/amd64,linux/arm64/v8
+
+  build-binary:
+    runs-on: ubuntu-latest
+    if: startsWith(github.ref, 'refs/tags/')
+    env:
+      GO111MODULE: on
+    steps:
+      -
+        name: Checkout code
+        uses: actions/checkout@v3
+
+      -
+        name: Install Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: 'stable'
+
+      -
+        name: Build TUSD
+        run: ./scripts/build_all.sh
+
+      -
+        name: GitHub Release
+        uses: softprops/action-gh-release@v0.1.15
+        with:
+          files: tusd_*.*
+
+  deploy-heroku:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout code
+        uses: actions/checkout@v3
+
+      -
+        name: Deploy to heroku
+        uses: akhileshns/heroku-deploy@v3.12.14
+        with:
+          heroku_api_key: ${{secrets.HEROKU_API_KEY}}
+          heroku_app_name: ${{secrets.HEROKU_APP_NAME}}
+          heroku_email: ${{secrets.HEROKU_USER_EMAIL}}
+          stack: heroku-22
```
```diff
@@ -3,3 +3,6 @@ cover.out
 data/
 node_modules/
 .DS_Store
+./tusd
+tusd_*_*
+.idea/
```
.travis.yml

```diff
@@ -1,44 +0,0 @@
-language: go
-go:
-- 1.12
-- 1.13
-env:
-- GO111MODULE=on
-os:
-- linux
-- windows
-sudo: required
-addons:
-  apt:
-    packages:
-      - docker-ce
-cache:
-  apt: true
-  directories:
-    - $HOME/.gimme
-    - "$HOME/google-cloud-sdk/"
-install:
-  - true
-script:
-  - ./scripts/test_all.sh
-before_deploy:
-  - if [[ "$TRAVIS_TAG" != "" ]]; then ./scripts/build_all.sh; fi
-deploy:
-- provider: releases
-  api_key:
-    secure: dV3wr9ebEps3YrzIoqmkYc7fw0IECz7QLPRENPSxTJyd5TTYXGsnTS26cMe2LdGwYrXw0njt2GGovMyBZFTtxyYI3mMO4AZRwvZfx/yGzPWJBbVi6NjZVRg/bpyK+mQJ5BUlkPAYJmRpdc6qD+nvCGakBOxoByC5XDK+yM+bKFs=
-  file_glob: true
-  file: tusd_*.*
-  skip_cleanup: true
-  on:
-    tags: true
-    go: 1.13
-    repo: tus/tusd
-    condition: $TRAVIS_OS_NAME = linux
-- provider: script
-  script: scripts/deploy_kube.sh
-  on:
-    branch: master
-    go: 1.13
-    repo: tus/tusd
-    condition: $TRAVIS_OS_NAME = linux
```
Dockerfile

```diff
@@ -1,37 +1,51 @@
-FROM golang:1.12-alpine AS builder
+FROM --platform=$BUILDPLATFORM golang:1.20.4-alpine AS builder

-# Copy in the git repo from the build context
-COPY . /go/src/github.com/tus/tusd/
-
-# Create app directory
 WORKDIR /go/src/github.com/tus/tusd

-RUN apk add --no-cache \
-        git gcc libc-dev \
-    && go get -d -v ./... \
-    && version="$(git tag -l --points-at HEAD)" \
-    && commit=$(git log --format="%H" -n 1) \
-    && GOOS=linux GOARCH=amd64 go build \
-        -ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${version} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${commit} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
-        -o "/go/bin/tusd" ./cmd/tusd/main.go \
-    && rm -r /go/src/* \
-    && apk del git
+# Add gcc and libc-dev early so it is cached
+RUN set -xe \
+    && apk add --no-cache gcc libc-dev
+
+# Install dependencies earlier so they are cached between builds
+COPY go.mod go.sum ./
+RUN set -xe \
+    && go mod download
+
+# Copy the source code, because directories are special, there are separate layers
+COPY cmd/ ./cmd/
+COPY internal/ ./internal/
+COPY pkg/ ./pkg/
+
+# Get the version name and git commit as a build argument
+ARG GIT_VERSION
+ARG GIT_COMMIT
+
+# Get the operating system and architecture to build for
+ARG TARGETOS
+ARG TARGETARCH
+
+RUN set -xe \
+    && GOOS=$TARGETOS GOARCH=$TARGETARCH go build \
+        -ldflags="-X github.com/tus/tusd/cmd/tusd/cli.VersionName=${GIT_VERSION} -X github.com/tus/tusd/cmd/tusd/cli.GitCommit=${GIT_COMMIT} -X 'github.com/tus/tusd/cmd/tusd/cli.BuildDate=$(date --utc)'" \
+        -o /go/bin/tusd ./cmd/tusd/main.go

 # start a new stage that copies in the binary built in the previous stage
-FROM alpine:3.9
+FROM alpine:3.18.0
+WORKDIR /srv/tusd-data

-COPY --from=builder /go/bin/tusd /usr/local/bin/tusd
+COPY ./docker/entrypoint.sh /usr/local/share/docker-entrypoint.sh
+COPY ./docker/load-env.sh /usr/local/share/load-env.sh

-RUN apk add --no-cache ca-certificates jq gcc \
+RUN apk add --no-cache ca-certificates jq bash \
     && addgroup -g 1000 tusd \
     && adduser -u 1000 -G tusd -s /bin/sh -D tusd \
     && mkdir -p /srv/tusd-hooks \
-    && mkdir -p /srv/tusd-data \
-    && chown tusd:tusd /srv/tusd-data
+    && chown tusd:tusd /srv/tusd-data \
+    && chmod +x /usr/local/share/docker-entrypoint.sh /usr/local/share/load-env.sh
+
+COPY --from=builder /go/bin/tusd /usr/local/bin/tusd

-WORKDIR /srv/tusd-data
 EXPOSE 1080
-ENTRYPOINT ["tusd"]
-CMD ["--hooks-dir","/srv/tusd-hooks"]
-
 USER tusd
+
+ENTRYPOINT ["/usr/local/share/docker-entrypoint.sh"]
+CMD [ "--hooks-dir", "/srv/tusd-hooks" ]
```
```diff
@@ -0,0 +1 @@
+web: bin/tusd -s3-bucket tusdtest.transloadit.com -port=$PORT -behind-proxy -max-size=128849018880 -timeout=6000
```
```diff
@@ -1,6 +1,6 @@
 # tusd

-<img alt="Tus logo" src="https://github.com/tus/tus.io/blob/master/assets/img/tus1.png?raw=true" width="30%" align="right" />
+<img alt="Tus logo" src="https://github.com/tus/tus.io/blob/main/assets/img/tus1.png?raw=true" width="30%" align="right" />

 > **tus** is a protocol based on HTTP for *resumable file uploads*. Resumable
 > means that an upload can be interrupted at any moment and can be resumed without
@@ -35,8 +35,8 @@ breaking changes have been introduced, please look at the [0.14.0 tag](https://g

 ## Build status

-[![Build Status](https://travis-ci.org/tus/tusd.svg?branch=master)](https://travis-ci.org/tus/tusd)
-[![Build status](https://ci.appveyor.com/api/projects/status/2y6fa4nyknoxmyc8/branch/master?svg=true)](https://ci.appveyor.com/project/Acconut/tusd/branch/master)
+[![release](https://github.com/tus/tusd/actions/workflows/release.yaml/badge.svg)](https://github.com/tus/tusd/actions/workflows/release.yaml)
+[![continuous-integration](https://github.com/tus/tusd/actions/workflows/continuous-integration.yaml/badge.svg)](https://github.com/tus/tusd/actions/workflows/continuous-integration.yaml)

 ## License

```
appveyor.yml

```diff
@@ -1,14 +0,0 @@
-clone_folder: c:\projects\go\src\github.com\tus\tusd
-
-environment:
-  GOPATH: c:\projects\go
-  GO111MODULE: on
-
-build_script:
-  - set PATH=%GOPATH%\bin;%PATH%
-  - go env
-  - go version
-
-test_script:
-  - go test ./pkg/...
-  - go vet ./pkg/...
```
```diff
@@ -1,10 +1,12 @@
 package cli

 import (
+    "fmt"
     "os"
     "path/filepath"
     "strings"

+    "github.com/tus/tusd/pkg/azurestore"
     "github.com/tus/tusd/pkg/filelocker"
     "github.com/tus/tusd/pkg/filestore"
     "github.com/tus/tusd/pkg/gcsstore"
@@ -26,8 +28,29 @@ func CreateComposer() {
     if Flags.S3Bucket != "" {
         s3Config := aws.NewConfig()
+
+        if Flags.S3TransferAcceleration {
+            s3Config = s3Config.WithS3UseAccelerate(true)
+        }
+
+        if Flags.S3DisableContentHashes {
+            // Prevent the S3 service client from automatically
+            // adding the Content-MD5 header to S3 Object Put and Upload API calls.
+            s3Config = s3Config.WithS3DisableContentMD5Validation(true)
+        }
+
+        if Flags.S3DisableSSL {
+            // Disable HTTPS and only use HTTP (helpful for debugging requests).
+            s3Config = s3Config.WithDisableSSL(true)
+        }
+
         if Flags.S3Endpoint == "" {
-            stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
+
+            if Flags.S3TransferAcceleration {
+                stdout.Printf("Using 's3://%s' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.\n", Flags.S3Bucket)
+            } else {
+                stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
+            }
+
         } else {
             stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)

@@ -38,6 +61,8 @@ func CreateComposer() {
         // as per https://github.com/aws/aws-sdk-go#configuring-credentials
         store := s3store.New(Flags.S3Bucket, s3.New(session.Must(session.NewSession()), s3Config))
         store.ObjectPrefix = Flags.S3ObjectPrefix
+        store.PreferredPartSize = Flags.S3PartSize
+        store.DisableContentHashes = Flags.S3DisableContentHashes
         store.UseIn(Composer)

         locker := memorylocker.New()
@@ -66,6 +91,50 @@ func CreateComposer() {
         store.ObjectPrefix = Flags.GCSObjectPrefix
         store.UseIn(Composer)

+        locker := memorylocker.New()
+        locker.UseIn(Composer)
+    } else if Flags.AzStorage != "" {
+
+        accountName := os.Getenv("AZURE_STORAGE_ACCOUNT")
+        if accountName == "" {
+            stderr.Fatalf("No service account name for Azure BlockBlob Storage using the AZURE_STORAGE_ACCOUNT environment variable.\n")
+        }
+
+        accountKey := os.Getenv("AZURE_STORAGE_KEY")
+        if accountKey == "" {
+            stderr.Fatalf("No service account key for Azure BlockBlob Storage using the AZURE_STORAGE_KEY environment variable.\n")
+        }
+
+        azureEndpoint := Flags.AzEndpoint
+        // Enables support for using Azurite as a storage emulator without messing with proxies and stuff
+        // e.g. http://127.0.0.1:10000/devstoreaccount1
+        if azureEndpoint == "" {
+            azureEndpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
+            stdout.Printf("Custom Azure Endpoint not specified in flag variable azure-endpoint.\n"+
+                "Using endpoint %s\n", azureEndpoint)
+        } else {
+            stdout.Printf("Using Azure endpoint %s\n", azureEndpoint)
+        }
+
+        azConfig := &azurestore.AzConfig{
+            AccountName: accountName,
+            AccountKey: accountKey,
+            ContainerName: Flags.AzStorage,
+            ContainerAccessType: Flags.AzContainerAccessType,
+            BlobAccessTier: Flags.AzBlobAccessTier,
+            Endpoint: azureEndpoint,
+        }
+
+        azService, err := azurestore.NewAzureService(azConfig)
+        if err != nil {
+            stderr.Fatalf(err.Error())
+        }
+
+        store := azurestore.New(azService)
+        store.ObjectPrefix = Flags.AzObjectPrefix
+        store.Container = Flags.AzStorage
+        store.UseIn(Composer)
+
         locker := memorylocker.New()
         locker.UseIn(Composer)
     } else {
```
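
The composer changes above add S3 transfer acceleration, optional content-hash skipping, and a new Azure backend. For anyone embedding tusd as a library rather than running the CLI, a minimal sketch of wiring the same S3 options could look like the following; it reuses only the calls visible in the hunk (aws.NewConfig, s3store.New, PreferredPartSize, DisableContentHashes, UseIn), while the bucket name, the part size, and the newS3Composer helper are made up for illustration and should be checked against the tusd v1 packages.

```go
// Hypothetical standalone sketch, not part of this diff.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/memorylocker"
	"github.com/tus/tusd/pkg/s3store"
)

// newS3Composer builds a store composer with the same S3 options the CLI now exposes.
func newS3Composer(bucket string, accelerate, disableHashes bool) *handler.StoreComposer {
	s3Config := aws.NewConfig()
	if accelerate {
		// Same call the CLI makes for -s3-transfer-acceleration.
		s3Config = s3Config.WithS3UseAccelerate(true)
	}

	store := s3store.New(bucket, s3.New(session.Must(session.NewSession()), s3Config))
	store.PreferredPartSize = 50 * 1024 * 1024 // mirrors the new -s3-part-size default (assumed value)
	store.DisableContentHashes = disableHashes // mirrors -s3-disable-content-hashes

	composer := handler.NewStoreComposer()
	store.UseIn(composer)
	memorylocker.New().UseIn(composer)
	return composer
}

func main() {
	composer := newS3Composer("my-bucket", false, false) // "my-bucket" is a placeholder
	fmt.Println("S3-backed store composer ready:", composer != nil)
}
```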
```diff
@@ -2,38 +2,65 @@ package cli

 import (
     "flag"
+    "fmt"
+    "log"
+    "os"
     "path/filepath"
+    "runtime/pprof"
     "strings"
+    "time"

     "github.com/tus/tusd/cmd/tusd/cli/hooks"
 )

 var Flags struct {
     HttpHost string
     HttpPort string
     HttpSock string
     MaxSize int64
     UploadDir string
     Basepath string
-    Timeout int64
-    S3Bucket string
-    S3ObjectPrefix string
-    S3Endpoint string
-    GCSBucket string
-    GCSObjectPrefix string
-    EnabledHooksString string
-    FileHooksDir string
-    HttpHooksEndpoint string
-    HttpHooksRetry int
-    HttpHooksBackoff int
-    HooksStopUploadCode int
-    PluginHookPath string
-    EnabledHooks []hooks.HookType
-    ShowVersion bool
-    ExposeMetrics bool
-    MetricsPath string
-    BehindProxy bool
-    VerboseOutput bool
+    ShowGreeting bool
+    DisableDownload bool
+    DisableTermination bool
+    DisableCors bool
+    Timeout int64
+    S3Bucket string
+    S3ObjectPrefix string
+    S3Endpoint string
+    S3PartSize int64
+    S3DisableContentHashes bool
+    S3DisableSSL bool
+    GCSBucket string
+    GCSObjectPrefix string
+    AzStorage string
+    AzContainerAccessType string
+    AzBlobAccessTier string
+    AzObjectPrefix string
+    AzEndpoint string
+    EnabledHooksString string
+    FileHooksDir string
+    HttpHooksEndpoint string
+    HttpHooksForwardHeaders string
+    HttpHooksRetry int
+    HttpHooksBackoff int
+    GrpcHooksEndpoint string
+    GrpcHooksRetry int
+    GrpcHooksBackoff int
+    HooksStopUploadCode int
+    PluginHookPath string
+    EnabledHooks []hooks.HookType
+    ShowVersion bool
+    ExposeMetrics bool
+    MetricsPath string
+    BehindProxy bool
+    VerboseOutput bool
+    S3TransferAcceleration bool
+    TLSCertFile string
+    TLSKeyFile string
+    TLSMode string
+
+    CPUProfile string
 }

 func ParseFlags() {
@@ -43,17 +70,33 @@ func ParseFlags() {
     flag.Int64Var(&Flags.MaxSize, "max-size", 0, "Maximum size of a single upload in bytes")
     flag.StringVar(&Flags.UploadDir, "upload-dir", "./data", "Directory to store uploads in")
     flag.StringVar(&Flags.Basepath, "base-path", "/files/", "Basepath of the HTTP server")
+    flag.BoolVar(&Flags.ShowGreeting, "show-greeting", true, "Show the greeting message")
+    flag.BoolVar(&Flags.DisableDownload, "disable-download", false, "Disable the download endpoint")
+    flag.BoolVar(&Flags.DisableTermination, "disable-termination", false, "Disable the termination endpoint")
+    flag.BoolVar(&Flags.DisableCors, "disable-cors", false, "Disable CORS headers")
     flag.Int64Var(&Flags.Timeout, "timeout", 6*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
     flag.StringVar(&Flags.S3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")
     flag.StringVar(&Flags.S3ObjectPrefix, "s3-object-prefix", "", "Prefix for S3 object names")
     flag.StringVar(&Flags.S3Endpoint, "s3-endpoint", "", "Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be pass)")
+    flag.Int64Var(&Flags.S3PartSize, "s3-part-size", 50*1024*1024, "Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future)")
+    flag.BoolVar(&Flags.S3DisableContentHashes, "s3-disable-content-hashes", false, "Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 for minimized CPU usage (experimental and may be removed in the future)")
+    flag.BoolVar(&Flags.S3DisableSSL, "s3-disable-ssl", false, "Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)")
     flag.StringVar(&Flags.GCSBucket, "gcs-bucket", "", "Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)")
-    flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names (can't contain underscore character)")
-    flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable all events")
+    flag.StringVar(&Flags.GCSObjectPrefix, "gcs-object-prefix", "", "Prefix for GCS object names")
+    flag.StringVar(&Flags.AzStorage, "azure-storage", "", "Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)")
+    flag.StringVar(&Flags.AzContainerAccessType, "azure-container-access-type", "", "Access type when creating a new container if it does not exist (possible values: blob, container, '')")
+    flag.StringVar(&Flags.AzBlobAccessTier, "azure-blob-access-tier", "", "Blob access tier when uploading new files (possible values: archive, cool, hot, '')")
+    flag.StringVar(&Flags.AzObjectPrefix, "azure-object-prefix", "", "Prefix for Azure object names")
+    flag.StringVar(&Flags.AzEndpoint, "azure-endpoint", "", "Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)")
+    flag.StringVar(&Flags.EnabledHooksString, "hooks-enabled-events", "pre-create,post-create,post-receive,post-terminate,post-finish", "Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events")
     flag.StringVar(&Flags.FileHooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
     flag.StringVar(&Flags.HttpHooksEndpoint, "hooks-http", "", "An HTTP endpoint to which hook events will be sent to")
+    flag.StringVar(&Flags.HttpHooksForwardHeaders, "hooks-http-forward-headers", "", "List of HTTP request headers to be forwarded from the client request to the hook endpoint")
     flag.IntVar(&Flags.HttpHooksRetry, "hooks-http-retry", 3, "Number of times to retry on a 500 or network timeout")
     flag.IntVar(&Flags.HttpHooksBackoff, "hooks-http-backoff", 1, "Number of seconds to wait before retrying each retry")
+    flag.StringVar(&Flags.GrpcHooksEndpoint, "hooks-grpc", "", "An gRPC endpoint to which hook events will be sent to")
+    flag.IntVar(&Flags.GrpcHooksRetry, "hooks-grpc-retry", 3, "Number of times to retry on a server error or network timeout")
+    flag.IntVar(&Flags.GrpcHooksBackoff, "hooks-grpc-backoff", 1, "Number of seconds to wait before retrying each retry")
     flag.IntVar(&Flags.HooksStopUploadCode, "hooks-stop-code", 0, "Return code from post-receive hook which causes tusd to stop and delete the current upload. A zero value means that no uploads will be stopped")
     flag.StringVar(&Flags.PluginHookPath, "hooks-plugin", "", "Path to a Go plugin for loading hook functions (only supported on Linux and macOS; highly EXPERIMENTAL and may BREAK in the future)")
     flag.BoolVar(&Flags.ShowVersion, "version", false, "Print tusd version information")
@@ -61,6 +104,11 @@ func ParseFlags() {
     flag.StringVar(&Flags.MetricsPath, "metrics-path", "/metrics", "Path under which the metrics endpoint will be accessible")
     flag.BoolVar(&Flags.BehindProxy, "behind-proxy", false, "Respect X-Forwarded-* and similar headers which may be set by proxies")
     flag.BoolVar(&Flags.VerboseOutput, "verbose", true, "Enable verbose logging output")
+    flag.BoolVar(&Flags.S3TransferAcceleration, "s3-transfer-acceleration", false, "Use AWS S3 transfer acceleration endpoint (requires -s3-bucket option and Transfer Acceleration property on S3 bucket to be set)")
+    flag.StringVar(&Flags.TLSCertFile, "tls-certificate", "", "Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.")
+    flag.StringVar(&Flags.TLSKeyFile, "tls-key", "", "Path to the file containing the key for the TLS certificate.")
+    flag.StringVar(&Flags.TLSMode, "tls-mode", "tls12", "Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong.")
+    flag.StringVar(&Flags.CPUProfile, "cpuprofile", "", "write cpu profile to file")
     flag.Parse()

     SetEnabledHooks()
@@ -68,6 +116,20 @@ func ParseFlags() {
     if Flags.FileHooksDir != "" {
         Flags.FileHooksDir, _ = filepath.Abs(Flags.FileHooksDir)
     }
+
+    if Flags.CPUProfile != "" {
+        f, err := os.Create(Flags.CPUProfile)
+        if err != nil {
+            log.Fatal(err)
+        }
+        pprof.StartCPUProfile(f)
+
+        go func() {
+            <-time.After(20 * time.Second)
+            pprof.StopCPUProfile()
+            fmt.Println("Stopped CPU profile")
+        }()
+    }
 }

 func SetEnabledHooks() {
```
```diff
@@ -8,6 +8,12 @@ import (
 var greeting string

 func PrepareGreeting() {
+    // Do not show information about metric endpoint, if it is not exposed
+    metricsInfo := ""
+    if Flags.ExposeMetrics {
+        metricsInfo = fmt.Sprintf("- %s - gather statistics to keep tusd running smoothly\n", Flags.MetricsPath)
+    }
+
     greeting = fmt.Sprintf(
         `Welcome to tusd
 ===============
@@ -20,15 +26,14 @@ While you did an awesome job on getting tusd running, this is just the welcome
 message, so let's talk about the places that really matter:

 - %s - send your tus uploads to this endpoint
-- %s - gather statistics to keep tusd running smoothly
-- https://github.com/tus/tusd/issues - report your bugs here
+%s- https://github.com/tus/tusd/issues - report your bugs here

 So quit lollygagging, send over your files and experience the future!

 Version = %s
 GitCommit = %s
 BuildDate = %s
-`, Flags.Basepath, Flags.MetricsPath, VersionName, GitCommit, BuildDate)
+`, Flags.Basepath, metricsInfo, VersionName, GitCommit, BuildDate)
 }

 func DisplayGreeting(w http.ResponseWriter, r *http.Request) {
```
```diff
@@ -20,27 +20,36 @@ func hookTypeInSlice(a hooks.HookType, list []hooks.HookType) bool {
     return false
 }

-func preCreateCallback(info handler.HookEvent) error {
-    if output, err := invokeHookSync(hooks.HookPreCreate, info, true); err != nil {
+func hookCallback(typ hooks.HookType, info handler.HookEvent) error {
+    if output, err := invokeHookSync(typ, info, true); err != nil {
         if hookErr, ok := err.(hooks.HookError); ok {
             return hooks.NewHookError(
-                fmt.Errorf("pre-create hook failed: %s", err),
+                fmt.Errorf("%s hook failed: %s", typ, err),
                 hookErr.StatusCode(),
                 hookErr.Body(),
             )
         }
-        return fmt.Errorf("pre-create hook failed: %s\n%s", err, string(output))
+        return fmt.Errorf("%s hook failed: %s\n%s", typ, err, string(output))
     }

     return nil
 }

+func preCreateCallback(info handler.HookEvent) error {
+    return hookCallback(hooks.HookPreCreate, info)
+}
+
+func preFinishCallback(info handler.HookEvent) error {
+    return hookCallback(hooks.HookPreFinish, info)
+}
+
 func SetupHookMetrics() {
     MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostFinish)).Add(0)
     MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostTerminate)).Add(0)
     MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostReceive)).Add(0)
     MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPostCreate)).Add(0)
     MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreCreate)).Add(0)
+    MetricsHookErrorsTotal.WithLabelValues(string(hooks.HookPreFinish)).Add(0)
 }

 func SetupPreHooks(config *handler.Config) error {
@@ -54,9 +63,18 @@ func SetupPreHooks(config *handler.Config) error {
         stdout.Printf("Using '%s' as the endpoint for hooks", Flags.HttpHooksEndpoint)

         hookHandler = &hooks.HttpHook{
             Endpoint: Flags.HttpHooksEndpoint,
             MaxRetries: Flags.HttpHooksRetry,
             Backoff: Flags.HttpHooksBackoff,
+            ForwardHeaders: strings.Split(Flags.HttpHooksForwardHeaders, ","),
+        }
+    } else if Flags.GrpcHooksEndpoint != "" {
+        stdout.Printf("Using '%s' as the endpoint for gRPC hooks", Flags.GrpcHooksEndpoint)
+
+        hookHandler = &hooks.GrpcHook{
+            Endpoint: Flags.GrpcHooksEndpoint,
+            MaxRetries: Flags.GrpcHooksRetry,
+            Backoff: Flags.GrpcHooksBackoff,
         }
     } else if Flags.PluginHookPath != "" {
         stdout.Printf("Using '%s' to load plugin for hooks", Flags.PluginHookPath)
@@ -80,6 +98,7 @@ func SetupPreHooks(config *handler.Config) error {
     }

     config.PreUploadCreateCallback = preCreateCallback
+    config.PreFinishResponseCallback = preFinishCallback

     return nil
 }
```
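
The SetupPreHooks changes above register the pre-create and new pre-finish callbacks and allow dispatching events to an HTTP or gRPC endpoint. As a rough illustration, a hypothetical receiver for the -hooks-http endpoint might look like the sketch below. The Hook-Name and Content-Type headers are the ones set by HttpHook in this changeset; treating a non-2xx response as a hook failure and the JSON body layout are assumptions to verify against the tusd hook documentation.

```go
// Hypothetical hook receiver sketch, not part of this diff.
package main

import (
	"io"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) {
		// The body is read as raw bytes because its exact layout is not shown in this excerpt.
		body, _ := io.ReadAll(r.Body)
		log.Printf("hook %q (%s): %s", r.Header.Get("Hook-Name"), r.Header.Get("Content-Type"), body)
		w.WriteHeader(http.StatusOK) // assumption: a non-2xx response would signal a hook failure
	})
	// Assumed invocation: tusd -hooks-http http://localhost:8081/hooks
	log.Fatal(http.ListenAndServe(":8081", nil))
}
```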
```diff
@@ -0,0 +1,74 @@
+package hooks
+
+import (
+    "context"
+    "time"
+
+    grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
+    "github.com/tus/tusd/pkg/handler"
+    pb "github.com/tus/tusd/pkg/proto/v1"
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/status"
+)
+
+type GrpcHook struct {
+    Endpoint string
+    MaxRetries int
+    Backoff int
+    Client pb.HookServiceClient
+}
+
+func (g *GrpcHook) Setup() error {
+    opts := []grpc_retry.CallOption{
+        grpc_retry.WithBackoff(grpc_retry.BackoffLinear(time.Duration(g.Backoff) * time.Second)),
+        grpc_retry.WithMax(uint(g.MaxRetries)),
+    }
+    grpcOpts := []grpc.DialOption{
+        grpc.WithInsecure(),
+        grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(opts...)),
+    }
+    conn, err := grpc.Dial(g.Endpoint, grpcOpts...)
+    if err != nil {
+        return err
+    }
+    g.Client = pb.NewHookServiceClient(conn)
+    return nil
+}
+
+func (g *GrpcHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput bool) ([]byte, int, error) {
+    ctx := context.Background()
+    req := &pb.SendRequest{Hook: marshal(typ, info)}
+    resp, err := g.Client.Send(ctx, req)
+    if err != nil {
+        if e, ok := status.FromError(err); ok {
+            return nil, int(e.Code()), err
+        }
+        return nil, 2, err
+    }
+    if captureOutput {
+        return resp.Response.GetValue(), 0, err
+    }
+    return nil, 0, err
+}
+
+func marshal(typ HookType, info handler.HookEvent) *pb.Hook {
+    return &pb.Hook{
+        Upload: &pb.Upload{
+            Id: info.Upload.ID,
+            Size: info.Upload.Size,
+            SizeIsDeferred: info.Upload.SizeIsDeferred,
+            Offset: info.Upload.Offset,
+            MetaData: info.Upload.MetaData,
+            IsPartial: info.Upload.IsPartial,
+            IsFinal: info.Upload.IsFinal,
+            PartialUploads: info.Upload.PartialUploads,
+            Storage: info.Upload.Storage,
+        },
+        HttpRequest: &pb.HTTPRequest{
+            Method: info.HTTPRequest.Method,
+            Uri: info.HTTPRequest.URI,
+            RemoteAddr: info.HTTPRequest.RemoteAddr,
+        },
+        Name: string(typ),
+    }
+}
```
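
The new GrpcHook above is only the client side. A hypothetical counterpart server is sketched below; the HookServiceServer, RegisterHookServiceServer, SendRequest and SendResponse identifiers are assumed from the hook.proto file added further down and from standard protoc-gen-go output, so they must be verified against the generated pkg/proto/v1 package before relying on them.

```go
// Hypothetical gRPC hook server sketch, not part of this diff.
package main

import (
	"context"
	"log"
	"net"

	pb "github.com/tus/tusd/pkg/proto/v1" // import path shown in the GrpcHook client above
	"google.golang.org/grpc"
)

type hookServer struct{}

// Send receives one hook event per call; returning an error would reject the hook.
// The method name and message types are assumptions based on the proto excerpt.
func (hookServer) Send(ctx context.Context, req *pb.SendRequest) (*pb.SendResponse, error) {
	log.Printf("received %s hook for upload %s", req.Hook.Name, req.Hook.Upload.Id)
	return &pb.SendResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8000")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	pb.RegisterHookServiceServer(srv, hookServer{}) // assumed generated registration function
	// Assumed invocation on the tusd side: tusd -hooks-grpc localhost:8000
	log.Fatal(srv.Serve(lis))
}
```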
```diff
@@ -17,9 +17,10 @@
     HookPostReceive HookType = "post-receive"
     HookPostCreate HookType = "post-create"
     HookPreCreate HookType = "pre-create"
+    HookPreFinish HookType = "pre-finish"
 )

-var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish}
+var AvailableHooks []HookType = []HookType{HookPreCreate, HookPostCreate, HookPostReceive, HookPostTerminate, HookPostFinish, HookPreFinish}

 type hookDataStore struct {
     handler.DataStore
```
```diff
@@ -14,9 +14,10 @@ import (
 )

 type HttpHook struct {
     Endpoint string
     MaxRetries int
     Backoff int
+    ForwardHeaders []string
 }

 func (_ HttpHook) Setup() error {
@@ -34,6 +35,14 @@ func (h HttpHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutput
         return nil, 0, err
     }

+    for _, k := range h.ForwardHeaders {
+        // Lookup the Canonicalised version of the specified header
+        if vals, ok := info.HTTPRequest.Header[http.CanonicalHeaderKey(k)]; ok {
+            // but set the case specified by the user
+            req.Header[k] = vals
+        }
+    }
+
     req.Header.Set("Hook-Name", string(typ))
     req.Header.Set("Content-Type", "application/json")
```
@ -13,6 +13,7 @@ type PluginHookHandler interface {
	PostReceive(info handler.HookEvent) error
	PostFinish(info handler.HookEvent) error
	PostTerminate(info handler.HookEvent) error
	PreFinish(info handler.HookEvent) error
}

type PluginHook struct {

@ -54,6 +55,8 @@ func (h PluginHook) InvokeHook(typ HookType, info handler.HookEvent, captureOutp
		err = h.handler.PostCreate(info)
	case HookPreCreate:
		err = h.handler.PreCreate(info)
	case HookPreFinish:
		err = h.handler.PreFinish(info)
	default:
		err = fmt.Errorf("hooks: unknown hook named %s", typ)
	}
@ -0,0 +1,77 @@
// If this file gets changed, you must recompile the generate package in pkg/proto.
// To do this, install the Go protobuf toolchain as mentioned in
// https://github.com/golang/protobuf#installation.
// Then use following command to recompile it with gRPC support:
//   protoc --go_out=plugins=grpc:../../../../../pkg/proto/ v1/hook.proto
// In addition, it may be necessary to update the protobuf or gRPC dependencies as well.

syntax = "proto3";
package v1;

import "google/protobuf/any.proto";

// Uploaded data
message Upload {
  // Unique identifier of the uploaded file
  string id = 1;
  // Total file size in bytes specified in the NewUpload call
  int64 Size = 2;
  // Indicates whether the total file size is deferred until later
  bool SizeIsDeferred = 3;
  // Offset in bytes (zero-based)
  int64 Offset = 4;
  map<string, string> metaData = 5;
  // Indicates that this is a partial upload which will later be used to form
  // a final upload by concatenation. Partial uploads should not be processed
  // when they are finished since they are only incomplete chunks of files.
  bool isPartial = 6;
  // Indicates that this is a final upload
  bool isFinal = 7;
  // If the upload is a final one (see IsFinal) this will be a non-empty
  // ordered slice containing the ids of the uploads of which the final upload
  // will consist after concatenation.
  repeated string partialUploads = 8;
  // Storage contains information about where the data storage saves the upload,
  // for example a file path. The available values vary depending on what data
  // store is used. This map may also be nil.
  map <string, string> storage = 9;
}

message HTTPRequest {
  // Method is the HTTP method, e.g. POST or PATCH
  string method = 1;
  // URI is the full HTTP request URI, e.g. /files/fooo
  string uri = 2;
  // RemoteAddr contains the network address that sent the request
  string remoteAddr = 3;
}

// Hook's data
message Hook {
  // Upload contains information about the upload that caused this hook
  // to be fired.
  Upload upload = 1;
  // HTTPRequest contains details about the HTTP request that reached
  // tusd.
  HTTPRequest httpRequest = 2;
  // The hook name
  string name = 3;
}

// Request data to send hook
message SendRequest {
  // The hook data
  Hook hook = 1;
}

// Response that contains data for the sent hook
message SendResponse {
  // The response of the hook.
  google.protobuf.Any response = 1;
}

// The hook service definition.
service HookService {
  // Sends a hook
  rpc Send (SendRequest) returns (SendResponse) {}
}
@ -7,8 +7,8 @@ import (
	"github.com/tus/tusd/pkg/handler"
)

var stdout = log.New(os.Stdout, "[tusd] ", log.LstdFlags|log.Lmicroseconds)
var stderr = log.New(os.Stderr, "[tusd] ", log.LstdFlags|log.Lmicroseconds)

func logEv(logOutput *log.Logger, eventName string, details ...string) {
	handler.LogEvent(logOutput, eventName, details...)
@ -1,6 +1,7 @@
package cli

import (
	"crypto/tls"
	"net"
	"net/http"
	"strings"

@ -9,6 +10,12 @@ import (
	"github.com/tus/tusd/pkg/handler"
)

const (
	TLS13       = "tls13"
	TLS12       = "tls12"
	TLS12STRONG = "tls12-strong"
)

// Setups the different components, starts a Listener and give it to
// http.Serve().
//

@ -20,6 +27,9 @@ func Serve() {
		MaxSize:                 Flags.MaxSize,
		BasePath:                Flags.Basepath,
		RespectForwardedHeaders: Flags.BehindProxy,
		DisableDownload:         Flags.DisableDownload,
		DisableTermination:      Flags.DisableTermination,
		DisableCors:             Flags.DisableCors,
		StoreComposer:           Composer,
		NotifyCompleteUploads:   true,
		NotifyTerminatedUploads: true,

@ -58,18 +68,22 @@ func Serve() {

	stdout.Printf("Supported tus extensions: %s\n", handler.SupportedExtensions())

	if basepath == "/" {
		// If the basepath is set to the root path, only install the tusd handler
		// and do not show a greeting.
		http.Handle("/", http.StripPrefix("/", handler))
	} else {
		// If a custom basepath is defined, we show a greeting at the root path...
		if Flags.ShowGreeting {
			http.HandleFunc("/", DisplayGreeting)
		}

		// ... and register a route with and without the trailing slash, so we can
		// handle uploads for /files/ and /files, for example.
		basepathWithoutSlash := strings.TrimSuffix(basepath, "/")
		basepathWithSlash := basepathWithoutSlash + "/"

		http.Handle(basepathWithSlash, http.StripPrefix(basepathWithSlash, handler))
		http.Handle(basepathWithoutSlash, http.StripPrefix(basepathWithoutSlash, handler))
	}

@ -86,11 +100,68 @@ func Serve() {
		stderr.Fatalf("Unable to create listener: %s", err)
	}

	protocol := "http"
	if Flags.TLSCertFile != "" && Flags.TLSKeyFile != "" {
		protocol = "https"
	}

	if Flags.HttpSock == "" {
		stdout.Printf("You can now upload files to: %s://%s%s", protocol, address, basepath)
	}

	// If we're not using TLS just start the server and, if http.Serve() returns, just return.
	if protocol == "http" {
		if err = http.Serve(listener, nil); err != nil {
			stderr.Fatalf("Unable to serve: %s", err)
		}
		return
	}

	// Fall-through for TLS mode.
	server := &http.Server{}
	switch Flags.TLSMode {
	case TLS13:
		server.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS13}

	case TLS12:
		// Ciphersuite selection comes from
		// https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6
		// 128-bit AES modes remain as TLSv1.3 is enabled in this mode, and TLSv1.3 compatibility requires an AES-128 ciphersuite.
		server.TLSConfig = &tls.Config{
			MinVersion:               tls.VersionTLS12,
			PreferServerCipherSuites: true,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
				tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
				tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
			},
		}

	case TLS12STRONG:
		// Ciphersuite selection as above, but intersected with
		// https://github.com/denji/golang-tls#perfect-ssl-labs-score-with-go
		// TLSv1.3 is disabled as it requires an AES-128 ciphersuite.
		server.TLSConfig = &tls.Config{
			MinVersion:               tls.VersionTLS12,
			MaxVersion:               tls.VersionTLS12,
			PreferServerCipherSuites: true,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			},
		}

	default:
		stderr.Fatalf("Invalid TLS mode chosen. Recommended valid modes are tls13, tls12 (default), and tls12-strong")
	}

	// Disable HTTP/2; the default non-TLS mode doesn't support it
	server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0)

	if err = server.ServeTLS(listener, Flags.TLSCertFile, Flags.TLSKeyFile); err != nil {
		stderr.Fatalf("Unable to serve: %s", err)
	}
}
@ -0,0 +1,9 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

. /usr/local/share/load-env.sh

exec tusd "$@"
@ -0,0 +1,29 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

tusd_env_vars=(
  AWS_ACCESS_KEY_ID
  AWS_SECRET_ACCESS_KEY
  AWS_REGION
  GCS_SERVICE_ACCOUNT_FILE
  AZURE_STORAGE_ACCOUNT
  AZURE_STORAGE_KEY
)

for env_var in "${tusd_env_vars[@]}"; do
  file_env_var="${env_var}_FILE"

  if [[ -n "${!file_env_var:-}" ]]; then
    if [[ -r "${!file_env_var:-}" ]]; then
      export "${env_var}=$(< "${!file_env_var}")"
      unset "${file_env_var}"
    else
      warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable."
    fi
  fi
done

unset tusd_env_vars
docs/faq.md
@ -2,7 +2,9 @@

### How can I access tusd using HTTPS?

Enable HTTPS by using the `-tls-certificate` and `-tls-key` flags. Note that the support for HTTPS is limited to a small subset of the many possible TLS configuration options. The available options are TLSv1.3 only; TLSv1.3 and TLSv1.2 with supported cipher suites per the guidelines on [Mozilla's SSL Configuration Generator](https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6); and TLSv1.2 with 256-bit AES ciphers only. Also note that the key file must not be encrypted/require a passphrase.

If your needs are more complex than provided for here, you will need to use a reverse proxy in front of tusd. This includes further fine-tuning of ciphers, and the addition of things like HSTS headers. More information about this topic, including sample configurations for Nginx and Apache, can be found in [issue #86](https://github.com/tus/tusd/issues/86#issuecomment-269569077) and in the [Apache example configuration](/examples/apache2.conf); the rationale for why HTTPS is supported at all can be found in [issue #418](https://github.com/tus/tusd/issues/418).

### Can I run tusd behind a reverse proxy?

@ -14,11 +16,11 @@ Yes, it is absolutely possible to do so. Firstly, you should execute the tusd bi

- *Forward hostname and scheme.* If the proxy rewrites the request URL, the tusd server does not know the original URL which was used to reach the proxy. This behavior can lead to situations where tusd returns a redirect to a URL which can not be reached by the client. To avoid this confusion, you can explicitly tell tusd which hostname and scheme to use by supplying the `X-Forwarded-Host` and `X-Forwarded-Proto` headers.

Explicit examples for the above points can be found in the [Nginx configuration](/examples/nginx.conf) which is used to power the [tusd.tusdemo.net](https://tusd.tusdemo.net) instance.

### Can I run custom verification/authentication checks before an upload begins?

Yes, this is made possible by the [hook system](/docs/hooks.md) inside the tusd binary. It enables custom routines to be executed when certain events occur, such as a new upload being created, which can be handled by the `pre-create` hook. Inside the corresponding hook file, you can run your own validations against the provided upload metadata to determine whether the action is actually allowed or should be rejected by tusd. Please have a look at the [corresponding documentation](/docs/hooks.md#pre-create) for a more detailed explanation.

### Can I run tusd inside a VM/Vagrant/VirtualBox?

@ -31,3 +33,41 @@ This error can occur when you are running tusd's disk storage on a file system w

### How can I prevent users from downloading the uploaded files?

tusd allows any user to retrieve a previously uploaded file by issuing an HTTP GET request to the corresponding upload URL. This is possible as long as the uploaded files on the datastore have not been deleted or moved to another location. While it is a handy feature for debugging and testing your setup, we know that there are situations where you don't want to allow downloads or where you want more control over who downloads what. In these scenarios we recommend placing a proxy in front of tusd which takes on the task of access control or even prevents HTTP GET requests entirely. tusd has no built-in feature for controlling or disabling downloads on its own because the main focus is on accepting uploads, not serving files.

### How can I keep the original filename for the uploads?

tusd will generate a unique ID for every upload, e.g. `1881febb4343e9b806cad2e676989c0d`, which is also used as the filename for storing the upload. If you want to keep the original filename, e.g. `my_image.png`, you will have to rename the uploaded file manually after the upload is completed. One can use the [`post-finish` hook](https://github.com/tus/tusd/blob/main/docs/hooks.md#post-finish) to be notified once the upload is completed. The client must also be configured to add the filename to the upload's metadata, which can be [accessed inside the hooks](https://github.com/tus/tusd/blob/main/docs/hooks.md#the-hooks-environment) and used for the renaming operation.
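For illustration, a minimal sketch of such a `post-finish` file hook in Go could look as follows. It assumes the filestore backend (so that `Storage.Path` is a local path) and a `filename` metadata entry; both are assumptions rather than guarantees, and the sketch is not an official example.

```go
package main

import (
	"encoding/json"
	"log"
	"os"
	"path/filepath"
)

// finishInfo models only the fields this sketch needs from the JSON document
// that tusd passes to file hooks on stdin.
type finishInfo struct {
	Upload struct {
		MetaData map[string]string `json:"MetaData"`
		Storage  map[string]string `json:"Storage"`
	} `json:"Upload"`
}

func main() {
	var info finishInfo
	if err := json.NewDecoder(os.Stdin).Decode(&info); err != nil {
		log.Fatalf("could not parse hook input: %s", err)
	}

	name := info.Upload.MetaData["filename"]
	path := info.Upload.Storage["Path"]
	if name == "" || path == "" {
		// Nothing to do if the client sent no filename or the data store
		// does not expose a local path.
		return
	}

	// Metadata is untrusted, so only keep the base name. Link the upload
	// under its original name next to the tusd-generated file; renaming
	// instead would break later HEAD/GET requests for the upload.
	target := filepath.Join(filepath.Dir(path), filepath.Base(name))
	if err := os.Link(path, target); err != nil {
		log.Fatalf("could not link %s to %s: %s", path, target, err)
	}
}
```

Whether to link, copy or rename depends on whether the upload still needs to be retrievable through tusd afterwards.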
### Does tusd support Cross-Origin Resource Sharing (CORS)?

[Cross-Origin Resource Sharing (CORS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) is a technique to allow sharing of data between websites which are hosted on different origins/domains. This is a common task with tus where you have your main application running on `https://example.org` but your tus server is hosted at `https://uploads.example.org`. In this case, the tus server needs to support CORS to signal the browser that it will accept requests from `https://example.org`.

To make your setup easier, tusd already includes the necessary CORS configuration to allow communication with tus clients. By default, it will allow incoming requests from any origin. The following headers are allowed to be included in HTTP requests:

* `Authorization`: Defined in [RFC 2617](https://tools.ietf.org/html/rfc2617#section-2), used in various HTTP authentication protocols.
* `Origin`: Defined in [RFC 6454](https://tools.ietf.org/html/rfc6454), used to specify the origin of a HTTP request. This header is often used to aid in HTTP security.
* `X-Requested-With`: Used to identify AJAX requests. See [here](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields) for details.
* `X-Request-ID`: Correlates HTTP requests between a client and server. See [here](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields) for details.
* `X-HTTP-Method-Override`: Requests a web application to override the method specified in the request with the method given in the header field. See [here](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields) for details.
* `Content-Type`: Defined in [RFC 2616](https://tools.ietf.org/html/rfc2616#section-14.17), indicates the media type of the entity-body.
* `Upload-Length`: A tus specific header used to indicate the total size of an uploaded file. See [here](https://tus.io/protocols/resumable-upload.html#upload-length) for details.
* `Upload-Offset`: A tus specific header used to indicate the starting byte that a PATCH should be used on to upload a chunk of a file. See [here](https://tus.io/protocols/resumable-upload.html#upload-offset) for details.
* `Tus-Resumable`: A tus specific header used to match the client version with the server version of the tus protocol. See [here](https://tus.io/protocols/resumable-upload.html#tus-resumable) for details.
* `Upload-Metadata`: A tus specific header used for integrators to communicate general metadata between a client and server. See [here](https://tus.io/protocols/resumable-upload.html#upload-metadata) for details.
* `Upload-Defer-Length`: A tus specific header used to communicate if the upload file size is not known during the HTTP request it is in. See [here](https://tus.io/protocols/resumable-upload.html#upload-defer-length) for details.
* `Upload-Concat`: A tus specific header used to indicate if the containing HTTP request is the final request for uploading a file or not. See [here](https://tus.io/protocols/resumable-upload.html#upload-concat) for details.

If you are looking for a way to communicate additional information from a client to a server, use the `Upload-Metadata` header.

### How to use Docker Secrets for credentials (Swarm mode only)

Example usage with "minio"/S3 (AWS). Create the secrets:

```bash
printf "minio" | docker secret create minio-username -
printf "miniosecret" | docker secret create minio-password -
```

Those commands create two secrets which are used inside the example [docker-compose.yml](../examples/docker-compose.yml) file.
The provided example assumes that you also have a service named "minio" inside the same Docker network.
We simply append a `_FILE` suffix to the corresponding environment variables. The contents of the mounted file will be assigned to the environment variable without the `_FILE` suffix.
docs/hooks.md
@ -1,6 +1,6 @@
# Hooks

When integrating tusd into an application, it is important to establish a communication channel between the two components. The tusd binary accomplishes this by providing a system which triggers actions when certain events happen, such as an upload being created or finished. This simple-but-powerful system enables use cases ranging from logging over validation and authorization to processing the uploaded files.

When a specific action happens during an upload (pre-create, post-receive, post-finish, or post-terminate), the hook system enables tusd to fire off a specific event. Tusd provides two ways of doing this:

@ -13,13 +13,13 @@ If not otherwise noted, all hooks are invoked in a *non-blocking* way, meaning t

## Blocking Hooks

On the other hand, there are a few *blocking* hooks, such as those caused by the `pre-create` and `pre-finish` events. Because their exit code will dictate whether tusd will accept the current incoming request, tusd will wait until the hook process has exited. Therefore, in order to keep the response times low, one should avoid time-consuming operations inside the processes for blocking hooks.

### Blocking File Hooks

An exit code of `0` indicates that tusd should continue handling the request as normal. On the other hand, a non-zero exit code tells tusd to reject the request with a `500 Internal Server Error` response containing the process' output from stderr. For the sake of logging, the process' output from stdout will always be piped to tusd's stdout.

### Blocking HTTP(S) Hooks

A successful HTTP response code (i.e. smaller than `400`) indicates that tusd should continue handling the request as normal. On the other hand, an HTTP response code greater than `400` will be forwarded to the client performing the upload, along with the body of the hook response. Only the response code will be logged by tusd.

@ -27,15 +27,21 @@ A successful HTTP response code (i.e. smaller than `400`) indicates that tusd sh

### pre-create

This event will be triggered before an upload is created, allowing you to run certain routines. For example, validating that specific metadata values are set, or verifying that a corresponding entity belonging to the upload (e.g. a user) exists. Because this event will result in a blocking hook, you can determine whether the upload should be created or rejected using the exit code. An exit code of `0` will allow the upload to be created and continued as usual. A non-zero exit code will reject an upload creation request, making it a good place for authentication and authorization. Please be aware that during this stage the upload ID will be an empty string and `Storage` will be null. This is because the entity has not been created and therefore this piece of information is not yet available.

### post-create

This event will be triggered after an upload is created, allowing you to run certain routines. For example, notifying other parts of your system that a new upload has to be handled. At this point the upload may have received some data already since the invocation of these hooks may be delayed by a short duration.

### pre-finish

This event will be triggered after an upload is fully finished but before a response has been returned to the client.
This is a blocking hook and, as such, it can be used to validate or post-process an uploaded file.
A non-zero exit code or an HTTP response code greater than `400` will return an HTTP 500 error to the client.

### post-finish

This event will be triggered after an upload is fully finished, meaning that all chunks have been transferred and saved in the storage. After this point, no further modifications, except possible deletion, can be made to the upload entity and it may be desirable to use the file for further processing or notify other applications of the completion of this upload.

### post-terminate

@ -43,11 +49,11 @@ This event will be triggered after an upload has been terminated, meaning that t

### post-receive

This event will be triggered for every running upload to indicate its current progress. It will be emitted whenever the server has received more data from the client, but at most every second. The offset property will be set to the total number of bytes which have been transferred to the server at that time. Please be aware that this number may be higher than the number of bytes which have been stored by the data store!

## Whitelisting Hook Events

The `--hooks-enabled-events` option for the tusd binary works as a whitelist for hook events and takes a comma separated list of hook events (for instance: `pre-create,post-create`). This can be useful to limit the number of hook executions and save resources if you are only interested in some events. If the `--hooks-enabled-events` option is omitted, all default hook events are enabled (pre-create, post-create, post-receive, post-terminate, post-finish).

## File Hooks
### The Hook Directory

@ -87,7 +93,7 @@ The process of the hook files are provided with information about the event and
    // If the upload is a final one, this value will be an array of upload IDs
    // which are concatenated to produce the upload.
    "PartialUploads": null,
    // The upload's metadata which can be supplied by the clients as it wishes.
    // All keys and values in this object will be strings.
    // Be aware that it may contain maliciously crafted values and you must not
    // trust it without escaping it first!
@ -95,7 +101,7 @@ The process of the hook files are provided with information about the event and
      "filename": "transloadit.png"
    },
    // Details about where the data store saved the uploaded file. The different
    // available keys vary depending on the used data store.
    "Storage": {
      // For example, the filestore supplies the absolute file path:
      "Type": "filestore",
@ -114,16 +120,16 @@ The process of the hook files are provided with information about the event and
    "URI": "/files/14b1c4c77771671a8479bc0444bbc5ce",
    "RemoteAddr": "1.2.3.4:47689",
    "Header": {
      "Host": ["myuploads.net"],
      "Cookies": ["..."]
    }
  }
}
```
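For illustration, a blocking file hook does not have to be a shell script. The following is a minimal sketch (not an official example) of a `pre-create` hook written in Go; it assumes the compiled binary is placed as `pre-create` inside the hooks directory and reads the JSON document shown above from stdin, rejecting uploads without a `filename` metadata entry:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// preCreateInfo models only the fields this sketch cares about from the
// JSON document that tusd writes to the hook's stdin.
type preCreateInfo struct {
	Upload struct {
		MetaData map[string]string `json:"MetaData"`
	} `json:"Upload"`
}

func main() {
	var info preCreateInfo
	if err := json.NewDecoder(os.Stdin).Decode(&info); err != nil {
		fmt.Fprintf(os.Stderr, "could not parse hook input: %s\n", err)
		os.Exit(1)
	}

	// Anything written to stderr ends up in tusd's 500 response; a non-zero
	// exit code rejects the upload creation request.
	if info.Upload.MetaData["filename"] == "" {
		fmt.Fprintln(os.Stderr, "filename metadata is required")
		os.Exit(1)
	}
}
```

An exit code of `0` lets the upload proceed as usual, exactly like a shell-based hook.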
## HTTP(S) Hooks

HTTP(S) Hooks are the second type of hooks supported by tusd. Like the file hooks, they are disabled by default. To enable them, pass the `--hooks-http` option to the tusd binary. The flag's value will be an HTTP(S) URL endpoint, which the tusd binary will issue POST requests to:

```bash
$ tusd --hooks-http http://localhost:8081/write

@ -133,10 +139,12 @@ $ tusd --hooks-http http://localhost:8081/write
...
```

Note that the URL must include the `http://` or `https://` prefix!

In case of a blocking hook, an HTTP status code of 400 or greater tells tusd to reject the request (in the same way as a non-zero exit code for File Hooks). See also [issue #170](https://github.com/tus/tusd/issues/170) regarding further improvements.

Headers from the client's upload request can be copied to the hook request with the `-hooks-http-forward-headers` flag.
This is particularly useful for including authentication headers such as `Authorization` or `Cookie`.

### Usage

@ -160,7 +168,7 @@ Tusd will issue a `POST` request to the specified URL endpoint, specifying the h
    // If the upload is a final one, this value will be an array of upload IDs
    // which are concatenated to produce the upload.
    "PartialUploads": null,
    // The upload's metadata which can be supplied by the clients as it wishes.
    // All keys and values in this object will be strings.
    // Be aware that it may contain maliciously crafted values and you must not
    // trust it without escaping it first!
@ -168,7 +176,7 @@ Tusd will issue a `POST` request to the specified URL endpoint, specifying the h
      "filename": "transloadit.png"
    },
    // Details about where the data store saved the uploaded file. The different
    // available keys vary depending on the used data store.
    "Storage": {
      // For example, the filestore supplies the absolute file path:
      "Type": "filestore",
@ -187,8 +195,8 @@ Tusd will issue a `POST` request to the specified URL endpoint, specifying the h
    "URI": "/files/14b1c4c77771671a8479bc0444bbc5ce",
    "RemoteAddr": "1.2.3.4:47689",
    "Header": {
      "Host": ["myuploads.net"],
      "Cookies": ["..."]
    }
  }
}
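For illustration, a minimal sketch of a matching hook endpoint in Go could look as follows. The `/write` route and port `8081` mirror the example above; the field selection is an assumption that only covers what this handler needs:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// hookBody models only the fields this sketch needs from tusd's hook payload.
type hookBody struct {
	Upload struct {
		ID       string            `json:"ID"`
		MetaData map[string]string `json:"MetaData"`
	} `json:"Upload"`
}

func main() {
	http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) {
		var body hookBody
		if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
			http.Error(w, "invalid hook body", http.StatusBadRequest)
			return
		}

		hookName := r.Header.Get("Hook-Name")
		log.Printf("received %q hook for upload %q", hookName, body.Upload.ID)

		// For a blocking hook such as pre-create, a status >= 400 rejects
		// the upload; any other status lets it continue.
		if hookName == "pre-create" && body.Upload.MetaData["filename"] == "" {
			http.Error(w, "filename metadata is required", http.StatusBadRequest)
			return
		}

		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8081", nil))
}
```

Run it alongside `tusd --hooks-http http://localhost:8081/write` to see the requests arrive.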
@ -202,3 +210,76 @@ Tusd uses the [Pester library](https://github.com/sethgrid/pester) to issue requ
$ # Retrying 5 times with a 2 second backoff
$ tusd --hooks-http http://localhost:8081/write --hooks-http-retry 5 --hooks-http-backoff 2
```
## GRPC Hooks

GRPC Hooks are the third type of hooks supported by tusd. Like the other hooks, they are disabled by default. To enable them, pass the `--hooks-grpc` option to the tusd binary. The flag's value is a gRPC endpoint to which the tusd binary will send hook events:

```bash
$ tusd --hooks-grpc localhost:8080

[tusd] Using 'localhost:8080' as the endpoint for gRPC hooks
[tusd] Using './data' as directory storage.
...
```

### Usage

Tusd will issue a gRPC request to the specified endpoint, specifying the hook name, such as pre-create or post-finish, in the `Name` field of the following body:

```js
{
  // The upload object contains the upload's details
  "Upload": {
    // The upload's ID. Will be empty during the pre-create event
    "ID": "14b1c4c77771671a8479bc0444bbc5ce",
    // The upload's total size in bytes.
    "Size": 46205,
    // The upload's current offset in bytes.
    "Offset": 1592,
    // These properties will be set to true if the upload is a final or partial
    // one. See the Concatenation extension for details:
    // http://tus.io/protocols/resumable-upload.html#concatenation
    "IsFinal": false,
    "IsPartial": false,
    // If the upload is a final one, this value will be an array of upload IDs
    // which are concatenated to produce the upload.
    "PartialUploads": null,
    // The upload's metadata which can be supplied by the clients as it wishes.
    // All keys and values in this object will be strings.
    // Be aware that it may contain maliciously crafted values and you must not
    // trust it without escaping it first!
    "MetaData": {
      "filename": "transloadit.png"
    },
    // Details about where the data store saved the uploaded file. The different
    // available keys vary depending on the used data store.
    "Storage": {
      // For example, the filestore supplies the absolute file path:
      "Type": "filestore",
      "Path": "/my/upload/directory/14b1c4c77771671a8479bc0444bbc5ce",

      // The S3Store and GCSStore supply the bucket name and object key:
      "Type": "s3store",
      "Bucket": "my-upload-bucket",
      "Key": "my-prefix/14b1c4c77771671a8479bc0444bbc5ce"
    }
  },
  // Details about the HTTP request which caused this hook to be fired.
  // It can be used to record the client's IP address or inspect the headers.
  "HTTPRequest": {
    "Method": "PATCH",
    "URI": "/files/14b1c4c77771671a8479bc0444bbc5ce",
    "RemoteAddr": "1.2.3.4:47689"
  }
}
```

### Configuration

By default, tusd will retry 3 times based on the gRPC status response or network error, with a 1 second backoff. This can be configured with the flags `--hooks-grpc-retry` and `--hooks-grpc-backoff`, like so:

```bash
$ # Retrying 5 times with a 2 second backoff
$ tusd --hooks-grpc localhost:8081/ --hooks-grpc-retry 5 --hooks-grpc-backoff 2
```
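For illustration, a minimal sketch of a gRPC hook server in Go could look as follows. It assumes the generated `github.com/tus/tusd/pkg/proto/v1` package produced from the `hook.proto` file shown earlier and listens on the endpoint used in the example above:

```go
package main

import (
	"context"
	"log"
	"net"

	pb "github.com/tus/tusd/pkg/proto/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// hookServer implements the HookService defined in hook.proto.
type hookServer struct{}

func (hookServer) Send(ctx context.Context, req *pb.SendRequest) (*pb.SendResponse, error) {
	hook := req.GetHook()
	log.Printf("received %q hook for upload %q", hook.GetName(), hook.GetUpload().GetId())

	// Returning a gRPC error from a blocking hook (e.g. pre-create) causes
	// tusd to reject the corresponding request, mirroring the non-zero exit
	// code of file hooks.
	if hook.GetName() == "pre-create" && hook.GetUpload().GetMetaData()["filename"] == "" {
		return nil, status.Error(codes.InvalidArgument, "filename metadata is required")
	}

	return &pb.SendResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatalf("failed to listen: %s", err)
	}

	srv := grpc.NewServer()
	pb.RegisterHookServiceServer(srv, hookServer{})
	log.Fatal(srv.Serve(lis))
}
```

Start it before running `tusd --hooks-grpc localhost:8080` so that the first hook invocation does not have to be retried.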
@ -9,14 +9,32 @@ Windows in various formats of the

## Compile from source

The only requirement for building tusd is [Go](http://golang.org/doc/install).
We only test and support the [two latest major releases](https://go.dev/dl/) of
Go, although tusd might also run with other versions.

Once a recent Go version is installed, you can clone the git repository, install
the remaining dependencies and build the binary:

```bash
git clone https://github.com/tus/tusd.git
cd tusd

go build -o tusd cmd/tusd/main.go
```

## Kubernetes installation

A Helm chart for installing Tusd on Kubernetes is available [here](https://github.com/sagikazarmark/helm-charts/tree/master/charts/tusd).

You can install it by running the following commands:

```bash
helm repo add skm https://charts.sagikazarmark.dev
helm install --generate-name --wait skm/tusd
```

Minimum requirements:
- Helm 3+
- Kubernetes 1.16+

Check out the available [values](https://github.com/sagikazarmark/helm-charts/tree/master/charts/tusd#values) for customizing the installation.
@ -1,5 +1,5 @@
# Monitoring tusd

tusd exposes metrics at the `/metrics` endpoint ([example](https://tusd.tusdemo.net/metrics)) in the [Prometheus Text Format](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format). This allows you to hook up Prometheus or any other compatible service to your tusd instance and let it monitor tusd. Alternatively, there are many [parsers and client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) available for consuming the metrics format directly.

The endpoint contains details about Go's internals, general HTTP numbers and details about tus uploads and tus-specific errors. It can be completely disabled using the `-expose-metrics false` flag and its path can be changed using the `-metrics-path /my/numbers` flag.
@ -34,7 +34,26 @@ $ tusd -s3-bucket=my-test-bucket.com
|
||||||
[tusd] 2019/09/29 21:11:23 You can now upload files to: http://0.0.0.0:1080/files/
|
[tusd] 2019/09/29 21:11:23 You can now upload files to: http://0.0.0.0:1080/files/
|
||||||
```
|
```
|
||||||
|
|
||||||
|
If your S3 bucket has been configured for AWS S3 Transfer Acceleration and you want to make use of that advanced service,
|
||||||
|
you can direct tusd to automatically use the designated AWS acceleration endpoint for your bucket by including the optional
|
||||||
|
command line flag `s3-transfer-acceleration` as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ export AWS_ACCESS_KEY_ID=xxxxx
|
||||||
|
$ export AWS_SECRET_ACCESS_KEY=xxxxx
|
||||||
|
$ export AWS_REGION=eu-west-1
|
||||||
|
$ tusd -s3-bucket=my-test-bucket.com -s3-transfer-acceleration
|
||||||
|
[tusd] 2019/09/29 21:11:23 Using 's3://my-test-bucket.com' as S3 bucket for storage with AWS S3 Transfer Acceleration enabled.
|
||||||
|
[tusd] 2019/09/29 21:11:23 Using 0.00MB as maximum size.
|
||||||
|
[tusd] 2019/09/29 21:11:23 Using 0.0.0.0:1080 as address to listen.
|
||||||
|
[tusd] 2019/09/29 21:11:23 Using /files/ as the base path.
|
||||||
|
[tusd] 2019/09/29 21:11:23 Using /metrics as the metrics path.
|
||||||
|
[tusd] 2019/09/29 21:11:23 Supported tus extensions: creation,creation-with-upload,termination,concatenation,creation-defer-length
|
||||||
|
[tusd] 2019/09/29 21:11:23 You can now upload files to: http://0.0.0.0:1080/files/
|
||||||
|
```
|
||||||
|
|
||||||
tusd is also able to read the credentials automatically from a shared credentials file (~/.aws/credentials) as described in https://github.com/aws/aws-sdk-go#configuring-credentials.
|
tusd is also able to read the credentials automatically from a shared credentials file (~/.aws/credentials) as described in https://github.com/aws/aws-sdk-go#configuring-credentials.
|
||||||
|
But be mindful of the need to declare the AWS_REGION value which isn't conventionally associated with credentials.
|
||||||
|
|
||||||
Furthermore, tusd also has support for storing uploads on Google Cloud Storage. In order to enable this feature, supply the path to your account file containing the necessary credentials:
|
Furthermore, tusd also has support for storing uploads on Google Cloud Storage. In order to enable this feature, supply the path to your account file containing the necessary credentials:
|
||||||
|
|
||||||
|
@ -48,58 +67,160 @@ $ tusd -gcs-bucket=my-test-bucket.com
|
||||||
[tusd] Using /metrics as the metrics path.
|
[tusd] Using /metrics as the metrics path.
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Tusd also supports storing uploads on Microsoft Azure Blob Storage. In order to enable this feature, provide the
|
||||||
|
corresponding access credentials using environment variables.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ export AZURE_STORAGE_ACCOUNT=xxxxx
|
||||||
|
$ export AZURE_STORAGE_KEY=xxxxx
|
||||||
|
$ tusd -azure-storage my-test-container
|
||||||
|
[tusd] 2023/02/13 16:13:20.937373 Custom Azure Endpoint not specified in flag variable azure-endpoint.
|
||||||
|
Using endpoint https://xxxxx.blob.core.windows.net
|
||||||
|
[tusd] Using 0.00MB as maximum size.
|
||||||
|
[tusd] Using 0.0.0.0:1080 as address to listen.
|
||||||
|
[tusd] Using /files/ as the base path.
|
||||||
|
[tusd] Using /metrics as the metrics path.
|
||||||
|
```
|
||||||
|
|
||||||
|
If you want to upload to Microsoft Azure Blob Storage using a custom endpoint, e.g when using [Azurite](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#configure-a-connection-string-for-azurite) for local development,
|
||||||
|
you can specify the endpoint using the `-azure-endpoint` flag.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ export AZURE_STORAGE_ACCOUNT=devstoreaccount1
|
||||||
|
$ export AZURE_STORAGE_KEY=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
|
||||||
|
$ tusd -azure-storage my-test-container -azure-endpoint https://my-custom-endpoint.com
|
||||||
|
[tusd] 2023/02/13 16:15:18.641937 Using Azure endpoint http://127.0.0.1:10000/devstoreaccount1
|
||||||
|
[tusd] Using 0.00MB as maximum size.
|
||||||
|
[tusd] Using 0.0.0.0:1080 as address to listen.
|
||||||
|
[tusd] Using /files/ as the base path.
|
||||||
|
[tusd] Using /metrics as the metrics path.
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also upload blobs to Microsoft Azure Blob Storage with a different storage tier, than what is set as the default for the storage account.
|
||||||
|
This can be done by using the `-azure-blob-access-tier` flag.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ export AZURE_STORAGE_ACCOUNT=xxxxx
|
||||||
|
$ export AZURE_STORAGE_KEY=xxxxx
|
||||||
|
$ tusd -azure-storage my-test-container -azure-blob-access-tier cool
|
||||||
|
[tusd] 2023/02/13 16:13:20.937373 Custom Azure Endpoint not specified in flag variable azure-endpoint.
|
||||||
|
Using endpoint https://xxxxx.blob.core.windows.net
|
||||||
|
[tusd] Using 0.00MB as maximum size.
|
||||||
|
[tusd] Using 0.0.0.0:1080 as address to listen.
|
||||||
|
[tusd] Using /files/ as the base path.
|
||||||
|
[tusd] Using /metrics as the metrics path.
|
||||||
|
```
|
||||||
|
|
||||||
|
TLS support for HTTPS connections can be enabled by supplying a certificate and private key. Note that the certificate file must include the entire chain of certificates up to the CA certificate. The default configuration supports TLSv1.2 and TLSv1.3. It is possible to use only TLSv1.3 with `-tls-mode=tls13`; alternately, it is possible to disable TLSv1.3 and use only 256-bit AES ciphersuites with `-tls-mode=tls12-strong`. The following example generates a self-signed certificate for `localhost` and then uses it to serve files on the loopback address; that this certificate is not appropriate for production use. Note also that the key file must not be encrypted/require a passphrase.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ openssl req -x509 -new -newkey rsa:4096 -nodes -sha256 -days 3650 -keyout localhost.key -out localhost.pem -subj "/CN=localhost"
|
||||||
|
Generating a 4096 bit RSA private key
|
||||||
|
........................++
|
||||||
|
..........................................++
|
||||||
|
writing new private key to 'localhost.key'
|
||||||
|
-----
|
||||||
|
$ tusd -upload-dir=./data -host=127.0.0.1 -port=8443 -tls-certificate=localhost.pem -tls-key=localhost.key
|
||||||
|
[tusd] Using './data' as directory storage.
|
||||||
|
[tusd] Using 0.00MB as maximum size.
|
||||||
|
[tusd] Using 127.0.0.1:8443 as address to listen.
|
||||||
|
[tusd] Using /files/ as the base path.
|
||||||
|
[tusd] Using /metrics as the metrics path.
|
||||||
|
[tusd] Supported tus extensions: creation,creation-with-upload,termination,concatenation,creation-defer-length
|
||||||
|
[tusd] You can now upload files to: https://127.0.0.1:8443/files/
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
Besides these simple examples, tusd can be easily configured using a variety of command line
|
Besides these simple examples, tusd can be easily configured using a variety of command line
|
||||||
options:
|
options:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ tusd -help
|
$ tusd -help
|
||||||
Usage of tusd:
|
-azure-blob-access-tier string
|
||||||
|
Blob access tier when uploading new files (possible values: archive, cool, hot, '')
|
||||||
|
-azure-container-access-type string
|
||||||
|
Access type when creating a new container if it does not exist (possible values: blob, container, '')
|
||||||
|
-azure-endpoint string
|
||||||
|
Custom Endpoint to use for Azure BlockBlob Storage (requires azure-storage to be pass)
|
||||||
|
-azure-object-prefix string
|
||||||
|
Prefix for Azure object names
|
||||||
|
-azure-storage string
|
||||||
|
Use Azure BlockBlob Storage with this container name as a storage backend (requires the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variable to be set)
|
||||||
-base-path string
|
-base-path string
|
||||||
Basepath of the HTTP server (default "/files/")
|
Basepath of the HTTP server (default "/files/")
|
||||||
-behind-proxy
|
-behind-proxy
|
||||||
Respect X-Forwarded-* and similar headers which may be set by proxies
|
Respect X-Forwarded-* and similar headers which may be set by proxies
|
||||||
|
-cpuprofile string
|
||||||
|
write cpu profile to file
|
||||||
-expose-metrics
|
-expose-metrics
|
||||||
Expose metrics about tusd usage (default true)
|
Expose metrics about tusd usage (default true)
|
||||||
-gcs-bucket string
|
-gcs-bucket string
|
||||||
Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)
|
Use Google Cloud Storage with this bucket as storage backend (requires the GCS_SERVICE_ACCOUNT_FILE environment variable to be set)
|
||||||
-gcs-object-prefix string
|
-gcs-object-prefix string
|
||||||
Prefix for GCS object names (can't contain underscore character)
|
Prefix for GCS object names
|
||||||
-hooks-dir string
|
-hooks-dir string
|
||||||
Directory to search for available hooks scripts
|
Directory to search for available hooks scripts
|
||||||
-hooks-enabled-events string
|
-hooks-enabled-events string
|
||||||
Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable all events
|
Comma separated list of enabled hook events (e.g. post-create,post-finish). Leave empty to enable default events (default "pre-create,post-create,post-receive,post-terminate,post-finish")
|
||||||
|
-hooks-grpc string
|
||||||
|
A gRPC endpoint to which hook events will be sent
|
||||||
|
-hooks-grpc-backoff int
|
||||||
|
Number of seconds to wait before each retry (default 1)
|
||||||
|
-hooks-grpc-retry int
|
||||||
|
Number of times to retry on a server error or network timeout (default 3)
|
||||||
-hooks-http string
|
-hooks-http string
|
||||||
An HTTP endpoint to which hook events will be sent
|
An HTTP endpoint to which hook events will be sent
|
||||||
-hooks-http-backoff int
|
-hooks-http-backoff int
|
||||||
Number of seconds to wait before each retry (default 1)
|
Number of seconds to wait before each retry (default 1)
|
||||||
|
-hooks-http-forward-headers string
|
||||||
|
List of HTTP request headers to be forwarded from the client request to the hook endpoint
|
||||||
-hooks-http-retry int
|
-hooks-http-retry int
|
||||||
Number of times to retry on a 500 or network timeout (default 3)
|
Number of times to retry on a 500 or network timeout (default 3)
|
||||||
-hooks-plugin string
|
-hooks-plugin string
|
||||||
Path to a Go plugin for loading hook functions (only supported on Linux and macOS; highly EXPERIMENTAL and may BREAK in the future)
|
Path to a Go plugin for loading hook functions (only supported on Linux and macOS; highly EXPERIMENTAL and may BREAK in the future)
|
||||||
-hooks-stop-code int
|
-hooks-stop-code int
|
||||||
Return code from post-receive hook which causes tusd to stop and delete the current upload. A zero value means that no uploads will be stopped
|
Return code from post-receive hook which causes tusd to stop and delete the current upload. A zero value means that no uploads will be stopped
|
||||||
-host string
|
-host string
|
||||||
Host to bind HTTP server to (default "0.0.0.0")
|
Host to bind HTTP server to (default "0.0.0.0")
|
||||||
-max-size int
|
-max-size int
|
||||||
Maximum size of a single upload in bytes
|
Maximum size of a single upload in bytes
|
||||||
-metrics-path string
|
-metrics-path string
|
||||||
Path under which the metrics endpoint will be accessible (default "/metrics")
|
Path under which the metrics endpoint will be accessible (default "/metrics")
|
||||||
-port string
|
-port string
|
||||||
Port to bind HTTP server to (default "1080")
|
Port to bind HTTP server to (default "1080")
|
||||||
-s3-bucket string
|
-s3-bucket string
|
||||||
Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)
|
Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)
|
||||||
|
-s3-disable-content-hashes
|
||||||
|
Disable the calculation of MD5 and SHA256 hashes for the content that gets uploaded to S3 to minimize CPU usage (experimental and may be removed in the future)
|
||||||
|
-s3-disable-ssl
|
||||||
|
Disable SSL and only use HTTP for communication with S3 (experimental and may be removed in the future)
|
||||||
-s3-endpoint string
|
-s3-endpoint string
|
||||||
Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be passed)
|
Endpoint to use S3 compatible implementations like minio (requires s3-bucket to be passed)
|
||||||
-s3-object-prefix string
|
-s3-object-prefix string
|
||||||
Prefix for S3 object names
|
Prefix for S3 object names
|
||||||
|
-s3-part-size int
|
||||||
|
Size in bytes of the individual upload requests made to the S3 API. Defaults to 50MiB (experimental and may be removed in the future) (default 52428800)
|
||||||
|
-s3-transfer-acceleration
|
||||||
|
Use AWS S3 transfer acceleration endpoint (requires -s3-bucket option and Transfer Acceleration property on S3 bucket to be set)
|
||||||
|
-show-greeting
|
||||||
|
Show the greeting message (default true)
|
||||||
-timeout int
|
-timeout int
|
||||||
Read timeout for connections in milliseconds. A zero value means that reads will not timeout (default 30000)
|
Read timeout for connections in milliseconds. A zero value means that reads will not timeout (default 6000)
|
||||||
|
-tls-certificate string
|
||||||
|
Path to the file containing the x509 TLS certificate to be used. The file should also contain any intermediate certificates and the CA certificate.
|
||||||
|
-tls-key string
|
||||||
|
Path to the file containing the key for the TLS certificate.
|
||||||
|
-tls-mode string
|
||||||
|
Specify which TLS mode to use; valid modes are tls13, tls12, and tls12-strong. (default "tls12")
|
||||||
-unix-sock string
|
-unix-sock string
|
||||||
If set, will listen to a UNIX socket at this location instead of a TCP socket
|
If set, will listen to a UNIX socket at this location instead of a TCP socket
|
||||||
-upload-dir string
|
-upload-dir string
|
||||||
Directory to store uploads in (default "./data")
|
Directory to store uploads in (default "./data")
|
||||||
|
-disable-cors
|
||||||
|
Disables CORS headers. If set to true, tusd will not send any CORS related header. This is useful if you have a proxy sitting in front of tusd that handles CORS (default false)
|
||||||
-verbose
|
-verbose
|
||||||
Enable verbose logging output (default true)
|
Enable verbose logging output (default true)
|
||||||
-version
|
-version
|
||||||
Print tusd version information
|
Print tusd version information
|
||||||
|
|
||||||
```
|
```
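The hook-related flags above (`-hooks-http`, `-hooks-http-retry`, `-hooks-http-backoff`, `-hooks-http-forward-headers`, `-hooks-stop-code`) point tusd at an HTTP endpoint that receives a JSON event for each enabled hook. A minimal receiver might look like the sketch below. The payload shape is an assumption based only on the `.Upload.MetaData.filename` field accessed by the example hook script later in this diff, and the `Hook-Name` header used here is an assumption about how the hook type is communicated; the port and path are placeholders.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// hookEvent models only the fields this example cares about. The real
// payload sent by tusd contains more information about the upload and
// the HTTP request.
type hookEvent struct {
	Upload struct {
		ID       string            `json:"ID"`
		MetaData map[string]string `json:"MetaData"`
	} `json:"Upload"`
}

func main() {
	http.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) {
		var event hookEvent
		if err := json.NewDecoder(r.Body).Decode(&event); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		// Assumed: the hook name (e.g. pre-create, post-finish) arrives in a header.
		log.Printf("received %s for upload %s (filename=%q)",
			r.Header.Get("Hook-Name"), event.Upload.ID, event.Upload.MetaData["filename"])

		// Responding with a non-2xx status to a pre-create hook would
		// reject the upload; here we simply accept everything.
		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8081", nil))
}
```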
|
||||||
|
|
|
@@ -9,8 +9,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/tus/tusd"
|
"github.com/tus/tusd/pkg/filestore"
|
||||||
"github.com/tus/tusd/filestore"
|
tusd "github.com/tus/tusd/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
@@ -34,13 +34,24 @@ func main() {
|
||||||
// Create a new HTTP handler for the tusd server by providing a configuration.
|
// Create a new HTTP handler for the tusd server by providing a configuration.
|
||||||
// The StoreComposer property must be set to allow the handler to function.
|
// The StoreComposer property must be set to allow the handler to function.
|
||||||
handler, err := tusd.NewHandler(tusd.Config{
|
handler, err := tusd.NewHandler(tusd.Config{
|
||||||
BasePath: "/files/",
|
BasePath: "/files/",
|
||||||
StoreComposer: composer,
|
StoreComposer: composer,
|
||||||
|
NotifyCompleteUploads: true,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("Unable to create handler: %s", err))
|
panic(fmt.Errorf("Unable to create handler: %s", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Start another goroutine for receiving events from the handler whenever
|
||||||
|
// an upload is completed. The event will contain details about the upload
|
||||||
|
// itself and the relevant HTTP request.
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
event := <-handler.CompleteUploads
|
||||||
|
fmt.Printf("Upload %s finished\n", event.Upload.ID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
// Right now, nothing has happened since we need to start the HTTP server on
|
// Right now, nothing has happened since we need to start the HTTP server on
|
||||||
// our own. In the end, tusd will start listening on and accept requests at
|
// our own. In the end, tusd will start listening on and accept requests at
|
||||||
// http://localhost:8080/files
|
// http://localhost:8080/files
|
||||||
|
@@ -57,7 +68,7 @@ Please consult the [online documentation](https://godoc.org/github.com/tus/tusd/
|
||||||
|
|
||||||
## Implementing own storages
|
## Implementing own storages
|
||||||
|
|
||||||
The tusd server is built to be as flexible as possible and to allow the use of different upload storage mechanisms. B
|
The tusd server is built to be as flexible as possible and to allow the use of different upload storage mechanisms.
|
||||||
|
|
||||||
If you have different requirements, you can build your own storage backend which will save the files to a remote FTP server or similar. Doing so is as simple as implementing the [`tusd.DataStore`](https://godoc.org/github.com/tus/tusd/pkg/#DataStore) interface and using the new struct in the [configuration object](https://godoc.org/github.com/tus/tusd/pkg/#Config). Please consult the documentation for detailed information about the required methods.
|
If you have different requirements, you can build your own storage backend which will save the files to a remote FTP server or similar. Doing so is as simple as implementing the [`tusd.DataStore`](https://godoc.org/github.com/tus/tusd/pkg/#DataStore) interface and using the new struct in the [configuration object](https://godoc.org/github.com/tus/tusd/pkg/#Config). Please consult the documentation for detailed information about the required methods.
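For concreteness, a minimal sketch of such a backend follows. It keeps uploads in an in-memory map, which is only useful for illustration; the `memoryStore` and `memoryUpload` names are hypothetical, but the method signatures mirror the `DataStore` and `Upload` methods implemented by the stores elsewhere in this diff.

```go
package memorystore

import (
	"bytes"
	"context"
	"io"

	"github.com/tus/tusd/pkg/handler"
)

// memoryStore keeps every upload in a map. It is an illustration-only
// backend: not concurrency-safe and not persistent.
type memoryStore struct {
	uploads map[string]*memoryUpload
}

type memoryUpload struct {
	info handler.FileInfo
	data bytes.Buffer
}

func New() *memoryStore {
	return &memoryStore{uploads: make(map[string]*memoryUpload)}
}

// UseIn registers the store as the core data store of a composer,
// mirroring the UseIn methods of the real stores.
func (s *memoryStore) UseIn(composer *handler.StoreComposer) {
	composer.UseCore(s)
}

func (s *memoryStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
	// A real store must generate an ID here if info.ID is empty.
	u := &memoryUpload{info: info}
	s.uploads[info.ID] = u
	return u, nil
}

func (s *memoryStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
	u, ok := s.uploads[id]
	if !ok {
		return nil, handler.ErrNotFound
	}
	return u, nil
}

func (u *memoryUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
	n, err := io.Copy(&u.data, src)
	u.info.Offset += n
	return n, err
}

func (u *memoryUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
	return u.info, nil
}

func (u *memoryUpload) GetReader(ctx context.Context) (io.Reader, error) {
	return bytes.NewReader(u.data.Bytes()), nil
}

func (u *memoryUpload) FinishUpload(ctx context.Context) error {
	return nil
}
```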
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,28 @@
|
||||||
|
version: "3.9"
|
||||||
|
services:
|
||||||
|
tusd:
|
||||||
|
image: tusproject/tusd:v1.9
|
||||||
|
command: -verbose -s3-bucket mybucket -s3-endpoint http://minio:9000
|
||||||
|
volumes:
|
||||||
|
- tusd:/data
|
||||||
|
environment:
|
||||||
|
- AWS_REGION=us-east-1
|
||||||
|
- AWS_ACCESS_KEY_ID_FILE=/run/secrets/minio-username
|
||||||
|
- AWS_SECRET_ACCESS_KEY_FILE=/run/secrets/minio-password
|
||||||
|
secrets:
|
||||||
|
- minio-username
|
||||||
|
- minio-password
|
||||||
|
networks:
|
||||||
|
- tusd
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
tusd:
|
||||||
|
|
||||||
|
secrets:
|
||||||
|
minio-username:
|
||||||
|
external: true
|
||||||
|
minio-password:
|
||||||
|
external: true
|
||||||
|
|
||||||
|
networks:
|
||||||
|
tusd:
|
|
@@ -1,6 +1,6 @@
|
||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
filename=$(cat /dev/stdin | jq .MetaData.filename)
|
filename=$(cat /dev/stdin | jq .Upload.MetaData.filename)
|
||||||
if [ -z "$filename" ]; then
|
if [ -z "$filename" ]; then
|
||||||
echo "Error: no filename provided"
|
echo "Error: no filename provided"
|
||||||
exit 1
|
exit 1
|
||||||
|
|
|
@@ -5,9 +5,9 @@ server {
|
||||||
listen 443 http2 ssl;
|
listen 443 http2 ssl;
|
||||||
listen [::]:443 http2 ipv6only=on ssl;
|
listen [::]:443 http2 ipv6only=on ssl;
|
||||||
|
|
||||||
ssl_certificate /etc/letsencrypt/live/master.tus.io/fullchain.pem;
|
ssl_certificate /etc/letsencrypt/live/tusd.tusdemo.net/fullchain.pem;
|
||||||
ssl_certificate_key /etc/letsencrypt/live/master.tus.io/privkey.pem;
|
ssl_certificate_key /etc/letsencrypt/live/tusd.tusdemo.net/privkey.pem;
|
||||||
ssl_trusted_certificate /etc/letsencrypt/live/master.tus.io/fullchain.pem;
|
ssl_trusted_certificate /etc/letsencrypt/live/tusd.tusdemo.net/fullchain.pem;
|
||||||
|
|
||||||
# Load custom parameters for Diffie Hellman key exchange to avoid the usage
|
# Load custom parameters for Diffie Hellman key exchange to avoid the usage
|
||||||
# of common primes
|
# of common primes
|
||||||
|
@@ -38,7 +38,7 @@ server {
|
||||||
ssl_session_cache shared:SSL:5m;
|
ssl_session_cache shared:SSL:5m;
|
||||||
ssl_session_tickets off;
|
ssl_session_tickets off;
|
||||||
|
|
||||||
server_name master.tus.io;
|
server_name tusd.tusdemo.net;
|
||||||
|
|
||||||
# certbot will place the files required for the HTTP challenge in the
|
# certbot will place the files required for the HTTP challenge in the
|
||||||
# webroot under the .well-known/acme-challenge directory. Therefore we must
|
# webroot under the .well-known/acme-challenge directory. Therefore we must
|
||||||
|
@@ -57,7 +57,7 @@ server {
|
||||||
proxy_http_version 1.1;
|
proxy_http_version 1.1;
|
||||||
|
|
||||||
# Add X-Forwarded-* headers
|
# Add X-Forwarded-* headers
|
||||||
proxy_set_header X-Forwarded-Host $hostname;
|
proxy_set_header X-Forwarded-Host $host;
|
||||||
proxy_set_header X-Forwarded-Proto $scheme;
|
proxy_set_header X-Forwarded-Proto $scheme;
|
||||||
|
|
||||||
proxy_set_header Upgrade $http_upgrade;
|
proxy_set_header Upgrade $http_upgrade;
|
||||||
|
|
|
@@ -0,0 +1,58 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/tus/tusd/pkg/filestore"
|
||||||
|
tusd "github.com/tus/tusd/pkg/handler"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Create a new FileStore instance which is responsible for
|
||||||
|
// storing the uploaded file on disk in the specified directory.
|
||||||
|
// This path _must_ exist before tusd will store uploads in it.
|
||||||
|
// If you want to save them on a different medium, for example
|
||||||
|
// a remote FTP server, you can implement your own storage backend
|
||||||
|
// by implementing the tusd.DataStore interface.
|
||||||
|
store := filestore.FileStore{
|
||||||
|
Path: "./uploads",
|
||||||
|
}
|
||||||
|
|
||||||
|
// A storage backend for tusd may consist of multiple different parts which
|
||||||
|
// handle upload creation, locking, termination and so on. The composer is a
|
||||||
|
// place where all those separated pieces are joined together. In this example
|
||||||
|
// we only use the file store but you may plug in multiple.
|
||||||
|
composer := tusd.NewStoreComposer()
|
||||||
|
store.UseIn(composer)
|
||||||
|
|
||||||
|
// Create a new HTTP handler for the tusd server by providing a configuration.
|
||||||
|
// The StoreComposer property must be set to allow the handler to function.
|
||||||
|
handler, err := tusd.NewHandler(tusd.Config{
|
||||||
|
BasePath: "/files/",
|
||||||
|
StoreComposer: composer,
|
||||||
|
NotifyCompleteUploads: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("Unable to create handler: %s", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start another goroutine for receiving events from the handler whenever
|
||||||
|
// an upload is completed. The event will contains details about the upload
|
||||||
|
// itself and the relevant HTTP request.
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
event := <-handler.CompleteUploads
|
||||||
|
fmt.Printf("Upload %s finished\n", event.Upload.ID)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Right now, nothing has happened since we need to start the HTTP server on
|
||||||
|
// our own. In the end, tusd will start listening on and accept request at
|
||||||
|
// http://localhost:8080/files
|
||||||
|
http.Handle("/files/", http.StripPrefix("/files/", handler))
|
||||||
|
err = http.ListenAndServe(":8080", nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("Unable to listen: %s", err))
|
||||||
|
}
|
||||||
|
}
|
27
go.mod
|
@@ -1,17 +1,24 @@
|
||||||
module github.com/tus/tusd
|
module github.com/tus/tusd
|
||||||
|
|
||||||
go 1.12
|
// Specify the Go version needed for the Heroku deployment
|
||||||
|
// See https://github.com/heroku/heroku-buildpack-go#go-module-specifics
|
||||||
|
// +heroku goVersion go1.19
|
||||||
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go v0.40.0
|
cloud.google.com/go/storage v1.30.1
|
||||||
github.com/aws/aws-sdk-go v1.20.1
|
github.com/Azure/azure-storage-blob-go v0.14.0
|
||||||
|
github.com/aws/aws-sdk-go v1.44.275
|
||||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
|
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
|
||||||
github.com/golang/mock v1.3.1
|
github.com/golang/mock v1.6.0
|
||||||
github.com/prometheus/client_golang v1.0.0
|
github.com/golang/protobuf v1.5.3
|
||||||
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
|
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
|
||||||
github.com/stretchr/testify v1.3.0
|
github.com/prometheus/client_golang v1.15.1
|
||||||
github.com/vimeo/go-util v1.2.0
|
github.com/sethgrid/pester v1.2.0
|
||||||
google.golang.org/api v0.6.0
|
github.com/stretchr/testify v1.8.4
|
||||||
|
github.com/vimeo/go-util v1.4.1
|
||||||
|
google.golang.org/api v0.125.0
|
||||||
|
google.golang.org/grpc v1.55.0
|
||||||
gopkg.in/Acconut/lockfile.v1 v1.1.0
|
gopkg.in/Acconut/lockfile.v1 v1.1.0
|
||||||
gopkg.in/h2non/gock.v1 v1.0.14
|
gopkg.in/h2non/gock.v1 v1.1.2
|
||||||
)
|
)
|
||||||
|
|
|
@@ -1,17 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Copyright (c) 2018, Transloadit Ltd.
|
|
||||||
# Authors:
|
|
||||||
# - Kevin van Zonneveld <kevin@transloadit.com>
|
|
||||||
|
|
||||||
set -o pipefail
|
|
||||||
set -o errexit
|
|
||||||
set -o nounset
|
|
||||||
# set -o xtrace
|
|
||||||
|
|
||||||
# # Set magic variables for current FILE & DIR
|
|
||||||
# __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
||||||
# __file="${__dir}/$(basename "${0}")"
|
|
||||||
# __base="$(basename ${__file})"
|
|
||||||
# __root="$(cd "$(dirname "${__dir}")" && pwd)"
|
|
||||||
|
|
||||||
kubectl exec -it $(kubectl get pods --namespace tus -o go-template --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') --namespace tus -- /bin/sh
|
|
|
@@ -1,82 +0,0 @@
|
||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: tusd
|
|
||||||
namespace: tus
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: tusd
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- image: docker.io/tusproject/tusd:latest
|
|
||||||
imagePullPolicy: Always
|
|
||||||
args: ["-s3-bucket","tusdtest.transloadit.com","-port=8080","-behind-proxy","-max-size=20000000000","-timeout=6000"]
|
|
||||||
name: tusd
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
memory: "1Gi"
|
|
||||||
requests:
|
|
||||||
memory: "1Gi"
|
|
||||||
ports:
|
|
||||||
- name: tusd-web
|
|
||||||
containerPort: 8080
|
|
||||||
envFrom:
|
|
||||||
- secretRef:
|
|
||||||
name: tusd-env
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: tusd
|
|
||||||
namespace: tus
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
- name: tusd-web
|
|
||||||
port: 80
|
|
||||||
targetPort: 8080
|
|
||||||
protocol: TCP
|
|
||||||
selector:
|
|
||||||
app: tusd
|
|
||||||
---
|
|
||||||
apiVersion: extensions/v1beta1
|
|
||||||
kind: Ingress
|
|
||||||
metadata:
|
|
||||||
name: tusd
|
|
||||||
namespace: tus
|
|
||||||
annotations:
|
|
||||||
certmanager.k8s.io/cluster-issuer: "letsencrypt-prod"
|
|
||||||
certmanager.k8s.io/acme-challenge-type: "http01"
|
|
||||||
kubernetes.io/tls-acme: "true"
|
|
||||||
kubernetes.io/ingress.class: "nginx"
|
|
||||||
nginx.ingress.kubernetes.io/proxy-body-size: 0m
|
|
||||||
nginx.ingress.kubernetes.io/proxy-connect-timeout: "300"
|
|
||||||
nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
|
|
||||||
nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
|
|
||||||
nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
|
|
||||||
nginx.ingress.kubernetes.io/ssl-redirect: "false"
|
|
||||||
spec:
|
|
||||||
tls:
|
|
||||||
- hosts:
|
|
||||||
- tusd.tus.io
|
|
||||||
secretName: tusd-tls
|
|
||||||
- hosts:
|
|
||||||
- master.tus.io
|
|
||||||
secretName: master-tls
|
|
||||||
rules:
|
|
||||||
- host: tusd.tus.io
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- path: /
|
|
||||||
backend:
|
|
||||||
serviceName: tusd
|
|
||||||
servicePort: 80
|
|
||||||
- host: master.tus.io
|
|
||||||
http:
|
|
||||||
paths:
|
|
||||||
- path: /
|
|
||||||
backend:
|
|
||||||
serviceName: tusd
|
|
||||||
servicePort: 80
|
|
|
@@ -0,0 +1,325 @@
|
||||||
|
// Package azurestore provides an Azure Blob Storage based backend
|
||||||
|
|
||||||
|
// AzureStore is a storage backend that uses the AzService interface in order to store uploads in Azure Blob Storage.
|
||||||
|
// It stores the uploads in a container specified in two different BlockBlob: The `[id].info` blobs are used to store the fileinfo in JSON format. The `[id]` blobs without an extension contain the raw binary data uploaded.
|
||||||
|
// If the upload is not finished within a week, the uncommitted blocks will be discarded.
|
||||||
|
|
||||||
|
// Support for setting the default Container access type and Blob access tier depends on your Azure Storage Account and its limits.
|
||||||
|
// More information about Container access types and limits
|
||||||
|
// https://docs.microsoft.com/en-us/azure/storage/blobs/anonymous-read-access-configure?tabs=portal
|
||||||
|
|
||||||
|
// More information about Blob access tiers and limits
|
||||||
|
// https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-performance-tiers
|
||||||
|
// https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#access-tiers-for-block-blob-data
|
||||||
|
|
||||||
|
package azurestore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
|
"github.com/tus/tusd/pkg/handler"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
InfoBlobSuffix string = ".info"
|
||||||
|
MaxBlockBlobSize int64 = azblob.BlockBlobMaxBlocks * azblob.BlockBlobMaxStageBlockBytes
|
||||||
|
MaxBlockBlobChunkSize int64 = azblob.BlockBlobMaxStageBlockBytes
|
||||||
|
)
|
||||||
|
|
||||||
|
type azService struct {
|
||||||
|
BlobAccessTier azblob.AccessTierType
|
||||||
|
ContainerURL *azblob.ContainerURL
|
||||||
|
ContainerName string
|
||||||
|
}
|
||||||
|
|
||||||
|
type AzService interface {
|
||||||
|
NewBlob(ctx context.Context, name string) (AzBlob, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type AzConfig struct {
|
||||||
|
AccountName string
|
||||||
|
AccountKey string
|
||||||
|
BlobAccessTier string
|
||||||
|
ContainerName string
|
||||||
|
ContainerAccessType string
|
||||||
|
Endpoint string
|
||||||
|
}
|
||||||
|
|
||||||
|
type AzBlob interface {
|
||||||
|
// Delete the blob
|
||||||
|
Delete(ctx context.Context) error
|
||||||
|
// Upload the blob
|
||||||
|
Upload(ctx context.Context, body io.ReadSeeker) error
|
||||||
|
// Download the contents of the blob
|
||||||
|
Download(ctx context.Context) ([]byte, error)
|
||||||
|
// Get the offset of the blob and its indexes
|
||||||
|
GetOffset(ctx context.Context) (int64, error)
|
||||||
|
// Commit the uploaded blocks to the BlockBlob
|
||||||
|
Commit(ctx context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type BlockBlob struct {
|
||||||
|
Blob *azblob.BlockBlobURL
|
||||||
|
AccessTier azblob.AccessTierType
|
||||||
|
Indexes []int
|
||||||
|
}
|
||||||
|
|
||||||
|
type InfoBlob struct {
|
||||||
|
Blob *azblob.BlockBlobURL
|
||||||
|
}
|
||||||
|
|
||||||
|
// New Azure service for communication with the Azure BlockBlob Storage API
|
||||||
|
func NewAzureService(config *AzConfig) (AzService, error) {
|
||||||
|
// struct to store your credentials.
|
||||||
|
credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Might be limited by the storage account
|
||||||
|
// "" or default inherits the access type from the Storage Account
|
||||||
|
var containerAccessType azblob.PublicAccessType
|
||||||
|
switch config.ContainerAccessType {
|
||||||
|
case "container":
|
||||||
|
containerAccessType = azblob.PublicAccessContainer
|
||||||
|
case "blob":
|
||||||
|
containerAccessType = azblob.PublicAccessBlob
|
||||||
|
case "":
|
||||||
|
default:
|
||||||
|
containerAccessType = azblob.PublicAccessNone
|
||||||
|
}
|
||||||
|
|
||||||
|
// Does not support the premium access tiers
|
||||||
|
var blobAccessTierType azblob.AccessTierType
|
||||||
|
switch config.BlobAccessTier {
|
||||||
|
case "archive":
|
||||||
|
blobAccessTierType = azblob.AccessTierArchive
|
||||||
|
case "cool":
|
||||||
|
blobAccessTierType = azblob.AccessTierCool
|
||||||
|
case "hot":
|
||||||
|
blobAccessTierType = azblob.AccessTierHot
|
||||||
|
case "":
|
||||||
|
default:
|
||||||
|
blobAccessTierType = azblob.DefaultAccessTier
|
||||||
|
}
|
||||||
|
|
||||||
|
// The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
|
||||||
|
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||||
|
cURL, _ := url.Parse(fmt.Sprintf("%s/%s", config.Endpoint, config.ContainerName))
|
||||||
|
|
||||||
|
// Get the ContainerURL URL
|
||||||
|
containerURL := azblob.NewContainerURL(*cURL, p)
|
||||||
|
// Do not care about response since it will fail if container exists and create if it does not.
|
||||||
|
_, _ = containerURL.Create(context.Background(), azblob.Metadata{}, containerAccessType)
|
||||||
|
|
||||||
|
return &azService{
|
||||||
|
BlobAccessTier: blobAccessTierType,
|
||||||
|
ContainerURL: &containerURL,
|
||||||
|
ContainerName: config.ContainerName,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine if we return an InfoBlob or BlockBlob, based on the name
|
||||||
|
func (service *azService) NewBlob(ctx context.Context, name string) (AzBlob, error) {
|
||||||
|
var fileBlob AzBlob
|
||||||
|
bb := service.ContainerURL.NewBlockBlobURL(name)
|
||||||
|
if strings.HasSuffix(name, InfoBlobSuffix) {
|
||||||
|
fileBlob = &InfoBlob{
|
||||||
|
Blob: &bb,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fileBlob = &BlockBlob{
|
||||||
|
Blob: &bb,
|
||||||
|
Indexes: []int{},
|
||||||
|
AccessTier: service.BlobAccessTier,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fileBlob, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the blockBlob from Azure Blob Storage
|
||||||
|
func (blockBlob *BlockBlob) Delete(ctx context.Context) error {
|
||||||
|
_, err := blockBlob.Blob.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upload a block to Azure Blob Storage and add it to the indexes to be committed after the upload is finished
|
||||||
|
func (blockBlob *BlockBlob) Upload(ctx context.Context, body io.ReadSeeker) error {
|
||||||
|
// Keep track of the indexes
|
||||||
|
var index int
|
||||||
|
if len(blockBlob.Indexes) == 0 {
|
||||||
|
index = 0
|
||||||
|
} else {
|
||||||
|
index = blockBlob.Indexes[len(blockBlob.Indexes)-1] + 1
|
||||||
|
}
|
||||||
|
blockBlob.Indexes = append(blockBlob.Indexes, index)
|
||||||
|
|
||||||
|
_, err := blockBlob.Blob.StageBlock(ctx, blockIDIntToBase64(index), body, azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download the blockBlob from Azure Blob Storage
|
||||||
|
func (blockBlob *BlockBlob) Download(ctx context.Context) (data []byte, err error) {
|
||||||
|
downloadResponse, err := blockBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
||||||
|
|
||||||
|
// If the file does not exist, it will not return an error, but a 404 status and body
|
||||||
|
if downloadResponse != nil && downloadResponse.StatusCode() == 404 {
|
||||||
|
return nil, handler.ErrNotFound
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
// This might occur when the blob is being uploaded, but a block list has not been committed yet
|
||||||
|
if isAzureError(err, "BlobNotFound") {
|
||||||
|
err = handler.ErrNotFound
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
|
||||||
|
downloadedData := bytes.Buffer{}
|
||||||
|
|
||||||
|
_, err = downloadedData.ReadFrom(bodyStream)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return downloadedData.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (blockBlob *BlockBlob) GetOffset(ctx context.Context) (int64, error) {
|
||||||
|
// Get the offset of the file from azure storage
|
||||||
|
// For the blob, show each block (ID and size) that is a committed part of it.
|
||||||
|
var indexes []int
|
||||||
|
var offset int64
|
||||||
|
|
||||||
|
getBlock, err := blockBlob.Blob.GetBlockList(ctx, azblob.BlockListAll, azblob.LeaseAccessConditions{})
|
||||||
|
if err != nil {
|
||||||
|
if isAzureError(err, "BlobNotFound") {
|
||||||
|
err = handler.ErrNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Need committed blocks to be added to offset to know how big the file really is
|
||||||
|
for _, block := range getBlock.CommittedBlocks {
|
||||||
|
offset += int64(block.Size)
|
||||||
|
indexes = append(indexes, blockIDBase64ToInt(block.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Need to get the uncommitted blocks so that we can commit them
|
||||||
|
for _, block := range getBlock.UncommittedBlocks {
|
||||||
|
offset += int64(block.Size)
|
||||||
|
indexes = append(indexes, blockIDBase64ToInt(block.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort the block IDs in ascending order. This is required as Azure returns the block lists alphabetically
|
||||||
|
// and we store the indexes as base64 encoded ints.
|
||||||
|
sort.Ints(indexes)
|
||||||
|
blockBlob.Indexes = indexes
|
||||||
|
|
||||||
|
return offset, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// After all the blocks have been uploaded, we commit the unstaged blocks by sending a Block List
|
||||||
|
func (blockBlob *BlockBlob) Commit(ctx context.Context) error {
|
||||||
|
base64BlockIDs := make([]string, len(blockBlob.Indexes))
|
||||||
|
for index, id := range blockBlob.Indexes {
|
||||||
|
base64BlockIDs[index] = blockIDIntToBase64(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := blockBlob.Blob.CommitBlockList(ctx, base64BlockIDs, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, blockBlob.AccessTier, nil, azblob.ClientProvidedKeyOptions{})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the infoBlob from Azure Blob Storage
|
||||||
|
func (infoBlob *InfoBlob) Delete(ctx context.Context) error {
|
||||||
|
_, err := infoBlob.Blob.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upload the infoBlob to Azure Blob Storage
|
||||||
|
// Because the info file is presumed to be smaller than azblob.BlockBlobMaxUploadBlobBytes (256MiB), we can upload it all in one go
|
||||||
|
// Newly uploaded data will create a new block blob or overwrite the existing one
|
||||||
|
func (infoBlob *InfoBlob) Upload(ctx context.Context, body io.ReadSeeker) error {
|
||||||
|
_, err := infoBlob.Blob.Upload(ctx, body, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download the infoBlob from Azure Blob Storage
|
||||||
|
func (infoBlob *InfoBlob) Download(ctx context.Context) ([]byte, error) {
|
||||||
|
downloadResponse, err := infoBlob.Blob.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
|
||||||
|
|
||||||
|
// If the file does not exist, it will not return an error, but a 404 status and body
|
||||||
|
if downloadResponse != nil && downloadResponse.StatusCode() == 404 {
|
||||||
|
return nil, fmt.Errorf("File %s does not exist", infoBlob.Blob.ToBlockBlobURL())
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if isAzureError(err, "BlobNotFound") {
|
||||||
|
err = handler.ErrNotFound
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 20})
|
||||||
|
downloadedData := bytes.Buffer{}
|
||||||
|
|
||||||
|
_, err = downloadedData.ReadFrom(bodyStream)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return downloadedData.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// infoBlob does not utilise offset, so just return 0, nil
|
||||||
|
func (infoBlob *InfoBlob) GetOffset(ctx context.Context) (int64, error) {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// infoBlob does not have uncommitted blocks, so just return nil
|
||||||
|
func (infoBlob *InfoBlob) Commit(ctx context.Context) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// === Helper Functions ===
|
||||||
|
// These helper functions convert a binary block ID to a base-64 string and vice versa
|
||||||
|
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
|
||||||
|
func blockIDBinaryToBase64(blockID []byte) string {
|
||||||
|
return base64.StdEncoding.EncodeToString(blockID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func blockIDBase64ToBinary(blockID string) []byte {
|
||||||
|
binary, _ := base64.StdEncoding.DecodeString(blockID)
|
||||||
|
return binary
|
||||||
|
}
|
||||||
|
|
||||||
|
// These helper functions convert an int block ID to a base-64 string and vice versa
|
||||||
|
func blockIDIntToBase64(blockID int) string {
|
||||||
|
binaryBlockID := (&[4]byte{})[:] // All block IDs are 4 bytes long
|
||||||
|
binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
|
||||||
|
return blockIDBinaryToBase64(binaryBlockID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func blockIDBase64ToInt(blockID string) int {
|
||||||
|
blockIDBase64ToBinary(blockID)
|
||||||
|
return int(binary.LittleEndian.Uint32(blockIDBase64ToBinary(blockID)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func isAzureError(err error, code string) bool {
|
||||||
|
if err, ok := err.(azblob.StorageError); ok && string(err.ServiceCode()) == code {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
|
@@ -0,0 +1,232 @@
|
||||||
|
package azurestore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/tus/tusd/internal/uid"
|
||||||
|
"github.com/tus/tusd/pkg/handler"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AzureStore struct {
|
||||||
|
Service AzService
|
||||||
|
ObjectPrefix string
|
||||||
|
Container string
|
||||||
|
}
|
||||||
|
|
||||||
|
type AzUpload struct {
|
||||||
|
ID string
|
||||||
|
InfoBlob AzBlob
|
||||||
|
BlockBlob AzBlob
|
||||||
|
InfoHandler *handler.FileInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(service AzService) *AzureStore {
|
||||||
|
return &AzureStore{
|
||||||
|
Service: service,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UseIn sets this store as the core data store in the passed composer and adds
|
||||||
|
// all possible extension to it.
|
||||||
|
func (store AzureStore) UseIn(composer *handler.StoreComposer) {
|
||||||
|
composer.UseCore(store)
|
||||||
|
composer.UseTerminater(store)
|
||||||
|
composer.UseLengthDeferrer(store)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store AzureStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
||||||
|
if info.ID == "" {
|
||||||
|
info.ID = uid.Uid()
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.Size > MaxBlockBlobSize {
|
||||||
|
return nil, fmt.Errorf("azurestore: max upload of %v bytes exceeded MaxBlockBlobSize of %v bytes",
|
||||||
|
info.Size, MaxBlockBlobSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
blockBlob, err := store.Service.NewBlob(ctx, store.keyWithPrefix(info.ID))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
infoFile := store.keyWithPrefix(store.infoPath(info.ID))
|
||||||
|
infoBlob, err := store.Service.NewBlob(ctx, infoFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
info.Storage = map[string]string{
|
||||||
|
"Type": "azurestore",
|
||||||
|
"Container": store.Container,
|
||||||
|
"Key": store.keyWithPrefix(info.ID),
|
||||||
|
}
|
||||||
|
|
||||||
|
azUpload := &AzUpload{
|
||||||
|
ID: info.ID,
|
||||||
|
InfoHandler: &info,
|
||||||
|
InfoBlob: infoBlob,
|
||||||
|
BlockBlob: blockBlob,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = azUpload.writeInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("azurestore: unable to create InfoHandler file:\n%s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return azUpload, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store AzureStore) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
|
||||||
|
info := handler.FileInfo{}
|
||||||
|
infoFile := store.keyWithPrefix(store.infoPath(id))
|
||||||
|
infoBlob, err := store.Service.NewBlob(ctx, infoFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download the info file from Azure Storage
|
||||||
|
data, err := infoBlob.Download(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(data, &info); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if info.Size > MaxBlockBlobSize {
|
||||||
|
return nil, fmt.Errorf("azurestore: max upload of %v bytes exceeded MaxBlockBlobSize of %v bytes",
|
||||||
|
info.Size, MaxBlockBlobSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
blockBlob, err := store.Service.NewBlob(ctx, store.keyWithPrefix(info.ID))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
offset, err := blockBlob.GetOffset(ctx)
|
||||||
|
if err != nil && err != handler.ErrNotFound {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
info.Offset = offset
|
||||||
|
|
||||||
|
return &AzUpload{
|
||||||
|
ID: id,
|
||||||
|
InfoHandler: &info,
|
||||||
|
InfoBlob: infoBlob,
|
||||||
|
BlockBlob: blockBlob,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store AzureStore) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
|
||||||
|
return upload.(*AzUpload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store AzureStore) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
|
||||||
|
return upload.(*AzUpload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *AzUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
||||||
|
r := bufio.NewReader(src)
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
n, err := r.WriteTo(buf)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkSize := int64(binary.Size(buf.Bytes()))
|
||||||
|
if chunkSize > MaxBlockBlobChunkSize {
|
||||||
|
return 0, fmt.Errorf("azurestore: Chunk of size %v too large. Max chunk size is %v", chunkSize, MaxBlockBlobChunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
re := bytes.NewReader(buf.Bytes())
|
||||||
|
err = upload.BlockBlob.Upload(ctx, re)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
upload.InfoHandler.Offset += n
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *AzUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
|
||||||
|
info := handler.FileInfo{}
|
||||||
|
|
||||||
|
if upload.InfoHandler != nil {
|
||||||
|
return *upload.InfoHandler, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := upload.InfoBlob.Download(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(data, &info); err != nil {
|
||||||
|
return info, err
|
||||||
|
}
|
||||||
|
|
||||||
|
upload.InfoHandler = &info
|
||||||
|
return info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the uploaded file from the Azure storage
|
||||||
|
func (upload *AzUpload) GetReader(ctx context.Context) (io.Reader, error) {
|
||||||
|
b, err := upload.BlockBlob.Download(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return bytes.NewReader(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finish the file upload and commit the block list
|
||||||
|
func (upload *AzUpload) FinishUpload(ctx context.Context) error {
|
||||||
|
return upload.BlockBlob.Commit(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *AzUpload) Terminate(ctx context.Context) error {
|
||||||
|
// Delete info file
|
||||||
|
err := upload.InfoBlob.Delete(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete file
|
||||||
|
return upload.BlockBlob.Delete(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *AzUpload) DeclareLength(ctx context.Context, length int64) error {
|
||||||
|
upload.InfoHandler.Size = length
|
||||||
|
upload.InfoHandler.SizeIsDeferred = false
|
||||||
|
return upload.writeInfo(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store AzureStore) infoPath(id string) string {
|
||||||
|
return id + InfoBlobSuffix
|
||||||
|
}
|
||||||
|
|
||||||
|
func (upload *AzUpload) writeInfo(ctx context.Context) error {
|
||||||
|
data, err := json.Marshal(upload.InfoHandler)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := bytes.NewReader(data)
|
||||||
|
return upload.InfoBlob.Upload(ctx, reader)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (store *AzureStore) keyWithPrefix(key string) string {
|
||||||
|
prefix := store.ObjectPrefix
|
||||||
|
if prefix != "" && !strings.HasSuffix(prefix, "/") {
|
||||||
|
prefix += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
return prefix + key
|
||||||
|
}
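Taken together, the pieces above could be wired into a tusd handler roughly as follows. This is a sketch rather than code from this repository: the account name, key and endpoint are placeholders, and error handling is kept minimal. In tusd's own CLI the same wiring is driven by the `-azure-storage`, `-azure-endpoint` and related flags listed earlier.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/tus/tusd/pkg/azurestore"
	"github.com/tus/tusd/pkg/handler"
)

func main() {
	// Placeholder credentials: in tusd itself these come from the
	// AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variables.
	config := &azurestore.AzConfig{
		AccountName:         "devstoreaccount",
		AccountKey:          "secret",
		ContainerName:       "uploads",
		ContainerAccessType: "",
		BlobAccessTier:      "",
		Endpoint:            "https://devstoreaccount.blob.core.windows.net",
	}

	service, err := azurestore.NewAzureService(config)
	if err != nil {
		panic(err)
	}

	store := azurestore.New(service)
	store.Container = config.ContainerName

	composer := handler.NewStoreComposer()
	store.UseIn(composer)

	h, err := handler.NewHandler(handler.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
	})
	if err != nil {
		panic(fmt.Errorf("unable to create handler: %s", err))
	}

	http.Handle("/files/", http.StripPrefix("/files/", h))
	_ = http.ListenAndServe(":8080", nil)
}
```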
|
|
@@ -0,0 +1,146 @@
|
||||||
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
|
// Source: github.com/tus/tusd/pkg/azurestore (interfaces: AzService,AzBlob)
|
||||||
|
|
||||||
|
// Package azurestore_test is a generated GoMock package.
|
||||||
|
package azurestore_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
azurestore "github.com/tus/tusd/pkg/azurestore"
|
||||||
|
io "io"
|
||||||
|
reflect "reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MockAzService is a mock of AzService interface
|
||||||
|
type MockAzService struct {
|
||||||
|
ctrl *gomock.Controller
|
||||||
|
recorder *MockAzServiceMockRecorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockAzServiceMockRecorder is the mock recorder for MockAzService
|
||||||
|
type MockAzServiceMockRecorder struct {
|
||||||
|
mock *MockAzService
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMockAzService creates a new mock instance
|
||||||
|
func NewMockAzService(ctrl *gomock.Controller) *MockAzService {
|
||||||
|
mock := &MockAzService{ctrl: ctrl}
|
||||||
|
mock.recorder = &MockAzServiceMockRecorder{mock}
|
||||||
|
return mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPECT returns an object that allows the caller to indicate expected use
|
||||||
|
func (m *MockAzService) EXPECT() *MockAzServiceMockRecorder {
|
||||||
|
return m.recorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlob mocks base method
|
||||||
|
func (m *MockAzService) NewBlob(arg0 context.Context, arg1 string) (azurestore.AzBlob, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "NewBlob", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(azurestore.AzBlob)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlob indicates an expected call of NewBlob
|
||||||
|
func (mr *MockAzServiceMockRecorder) NewBlob(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlob", reflect.TypeOf((*MockAzService)(nil).NewBlob), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockAzBlob is a mock of AzBlob interface
|
||||||
|
type MockAzBlob struct {
|
||||||
|
ctrl *gomock.Controller
|
||||||
|
recorder *MockAzBlobMockRecorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockAzBlobMockRecorder is the mock recorder for MockAzBlob
|
||||||
|
type MockAzBlobMockRecorder struct {
|
||||||
|
mock *MockAzBlob
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMockAzBlob creates a new mock instance
|
||||||
|
func NewMockAzBlob(ctrl *gomock.Controller) *MockAzBlob {
|
||||||
|
mock := &MockAzBlob{ctrl: ctrl}
|
||||||
|
mock.recorder = &MockAzBlobMockRecorder{mock}
|
||||||
|
return mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPECT returns an object that allows the caller to indicate expected use
|
||||||
|
func (m *MockAzBlob) EXPECT() *MockAzBlobMockRecorder {
|
||||||
|
return m.recorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit mocks base method
|
||||||
|
func (m *MockAzBlob) Commit(arg0 context.Context) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Commit", arg0)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit indicates an expected call of Commit
|
||||||
|
func (mr *MockAzBlobMockRecorder) Commit(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockAzBlob)(nil).Commit), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete mocks base method
|
||||||
|
func (m *MockAzBlob) Delete(arg0 context.Context) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Delete", arg0)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete indicates an expected call of Delete
|
||||||
|
func (mr *MockAzBlobMockRecorder) Delete(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockAzBlob)(nil).Delete), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download mocks base method
|
||||||
|
func (m *MockAzBlob) Download(arg0 context.Context) ([]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Download", arg0)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download indicates an expected call of Download
|
||||||
|
func (mr *MockAzBlobMockRecorder) Download(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockAzBlob)(nil).Download), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOffset mocks base method
|
||||||
|
func (m *MockAzBlob) GetOffset(arg0 context.Context) (int64, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetOffset", arg0)
|
||||||
|
ret0, _ := ret[0].(int64)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOffset indicates an expected call of GetOffset
|
||||||
|
func (mr *MockAzBlobMockRecorder) GetOffset(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOffset", reflect.TypeOf((*MockAzBlob)(nil).GetOffset), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upload mocks base method
|
||||||
|
func (m *MockAzBlob) Upload(arg0 context.Context, arg1 io.ReadSeeker) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Upload", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upload indicates an expected call of Upload
|
||||||
|
func (mr *MockAzBlobMockRecorder) Upload(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockAzBlob)(nil).Upload), arg0, arg1)
|
||||||
|
}
|
|
@@ -0,0 +1,426 @@
|
||||||
|
package azurestore_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/Azure/azure-storage-blob-go/azblob"
|
||||||
|
"github.com/golang/mock/gomock"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/tus/tusd/pkg/azurestore"
|
||||||
|
"github.com/tus/tusd/pkg/handler"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate mockgen -destination=./azurestore_mock_test.go -package=azurestore_test github.com/tus/tusd/pkg/azurestore AzService,AzBlob
|
||||||
|
|
||||||
|
// Test interface implementations
|
||||||
|
var _ handler.DataStore = azurestore.AzureStore{}
|
||||||
|
var _ handler.TerminaterDataStore = azurestore.AzureStore{}
|
||||||
|
var _ handler.LengthDeferrerDataStore = azurestore.AzureStore{}
|
||||||
|
|
||||||
|
const mockID = "123456789abcdefghijklmnopqrstuvwxyz"
|
||||||
|
const mockContainer = "tusd"
|
||||||
|
const mockSize int64 = 4096
|
||||||
|
const mockReaderData = "Hello World"
|
||||||
|
|
||||||
|
var mockTusdInfo = handler.FileInfo{
|
||||||
|
ID: mockID,
|
||||||
|
Size: mockSize,
|
||||||
|
MetaData: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
Storage: map[string]string{
|
||||||
|
"Type": "azurestore",
|
||||||
|
"Container": mockContainer,
|
||||||
|
"Key": mockID,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewUpload(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
data, err := json.Marshal(mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
r := bytes.NewReader(data)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(NewMockAzBlob(mockCtrl), nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.NotNil(upload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewUploadWithPrefix(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
objectPrefix := "/path/to/file/"
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
store.ObjectPrefix = objectPrefix
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
info := mockTusdInfo
|
||||||
|
info.Storage = map[string]string{
|
||||||
|
"Type": "azurestore",
|
||||||
|
"Container": mockContainer,
|
||||||
|
"Key": objectPrefix + mockID,
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := json.Marshal(info)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
r := bytes.NewReader(data)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, objectPrefix+mockID).Return(NewMockAzBlob(mockCtrl), nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, objectPrefix+mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.NewUpload(context.Background(), mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.NotNil(upload)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewUploadTooLargeBlob(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
info := mockTusdInfo
|
||||||
|
info.Size = azurestore.MaxBlockBlobSize + 1
|
||||||
|
|
||||||
|
upload, err := store.NewUpload(ctx, info)
|
||||||
|
assert.Nil(upload)
|
||||||
|
assert.NotNil(err)
|
||||||
|
assert.Contains(err.Error(), "exceeded MaxBlockBlobSize")
|
||||||
|
assert.Contains(err.Error(), "209715200000001")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetUpload(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
blockBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(blockBlob)
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
data, err := json.Marshal(mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
info, err := upload.GetInfo(ctx)
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.NotNil(info)
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetUploadTooLargeBlob(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
info := mockTusdInfo
|
||||||
|
info.Size = azurestore.MaxBlockBlobSize + 1
|
||||||
|
data, err := json.Marshal(info)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(upload)
|
||||||
|
assert.NotNil(err)
|
||||||
|
assert.Contains(err.Error(), "exceeded MaxBlockBlobSize")
|
||||||
|
assert.Contains(err.Error(), "209715200000001")
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetUploadNotFound(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(nil, errors.New(string(azblob.StorageErrorCodeBlobNotFound))).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
_, err := store.GetUpload(context.Background(), mockID)
|
||||||
|
assert.NotNil(err)
|
||||||
|
assert.Equal(err.Error(), "BlobNotFound")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetReader(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
blockBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(blockBlob)
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
data, err := json.Marshal(mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
|
blockBlob.EXPECT().Download(ctx).Return([]byte(mockReaderData), nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
reader, err := upload.GetReader(ctx)
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.NotNil(reader)
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteChunk(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
blockBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(blockBlob)
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
data, err := json.Marshal(mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
var offset int64 = mockSize / 2
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().Upload(ctx, bytes.NewReader([]byte(mockReaderData))).Return(nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
_, err = upload.WriteChunk(ctx, offset, bytes.NewReader([]byte(mockReaderData)))
|
||||||
|
assert.Nil(err)
|
||||||
|
cancel()
|
||||||
|
}
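
The WriteChunk expectations encode two things: the chunk is streamed straight to AzBlob.Upload, and the caller-supplied offset lines up with the offset the store loaded via GetOffset. A hypothetical sketch, reusing the fileInfo type from the GetUpload sketch above (assumes context, fmt and io are imported); the offset check is an assumption, not a quote of the real code:

// countingReader lets the sketch report how many bytes the blob consumed.
type countingReader struct {
    r io.Reader
    n int64
}

func (c *countingReader) Read(p []byte) (int, error) {
    n, err := c.r.Read(p)
    c.n += int64(n)
    return n, err
}

// writeChunk only accepts a chunk at the offset the upload is already at, then streams it to the blob.
func writeChunk(ctx context.Context, blob interface {
    Upload(ctx context.Context, r io.Reader) error
}, info *fileInfo, offset int64, src io.Reader) (int64, error) {
    if offset != info.Offset {
        return 0, fmt.Errorf("azurestore: chunk offset %d does not match upload offset %d", offset, info.Offset)
    }
    cr := &countingReader{r: src}
    if err := blob.Upload(ctx, cr); err != nil {
        return cr.n, err
    }
    info.Offset += cr.n
    return cr.n, nil
}
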
|
||||||
|
|
||||||
|
func TestFinishUpload(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
blockBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(blockBlob)
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
data, err := json.Marshal(mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
var offset int64 = mockSize / 2
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().GetOffset(ctx).Return(offset, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().Commit(ctx).Return(nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = upload.FinishUpload(ctx)
|
||||||
|
assert.Nil(err)
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTerminate(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
blockBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(blockBlob)
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
data, err := json.Marshal(mockTusdInfo)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
|
||||||
|
blockBlob.EXPECT().Delete(ctx).Return(nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsTerminatableUpload(upload).Terminate(ctx)
|
||||||
|
assert.Nil(err)
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeclareLength(t *testing.T) {
|
||||||
|
mockCtrl := gomock.NewController(t)
|
||||||
|
defer mockCtrl.Finish()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
|
service := NewMockAzService(mockCtrl)
|
||||||
|
store := azurestore.New(service)
|
||||||
|
store.Container = mockContainer
|
||||||
|
|
||||||
|
blockBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(blockBlob)
|
||||||
|
|
||||||
|
infoBlob := NewMockAzBlob(mockCtrl)
|
||||||
|
assert.NotNil(infoBlob)
|
||||||
|
|
||||||
|
info := mockTusdInfo
|
||||||
|
info.Size = mockSize * 2
|
||||||
|
|
||||||
|
data, err := json.Marshal(info)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
r := bytes.NewReader(data)
|
||||||
|
|
||||||
|
gomock.InOrder(
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID+".info").Return(infoBlob, nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Download(ctx).Return(data, nil).Times(1),
|
||||||
|
service.EXPECT().NewBlob(ctx, mockID).Return(blockBlob, nil).Times(1),
|
||||||
|
blockBlob.EXPECT().GetOffset(ctx).Return(int64(0), nil).Times(1),
|
||||||
|
infoBlob.EXPECT().Upload(ctx, r).Return(nil).Times(1),
|
||||||
|
)
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, mockID)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
err = store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, mockSize*2)
|
||||||
|
assert.Nil(err)
|
||||||
|
|
||||||
|
info, err = upload.GetInfo(ctx)
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.NotNil(info)
|
||||||
|
assert.Equal(info.Size, mockSize*2)
|
||||||
|
|
||||||
|
cancel()
|
||||||
|
}
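
DeclareLength is exercised by expecting the .info blob to be re-uploaded with the new size already baked in, which is why the test pre-builds r := bytes.NewReader(data) from the enlarged info. A hypothetical sketch of the store side, reusing the fileInfo type from the GetUpload sketch further up (assumes bytes, context, encoding/json and io are imported):

// declareLength updates the stored size and rewrites the .info blob, producing the same
// reader contents the infoBlob.EXPECT().Upload(ctx, r) expectation above is matched against.
func declareLength(ctx context.Context, infoBlob interface {
    Upload(ctx context.Context, r io.Reader) error
}, info *fileInfo, length int64) error {
    info.Size = length
    data, err := json.Marshal(info)
    if err != nil {
        return err
    }
    return infoBlob.Upload(ctx, bytes.NewReader(data))
}
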
|
|
@ -49,9 +49,10 @@ func (store FileStore) UseIn(composer *handler.StoreComposer) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
|
||||||
id := uid.Uid()
|
if info.ID == "" {
|
||||||
binPath := store.binPath(id)
|
info.ID = uid.Uid()
|
||||||
info.ID = id
|
}
|
||||||
|
binPath := store.binPath(info.ID)
|
||||||
info.Storage = map[string]string{
|
info.Storage = map[string]string{
|
||||||
"Type": "filestore",
|
"Type": "filestore",
|
||||||
"Path": binPath,
|
"Path": binPath,
|
||||||
|
@ -65,12 +66,15 @@ func (store FileStore) NewUpload(ctx context.Context, info handler.FileInfo) (ha
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer file.Close()
|
err = file.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
upload := &fileUpload{
|
upload := &fileUpload{
|
||||||
info: info,
|
info: info,
|
||||||
infoPath: store.infoPath(id),
|
infoPath: store.infoPath(info.ID),
|
||||||
binPath: store.binPath(id),
|
binPath: binPath,
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeInfo creates the file by itself if necessary
|
// writeInfo creates the file by itself if necessary
|
||||||
|
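
The net effect of the NewUpload hunk above is that FileStore now honours a caller-supplied upload ID and only generates one when info.ID is empty, and the bin path is derived from that final ID. Illustrative usage (assumes ./uploads exists and the tusd filestore/handler packages are imported):

// Empty ID: the store generates one, exactly as before this change.
store := filestore.New("./uploads")
ctx := context.Background()
upload, err := store.NewUpload(ctx, handler.FileInfo{Size: 42})

// Pre-set ID: the store keeps it, so the files land at ./uploads/my-upload and ./uploads/my-upload.info.
upload, err = store.NewUpload(ctx, handler.FileInfo{ID: "my-upload", Size: 42})
_ = upload
_ = err
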
@ -86,6 +90,10 @@ func (store FileStore) GetUpload(ctx context.Context, id string) (handler.Upload
|
||||||
info := handler.FileInfo{}
|
info := handler.FileInfo{}
|
||||||
data, err := ioutil.ReadFile(store.infoPath(id))
|
data, err := ioutil.ReadFile(store.infoPath(id))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
// Interpret os.ErrNotExist as 404 Not Found
|
||||||
|
err = handler.ErrNotFound
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(data, &info); err != nil {
|
if err := json.Unmarshal(data, &info); err != nil {
|
||||||
|
@ -96,6 +104,10 @@ func (store FileStore) GetUpload(ctx context.Context, id string) (handler.Upload
|
||||||
infoPath := store.infoPath(id)
|
infoPath := store.infoPath(id)
|
||||||
stat, err := os.Stat(binPath)
|
stat, err := os.Stat(binPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
// Interpret os.ErrNotExist as 404 Not Found
|
||||||
|
err = handler.ErrNotFound
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
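
With os.ErrNotExist mapped to handler.ErrNotFound in both of the hunks above, a missing upload is reported through the store-agnostic sentinel rather than a raw *os.PathError, which is what the new TestNotFound further down asserts. Caller-side it looks like this (illustrative fragment; assumes the handler package and a context are in scope):

upload, err := store.GetUpload(ctx, "upload-that-does-not-exist")
if err == handler.ErrNotFound {
    // The HTTP layer can treat this as 404 Not Found without knowing which store is in use.
}
_ = upload
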
@ -152,16 +164,7 @@ func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.R
|
||||||
|
|
||||||
n, err := io.Copy(file, src)
|
n, err := io.Copy(file, src)
|
||||||
|
|
||||||
// If the HTTP PATCH request gets interrupted in the middle (e.g. because
|
|
||||||
// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
|
|
||||||
// However, for FileStore it's not important whether the stream has ended
|
|
||||||
// on purpose or accidentally.
|
|
||||||
if err == io.ErrUnexpectedEOF {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
upload.info.Offset += n
|
upload.info.Offset += n
|
||||||
|
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,6 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -74,7 +73,7 @@ func TestFilestore(t *testing.T) {
|
||||||
// Test if upload is deleted
|
// Test if upload is deleted
|
||||||
upload, err = store.GetUpload(ctx, info.ID)
|
upload, err = store.GetUpload(ctx, info.ID)
|
||||||
a.Equal(nil, upload)
|
a.Equal(nil, upload)
|
||||||
a.True(os.IsNotExist(err))
|
a.Equal(handler.ErrNotFound, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMissingPath(t *testing.T) {
|
func TestMissingPath(t *testing.T) {
|
||||||
|
@ -89,6 +88,18 @@ func TestMissingPath(t *testing.T) {
|
||||||
a.Equal(nil, upload)
|
a.Equal(nil, upload)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNotFound(t *testing.T) {
|
||||||
|
a := assert.New(t)
|
||||||
|
|
||||||
|
store := FileStore{"./path"}
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
upload, err := store.GetUpload(ctx, "upload-that-does-not-exist")
|
||||||
|
a.Error(err)
|
||||||
|
a.Equal(handler.ErrNotFound, err)
|
||||||
|
a.Equal(nil, upload)
|
||||||
|
}
|
||||||
|
|
||||||
func TestConcatUploads(t *testing.T) {
|
func TestConcatUploads(t *testing.T) {
|
||||||
a := assert.New(t)
|
a := assert.New(t)
|
||||||
|
|
||||||
|
|
|
@ -77,10 +77,10 @@ type GCSService struct {
|
||||||
Client *storage.Client
|
Client *storage.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGCSService returns a GCSSerivce object given a GCloud service account file path.
|
// NewGCSService returns a GCSService object given a GCloud service account file path.
|
||||||
func NewGCSService(filename string) (*GCSService, error) {
|
func NewGCSService(filename string) (*GCSService, error) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithServiceAccountFile(filename))
|
client, err := storage.NewClient(ctx, option.WithCredentialsFile(filename))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
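
option.WithServiceAccountFile is deprecated in google.golang.org/api in favour of option.WithCredentialsFile, which the hunk above switches to; construction is otherwise unchanged. A minimal sketch (the path is a placeholder):

ctx := context.Background()
client, err := storage.NewClient(ctx, option.WithCredentialsFile("/path/to/service-account.json"))
if err != nil {
    return nil, err
}
return &GCSService{Client: client}, nil
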
@ -129,6 +129,10 @@ const COMPOSE_RETRIES = 3
|
||||||
|
|
||||||
// Compose takes a bucket name, a list of initial source names, and a destination string to compose multiple GCS objects together
|
// Compose takes a bucket name, a list of initial source names, and a destination string to compose multiple GCS objects together
|
||||||
func (service *GCSService) compose(ctx context.Context, bucket string, srcs []string, dst string) error {
|
func (service *GCSService) compose(ctx context.Context, bucket string, srcs []string, dst string) error {
|
||||||
|
if len(srcs) < 1 {
|
||||||
|
return fmt.Errorf("empty srcs passed to compose for bucket: %s dest: %s", bucket, dst)
|
||||||
|
}
|
||||||
|
|
||||||
dstParams := GCSObjectParams{
|
dstParams := GCSObjectParams{
|
||||||
Bucket: bucket,
|
Bucket: bucket,
|
||||||
ID: dst,
|
ID: dst,
|
||||||
|
@ -269,7 +273,7 @@ func (service *GCSService) ReadObject(ctx context.Context, params GCSObjectParam
|
||||||
return r, nil
|
return r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetObjectMetadata reads a GCSObjectParams and a map of metedata, returning a nil on sucess and an error otherwise
|
// SetObjectMetadata reads a GCSObjectParams and a map of metadata, returning a nil on success and an error otherwise
|
||||||
func (service *GCSService) SetObjectMetadata(ctx context.Context, params GCSObjectParams, metadata map[string]string) error {
|
func (service *GCSService) SetObjectMetadata(ctx context.Context, params GCSObjectParams, metadata map[string]string) error {
|
||||||
attrs := storage.ObjectAttrsToUpdate{
|
attrs := storage.ObjectAttrsToUpdate{
|
||||||
Metadata: metadata,
|
Metadata: metadata,
|
||||||
|
@ -324,11 +328,11 @@ func (service *GCSService) ComposeFrom(ctx context.Context, objSrcs []*storage.O
|
||||||
return dstAttrs.CRC32C, nil
|
return dstAttrs.CRC32C, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FilterObjects retuns a list of GCS object IDs that match the passed GCSFilterParams.
|
// FilterObjects returns a list of GCS object IDs that match the passed GCSFilterParams.
|
||||||
// It expects GCS objects to be of the format [uid]_[chunk_idx] where chunk_idx
|
// It expects GCS objects to be of the format [uid]_[chunk_idx] where chunk_idx
|
||||||
// is zero based. The format [uid]_tmp_[recursion_lvl]_[chunk_idx] can also be used to
|
// is zero based. The format [uid]_tmp_[recursion_lvl]_[chunk_idx] can also be used to
|
||||||
// specify objects that have been composed in a recursive fashion. These different formats
|
// specify objects that have been composed in a recursive fashion. These different formats
|
||||||
// are usedd to ensure that objects are composed in the correct order.
|
// are used to ensure that objects are composed in the correct order.
|
||||||
func (service *GCSService) FilterObjects(ctx context.Context, params GCSFilterParams) ([]string, error) {
|
func (service *GCSService) FilterObjects(ctx context.Context, params GCSFilterParams) ([]string, error) {
|
||||||
bkt := service.Client.Bucket(params.Bucket)
|
bkt := service.Client.Bucket(params.Bucket)
|
||||||
q := storage.Query{
|
q := storage.Query{
|
||||||
|
@ -351,7 +355,11 @@ loop:
|
||||||
if strings.HasSuffix(objAttrs.Name, "info") {
|
if strings.HasSuffix(objAttrs.Name, "info") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
split := strings.Split(objAttrs.Name, "_")
|
|
||||||
|
fileNameParts := strings.Split(objAttrs.Name, "/")
|
||||||
|
fileName := fileNameParts[len(fileNameParts)-1]
|
||||||
|
|
||||||
|
split := strings.Split(fileName, "_")
|
||||||
|
|
||||||
// If the object name does not split on "_", we have a composed object.
|
// If the object name does not split on "_", we have a composed object.
|
||||||
// If the object name splits on "_" in to four pieces we
|
// If the object name splits on "_" in to four pieces we
|
||||||
|
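
Why the extra split: with a store prefix the object can be named e.g. "test_directory/test-prefix_1", and splitting the full name on "_" would also split inside the directory part. Taking only the final path segment first keeps the chunk-index parsing intact. Standalone illustration (assumes fmt and strings are imported):

objName := "test_directory/test-prefix_1"

fileNameParts := strings.Split(objName, "/")
fileName := fileNameParts[len(fileNameParts)-1] // "test-prefix_1"

split := strings.Split(fileName, "_") // ["test-prefix", "1"]
fmt.Println(split[len(split)-1])      // "1" -> chunk index 1
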
@ -384,6 +392,6 @@ loop:
|
||||||
|
|
||||||
names[idx] = objAttrs.Name
|
names[idx] = objAttrs.Name
|
||||||
}
|
}
|
||||||
return names, nil
|
|
||||||
|
|
||||||
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,6 +3,7 @@ package gcsstore_test
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"net/http"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"gopkg.in/h2non/gock.v1"
|
"gopkg.in/h2non/gock.v1"
|
||||||
|
@ -23,7 +24,7 @@ type googleBucketResponse struct {
|
||||||
func TestGetObjectSize(t *testing.T) {
|
func TestGetObjectSize(t *testing.T) {
|
||||||
defer gock.Off()
|
defer gock.Off()
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test-name").
|
Get("/storage/v1/b/test-bucket/o/test-name").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("projection", "full").
|
MatchParam("projection", "full").
|
||||||
|
@ -39,7 +40,9 @@ func TestGetObjectSize(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
// We need to explicitly configure the GCS client to use the default HTTP client,
|
||||||
|
// otherwise gock cannot intercept the HTTP requests.
|
||||||
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
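
The reason for option.WithHTTPClient(http.DefaultClient) throughout these tests: by default gock only intercepts requests going through http.DefaultTransport, while the GCS client otherwise builds its own authenticated transport. The recurring pattern, condensed into a fragment from inside a test function (endpoint and bucket as in the stubs above):

defer gock.Off()

gock.New("https://storage.googleapis.com").
    Get("/storage/v1/b/test-bucket/o/test-name").
    Reply(200).
    JSON(map[string]string{})

ctx := context.Background()
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
if err != nil {
    t.Fatal(err)
}
_ = client
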
@ -67,7 +70,7 @@ func TestGetObjectSize(t *testing.T) {
|
||||||
func TestDeleteObjectWithFilter(t *testing.T) {
|
func TestDeleteObjectWithFilter(t *testing.T) {
|
||||||
defer gock.Off()
|
defer gock.Off()
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o").
|
Get("/storage/v1/b/test-bucket/o").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("pageToken", "").
|
MatchParam("pageToken", "").
|
||||||
|
@ -85,7 +88,7 @@ func TestDeleteObjectWithFilter(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -110,47 +113,47 @@ func TestDeleteObjectWithFilter(t *testing.T) {
|
||||||
func TestComposeObjects(t *testing.T) {
|
func TestComposeObjects(t *testing.T) {
|
||||||
defer gock.Off()
|
defer gock.Off()
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test1").
|
Get("/storage/v1/b/test-bucket/o/test1").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("projection", "full").
|
MatchParam("projection", "full").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test2").
|
Get("/storage/v1/b/test-bucket/o/test2").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("projection", "full").
|
MatchParam("projection", "full").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test3").
|
Get("/storage/v1/b/test-bucket/o/test3").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("projection", "full").
|
MatchParam("projection", "full").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test1").
|
Get("/storage/v1/b/test-bucket/o/test1").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("projection", "full").
|
MatchParam("projection", "full").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Post("/storage/v1/b/test-bucket/o/test_all/compose").
|
Post("/storage/v1/b/test-bucket/o/test_all/compose").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test_all").
|
Get("/storage/v1/b/test-bucket/o/test_all").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o").
|
Get("/storage/v1/b/test-bucket/o").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("delimiter", "").
|
MatchParam("delimiter", "").
|
||||||
|
@ -170,7 +173,7 @@ func TestComposeObjects(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -192,10 +195,34 @@ func TestComposeObjects(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestComposeNoObjects(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
service := GCSService{
|
||||||
|
Client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = service.ComposeObjects(ctx, GCSComposeParams{
|
||||||
|
Bucket: "test-bucket",
|
||||||
|
Sources: []string{},
|
||||||
|
Destination: "test_all",
|
||||||
|
})
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("Error: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestGetObjectAttrs(t *testing.T) {
|
func TestGetObjectAttrs(t *testing.T) {
|
||||||
defer gock.Off()
|
defer gock.Off()
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o/test-name").
|
Get("/storage/v1/b/test-bucket/o/test-name").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("projection", "full").
|
MatchParam("projection", "full").
|
||||||
|
@ -211,7 +238,7 @@ func TestGetObjectAttrs(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -254,7 +281,7 @@ func TestReadObject(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -291,7 +318,7 @@ func TestSetObjectMetadata(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -329,7 +356,7 @@ func TestDeleteObject(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -360,15 +387,15 @@ func TestWriteObject(t *testing.T) {
|
||||||
"expiry_date": "1425333671141",
|
"expiry_date": "1425333671141",
|
||||||
})
|
})
|
||||||
|
|
||||||
gock.New("https://googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Post("/upload/storage/v1/b/test-bucket/o").
|
Post("/upload/storage/v1/b/test-bucket/o").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("key", "foo").
|
MatchParam("name", "test-name").
|
||||||
Reply(200).
|
Reply(200).
|
||||||
JSON(map[string]string{})
|
JSON(map[string]string{})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -419,7 +446,7 @@ func TestComposeFrom(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
@ -447,10 +474,10 @@ func TestFilterObject(t *testing.T) {
|
||||||
defer gock.Off()
|
defer gock.Off()
|
||||||
|
|
||||||
resp := googleBucketResponse{[]googleObjectResponse{
|
resp := googleBucketResponse{[]googleObjectResponse{
|
||||||
googleObjectResponse{Name: "test-prefix_1"},
|
googleObjectResponse{Name: "test_directory/test-prefix_1"},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
gock.New("https://www.googleapis.com").
|
gock.New("https://storage.googleapis.com").
|
||||||
Get("/storage/v1/b/test-bucket/o").
|
Get("/storage/v1/b/test-bucket/o").
|
||||||
MatchParam("alt", "json").
|
MatchParam("alt", "json").
|
||||||
MatchParam("pageToken", "").
|
MatchParam("pageToken", "").
|
||||||
|
@ -468,7 +495,7 @@ func TestFilterObject(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := storage.NewClient(ctx, option.WithAPIKey("foo"))
|
client, err := storage.NewClient(ctx, option.WithHTTPClient(http.DefaultClient), option.WithAPIKey("foo"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
|
|
|
@ -3,7 +3,7 @@
|
||||||
// GCSStore is a storage backend that uses the GCSAPI interface in order to store uploads
|
// GCSStore is a storage backend that uses the GCSAPI interface in order to store uploads
|
||||||
// on GCS. Uploads will be represented by two files in GCS; the data file will be stored
|
// on GCS. Uploads will be represented by two files in GCS; the data file will be stored
|
||||||
// as an extensionless object [uid] and the JSON info file will stored as [uid].info.
|
// as an extensionless object [uid] and the JSON info file will stored as [uid].info.
|
||||||
// In order to store uploads on GCS, make sure to specifiy the appropriate Google service
|
// In order to store uploads on GCS, make sure to specify the appropriate Google service
|
||||||
// account file path in the GCS_SERVICE_ACCOUNT_FILE environment variable. Also make sure that
|
// account file path in the GCS_SERVICE_ACCOUNT_FILE environment variable. Also make sure that
|
||||||
// this service account file has the "https://www.googleapis.com/auth/devstorage.read_write"
|
// this service account file has the "https://www.googleapis.com/auth/devstorage.read_write"
|
||||||
// scope enabled so you can read and write data to the storage buckets associated with the
|
// scope enabled so you can read and write data to the storage buckets associated with the
|
||||||
|
@ -163,7 +163,7 @@ func (upload gcsUpload) GetInfo(ctx context.Context) (handler.FileInfo, error) {
|
||||||
return info, err
|
return info, err
|
||||||
}
|
}
|
||||||
|
|
||||||
prefix := fmt.Sprintf("%s", store.keyWithPrefix(id))
|
prefix := store.keyWithPrefix(id)
|
||||||
filterParams := GCSFilterParams{
|
filterParams := GCSFilterParams{
|
||||||
Bucket: store.Bucket,
|
Bucket: store.Bucket,
|
||||||
Prefix: prefix,
|
Prefix: prefix,
|
||||||
|
@ -270,6 +270,10 @@ func (upload gcsUpload) FinishUpload(ctx context.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(names) == 0 {
|
||||||
|
return fmt.Errorf("no GCS objects found with FilterObjects %+v", filterParams)
|
||||||
|
}
|
||||||
|
|
||||||
composeParams := GCSComposeParams{
|
composeParams := GCSComposeParams{
|
||||||
Bucket: store.Bucket,
|
Bucket: store.Bucket,
|
||||||
Destination: store.keyWithPrefix(id),
|
Destination: store.keyWithPrefix(id),
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
// Automatically generated by MockGen. DO NOT EDIT!
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
// Source: github.com/tus/tusd/pkg/gcsstore (interfaces: GCSReader,GCSAPI)
|
// Source: github.com/tus/tusd/pkg/gcsstore (interfaces: GCSReader,GCSAPI)
|
||||||
|
|
||||||
|
// Package gcsstore_test is a generated GoMock package.
|
||||||
package gcsstore_test
|
package gcsstore_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -8,181 +9,238 @@ import (
|
||||||
gomock "github.com/golang/mock/gomock"
|
gomock "github.com/golang/mock/gomock"
|
||||||
gcsstore "github.com/tus/tusd/pkg/gcsstore"
|
gcsstore "github.com/tus/tusd/pkg/gcsstore"
|
||||||
io "io"
|
io "io"
|
||||||
|
reflect "reflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Mock of GCSReader interface
|
// MockGCSReader is a mock of GCSReader interface
|
||||||
type MockGCSReader struct {
|
type MockGCSReader struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *_MockGCSReaderRecorder
|
recorder *MockGCSReaderMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recorder for MockGCSReader (not exported)
|
// MockGCSReaderMockRecorder is the mock recorder for MockGCSReader
|
||||||
type _MockGCSReaderRecorder struct {
|
type MockGCSReaderMockRecorder struct {
|
||||||
mock *MockGCSReader
|
mock *MockGCSReader
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewMockGCSReader creates a new mock instance
|
||||||
func NewMockGCSReader(ctrl *gomock.Controller) *MockGCSReader {
|
func NewMockGCSReader(ctrl *gomock.Controller) *MockGCSReader {
|
||||||
mock := &MockGCSReader{ctrl: ctrl}
|
mock := &MockGCSReader{ctrl: ctrl}
|
||||||
mock.recorder = &_MockGCSReaderRecorder{mock}
|
mock.recorder = &MockGCSReaderMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSReader) EXPECT() *_MockGCSReaderRecorder {
|
// EXPECT returns an object that allows the caller to indicate expected use
|
||||||
return _m.recorder
|
func (m *MockGCSReader) EXPECT() *MockGCSReaderMockRecorder {
|
||||||
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSReader) Close() error {
|
// Close mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "Close")
|
func (m *MockGCSReader) Close() error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Close")
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSReaderRecorder) Close() *gomock.Call {
|
// Close indicates an expected call of Close
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Close")
|
func (mr *MockGCSReaderMockRecorder) Close() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockGCSReader)(nil).Close))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSReader) ContentType() string {
|
// ContentType mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "ContentType")
|
func (m *MockGCSReader) ContentType() string {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ContentType")
|
||||||
ret0, _ := ret[0].(string)
|
ret0, _ := ret[0].(string)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSReaderRecorder) ContentType() *gomock.Call {
|
// ContentType indicates an expected call of ContentType
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "ContentType")
|
func (mr *MockGCSReaderMockRecorder) ContentType() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContentType", reflect.TypeOf((*MockGCSReader)(nil).ContentType))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSReader) Read(_param0 []byte) (int, error) {
|
// Read mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "Read", _param0)
|
func (m *MockGCSReader) Read(arg0 []byte) (int, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Read", arg0)
|
||||||
ret0, _ := ret[0].(int)
|
ret0, _ := ret[0].(int)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSReaderRecorder) Read(arg0 interface{}) *gomock.Call {
|
// Read indicates an expected call of Read
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Read", arg0)
|
func (mr *MockGCSReaderMockRecorder) Read(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockGCSReader)(nil).Read), arg0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSReader) Remain() int64 {
|
// Remain mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "Remain")
|
func (m *MockGCSReader) Remain() int64 {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Remain")
|
||||||
ret0, _ := ret[0].(int64)
|
ret0, _ := ret[0].(int64)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSReaderRecorder) Remain() *gomock.Call {
|
// Remain indicates an expected call of Remain
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Remain")
|
func (mr *MockGCSReaderMockRecorder) Remain() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remain", reflect.TypeOf((*MockGCSReader)(nil).Remain))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSReader) Size() int64 {
|
// Size mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "Size")
|
func (m *MockGCSReader) Size() int64 {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Size")
|
||||||
ret0, _ := ret[0].(int64)
|
ret0, _ := ret[0].(int64)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSReaderRecorder) Size() *gomock.Call {
|
// Size indicates an expected call of Size
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "Size")
|
func (mr *MockGCSReaderMockRecorder) Size() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockGCSReader)(nil).Size))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mock of GCSAPI interface
|
// MockGCSAPI is a mock of GCSAPI interface
|
||||||
type MockGCSAPI struct {
|
type MockGCSAPI struct {
|
||||||
ctrl *gomock.Controller
|
ctrl *gomock.Controller
|
||||||
recorder *_MockGCSAPIRecorder
|
recorder *MockGCSAPIMockRecorder
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recorder for MockGCSAPI (not exported)
|
// MockGCSAPIMockRecorder is the mock recorder for MockGCSAPI
|
||||||
type _MockGCSAPIRecorder struct {
|
type MockGCSAPIMockRecorder struct {
|
||||||
mock *MockGCSAPI
|
mock *MockGCSAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewMockGCSAPI creates a new mock instance
|
||||||
func NewMockGCSAPI(ctrl *gomock.Controller) *MockGCSAPI {
|
func NewMockGCSAPI(ctrl *gomock.Controller) *MockGCSAPI {
|
||||||
mock := &MockGCSAPI{ctrl: ctrl}
|
mock := &MockGCSAPI{ctrl: ctrl}
|
||||||
mock.recorder = &_MockGCSAPIRecorder{mock}
|
mock.recorder = &MockGCSAPIMockRecorder{mock}
|
||||||
return mock
|
return mock
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) EXPECT() *_MockGCSAPIRecorder {
|
// EXPECT returns an object that allows the caller to indicate expected use
|
||||||
return _m.recorder
|
func (m *MockGCSAPI) EXPECT() *MockGCSAPIMockRecorder {
|
||||||
|
return m.recorder
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) ComposeObjects(_param0 context.Context, _param1 gcsstore.GCSComposeParams) error {
|
// ComposeObjects mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "ComposeObjects", _param0, _param1)
|
func (m *MockGCSAPI) ComposeObjects(arg0 context.Context, arg1 gcsstore.GCSComposeParams) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ComposeObjects", arg0, arg1)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) ComposeObjects(arg0, arg1 interface{}) *gomock.Call {
|
// ComposeObjects indicates an expected call of ComposeObjects
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "ComposeObjects", arg0, arg1)
|
func (mr *MockGCSAPIMockRecorder) ComposeObjects(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComposeObjects", reflect.TypeOf((*MockGCSAPI)(nil).ComposeObjects), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) DeleteObject(_param0 context.Context, _param1 gcsstore.GCSObjectParams) error {
|
// DeleteObject mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "DeleteObject", _param0, _param1)
|
func (m *MockGCSAPI) DeleteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "DeleteObject", arg0, arg1)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call {
|
// DeleteObject indicates an expected call of DeleteObject
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteObject", arg0, arg1)
|
func (mr *MockGCSAPIMockRecorder) DeleteObject(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObject), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) DeleteObjectsWithFilter(_param0 context.Context, _param1 gcsstore.GCSFilterParams) error {
|
// DeleteObjectsWithFilter mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "DeleteObjectsWithFilter", _param0, _param1)
|
func (m *MockGCSAPI) DeleteObjectsWithFilter(arg0 context.Context, arg1 gcsstore.GCSFilterParams) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "DeleteObjectsWithFilter", arg0, arg1)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) DeleteObjectsWithFilter(arg0, arg1 interface{}) *gomock.Call {
|
// DeleteObjectsWithFilter indicates an expected call of DeleteObjectsWithFilter
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "DeleteObjectsWithFilter", arg0, arg1)
|
func (mr *MockGCSAPIMockRecorder) DeleteObjectsWithFilter(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithFilter", reflect.TypeOf((*MockGCSAPI)(nil).DeleteObjectsWithFilter), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) FilterObjects(_param0 context.Context, _param1 gcsstore.GCSFilterParams) ([]string, error) {
|
// FilterObjects mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "FilterObjects", _param0, _param1)
|
func (m *MockGCSAPI) FilterObjects(arg0 context.Context, arg1 gcsstore.GCSFilterParams) ([]string, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "FilterObjects", arg0, arg1)
|
||||||
ret0, _ := ret[0].([]string)
|
ret0, _ := ret[0].([]string)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) FilterObjects(arg0, arg1 interface{}) *gomock.Call {
|
// FilterObjects indicates an expected call of FilterObjects
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "FilterObjects", arg0, arg1)
|
func (mr *MockGCSAPIMockRecorder) FilterObjects(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterObjects", reflect.TypeOf((*MockGCSAPI)(nil).FilterObjects), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) GetObjectSize(_param0 context.Context, _param1 gcsstore.GCSObjectParams) (int64, error) {
|
// GetObjectSize mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "GetObjectSize", _param0, _param1)
|
func (m *MockGCSAPI) GetObjectSize(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (int64, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetObjectSize", arg0, arg1)
|
||||||
ret0, _ := ret[0].(int64)
|
ret0, _ := ret[0].(int64)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) GetObjectSize(arg0, arg1 interface{}) *gomock.Call {
|
// GetObjectSize indicates an expected call of GetObjectSize
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "GetObjectSize", arg0, arg1)
|
func (mr *MockGCSAPIMockRecorder) GetObjectSize(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectSize", reflect.TypeOf((*MockGCSAPI)(nil).GetObjectSize), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) ReadObject(_param0 context.Context, _param1 gcsstore.GCSObjectParams) (gcsstore.GCSReader, error) {
|
// ReadObject mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "ReadObject", _param0, _param1)
|
func (m *MockGCSAPI) ReadObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams) (gcsstore.GCSReader, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ReadObject", arg0, arg1)
|
||||||
ret0, _ := ret[0].(gcsstore.GCSReader)
|
ret0, _ := ret[0].(gcsstore.GCSReader)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) ReadObject(arg0, arg1 interface{}) *gomock.Call {
|
// ReadObject indicates an expected call of ReadObject
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "ReadObject", arg0, arg1)
|
func (mr *MockGCSAPIMockRecorder) ReadObject(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadObject", reflect.TypeOf((*MockGCSAPI)(nil).ReadObject), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) SetObjectMetadata(_param0 context.Context, _param1 gcsstore.GCSObjectParams, _param2 map[string]string) error {
|
// SetObjectMetadata mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "SetObjectMetadata", _param0, _param1, _param2)
|
func (m *MockGCSAPI) SetObjectMetadata(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 map[string]string) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "SetObjectMetadata", arg0, arg1, arg2)
|
||||||
ret0, _ := ret[0].(error)
|
ret0, _ := ret[0].(error)
|
||||||
return ret0
|
return ret0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) SetObjectMetadata(arg0, arg1, arg2 interface{}) *gomock.Call {
|
// SetObjectMetadata indicates an expected call of SetObjectMetadata
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "SetObjectMetadata", arg0, arg1, arg2)
|
func (mr *MockGCSAPIMockRecorder) SetObjectMetadata(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetObjectMetadata", reflect.TypeOf((*MockGCSAPI)(nil).SetObjectMetadata), arg0, arg1, arg2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_m *MockGCSAPI) WriteObject(_param0 context.Context, _param1 gcsstore.GCSObjectParams, _param2 io.Reader) (int64, error) {
|
// WriteObject mocks base method
|
||||||
ret := _m.ctrl.Call(_m, "WriteObject", _param0, _param1, _param2)
|
func (m *MockGCSAPI) WriteObject(arg0 context.Context, arg1 gcsstore.GCSObjectParams, arg2 io.Reader) (int64, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "WriteObject", arg0, arg1, arg2)
|
||||||
ret0, _ := ret[0].(int64)
|
ret0, _ := ret[0].(int64)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_mr *_MockGCSAPIRecorder) WriteObject(arg0, arg1, arg2 interface{}) *gomock.Call {
|
// WriteObject indicates an expected call of WriteObject
|
||||||
return _mr.mock.ctrl.RecordCall(_mr.mock, "WriteObject", arg0, arg1, arg2)
|
func (mr *MockGCSAPIMockRecorder) WriteObject(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteObject", reflect.TypeOf((*MockGCSAPI)(nil).WriteObject), arg0, arg1, arg2)
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,7 +15,7 @@ import (
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/pkg/handler"
|
||||||
)
|
)
|
||||||
|
|
||||||
// go:generate mockgen -destination=./gcsstore_mock_test.go -package=gcsstore_test github.com/tus/tusd/pkg/gcsstore GCSReader,GCSAPI
|
//go:generate mockgen -destination=./gcsstore_mock_test.go -package=gcsstore_test github.com/tus/tusd/pkg/gcsstore GCSReader,GCSAPI
|
||||||
|
|
||||||
const mockID = "123456789abcdefghijklmnopqrstuvwxyz"
|
const mockID = "123456789abcdefghijklmnopqrstuvwxyz"
|
||||||
const mockBucket = "bucket"
|
const mockBucket = "bucket"
|
||||||
|
@ -146,7 +146,7 @@ func TestGetInfo(t *testing.T) {
|
||||||
|
|
||||||
filterParams := gcsstore.GCSFilterParams{
|
filterParams := gcsstore.GCSFilterParams{
|
||||||
Bucket: store.Bucket,
|
Bucket: store.Bucket,
|
||||||
Prefix: fmt.Sprintf("%s", mockID),
|
Prefix: mockID,
|
||||||
}
|
}
|
||||||
|
|
||||||
mockObjectParams0 := gcsstore.GCSObjectParams{
|
mockObjectParams0 := gcsstore.GCSObjectParams{
|
||||||
|
@ -319,7 +319,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
|
|
||||||
filterParams2 := gcsstore.GCSFilterParams{
|
filterParams2 := gcsstore.GCSFilterParams{
|
||||||
Bucket: store.Bucket,
|
Bucket: store.Bucket,
|
||||||
Prefix: fmt.Sprintf("%s", mockID),
|
Prefix: mockID,
|
||||||
}
|
}
|
||||||
|
|
||||||
composeParams := gcsstore.GCSComposeParams{
|
composeParams := gcsstore.GCSComposeParams{
|
||||||
|
@ -360,7 +360,7 @@ func TestFinishUpload(t *testing.T) {
|
||||||
|
|
||||||
objectParams := gcsstore.GCSObjectParams{
|
objectParams := gcsstore.GCSObjectParams{
|
||||||
Bucket: store.Bucket,
|
Bucket: store.Bucket,
|
||||||
ID: fmt.Sprintf("%s", mockID),
|
ID: mockID,
|
||||||
}
|
}
|
||||||
|
|
||||||
metadata := map[string]string{
|
metadata := map[string]string{
|
||||||
|
@ -394,39 +394,6 @@ func TestFinishUpload(t *testing.T) {
|
||||||
cancel()
|
cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
var mockTusdChunk0InfoJson = fmt.Sprintf(`{"ID":"%s","Size":%d,"Offset":%d,"MetaData":{"foo":"bar"}}`, mockID, mockSize, mockSize/3)
|
|
||||||
var mockTusdChunk1Info = handler.FileInfo{
|
|
||||||
ID: mockID,
|
|
||||||
Size: mockSize,
|
|
||||||
Offset: 455,
|
|
||||||
MetaData: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
type MockWriteChunkReader struct{}
|
|
||||||
|
|
||||||
func (r MockWriteChunkReader) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r MockWriteChunkReader) ContentType() string {
|
|
||||||
return "text/plain; charset=utf-8"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r MockWriteChunkReader) Read(p []byte) (int, error) {
|
|
||||||
copy(p, mockTusdChunk0InfoJson)
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r MockWriteChunkReader) Remain() int64 {
|
|
||||||
return int64(len(mockTusdChunk0InfoJson))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r MockWriteChunkReader) Size() int64 {
|
|
||||||
return int64(len(mockTusdChunk0InfoJson))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteChunk(t *testing.T) {
|
func TestWriteChunk(t *testing.T) {
|
||||||
mockCtrl := gomock.NewController(t)
|
mockCtrl := gomock.NewController(t)
|
||||||
defer mockCtrl.Finish()
|
defer mockCtrl.Finish()
|
||||||
|
@ -459,14 +426,12 @@ func TestWriteChunk(t *testing.T) {
|
||||||
service.EXPECT().WriteObject(ctx, writeObjectParams, rGet).Return(int64(len(mockReaderData)), nil),
|
service.EXPECT().WriteObject(ctx, writeObjectParams, rGet).Return(int64(len(mockReaderData)), nil),
|
||||||
)
|
)
|
||||||
|
|
||||||
reader := bytes.NewReader([]byte(mockReaderData))
|
|
||||||
var offset int64
|
|
||||||
offset = mockSize / 3
|
|
||||||
|
|
||||||
upload, err := store.GetUpload(context.Background(), mockID)
|
upload, err := store.GetUpload(context.Background(), mockID)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
|
reader := bytes.NewReader([]byte(mockReaderData))
|
||||||
|
var offset int64 = mockSize / 3
|
||||||
|
|
||||||
_, err = upload.WriteChunk(context.Background(), offset, reader)
|
_, err = upload.WriteChunk(context.Background(), offset, reader)
|
||||||
assert.Nil(err)
|
assert.Nil(err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,53 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// bodyReader is an io.Reader, which is intended to wrap the request
|
||||||
|
// body reader. If an error occurs while reading the request body, it
|
||||||
|
// will not return this error to the reading entity, but instead store
|
||||||
|
// the error and close the io.Reader, so that the error can be checked
|
||||||
|
// afterwards. This is helpful, so that the stores do not have to handle
|
||||||
|
// the error but this can instead be done in the handler.
|
||||||
|
// In addition, the bodyReader keeps track of how many bytes were read.
|
||||||
|
type bodyReader struct {
|
||||||
|
reader io.Reader
|
||||||
|
err error
|
||||||
|
bytesCounter int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBodyReader(r io.Reader) *bodyReader {
|
||||||
|
return &bodyReader{
|
||||||
|
reader: r,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *bodyReader) Read(b []byte) (int, error) {
|
||||||
|
if r.err != nil {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := r.reader.Read(b)
|
||||||
|
atomic.AddInt64(&r.bytesCounter, int64(n))
|
||||||
|
r.err = err
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
return n, io.EOF
|
||||||
|
} else {
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r bodyReader) hasError() error {
|
||||||
|
if r.err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *bodyReader) bytesRead() int64 {
|
||||||
|
return atomic.LoadInt64(&r.bytesCounter)
|
||||||
|
}
|
|
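
bodyReader swallows the read error, reports a clean EOF to the consumer, and keeps the error and byte count around for the handler to inspect afterwards. An illustrative walk-through (it would have to live inside the handler package since the type is unexported; assumes errors, fmt, io, io/ioutil, strings and testing/iotest are imported):

// A reader that fails mid-stream stands in for an interrupted PATCH request body.
failing := io.MultiReader(
    strings.NewReader("hello "),
    iotest.ErrReader(errors.New("connection reset")),
)

br := newBodyReader(failing)

buf, err := ioutil.ReadAll(br)
fmt.Println(string(buf), err) // "hello " <nil> - the consumer sees a clean EOF
fmt.Println(br.bytesRead())   // 6
fmt.Println(br.hasError())    // connection reset - still available to the handler
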
@ -22,6 +22,15 @@ type Config struct {
|
||||||
// absolute URL containing a scheme, e.g. "http://tus.io"
|
// absolute URL containing a scheme, e.g. "http://tus.io"
|
||||||
BasePath string
|
BasePath string
|
||||||
isAbs bool
|
isAbs bool
|
||||||
|
// DisableDownload indicates whether the server will refuse downloads of the
|
||||||
|
// uploaded file, by not mounting the GET handler.
|
||||||
|
DisableDownload bool
|
||||||
|
// DisableTermination indicates whether the server will refuse termination
|
||||||
|
// requests of the uploaded file, by not mounting the DELETE handler.
|
||||||
|
DisableTermination bool
|
||||||
|
// Disable cors headers. If set to true, tusd will not send any CORS related header.
|
||||||
|
// This is useful if you have a proxy sitting in front of tusd that handles CORS.
|
||||||
|
DisableCors bool
|
||||||
// NotifyCompleteUploads indicates whether sending notifications about
|
// NotifyCompleteUploads indicates whether sending notifications about
|
||||||
// completed uploads using the CompleteUploads channel should be enabled.
|
// completed uploads using the CompleteUploads channel should be enabled.
|
||||||
NotifyCompleteUploads bool
|
NotifyCompleteUploads bool
|
||||||
|
@ -40,16 +49,20 @@ type Config struct {
|
||||||
// potentially set by proxies when generating an absolute URL in the
|
// potentially set by proxies when generating an absolute URL in the
|
||||||
// response to POST requests.
|
// response to POST requests.
|
||||||
RespectForwardedHeaders bool
|
RespectForwardedHeaders bool
|
||||||
// PreUploadreateCCallback will be invoked before a new upload is created, if the
|
// PreUploadCreateCallback will be invoked before a new upload is created, if the
|
||||||
// property is supplied. If the callback returns nil, the upload will be created.
|
// property is supplied. If the callback returns nil, the upload will be created.
|
||||||
// Otherwise the HTTP request will be aborted. This can be used to implement
|
// Otherwise the HTTP request will be aborted. This can be used to implement
|
||||||
// validation of upload metadata etc.
|
// validation of upload metadata etc.
|
||||||
PreUploadCreateCallback func(hook HookEvent) error
|
PreUploadCreateCallback func(hook HookEvent) error
|
||||||
|
// PreFinishResponseCallback will be invoked after an upload is completed but before
|
||||||
|
// a response is returned to the client. Error responses from the callback will be passed
|
||||||
|
// back to the client. This can be used to implement post-processing validation.
|
||||||
|
PreFinishResponseCallback func(hook HookEvent) error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *Config) validate() error {
|
func (config *Config) validate() error {
|
||||||
if config.Logger == nil {
|
if config.Logger == nil {
|
||||||
config.Logger = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Ltime)
|
config.Logger = log.New(os.Stdout, "[tusd] ", log.Ldate|log.Lmicroseconds)
|
||||||
}
|
}
|
||||||
|
|
||||||
base := config.BasePath
|
base := config.BasePath
|
||||||
|
|
|
@@ -21,8 +21,30 @@ func TestCORS(t *testing.T) {
 			},
 			Code: http.StatusOK,
 			ResHeader: map[string]string{
-				"Access-Control-Allow-Headers": "Origin, X-Requested-With, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat",
-				"Access-Control-Allow-Methods": "POST, GET, HEAD, PATCH, DELETE, OPTIONS",
+				"Access-Control-Allow-Headers": "Authorization, Origin, X-Requested-With, X-Request-ID, X-HTTP-Method-Override, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat",
+				"Access-Control-Allow-Methods": "POST, HEAD, PATCH, OPTIONS, GET, DELETE",
+				"Access-Control-Max-Age": "86400",
+				"Access-Control-Allow-Origin": "tus.io",
+			},
+		}).Run(handler, t)
+	})
+
+	SubTest(t, "Conditional allow methods", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		handler, _ := NewHandler(Config{
+			StoreComposer: composer,
+			DisableTermination: true,
+			DisableDownload: true,
+		})
+
+		(&httpTest{
+			Method: "OPTIONS",
+			ReqHeader: map[string]string{
+				"Origin": "tus.io",
+			},
+			Code: http.StatusOK,
+			ResHeader: map[string]string{
+				"Access-Control-Allow-Headers": "Authorization, Origin, X-Requested-With, X-Request-ID, X-HTTP-Method-Override, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat",
+				"Access-Control-Allow-Methods": "POST, HEAD, PATCH, OPTIONS",
 				"Access-Control-Max-Age": "86400",
 				"Access-Control-Allow-Origin": "tus.io",
 			},

@@ -59,19 +81,35 @@ func TestCORS(t *testing.T) {
 		req.Host = "tus.io"

 		res := httptest.NewRecorder()
-		res.HeaderMap.Set("Access-Control-Allow-Headers", "HEADER")
-		res.HeaderMap.Set("Access-Control-Allow-Methods", "METHOD")
+		res.Header().Set("Access-Control-Allow-Headers", "HEADER")
+		res.Header().Set("Access-Control-Allow-Methods", "METHOD")
 		handler.ServeHTTP(res, req)

-		headers := res.HeaderMap["Access-Control-Allow-Headers"]
-		methods := res.HeaderMap["Access-Control-Allow-Methods"]
+		headers := res.Header()["Access-Control-Allow-Headers"]
+		methods := res.Header()["Access-Control-Allow-Methods"]

 		if headers[0] != "HEADER" {
 			t.Errorf("expected header to contain HEADER but got: %#v", headers)
 		}

 		if methods[0] != "METHOD" {
-			t.Errorf("expected header to contain HEADER but got: %#v", methods)
+			t.Errorf("expected header to contain METHOD but got: %#v", methods)
 		}
 	})

+	SubTest(t, "Disable CORS", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		handler, _ := NewHandler(Config{
+			StoreComposer: composer,
+			DisableCors: true,
+		})
+
+		(&httpTest{
+			Method: "OPTIONS",
+			ReqHeader: map[string]string{
+				"Origin": "tus.io",
+			},
+			Code: http.StatusOK,
+			ResHeader: map[string]string{},
+		}).Run(handler, t)
+	})
 }
@@ -143,7 +143,7 @@ func TestGet(t *testing.T) {
 			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				Offset: 0,
 				MetaData: map[string]string{
-					"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+					"filetype": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.v1",
 					"filename": "invoice.docx",
 				},
 			}, nil),

@@ -158,7 +158,7 @@ func TestGet(t *testing.T) {
 			URL: "yes",
 			ResHeader: map[string]string{
 				"Content-Length": "0",
-				"Content-Type": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+				"Content-Type": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.v1",
 				"Content-Disposition": `attachment;filename="invoice.docx"`,
 			},
 			Code: http.StatusNoContent,
@@ -40,10 +40,12 @@ func NewHandler(config Config) (*Handler, error) {
 	mux.Post("", http.HandlerFunc(handler.PostFile))
 	mux.Head(":id", http.HandlerFunc(handler.HeadFile))
 	mux.Add("PATCH", ":id", http.HandlerFunc(handler.PatchFile))
-	mux.Get(":id", http.HandlerFunc(handler.GetFile))
+	if !config.DisableDownload {
+		mux.Get(":id", http.HandlerFunc(handler.GetFile))
+	}

 	// Only attach the DELETE handler if the Terminate() method is provided
-	if config.StoreComposer.UsesTerminater {
+	if config.StoreComposer.UsesTerminater && !config.DisableTermination {
 		mux.Del(":id", http.HandlerFunc(handler.DelFile))
 	}
@@ -3,7 +3,6 @@ package handler_test
 import (
 	"context"
 	"net/http"
-	"os"
 	"testing"

 	"github.com/golang/mock/gomock"

@@ -26,8 +25,8 @@ func TestHead(t *testing.T) {
 				Offset: 11,
 				Size: 44,
 				MetaData: map[string]string{
 					"name": "lunrjs.png",
-					"type": "image/png",
+					"empty": "",
 				},
 			}, nil),
 			lock.EXPECT().Unlock().Return(nil),

@@ -49,22 +48,23 @@ func TestHead(t *testing.T) {
 			},
 			Code: http.StatusOK,
 			ResHeader: map[string]string{
 				"Upload-Offset": "11",
 				"Upload-Length": "44",
-				"Cache-Control": "no-store",
+				"Content-Length": "44",
+				"Cache-Control": "no-store",
 			},
 		}).Run(handler, t)

 		// Since the order of a map is not guaranteed in Go, we need to be prepared
 		// for the case, that the order of the metadata may have been changed
-		if v := res.Header().Get("Upload-Metadata"); v != "name bHVucmpzLnBuZw==,type aW1hZ2UvcG5n" &&
-			v != "type aW1hZ2UvcG5n,name bHVucmpzLnBuZw==" {
+		if v := res.Header().Get("Upload-Metadata"); v != "name bHVucmpzLnBuZw==,empty " &&
+			v != "empty ,name bHVucmpzLnBuZw==" {
 			t.Errorf("Expected valid metadata (got '%s')", v)
 		}
 	})

 	SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
-		store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)
+		store.EXPECT().GetUpload(context.Background(), "no").Return(nil, ErrNotFound)

 		handler, _ := NewHandler(Config{
 			StoreComposer: composer,

@@ -82,7 +82,7 @@ func TestHead(t *testing.T) {
 			},
 		}).Run(handler, t)

-		if string(res.Body.Bytes()) != "" {
+		if res.Body.String() != "" {
 			t.Errorf("Expected empty body for failed HEAD request")
 		}
 	})
@@ -2,10 +2,10 @@ package handler_test

 import (
 	"context"
+	"errors"
 	"io"
 	"io/ioutil"
 	"net/http"
-	"os"
 	"strings"
 	"testing"
 	"time"

@@ -141,7 +141,7 @@ func TestPatch(t *testing.T) {
 	})

 	SubTest(t, "UploadNotFoundFail", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
-		store.EXPECT().GetUpload(context.Background(), "no").Return(nil, os.ErrNotExist)
+		store.EXPECT().GetUpload(context.Background(), "no").Return(nil, ErrNotFound)

 		handler, _ := NewHandler(Config{
 			StoreComposer: composer,

@@ -497,14 +497,16 @@ func TestPatch(t *testing.T) {
 		defer ctrl.Finish()
 		upload := NewMockFullUpload(ctrl)

+		// We simulate that the upload has already an offset of 10 bytes. Therefore, the progress notifications
+		// must be the sum of the exisiting offset and the newly read bytes.
 		gomock.InOrder(
 			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
 			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID: "yes",
-				Offset: 0,
+				Offset: 10,
 				Size: 100,
 			}, nil),
-			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first second third")).Return(int64(18), nil),
+			upload.EXPECT().WriteChunk(context.Background(), int64(10), NewReaderMatcher("first second third")).Return(int64(18), nil),
 		)

 		handler, _ := NewHandler(Config{

@@ -525,7 +527,7 @@ func TestPatch(t *testing.T) {
 			info := event.Upload
 			a.Equal("yes", info.ID)
 			a.Equal(int64(100), info.Size)
-			a.Equal(int64(6), info.Offset)
+			a.Equal(int64(16), info.Offset)

 			writer.Write([]byte("second "))
 			writer.Write([]byte("third"))

@@ -534,7 +536,7 @@ func TestPatch(t *testing.T) {
 			info = event.Upload
 			a.Equal("yes", info.ID)
 			a.Equal(int64(100), info.Size)
-			a.Equal(int64(18), info.Offset)
+			a.Equal(int64(28), info.Offset)

 			writer.Close()

@@ -548,12 +550,12 @@ func TestPatch(t *testing.T) {
 			ReqHeader: map[string]string{
 				"Tus-Resumable": "1.0.0",
 				"Content-Type": "application/offset+octet-stream",
-				"Upload-Offset": "0",
+				"Upload-Offset": "10",
 			},
 			ReqBody: reader,
 			Code: http.StatusNoContent,
 			ResHeader: map[string]string{
-				"Upload-Offset": "18",
+				"Upload-Offset": "28",
 			},
 		}).Run(handler, t)

@@ -578,7 +580,7 @@ func TestPatch(t *testing.T) {
 				Offset: 0,
 				Size: 100,
 			}, nil),
-			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), http.ErrBodyReadAfterClose),
+			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
 			store.EXPECT().AsTerminatableUpload(upload).Return(upload),
 			upload.EXPECT().Terminate(context.Background()),
 		)

@@ -627,9 +629,58 @@ func TestPatch(t *testing.T) {
 			ResHeader: map[string]string{
 				"Upload-Offset": "",
 			},
+			ResBody: "upload has been stopped by server\n",
 		}).Run(handler, t)

 		_, more := <-c
 		a.False(more)
 	})

+	SubTest(t, "BodyReadError", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		// This test ensure that error that occurr from reading the request body are not forwarded to the
+		// storage backend but are still causing an
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().GetUpload(context.Background(), "yes").Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID: "yes",
+				Offset: 0,
+				Size: 100,
+			}, nil),
+			// The reader for WriteChunk must not return an error.
+			upload.EXPECT().WriteChunk(context.Background(), int64(0), NewReaderMatcher("first ")).Return(int64(6), nil),
+		)
+
+		handler, _ := NewHandler(Config{
+			StoreComposer: composer,
+		})
+
+		reader, writer := io.Pipe()
+		a := assert.New(t)
+
+		go func() {
+			writer.Write([]byte("first "))
+			err := writer.CloseWithError(errors.New("an error while reading the body"))
+			a.NoError(err)
+		}()
+
+		(&httpTest{
+			Method: "PATCH",
+			URL: "yes",
+			ReqHeader: map[string]string{
+				"Tus-Resumable": "1.0.0",
+				"Content-Type": "application/offset+octet-stream",
+				"Upload-Offset": "0",
+			},
+			ReqBody: reader,
+			Code: http.StatusInternalServerError,
+			ResHeader: map[string]string{
+				"Upload-Offset": "",
+			},
+			ResBody: "an error while reading the body\n",
+		}).Run(handler, t)
+	})
 }
@@ -23,16 +23,18 @@ func TestPost(t *testing.T) {
 			store.EXPECT().NewUpload(context.Background(), FileInfo{
 				Size: 300,
 				MetaData: map[string]string{
 					"foo": "hello",
 					"bar": "world",
+					"empty": "",
 				},
 			}).Return(upload, nil),
 			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
 				ID: "foo",
 				Size: 300,
 				MetaData: map[string]string{
 					"foo": "hello",
 					"bar": "world",
+					"empty": "",
 				},
 			}, nil),
 		)

@@ -52,7 +54,7 @@ func TestPost(t *testing.T) {
 				"Tus-Resumable": "1.0.0",
 				"Upload-Length": "300",
 				// Invalid Base64-encoded values should be ignored
-				"Upload-Metadata": "foo aGVsbG8=, bar d29ybGQ=, hah INVALID",
+				"Upload-Metadata": "foo aGVsbG8=, bar d29ybGQ=, hah INVALID, empty",
 			},
 			Code: http.StatusCreated,
 			ResHeader: map[string]string{

@@ -308,11 +310,49 @@ func TestPost(t *testing.T) {
 				"Upload-Length": "300",
 				"X-Forwarded-Host": "bar.com",
 				"X-Forwarded-Proto": "http",
-				"Forwarded": "proto=https,host=foo.com",
+				"Forwarded": "for=192.168.10.112;host=upload.example.tld;proto=https;proto-version=",
 			},
 			Code: http.StatusCreated,
 			ResHeader: map[string]string{
-				"Location": "https://foo.com/files/foo",
+				"Location": "https://upload.example.tld/files/foo",
+			},
+		}).Run(handler, t)
+	})
+
+	SubTest(t, "RespectForwardedWithQuotes", func(t *testing.T, store *MockFullDataStore, composer *StoreComposer) {
+		// See https://github.com/tus/tusd/issues/809
+		ctrl := gomock.NewController(t)
+		defer ctrl.Finish()
+		upload := NewMockFullUpload(ctrl)
+
+		gomock.InOrder(
+			store.EXPECT().NewUpload(context.Background(), FileInfo{
+				Size: 300,
+				MetaData: map[string]string{},
+			}).Return(upload, nil),
+			upload.EXPECT().GetInfo(context.Background()).Return(FileInfo{
+				ID: "foo",
+				Size: 300,
+				MetaData: map[string]string{},
+			}, nil),
+		)
+
+		handler, _ := NewHandler(Config{
+			StoreComposer: composer,
+			BasePath: "/files/",
+			RespectForwardedHeaders: true,
+		})
+
+		(&httpTest{
+			Method: "POST",
+			ReqHeader: map[string]string{
+				"Tus-Resumable": "1.0.0",
+				"Upload-Length": "300",
+				"Forwarded": `Forwarded: for=192.168.10.112;host="upload.example.tld:8443";proto=https`,
+			},
+			Code: http.StatusCreated,
+			ResHeader: map[string]string{
+				"Location": "https://upload.example.tld:8443/files/foo",
 			},
 		}).Run(handler, t)
 	})
@@ -9,11 +9,9 @@ import (
 	"math"
 	"net"
 	"net/http"
-	"os"
 	"regexp"
 	"strconv"
 	"strings"
-	"sync/atomic"
 	"time"
 )

@@ -21,9 +19,9 @@ const UploadLengthDeferred = "1"

 var (
 	reExtractFileID = regexp.MustCompile(`([^/]+)\/?$`)
-	reForwardedHost = regexp.MustCompile(`host=([^,]+)`)
+	reForwardedHost = regexp.MustCompile(`host="?([^;"]+)`)
 	reForwardedProto = regexp.MustCompile(`proto=(https?)`)
-	reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z\-\+\.]+$`)
+	reMimeType = regexp.MustCompile(`^[a-z]+\/[a-z0-9\-\+\.]+$`)
 )

 // HTTPError represents an error with an additional status code attached
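The loosened `reForwardedHost` pattern now also accepts a quoted host value, optionally carrying a port, as proxies emit it in RFC 7239 `Forwarded` headers. A standalone sketch of what the two expressions extract; the sample header value is made up for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

// The updated patterns from this change: the host may be wrapped in quotes and
// may include a port, while proto stays restricted to http/https.
var (
	reForwardedHost  = regexp.MustCompile(`host="?([^;"]+)`)
	reForwardedProto = regexp.MustCompile(`proto=(https?)`)
)

func main() {
	header := `for=192.168.10.112;host="upload.example.tld:8443";proto=https`

	if m := reForwardedHost.FindStringSubmatch(header); m != nil {
		fmt.Println("host:", m[1]) // host: upload.example.tld:8443
	}
	if m := reForwardedProto.FindStringSubmatch(header); m != nil {
		fmt.Println("proto:", m[1]) // proto: https
	}
}
```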
@@ -55,6 +53,24 @@ func NewHTTPError(err error, statusCode int) HTTPError {
 	return httpError{err, statusCode}
 }

+type contextWithValues struct {
+	context.Context
+	valueHolder context.Context
+}
+
+func (c contextWithValues) Value(key interface{}) interface{} {
+	return c.valueHolder.Value(key)
+}
+
+func newContextWithValues(ctx context.Context) contextWithValues {
+	return contextWithValues{
+		// Use background to not get cancel event
+		Context: context.Background(),
+		// Use request context to get stored values
+		valueHolder: ctx,
+	}
+}
+
 var (
 	ErrUnsupportedVersion = NewHTTPError(errors.New("unsupported version"), http.StatusPreconditionFailed)
 	ErrMaxSizeExceeded = NewHTTPError(errors.New("maximum size exceeded"), http.StatusRequestEntityTooLarge)
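The `contextWithValues` helper separates the two jobs of a request context: cancellation and deadlines come from a fresh background context, while request-scoped values stay readable. A minimal sketch of the effect, using a copy of the type and a hypothetical `requestId` key:

```go
package main

import (
	"context"
	"fmt"
)

type ctxKey string

// contextWithValues mirrors the helper introduced above: it embeds a background
// context for cancellation but resolves values against the original request context.
type contextWithValues struct {
	context.Context
	valueHolder context.Context
}

func (c contextWithValues) Value(key interface{}) interface{} {
	return c.valueHolder.Value(key)
}

func main() {
	reqCtx, cancel := context.WithCancel(context.Background())
	reqCtx = context.WithValue(reqCtx, ctxKey("requestId"), "abc-123")
	cancel() // simulate the client disconnecting

	ctx := contextWithValues{Context: context.Background(), valueHolder: reqCtx}

	fmt.Println(ctx.Value(ctxKey("requestId"))) // abc-123
	fmt.Println(ctx.Err())                      // <nil>: the cancellation is not propagated
}
```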
@@ -72,6 +88,9 @@ var (
 	ErrUploadLengthAndUploadDeferLength = NewHTTPError(errors.New("provided both Upload-Length and Upload-Defer-Length"), http.StatusBadRequest)
 	ErrInvalidUploadDeferLength = NewHTTPError(errors.New("invalid Upload-Defer-Length header"), http.StatusBadRequest)
 	ErrUploadStoppedByServer = NewHTTPError(errors.New("upload has been stopped by server"), http.StatusBadRequest)
+
+	errReadTimeout = errors.New("read tcp: i/o timeout")
+	errConnectionReset = errors.New("read tcp: connection reset by peer")
 )

 // HTTPRequest contains basic details of an incoming HTTP request.

@@ -97,6 +116,12 @@ type HookEvent struct {
 }

 func newHookEvent(info FileInfo, r *http.Request) HookEvent {
+	// The Host header field is not present in the header map, see https://pkg.go.dev/net/http#Request:
+	// > For incoming requests, the Host header is promoted to the
+	// > Request.Host field and removed from the Header map.
+	// That's why we add it back manually.
+	r.Header.Set("Host", r.Host)
+
 	return HookEvent{
 		Upload: info,
 		HTTPRequest: HTTPRequest{

@@ -210,19 +235,28 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 		r.Method = newMethod
 	}

-	handler.log("RequestIncoming", "method", r.Method, "path", r.URL.Path)
+	handler.log("RequestIncoming", "method", r.Method, "path", r.URL.Path, "requestId", getRequestId(r))

 	handler.Metrics.incRequestsTotal(r.Method)

 	header := w.Header()

-	if origin := r.Header.Get("Origin"); origin != "" {
+	if origin := r.Header.Get("Origin"); !handler.config.DisableCors && origin != "" {
 		header.Set("Access-Control-Allow-Origin", origin)

 		if r.Method == "OPTIONS" {
+			allowedMethods := "POST, HEAD, PATCH, OPTIONS"
+			if !handler.config.DisableDownload {
+				allowedMethods += ", GET"
+			}
+
+			if !handler.config.DisableTermination {
+				allowedMethods += ", DELETE"
+			}
+
 			// Preflight request
-			header.Add("Access-Control-Allow-Methods", "POST, GET, HEAD, PATCH, DELETE, OPTIONS")
-			header.Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat")
+			header.Add("Access-Control-Allow-Methods", allowedMethods)
+			header.Add("Access-Control-Allow-Headers", "Authorization, Origin, X-Requested-With, X-Request-ID, X-HTTP-Method-Override, Content-Type, Upload-Length, Upload-Offset, Tus-Resumable, Upload-Metadata, Upload-Defer-Length, Upload-Concat")
 			header.Set("Access-Control-Max-Age", "86400")

 		} else {

@@ -259,9 +293,9 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 	}

 	// Test if the version sent by the client is supported
-	// GET methods are not checked since a browser may visit this URL and does
-	// not include this header. This request is not part of the specification.
-	if r.Method != "GET" && r.Header.Get("Tus-Resumable") != "1.0.0" {
+	// GET and HEAD methods are not checked since a browser may visit this URL and does
+	// not include this header. GET requests are not part of the specification.
+	if r.Method != "GET" && r.Method != "HEAD" && r.Header.Get("Tus-Resumable") != "1.0.0" {
 		handler.sendError(w, r, ErrUnsupportedVersion)
 		return
 	}

@@ -274,7 +308,7 @@ func (handler *UnroutedHandler) Middleware(h http.Handler) http.Handler {
 // PostFile creates a new file upload using the datastore after validating the
 // length and parsing the metadata.
 func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	// Check for presence of application/offset+octet-stream. If another content
 	// type is defined, it will be ignored and treated as none was set because

@@ -406,7 +440,10 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)
 		// Directly finish the upload if the upload is empty (i.e. has a size of 0).
 		// This statement is in an else-if block to avoid causing duplicate calls
 		// to finishUploadIfComplete if an upload is empty and contains a chunk.
-		handler.finishUploadIfComplete(ctx, upload, info, r)
+		if err := handler.finishUploadIfComplete(ctx, upload, info, r); err != nil {
+			handler.sendError(w, r, err)
+			return
+		}
 	}

 	handler.sendResp(w, r, http.StatusCreated)

@@ -414,7 +451,7 @@ func (handler *UnroutedHandler) PostFile(w http.ResponseWriter, r *http.Request)

 // HeadFile returns the length and offset for the HEAD request
 func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {

@@ -468,6 +505,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 		w.Header().Set("Upload-Defer-Length", UploadLengthDeferred)
 	} else {
 		w.Header().Set("Upload-Length", strconv.FormatInt(info.Size, 10))
+		w.Header().Set("Content-Length", strconv.FormatInt(info.Size, 10))
 	}

 	w.Header().Set("Cache-Control", "no-store")

@@ -478,7 +516,7 @@ func (handler *UnroutedHandler) HeadFile(w http.ResponseWriter, r *http.Request)
 // PatchFile adds a chunk to an upload. This operation is only allowed
 // if enough space in the upload is left.
 func (handler *UnroutedHandler) PatchFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	// Check for presence of application/offset+octet-stream
 	if r.Header.Get("Content-Type") != "application/offset+octet-stream" {

@@ -606,11 +644,12 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
 	handler.log("ChunkWriteStart", "id", id, "maxSize", i64toa(maxSize), "offset", i64toa(offset))

 	var bytesWritten int64
+	var err error
 	// Prevent a nil pointer dereference when accessing the body which may not be
 	// available in the case of a malicious request.
 	if r.Body != nil {
 		// Limit the data read from the request's body to the allowed maximum
-		reader := io.LimitReader(r.Body, maxSize)
+		reader := newBodyReader(io.LimitReader(r.Body, maxSize))

 		// We use a context object to allow the hook system to cancel an upload
 		uploadCtx, stopUpload := context.WithCancel(context.Background())

@@ -630,12 +669,10 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
 		}()

 		if handler.config.NotifyUploadProgress {
-			var stopProgressEvents chan<- struct{}
-			reader, stopProgressEvents = handler.sendProgressMessages(newHookEvent(info, r), reader)
+			stopProgressEvents := handler.sendProgressMessages(newHookEvent(info, r), reader)
 			defer close(stopProgressEvents)
 		}

-		var err error
 		bytesWritten, err = upload.WriteChunk(ctx, offset, reader)
 		if terminateUpload && handler.composer.UsesTerminater {
 			if terminateErr := handler.terminateUpload(ctx, upload, info, r); terminateErr != nil {

@@ -645,20 +682,28 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
 			}
 		}

-		// The error "http: invalid Read on closed Body" is returned if we stop the upload
-		// while the data store is still reading. Since this is an implementation detail,
-		// we replace this error with a message saying that the upload has been stopped.
-		if err == http.ErrBodyReadAfterClose {
-			err = ErrUploadStoppedByServer
+		// If we encountered an error while reading the body from the HTTP request, log it, but only include
+		// it in the response, if the store did not also return an error.
+		if bodyErr := reader.hasError(); bodyErr != nil {
+			handler.log("BodyReadError", "id", id, "error", bodyErr.Error())
+			if err == nil {
+				err = bodyErr
+			}
 		}

-		if err != nil {
-			return err
+		// If the upload was stopped by the server, send an error response indicating this.
+		// TODO: Include a custom reason for the end user why the upload was stopped.
+		if terminateUpload {
+			err = ErrUploadStoppedByServer
 		}
 	}

 	handler.log("ChunkWriteComplete", "id", id, "bytesWritten", i64toa(bytesWritten))

+	if err != nil {
+		return err
+	}
+
 	// Send new offset to client
 	newOffset := offset + bytesWritten
 	w.Header().Set("Upload-Offset", strconv.FormatInt(newOffset, 10))
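`newBodyReader`, `reader.hasError()` and `reader.bytesRead()` refer to a wrapper type whose implementation (body_reader.go) is not part of this excerpt. The following is only a sketch, under the assumption that the wrapper counts the bytes handed to the data store and records the first read error instead of passing it on:

```go
package bodysketch

import (
	"io"
	"sync/atomic"
)

// bodyReaderSketch is a stand-in for tusd's internal bodyReader; the names and
// exact behavior are assumptions for illustration, not the actual implementation.
type bodyReaderSketch struct {
	reader       io.Reader
	bytesCounter int64
	err          error
}

func newBodyReaderSketch(r io.Reader) *bodyReaderSketch {
	return &bodyReaderSketch{reader: r}
}

func (r *bodyReaderSketch) Read(p []byte) (int, error) {
	if r.err != nil {
		// After a read error, present a clean EOF to the data store.
		return 0, io.EOF
	}
	n, err := r.reader.Read(p)
	atomic.AddInt64(&r.bytesCounter, int64(n))
	if err != nil && err != io.EOF {
		// Remember the error for the handler, but do not forward it to the store.
		r.err = err
		return n, io.EOF
	}
	return n, err
}

// hasError reports the first error encountered while reading the request body.
func (r *bodyReaderSketch) hasError() error { return r.err }

// bytesRead reports how many bytes have been passed on so far.
func (r *bodyReaderSketch) bytesRead() int64 { return atomic.LoadInt64(&r.bytesCounter) }
```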
@@ -674,17 +719,24 @@ func (handler *UnroutedHandler) writeChunk(ctx context.Context, upload Upload, i
 func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, upload Upload, info FileInfo, r *http.Request) error {
 	// If the upload is completed, ...
 	if !info.SizeIsDeferred && info.Offset == info.Size {
-		// ... allow custom mechanism to finish and cleanup the upload
+		// ... allow the data storage to finish and cleanup the upload
 		if err := upload.FinishUpload(ctx); err != nil {
 			return err
 		}

+		// ... allow the hook callback to run before sending the response
+		if handler.config.PreFinishResponseCallback != nil {
+			if err := handler.config.PreFinishResponseCallback(newHookEvent(info, r)); err != nil {
+				return err
+			}
+		}
+
+		handler.Metrics.incUploadsFinished()
+
 		// ... send the info out to the channel
 		if handler.config.NotifyCompleteUploads {
 			handler.CompleteUploads <- newHookEvent(info, r)
 		}
-
-		handler.Metrics.incUploadsFinished()
 	}

 	return nil
@@ -693,7 +745,7 @@ func (handler *UnroutedHandler) finishUploadIfComplete(ctx context.Context, uplo
 // GetFile handles requests to download a file using a GET request. This is not
 // part of the specification.
 func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	id, err := extractIDFromPath(r.URL.Path)
 	if err != nil {

@@ -753,10 +805,10 @@ func (handler *UnroutedHandler) GetFile(w http.ResponseWriter, r *http.Request)

 // mimeInlineBrowserWhitelist is a map containing MIME types which should be
 // allowed to be rendered by browser inline, instead of being forced to be
-// downloadd. For example, HTML or SVG files are not allowed, since they may
+// downloaded. For example, HTML or SVG files are not allowed, since they may
 // contain malicious JavaScript. In a similiar fashion PDF is not on this list
 // as their parsers commonly contain vulnerabilities which can be exploited.
-// The values of this map does not convei any meaning and are therefore just
+// The values of this map does not convey any meaning and are therefore just
 // empty structs.
 var mimeInlineBrowserWhitelist = map[string]struct{}{
 	"text/plain": struct{}{},

@@ -774,7 +826,7 @@ var mimeInlineBrowserWhitelist = map[string]struct{}{
 	"audio/webm": struct{}{},
 	"video/webm": struct{}{},
 	"audio/ogg": struct{}{},
-	"video/ogg ": struct{}{},
+	"video/ogg": struct{}{},
 	"application/ogg": struct{}{},
 }

@@ -814,7 +866,7 @@ func filterContentType(info FileInfo) (contentType string, contentDisposition st

 // DelFile terminates an upload permanently.
 func (handler *UnroutedHandler) DelFile(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
+	ctx := newContextWithValues(r.Context())

 	// Abort the request handling if the required interface is not implemented
 	if !handler.composer.UsesTerminater {

@@ -887,26 +939,40 @@ func (handler *UnroutedHandler) terminateUpload(ctx context.Context, upload Uplo
 // Send the error in the response body. The status code will be looked up in
 // ErrStatusCodes. If none is found 500 Internal Error will be used.
 func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request, err error) {
-	// Interpret os.ErrNotExist as 404 Not Found
-	if os.IsNotExist(err) {
-		err = ErrNotFound
-	}
-
 	// Errors for read timeouts contain too much information which is not
 	// necessary for us and makes grouping for the metrics harder. The error
 	// message looks like: read tcp 127.0.0.1:1080->127.0.0.1:53673: i/o timeout
 	// Therefore, we use a common error message for all of them.
 	if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
-		err = errors.New("read tcp: i/o timeout")
+		err = errReadTimeout
 	}

 	// Errors for connnection resets also contain TCP details, we don't need, e.g:
 	// read tcp 127.0.0.1:1080->127.0.0.1:10023: read: connection reset by peer
 	// Therefore, we also trim those down.
 	if strings.HasSuffix(err.Error(), "read: connection reset by peer") {
-		err = errors.New("read tcp: connection reset by peer")
+		err = errConnectionReset
 	}

+	// TODO: Decide if we should handle this in here, in body_reader or not at all.
+	// If the HTTP PATCH request gets interrupted in the middle (e.g. because
+	// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
+	// However, for the handler it's not important whether the stream has ended
+	// on purpose or accidentally.
+	//if err == io.ErrUnexpectedEOF {
+	//	err = nil
+	//}
+
+	// TODO: Decide if we want to ignore connection reset errors all together.
+	// In some cases, the HTTP connection gets reset by the other peer. This is not
+	// necessarily the tus client but can also be a proxy in front of tusd, e.g. HAProxy 2
+	// is known to reset the connection to tusd, when the tus client closes the connection.
+	// To avoid erroring out in this case and loosing the uploaded data, we can ignore
+	// the error here without causing harm.
+	//if strings.Contains(err.Error(), "read: connection reset by peer") {
+	//	err = nil
+	//}
+
 	statusErr, ok := err.(HTTPError)
 	if !ok {
 		statusErr = NewHTTPError(err, http.StatusInternalServerError)

@@ -922,7 +988,7 @@ func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request
 	w.WriteHeader(statusErr.StatusCode())
 	w.Write(reason)

-	handler.log("ResponseOutgoing", "status", strconv.Itoa(statusErr.StatusCode()), "method", r.Method, "path", r.URL.Path, "error", err.Error())
+	handler.log("ResponseOutgoing", "status", strconv.Itoa(statusErr.StatusCode()), "method", r.Method, "path", r.URL.Path, "error", err.Error(), "requestId", getRequestId(r))

 	handler.Metrics.incErrorsTotal(statusErr)
 }

@@ -931,7 +997,7 @@ func (handler *UnroutedHandler) sendError(w http.ResponseWriter, r *http.Request
 func (handler *UnroutedHandler) sendResp(w http.ResponseWriter, r *http.Request, status int) {
 	w.WriteHeader(status)

-	handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path)
+	handler.log("ResponseOutgoing", "status", strconv.Itoa(status), "method", r.Method, "path", r.URL.Path, "requestId", getRequestId(r))
 }

 // Make an absolute URLs to the given upload id. If the base path is absolute

@@ -949,39 +1015,27 @@ func (handler *UnroutedHandler) absFileURL(r *http.Request, id string) string {
 	return url
 }

-type progressWriter struct {
-	Offset int64
-}
-
-func (w *progressWriter) Write(b []byte) (int, error) {
-	atomic.AddInt64(&w.Offset, int64(len(b)))
-	return len(b), nil
-}
-
 // sendProgressMessage will send a notification over the UploadProgress channel
 // every second, indicating how much data has been transfered to the server.
 // It will stop sending these instances once the returned channel has been
-// closed. The returned reader should be used to read the request body.
-func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader io.Reader) (io.Reader, chan<- struct{}) {
+// closed.
+func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader *bodyReader) chan<- struct{} {
 	previousOffset := int64(0)
-	progress := &progressWriter{
-		Offset: hook.Upload.Offset,
-	}
+	originalOffset := hook.Upload.Offset
 	stop := make(chan struct{}, 1)
-	reader = io.TeeReader(reader, progress)

 	go func() {
 		for {
 			select {
 			case <-stop:
-				hook.Upload.Offset = atomic.LoadInt64(&progress.Offset)
+				hook.Upload.Offset = originalOffset + reader.bytesRead()
 				if hook.Upload.Offset != previousOffset {
 					handler.UploadProgress <- hook
 					previousOffset = hook.Upload.Offset
 				}
 				return
 			case <-time.After(1 * time.Second):
-				hook.Upload.Offset = atomic.LoadInt64(&progress.Offset)
+				hook.Upload.Offset = originalOffset + reader.bytesRead()
 				if hook.Upload.Offset != previousOffset {
 					handler.UploadProgress <- hook
 					previousOffset = hook.Upload.Offset

@@ -990,7 +1044,7 @@ func (handler *UnroutedHandler) sendProgressMessages(hook HookEvent, reader io.R
 		}
 	}()

-	return reader, stop
+	return stop
 }

 // getHostAndProtocol extracts the host and used protocol (either HTTP or HTTPS)
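These notifications only flow when `NotifyUploadProgress` (and, for completions, `NotifyCompleteUploads`) is enabled in the `Config`, and something has to drain the channels or the handler will block. A minimal consumer sketch; the log format is an assumption:

```go
package uploads

import (
	"log"

	"github.com/tus/tusd/pkg/handler"
)

// watchUploads drains the progress and completion channels of a tusd handler.
func watchUploads(h *handler.Handler) {
	go func() {
		for event := range h.UploadProgress {
			log.Printf("upload %s is at offset %d of %d bytes",
				event.Upload.ID, event.Upload.Offset, event.Upload.Size)
		}
	}()

	go func() {
		for event := range h.CompleteUploads {
			log.Printf("upload %s finished", event.Upload.ID)
		}
	}()
}
```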
|
@ -1111,19 +1165,27 @@ func ParseMetadataHeader(header string) map[string]string {
|
||||||
|
|
||||||
parts := strings.Split(element, " ")
|
parts := strings.Split(element, " ")
|
||||||
|
|
||||||
// Do not continue with this element if no key and value or presented
|
if len(parts) > 2 {
|
||||||
if len(parts) != 2 {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ignore corrent element if the value is no valid base64
|
|
||||||
key := parts[0]
|
key := parts[0]
|
||||||
value, err := base64.StdEncoding.DecodeString(parts[1])
|
if key == "" {
|
||||||
if err != nil {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
meta[key] = string(value)
|
value := ""
|
||||||
|
if len(parts) == 2 {
|
||||||
|
// Ignore current element if the value is no valid base64
|
||||||
|
dec, err := base64.StdEncoding.DecodeString(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
value = string(dec)
|
||||||
|
}
|
||||||
|
|
||||||
|
meta[key] = value
|
||||||
}
|
}
|
||||||
|
|
||||||
return meta
|
return meta
|
||||||
|
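For reference, the client-side counterpart could be sketched as below: non-empty values are Base64-encoded, while an empty value is sent as a bare key, which the updated parser maps to an empty string. `buildMetadataHeader` is a hypothetical helper, not part of tusd:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// buildMetadataHeader assembles an Upload-Metadata header value from a map,
// mirroring what ParseMetadataHeader accepts.
func buildMetadataHeader(meta map[string]string) string {
	parts := make([]string, 0, len(meta))
	for key, value := range meta {
		if value == "" {
			// Empty values are transmitted as the bare key.
			parts = append(parts, key)
			continue
		}
		parts = append(parts, key+" "+base64.StdEncoding.EncodeToString([]byte(value)))
	}
	return strings.Join(parts, ",")
}

func main() {
	header := buildMetadataHeader(map[string]string{
		"filename": "invoice.docx",
		"empty":    "",
	})
	fmt.Println(header) // e.g. "filename aW52b2ljZS5kb2N4,empty" (map order is not deterministic)
}
```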
@@ -1202,3 +1264,20 @@ func extractIDFromPath(url string) (string, error) {
 func i64toa(num int64) string {
 	return strconv.FormatInt(num, 10)
 }
+
+// getRequestId returns the value of the X-Request-ID header, if available,
+// and also takes care of truncating the input.
+func getRequestId(r *http.Request) string {
+	reqId := r.Header.Get("X-Request-ID")
+	if reqId == "" {
+		return ""
+	}
+
+	// Limit the length of the request ID to 36 characters, which is enough
+	// to fit a UUID.
+	if len(reqId) > 36 {
+		reqId = reqId[:36]
+	}
+
+	return reqId
+}
@@ -0,0 +1,35 @@
+package handler_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	. "github.com/tus/tusd/pkg/handler"
+)
+
+func TestParseMetadataHeader(t *testing.T) {
+	a := assert.New(t)
+
+	md := ParseMetadataHeader("")
+	a.Equal(md, map[string]string{})
+
+	// Invalidly encoded values are ignored
+	md = ParseMetadataHeader("k1 INVALID")
+	a.Equal(md, map[string]string{})
+
+	// If the same key occurs multiple times, the last one wins
+	md = ParseMetadataHeader("k1 aGVsbG8=,k1 d29ybGQ=")
+	a.Equal(md, map[string]string{
+		"k1": "world",
+	})
+
+	// Empty values are mapped to an empty string
+	md = ParseMetadataHeader("k1 aGVsbG8=, k2, k3 , k4 d29ybGQ=")
+	a.Equal(md, map[string]string{
+		"k1": "hello",
+		"k2": "",
+		"k3": "",
+		"k4": "world",
+	})
+}
@@ -75,15 +75,15 @@ func (test *httpTest) Run(handler http.Handler, t *testing.T) *httptest.Response
 	}

 	for key, value := range test.ResHeader {
-		header := w.HeaderMap.Get(key)
+		header := w.Header().Get(key)

 		if value != header {
 			t.Errorf("Expected '%s' as '%s' (got '%s')", value, key, header)
 		}
 	}

-	if test.ResBody != "" && string(w.Body.Bytes()) != test.ResBody {
-		t.Errorf("Expected '%s' as body (got '%s'", test.ResBody, string(w.Body.Bytes()))
+	if test.ResBody != "" && w.Body.String() != test.ResBody {
+		t.Errorf("Expected '%s' as body (got '%s'", test.ResBody, w.Body.String())
 	}

 	return w
@@ -45,7 +45,7 @@ type memoryLock struct {
 	id string
 }

-// LockUpload tries to obtain the exclusive lock.
+// Lock tries to obtain the exclusive lock.
 func (lock memoryLock) Lock() error {
 	lock.locker.mutex.Lock()
 	defer lock.locker.mutex.Unlock()

@@ -60,7 +60,7 @@ func (lock memoryLock) Lock() error {
 	return nil
 }

-// UnlockUpload releases a lock. If no such lock exists, no error will be returned.
+// Unlock releases a lock. If no such lock exists, no error will be returned.
 func (lock memoryLock) Unlock() error {
 	lock.locker.mutex.Lock()
@ -0,0 +1,475 @@
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// source: v1/hook.proto
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
fmt "fmt"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
any "github.com/golang/protobuf/ptypes/any"
|
||||||
|
grpc "google.golang.org/grpc"
|
||||||
|
codes "google.golang.org/grpc/codes"
|
||||||
|
status "google.golang.org/grpc/status"
|
||||||
|
math "math"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
|
// Uploaded data
|
||||||
|
type Upload struct {
|
||||||
|
// Unique integer identifier of the uploaded file
|
||||||
|
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||||
|
// Total file size in bytes specified in the NewUpload call
|
||||||
|
Size int64 `protobuf:"varint,2,opt,name=Size,proto3" json:"Size,omitempty"`
|
||||||
|
// Indicates whether the total file size is deferred until later
|
||||||
|
SizeIsDeferred bool `protobuf:"varint,3,opt,name=SizeIsDeferred,proto3" json:"SizeIsDeferred,omitempty"`
|
||||||
|
// Offset in bytes (zero-based)
|
||||||
|
Offset int64 `protobuf:"varint,4,opt,name=Offset,proto3" json:"Offset,omitempty"`
|
||||||
|
MetaData map[string]string `protobuf:"bytes,5,rep,name=metaData,proto3" json:"metaData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||||
|
// Indicates that this is a partial upload which will later be used to form
|
||||||
|
	// a final upload by concatenation. Partial uploads should not be processed
	// when they are finished since they are only incomplete chunks of files.
	IsPartial bool `protobuf:"varint,6,opt,name=isPartial,proto3" json:"isPartial,omitempty"`
	// Indicates that this is a final upload
	IsFinal bool `protobuf:"varint,7,opt,name=isFinal,proto3" json:"isFinal,omitempty"`
	// If the upload is a final one (see IsFinal) this will be a non-empty
	// ordered slice containing the ids of the uploads of which the final upload
	// will consist after concatenation.
	PartialUploads []string `protobuf:"bytes,8,rep,name=partialUploads,proto3" json:"partialUploads,omitempty"`
	// Storage contains information about where the data storage saves the upload,
	// for example a file path. The available values vary depending on what data
	// store is used. This map may also be nil.
	Storage              map[string]string `protobuf:"bytes,9,rep,name=storage,proto3" json:"storage,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

func (m *Upload) Reset()         { *m = Upload{} }
func (m *Upload) String() string { return proto.CompactTextString(m) }
func (*Upload) ProtoMessage()    {}
func (*Upload) Descriptor() ([]byte, []int) {
	return fileDescriptor_581082325ef044c1, []int{0}
}

func (m *Upload) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Upload.Unmarshal(m, b)
}
func (m *Upload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Upload.Marshal(b, m, deterministic)
}
func (m *Upload) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Upload.Merge(m, src)
}
func (m *Upload) XXX_Size() int {
	return xxx_messageInfo_Upload.Size(m)
}
func (m *Upload) XXX_DiscardUnknown() {
	xxx_messageInfo_Upload.DiscardUnknown(m)
}

var xxx_messageInfo_Upload proto.InternalMessageInfo

func (m *Upload) GetId() string {
	if m != nil {
		return m.Id
	}
	return ""
}

func (m *Upload) GetSize() int64 {
	if m != nil {
		return m.Size
	}
	return 0
}

func (m *Upload) GetSizeIsDeferred() bool {
	if m != nil {
		return m.SizeIsDeferred
	}
	return false
}

func (m *Upload) GetOffset() int64 {
	if m != nil {
		return m.Offset
	}
	return 0
}

func (m *Upload) GetMetaData() map[string]string {
	if m != nil {
		return m.MetaData
	}
	return nil
}

func (m *Upload) GetIsPartial() bool {
	if m != nil {
		return m.IsPartial
	}
	return false
}

func (m *Upload) GetIsFinal() bool {
	if m != nil {
		return m.IsFinal
	}
	return false
}

func (m *Upload) GetPartialUploads() []string {
	if m != nil {
		return m.PartialUploads
	}
	return nil
}

func (m *Upload) GetStorage() map[string]string {
	if m != nil {
		return m.Storage
	}
	return nil
}

type HTTPRequest struct {
	// Method is the HTTP method, e.g. POST or PATCH
	Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"`
	// URI is the full HTTP request URI, e.g. /files/fooo
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3" json:"uri,omitempty"`
	// RemoteAddr contains the network address that sent the request
	RemoteAddr           string   `protobuf:"bytes,3,opt,name=remoteAddr,proto3" json:"remoteAddr,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *HTTPRequest) Reset()         { *m = HTTPRequest{} }
func (m *HTTPRequest) String() string { return proto.CompactTextString(m) }
func (*HTTPRequest) ProtoMessage()    {}
func (*HTTPRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_581082325ef044c1, []int{1}
}

func (m *HTTPRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_HTTPRequest.Unmarshal(m, b)
}
func (m *HTTPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_HTTPRequest.Marshal(b, m, deterministic)
}
func (m *HTTPRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_HTTPRequest.Merge(m, src)
}
func (m *HTTPRequest) XXX_Size() int {
	return xxx_messageInfo_HTTPRequest.Size(m)
}
func (m *HTTPRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_HTTPRequest.DiscardUnknown(m)
}

var xxx_messageInfo_HTTPRequest proto.InternalMessageInfo

func (m *HTTPRequest) GetMethod() string {
	if m != nil {
		return m.Method
	}
	return ""
}

func (m *HTTPRequest) GetUri() string {
	if m != nil {
		return m.Uri
	}
	return ""
}

func (m *HTTPRequest) GetRemoteAddr() string {
	if m != nil {
		return m.RemoteAddr
	}
	return ""
}

// Hook's data
type Hook struct {
	// Upload contains information about the upload that caused this hook
	// to be fired.
	Upload *Upload `protobuf:"bytes,1,opt,name=upload,proto3" json:"upload,omitempty"`
	// HTTPRequest contains details about the HTTP request that reached
	// tusd.
	HttpRequest *HTTPRequest `protobuf:"bytes,2,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"`
	// The hook name
	Name                 string   `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Hook) Reset()         { *m = Hook{} }
func (m *Hook) String() string { return proto.CompactTextString(m) }
func (*Hook) ProtoMessage()    {}
func (*Hook) Descriptor() ([]byte, []int) {
	return fileDescriptor_581082325ef044c1, []int{2}
}

func (m *Hook) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Hook.Unmarshal(m, b)
}
func (m *Hook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Hook.Marshal(b, m, deterministic)
}
func (m *Hook) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Hook.Merge(m, src)
}
func (m *Hook) XXX_Size() int {
	return xxx_messageInfo_Hook.Size(m)
}
func (m *Hook) XXX_DiscardUnknown() {
	xxx_messageInfo_Hook.DiscardUnknown(m)
}

var xxx_messageInfo_Hook proto.InternalMessageInfo

func (m *Hook) GetUpload() *Upload {
	if m != nil {
		return m.Upload
	}
	return nil
}

func (m *Hook) GetHttpRequest() *HTTPRequest {
	if m != nil {
		return m.HttpRequest
	}
	return nil
}

func (m *Hook) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// Request data to send hook
type SendRequest struct {
	// The hook data
	Hook                 *Hook    `protobuf:"bytes,1,opt,name=hook,proto3" json:"hook,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SendRequest) Reset()         { *m = SendRequest{} }
func (m *SendRequest) String() string { return proto.CompactTextString(m) }
func (*SendRequest) ProtoMessage()    {}
func (*SendRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_581082325ef044c1, []int{3}
}

func (m *SendRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SendRequest.Unmarshal(m, b)
}
func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic)
}
func (m *SendRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SendRequest.Merge(m, src)
}
func (m *SendRequest) XXX_Size() int {
	return xxx_messageInfo_SendRequest.Size(m)
}
func (m *SendRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SendRequest.DiscardUnknown(m)
}

var xxx_messageInfo_SendRequest proto.InternalMessageInfo

func (m *SendRequest) GetHook() *Hook {
	if m != nil {
		return m.Hook
	}
	return nil
}

// Response that contains data for sended hook
type SendResponse struct {
	// The response of the hook.
	Response             *any.Any `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SendResponse) Reset()         { *m = SendResponse{} }
func (m *SendResponse) String() string { return proto.CompactTextString(m) }
func (*SendResponse) ProtoMessage()    {}
func (*SendResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_581082325ef044c1, []int{4}
}

func (m *SendResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SendResponse.Unmarshal(m, b)
}
func (m *SendResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SendResponse.Marshal(b, m, deterministic)
}
func (m *SendResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SendResponse.Merge(m, src)
}
func (m *SendResponse) XXX_Size() int {
	return xxx_messageInfo_SendResponse.Size(m)
}
func (m *SendResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SendResponse.DiscardUnknown(m)
}

var xxx_messageInfo_SendResponse proto.InternalMessageInfo

func (m *SendResponse) GetResponse() *any.Any {
	if m != nil {
		return m.Response
	}
	return nil
}

func init() {
	proto.RegisterType((*Upload)(nil), "v1.Upload")
	proto.RegisterMapType((map[string]string)(nil), "v1.Upload.MetaDataEntry")
	proto.RegisterMapType((map[string]string)(nil), "v1.Upload.StorageEntry")
	proto.RegisterType((*HTTPRequest)(nil), "v1.HTTPRequest")
	proto.RegisterType((*Hook)(nil), "v1.Hook")
	proto.RegisterType((*SendRequest)(nil), "v1.SendRequest")
	proto.RegisterType((*SendResponse)(nil), "v1.SendResponse")
}

func init() {
	proto.RegisterFile("v1/hook.proto", fileDescriptor_581082325ef044c1)
}

var fileDescriptor_581082325ef044c1 = []byte{
	// 477 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4d, 0x6f, 0xd3, 0x40,
	0x10, 0x25, 0xb1, 0xeb, 0xd8, 0xe3, 0xb6, 0x54, 0xab, 0x0a, 0x96, 0xa8, 0x42, 0x96, 0x0f, 0xc8,
	0x52, 0x25, 0x07, 0x07, 0x0e, 0x28, 0x5c, 0xa8, 0x54, 0x50, 0x39, 0x20, 0xaa, 0x4d, 0x11, 0xe7,
	0x2d, 0xde, 0x24, 0x56, 0x1c, 0xaf, 0xbb, 0x5e, 0x5b, 0x0a, 0x3f, 0x8a, 0xdf, 0x88, 0xf6, 0xc3,
	0x8d, 0xe9, 0x8d, 0x93, 0x67, 0xde, 0xbc, 0x79, 0xf3, 0x3c, 0x3b, 0x70, 0xd2, 0x65, 0xb3, 0x0d,
	0xe7, 0xdb, 0xb4, 0x16, 0x5c, 0x72, 0x34, 0xee, 0xb2, 0xe9, 0xab, 0x35, 0xe7, 0xeb, 0x92, 0xcd,
	0x34, 0x72, 0xdf, 0xae, 0x66, 0xb4, 0xda, 0x9b, 0x72, 0xfc, 0xc7, 0x01, 0xef, 0x47, 0x5d, 0x72,
	0x9a, 0xa3, 0x53, 0x18, 0x17, 0x39, 0x1e, 0x45, 0xa3, 0x24, 0x20, 0xe3, 0x22, 0x47, 0x08, 0xdc,
	0x65, 0xf1, 0x9b, 0xe1, 0x71, 0x34, 0x4a, 0x1c, 0xa2, 0x63, 0xf4, 0x06, 0x4e, 0xd5, 0xf7, 0x6b,
	0x73, 0xcd, 0x56, 0x4c, 0x08, 0x96, 0x63, 0x27, 0x1a, 0x25, 0x3e, 0x79, 0x82, 0xa2, 0x17, 0xe0,
	0x7d, 0x5f, 0xad, 0x1a, 0x26, 0xb1, 0xab, 0xbb, 0x6d, 0x86, 0xde, 0x83, 0xbf, 0x63, 0x92, 0x5e,
	0x53, 0x49, 0xf1, 0x51, 0xe4, 0x24, 0xe1, 0x1c, 0xa7, 0x5d, 0x96, 0x1a, 0x07, 0xe9, 0x37, 0x5b,
	0xfa, 0x5c, 0x49, 0xb1, 0x27, 0x8f, 0x4c, 0x74, 0x01, 0x41, 0xd1, 0xdc, 0x52, 0x21, 0x0b, 0x5a,
	0x62, 0x4f, 0x0f, 0x3c, 0x00, 0x08, 0xc3, 0xa4, 0x68, 0xbe, 0x14, 0x15, 0x2d, 0xf1, 0x44, 0xd7,
	0xfa, 0x54, 0xb9, 0xad, 0x0d, 0xc9, 0x0c, 0x68, 0xb0, 0x1f, 0x39, 0x49, 0x40, 0x9e, 0xa0, 0x28,
	0x83, 0x49, 0x23, 0xb9, 0xa0, 0x6b, 0x86, 0x03, 0x6d, 0xea, 0xe5, 0xc0, 0xd4, 0xd2, 0x54, 0x8c,
	0xa7, 0x9e, 0x37, 0xfd, 0x08, 0x27, 0xff, 0xb8, 0x45, 0x67, 0xe0, 0x6c, 0xd9, 0xde, 0xae, 0x4f,
	0x85, 0xe8, 0x1c, 0x8e, 0x3a, 0x5a, 0xb6, 0x66, 0x81, 0x01, 0x31, 0xc9, 0x62, 0xfc, 0x61, 0x34,
	0x5d, 0xc0, 0xf1, 0x50, 0xf5, 0x7f, 0x7a, 0xe3, 0x9f, 0x10, 0xde, 0xdc, 0xdd, 0xdd, 0x12, 0xf6,
	0xd0, 0xb2, 0x46, 0xaa, 0x45, 0xef, 0x98, 0xdc, 0xf0, 0xfe, 0xe1, 0x6c, 0xa6, 0x24, 0x5b, 0x51,
	0xd8, 0x76, 0x15, 0xa2, 0xd7, 0x00, 0x82, 0xed, 0xb8, 0x64, 0x57, 0x79, 0x2e, 0xf4, 0xb3, 0x05,
	0x64, 0x80, 0xc4, 0x0f, 0xe0, 0xde, 0x70, 0xbe, 0x45, 0x31, 0x78, 0xad, 0xfe, 0x73, 0xad, 0x18,
	0xce, 0xe1, 0xb0, 0x0b, 0x62, 0x2b, 0x28, 0x83, 0x70, 0x23, 0x65, 0x6d, 0x4d, 0xe8, 0x29, 0xe1,
	0xfc, 0xb9, 0x22, 0x0e, 0xbc, 0x91, 0x21, 0x47, 0x5d, 0x53, 0x45, 0x77, 0xcc, 0x0e, 0xd6, 0x71,
	0x7c, 0x09, 0xe1, 0x92, 0x55, 0x79, 0x4f, 0xb9, 0x00, 0x57, 0x1d, 0xae, 0x9d, 0xeb, 0x6b, 0x39,
	0xce, 0xb7, 0x44, 0xa3, 0xf1, 0x27, 0x38, 0x36, 0xe4, 0xa6, 0xe6, 0x55, 0xc3, 0xd0, 0x5b, 0xf0,
	0x85, 0x8d, 0x6d, 0xc7, 0x79, 0x6a, 0xee, 0x3c, 0xed, 0xef, 0x3c, 0xbd, 0xaa, 0xf6, 0xe4, 0x91,
	0x35, 0x5f, 0x40, 0xa8, 0xf4, 0x96, 0x4c, 0x74, 0xc5, 0x2f, 0x86, 0x2e, 0xc1, 0x55, 0x82, 0x48,
	0xfb, 0x1e, 0xf8, 0x98, 0x9e, 0x1d, 0x00, 0xd3, 0x19, 0x3f, 0xbb, 0xf7, 0xb4, 0xe6, 0xbb, 0xbf,
	0x01, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xd4, 0x14, 0x0d, 0x5e, 0x03, 0x00, 0x00,
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6

// HookServiceClient is the client API for HookService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HookServiceClient interface {
	// Sends a hook
	Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error)
}

type hookServiceClient struct {
	cc grpc.ClientConnInterface
}

func NewHookServiceClient(cc grpc.ClientConnInterface) HookServiceClient {
	return &hookServiceClient{cc}
}

func (c *hookServiceClient) Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) {
	out := new(SendResponse)
	err := c.cc.Invoke(ctx, "/v1.HookService/Send", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// HookServiceServer is the server API for HookService service.
type HookServiceServer interface {
	// Sends a hook
	Send(context.Context, *SendRequest) (*SendResponse, error)
}

// UnimplementedHookServiceServer can be embedded to have forward compatible implementations.
type UnimplementedHookServiceServer struct {
}

func (*UnimplementedHookServiceServer) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
}

func RegisterHookServiceServer(s *grpc.Server, srv HookServiceServer) {
	s.RegisterService(&_HookService_serviceDesc, srv)
}

func _HookService_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SendRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(HookServiceServer).Send(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/v1.HookService/Send",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(HookServiceServer).Send(ctx, req.(*SendRequest))
	}
	return interceptor(ctx, in, info, handler)
}

var _HookService_serviceDesc = grpc.ServiceDesc{
	ServiceName: "v1.HookService",
	HandlerType: (*HookServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Send",
			Handler:    _HookService_Send_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "v1/hook.proto",
}
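The generated client and server stubs above are what a gRPC hook backend builds against. The following is only a rough sketch of the server side: the listener address, the empty Any response, and the import path aliased as pb are assumptions for illustration, not part of this file.

package main

import (
	"context"
	"log"
	"net"

	"github.com/golang/protobuf/ptypes/any"
	"google.golang.org/grpc"

	// Assumed import path for the generated package shown above.
	pb "github.com/tus/tusd/pkg/proto/v1"
)

type hookServer struct{}

// Send receives every hook tusd fires and can inspect the upload metadata.
func (hookServer) Send(ctx context.Context, req *pb.SendRequest) (*pb.SendResponse, error) {
	hook := req.GetHook()
	log.Printf("hook %q for upload %q (offset %d of %d)",
		hook.GetName(), hook.GetUpload().GetId(), hook.GetUpload().GetOffset(), hook.GetUpload().GetSize())

	// Returning an empty response lets the upload proceed.
	return &pb.SendResponse{Response: &any.Any{}}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8080") // address is arbitrary
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	pb.RegisterHookServiceServer(srv, hookServer{})
	log.Fatal(srv.Serve(lis))
}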
@@ -46,6 +46,7 @@ func TestCalcOptimalPartSize(t *testing.T) {
 	/*
 		store.MinPartSize = 2
 		store.MaxPartSize = 10
+		store.PreferredPartSize = 5
 		store.MaxMultipartParts = 20
 		store.MaxObjectSize = 200
 	*/
@@ -67,6 +68,11 @@ func TestCalcOptimalPartSize(t *testing.T) {
 	testcases := []int64{
 		0,
 		1,
+
+		store.PreferredPartSize - 1,
+		store.PreferredPartSize,
+		store.PreferredPartSize + 1,
+
 		store.MinPartSize - 1,
 		store.MinPartSize,
 		store.MinPartSize + 1,
@@ -136,6 +142,7 @@ func TestCalcOptimalPartSize_AllUploadSizes(t *testing.T) {

 	store.MinPartSize = 5
 	store.MaxPartSize = 5 * 1024
+	store.PreferredPartSize = 10
 	store.MaxMultipartParts = 1000
 	store.MaxObjectSize = store.MaxPartSize * store.MaxMultipartParts

@@ -76,10 +76,12 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"os"
 	"regexp"
 	"strings"
 	"sync"
+	"time"

 	"github.com/tus/tusd/internal/uid"
 	"github.com/tus/tusd/pkg/handler"
@@ -90,21 +92,22 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 )

-// This regular expression matches every character which is not defined in the
-// ASCII tables which range from 00 to 7F, inclusive.
-// It also matches the \r and \n characters which are not allowed in values
-// for HTTP headers.
-var nonASCIIRegexp = regexp.MustCompile(`([^\x00-\x7F]|[\r\n])`)
+// This regular expression matches every character which is not
+// considered valid into a header value according to RFC2616.
+var nonPrintableRegexp = regexp.MustCompile(`[^\x09\x20-\x7E]`)

 // See the handler.DataStore interface for documentation about the different
 // methods.
 type S3Store struct {
 	// Bucket used to store the data in, e.g. "tusdstore.example.com"
 	Bucket string
-	// ObjectPrefix is prepended to the name of each S3 object that is created.
-	// It can be used to create a pseudo-directory structure in the bucket,
-	// e.g. "path/to/my/uploads".
+	// ObjectPrefix is prepended to the name of each S3 object that is created
+	// to store uploaded files. It can be used to create a pseudo-directory
+	// structure in the bucket, e.g. "path/to/my/uploads".
 	ObjectPrefix string
+	// MetadataObjectPrefix is prepended to the name of each .info and .part S3
+	// object that is created. If it is not set, then ObjectPrefix is used.
+	MetadataObjectPrefix string
 	// Service specifies an interface used to communicate with the S3 backend.
 	// Usually, this is an instance of github.com/aws/aws-sdk-go/service/s3.S3
 	// (http://docs.aws.amazon.com/sdk-for-go/api/service/s3/S3.html).
@@ -125,6 +128,12 @@ type S3Store struct {
 	// in bytes. This number needs to match with the underlying S3 backend or else
 	// uploaded parts will be reject. AWS S3, for example, uses 5MB for this value.
 	MinPartSize int64
+	// PreferredPartSize specifies the preferred size of a single part uploaded to
+	// S3. S3Store will attempt to slice the incoming data into parts with this
+	// size whenever possible. In some cases, smaller parts are necessary, so
+	// not every part may reach this value. The PreferredPartSize must be inside the
+	// range of MinPartSize to MaxPartSize.
+	PreferredPartSize int64
 	// MaxMultipartParts is the maximum number of parts an S3 multipart upload is
 	// allowed to have according to AWS S3 API specifications.
 	// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
@@ -132,6 +141,21 @@ type S3Store struct {
 	// MaxObjectSize is the maximum size an S3 Object can have according to S3
 	// API specifications. See link above.
 	MaxObjectSize int64
+	// MaxBufferedParts is the number of additional parts that can be received from
+	// the client and stored on disk while a part is being uploaded to S3. This
+	// can help improve throughput by not blocking the client while tusd is
+	// communicating with the S3 API, which can have unpredictable latency.
+	MaxBufferedParts int64
+	// TemporaryDirectory is the path where S3Store will create temporary files
+	// on disk during the upload. An empty string ("", the default value) will
+	// cause S3Store to use the operating system's default temporary directory.
+	TemporaryDirectory string
+	// DisableContentHashes instructs the S3Store to not calculate the MD5 and SHA256
+	// hashes when uploading data to S3. These hashes are used for file integrity checks
+	// and for authentication. However, these hashes also consume a significant amount of
+	// CPU, so it might be desirable to disable them.
+	// Note that this property is experimental and might be removed in the future!
+	DisableContentHashes bool
 }

 type S3API interface {
@@ -147,15 +171,22 @@ type S3API interface {
 	UploadPartCopyWithContext(ctx context.Context, input *s3.UploadPartCopyInput, opt ...request.Option) (*s3.UploadPartCopyOutput, error)
 }

+type s3APIForPresigning interface {
+	UploadPartRequest(input *s3.UploadPartInput) (req *request.Request, output *s3.UploadPartOutput)
+}
+
 // New constructs a new storage using the supplied bucket and service object.
 func New(bucket string, service S3API) S3Store {
 	return S3Store{
 		Bucket:            bucket,
 		Service:           service,
 		MaxPartSize:       5 * 1024 * 1024 * 1024,
 		MinPartSize:       5 * 1024 * 1024,
-		MaxMultipartParts: 10000,
-		MaxObjectSize:     5 * 1024 * 1024 * 1024 * 1024,
+		PreferredPartSize: 50 * 1024 * 1024,
+		MaxMultipartParts: 10000,
+		MaxObjectSize:     5 * 1024 * 1024 * 1024 * 1024,
+		MaxBufferedParts:  20,
+		TemporaryDirectory: "",
 	}
 }

|
||||||
for key, value := range info.MetaData {
|
for key, value := range info.MetaData {
|
||||||
// Copying the value is required in order to prevent it from being
|
// Copying the value is required in order to prevent it from being
|
||||||
// overwritten by the next iteration.
|
// overwritten by the next iteration.
|
||||||
v := nonASCIIRegexp.ReplaceAllString(value, "?")
|
v := nonPrintableRegexp.ReplaceAllString(value, "?")
|
||||||
metadata[key] = &v
|
metadata[key] = &v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -261,7 +292,7 @@ func (upload *s3Upload) writeInfo(ctx context.Context, info handler.FileInfo) er
|
||||||
// Create object on S3 containing information about the file
|
// Create object on S3 containing information about the file
|
||||||
_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
|
||||||
Bucket: aws.String(store.Bucket),
|
Bucket: aws.String(store.Bucket),
|
||||||
Key: store.keyWithPrefix(uploadId + ".info"),
|
Key: store.metadataKeyWithPrefix(uploadId + ".info"),
|
||||||
Body: bytes.NewReader(infoJson),
|
Body: bytes.NewReader(infoJson),
|
||||||
ContentLength: aws.Int64(int64(len(infoJson))),
|
ContentLength: aws.Int64(int64(len(infoJson))),
|
||||||
})
|
})
|
||||||
|
@ -302,8 +333,7 @@ func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Read
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
if incompletePartFile != nil {
|
if incompletePartFile != nil {
|
||||||
defer os.Remove(incompletePartFile.Name())
|
defer cleanUpTempFile(incompletePartFile)
|
||||||
defer incompletePartFile.Close()
|
|
||||||
|
|
||||||
if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
|
if err := store.deleteIncompletePartForUpload(ctx, uploadId); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
|
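The new nonPrintableRegexp keeps only horizontal tabs and printable ASCII (0x09, 0x20 through 0x7E) in metadata values before they are sent as S3 object metadata; everything else becomes "?". A tiny, self-contained illustration of that behaviour — the sample values are made up, but "menü" becoming "men?" matches what the new tests further below expect:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as nonPrintableRegexp in the diff above.
var nonPrintable = regexp.MustCompile(`[^\x09\x20-\x7E]`)

func main() {
	// "menü" contains a non-ASCII rune, "a\r\nb" contains CR/LF.
	for _, v := range []string{"menü", "a\r\nb", "plain value"} {
		fmt.Printf("%q -> %q\n", v, nonPrintable.ReplaceAllString(v, "?"))
	}
}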
@@ -312,49 +342,42 @@ func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Read
 		src = io.MultiReader(incompletePartFile, src)
 	}

-	for {
-		// Create a temporary file to store the part in it
-		file, err := ioutil.TempFile("", "tusd-s3-tmp-")
+	fileChan := make(chan *os.File, store.MaxBufferedParts)
+	doneChan := make(chan struct{})
+	defer close(doneChan)
+
+	// If we panic or return while there are still files in the channel, then
+	// we may leak file descriptors. Let's ensure that those are cleaned up.
+	defer func() {
+		for file := range fileChan {
+			cleanUpTempFile(file)
+		}
+	}()
+
+	partProducer := s3PartProducer{
+		store: store,
+		done:  doneChan,
+		files: fileChan,
+		r:     src,
+	}
+	go partProducer.produce(optimalPartSize)
+
+	for file := range fileChan {
+		stat, err := file.Stat()
 		if err != nil {
-			return bytesUploaded, err
+			return 0, err
 		}
-		defer os.Remove(file.Name())
-		defer file.Close()
-
-		limitedReader := io.LimitReader(src, optimalPartSize)
-		n, err := io.Copy(file, limitedReader)
-
-		// If the HTTP PATCH request gets interrupted in the middle (e.g. because
-		// the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF.
-		// However, for S3Store it's not important whether the stream has ended
-		// on purpose or accidentally. Therefore, we ignore this error to not
-		// prevent the remaining chunk to be stored on S3.
-		if err == io.ErrUnexpectedEOF {
-			err = nil
-		}
-
-		// io.Copy does not return io.EOF, so we not have to handle it differently.
-		if err != nil {
-			return bytesUploaded, err
-		}
-		// If io.Copy is finished reading, it will always return (0, nil).
-		if n == 0 {
-			return (bytesUploaded - incompletePartSize), nil
-		}
-
-		// Seek to the beginning of the file
-		file.Seek(0, 0)
+		n := stat.Size()

 		isFinalChunk := !info.SizeIsDeferred && (size == (offset-incompletePartSize)+n)
 		if n >= store.MinPartSize || isFinalChunk {
-			_, err = store.Service.UploadPartWithContext(ctx, &s3.UploadPartInput{
+			uploadPartInput := &s3.UploadPartInput{
 				Bucket:     aws.String(store.Bucket),
 				Key:        store.keyWithPrefix(uploadId),
 				UploadId:   aws.String(multipartId),
 				PartNumber: aws.Int64(nextPartNum),
-				Body:       file,
-			})
-			if err != nil {
+			}
+			if err := upload.putPartForUpload(ctx, uploadPartInput, file, n); err != nil {
 				return bytesUploaded, err
 			}
 		} else {
@@ -371,6 +394,63 @@ func (upload s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Read
 		bytesUploaded += n
 		nextPartNum += 1
 	}
+
+	return bytesUploaded - incompletePartSize, partProducer.err
+}
+
+func cleanUpTempFile(file *os.File) {
+	file.Close()
+	os.Remove(file.Name())
+}
+
+func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s3.UploadPartInput, file *os.File, size int64) error {
+	defer cleanUpTempFile(file)
+
+	if !upload.store.DisableContentHashes {
+		// By default, use the traditional approach to upload data
+		uploadPartInput.Body = file
+		_, err := upload.store.Service.UploadPartWithContext(ctx, uploadPartInput)
+		return err
+	} else {
+		// Experimental feature to prevent the AWS SDK from calculating the SHA256 hash
+		// for the parts we upload to S3.
+		// We compute the presigned URL without the body attached and then send the request
+		// on our own. This way, the body is not included in the SHA256 calculation.
+		s3api, ok := upload.store.Service.(s3APIForPresigning)
+		if !ok {
+			return fmt.Errorf("s3store: failed to cast S3 service for presigning")
+		}
+
+		s3Req, _ := s3api.UploadPartRequest(uploadPartInput)
+
+		url, err := s3Req.Presign(15 * time.Minute)
+		if err != nil {
+			return err
+		}
+
+		req, err := http.NewRequest("PUT", url, file)
+		if err != nil {
+			return err
+		}
+
+		// Set the Content-Length manually to prevent the usage of Transfer-Encoding: chunked,
+		// which is not supported by AWS S3.
+		req.ContentLength = size
+
+		res, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return err
+		}
+		defer res.Body.Close()
+
+		if res.StatusCode != 200 {
+			buf := new(strings.Builder)
+			io.Copy(buf, res.Body)
+			return fmt.Errorf("s3store: unexpected response code %d for presigned upload: %s", res.StatusCode, buf.String())
+		}
+
+		return nil
+	}
 }

 func (upload *s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
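For context on the DisableContentHashes branch added above: the store presigns an UploadPart request and performs the PUT itself, so the SDK never hashes the part body. Opting in is a single assignment on an existing store, but note the type assertion in the code: it only succeeds when Service is the real *s3.S3 client (which implements UploadPartRequest), not a custom S3API wrapper. A minimal, hypothetical sketch:

// Experimental, per the comment in the S3Store struct above.
// Only works if store.Service is an *s3.S3; a plain S3API mock will fail the cast.
store.DisableContentHashes = true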
@@ -395,7 +475,7 @@ func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, er
 	// Get file info stored in separate object
 	res, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
 		Bucket: aws.String(store.Bucket),
-		Key:    store.keyWithPrefix(uploadId + ".info"),
+		Key:    store.metadataKeyWithPrefix(uploadId + ".info"),
 	})
 	if err != nil {
 		if isAwsError(err, "NoSuchKey") {
@@ -416,7 +496,9 @@ func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, er
 	// when the multipart upload has already been completed or aborted. Since
 	// we already found the info object, we know that the upload has been
 	// completed and therefore can ensure the the offset is the size.
-	if isAwsError(err, "NoSuchUpload") {
+	// AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean
+	// Spaces, can also return NoSuchKey.
+	if isAwsError(err, "NoSuchUpload") || isAwsError(err, "NoSuchKey") {
 		info.Offset = info.Size
 		return info, nil
 	} else {
@@ -476,7 +558,7 @@ func (upload s3Upload) GetReader(ctx context.Context) (io.Reader, error) {
 	})
 	if err == nil {
 		// The multipart upload still exists, which means we cannot download it yet
-		return nil, errors.New("cannot stream non-finished upload")
+		return nil, handler.NewHTTPError(errors.New("cannot stream non-finished upload"), http.StatusBadRequest)
 	}

 	if isAwsError(err, "NoSuchUpload") {
@@ -521,10 +603,10 @@ func (upload s3Upload) Terminate(ctx context.Context) error {
 				Key: store.keyWithPrefix(uploadId),
 			},
 			{
-				Key: store.keyWithPrefix(uploadId + ".part"),
+				Key: store.metadataKeyWithPrefix(uploadId + ".part"),
 			},
 			{
-				Key: store.keyWithPrefix(uploadId + ".info"),
+				Key: store.metadataKeyWithPrefix(uploadId + ".info"),
 			},
 		},
 		Quiet: aws.Bool(true),
@@ -563,6 +645,30 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
 		return err
 	}

+	if len(parts) == 0 {
+		// AWS expects at least one part to be present when completing the multipart
+		// upload. So if the tus upload has a size of 0, we create an empty part
+		// and use that for completing the multipart upload.
+		res, err := store.Service.UploadPartWithContext(ctx, &s3.UploadPartInput{
+			Bucket:     aws.String(store.Bucket),
+			Key:        store.keyWithPrefix(uploadId),
+			UploadId:   aws.String(multipartId),
+			PartNumber: aws.Int64(1),
+			Body:       bytes.NewReader([]byte{}),
+		})
+		if err != nil {
+			return err
+		}
+
+		parts = []*s3.Part{
+			&s3.Part{
+				ETag:       res.ETag,
+				PartNumber: aws.Int64(1),
+			},
+		}
+
+	}
+
 	// Transform the []*s3.Part slice to a []*s3.CompletedPart slice for the next
 	// request.
 	completedParts := make([]*s3.CompletedPart, len(parts))
@@ -587,6 +693,89 @@ func (upload s3Upload) FinishUpload(ctx context.Context) error {
 }

 func (upload *s3Upload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
+	hasSmallPart := false
+	for _, partialUpload := range partialUploads {
+		info, err := partialUpload.GetInfo(ctx)
+		if err != nil {
+			return err
+		}
+
+		if info.Size < upload.store.MinPartSize {
+			hasSmallPart = true
+		}
+	}
+
+	// If one partial upload is smaller than the the minimum part size for an S3
+	// Multipart Upload, we cannot use S3 Multipart Uploads for concatenating all
+	// the files.
+	// So instead we have to download them and concat them on disk.
+	if hasSmallPart {
+		return upload.concatUsingDownload(ctx, partialUploads)
+	} else {
+		return upload.concatUsingMultipart(ctx, partialUploads)
+	}
+}
+
+func (upload *s3Upload) concatUsingDownload(ctx context.Context, partialUploads []handler.Upload) error {
+	id := upload.id
+	store := upload.store
+	uploadId, multipartId := splitIds(id)
+
+	// Create a temporary file for holding the concatenated data
+	file, err := ioutil.TempFile(store.TemporaryDirectory, "tusd-s3-concat-tmp-")
+	if err != nil {
+		return err
+	}
+	defer cleanUpTempFile(file)
+
+	// Download each part and append it to the temporary file
+	for _, partialUpload := range partialUploads {
+		partialS3Upload := partialUpload.(*s3Upload)
+		partialId, _ := splitIds(partialS3Upload.id)
+
+		res, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
+			Bucket: aws.String(store.Bucket),
+			Key:    store.keyWithPrefix(partialId),
+		})
+		if err != nil {
+			return err
+		}
+		defer res.Body.Close()
+
+		if _, err := io.Copy(file, res.Body); err != nil {
+			return err
+		}
+	}
+
+	// Seek to the beginning of the file, so the entire file is being uploaded
+	file.Seek(0, 0)
+
+	// Upload the entire file to S3
+	_, err = store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
+		Bucket: aws.String(store.Bucket),
+		Key:    store.keyWithPrefix(uploadId),
+		Body:   file,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Finally, abort the multipart upload since it will no longer be used.
+	// This happens asynchronously since we do not need to wait for the result.
+	// Also, the error is ignored on purpose as it does not change the outcome of
+	// the request.
+	go func() {
+		store.Service.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
+			Bucket:   aws.String(store.Bucket),
+			Key:      store.keyWithPrefix(uploadId),
+			UploadId: aws.String(multipartId),
+		})
+	}()
+
+	return nil
+}
+
+func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads []handler.Upload) error {
 	id := upload.id
 	store := upload.store
 	uploadId, multipartId := splitIds(id)
@@ -611,7 +800,7 @@ func (upload *s3Upload) ConcatUploads(ctx context.Context, partialUploads []hand
 			// Part numbers must be in the range of 1 to 10000, inclusive. Since
 			// slice indexes start at 0, we add 1 to ensure that i >= 1.
 			PartNumber: aws.Int64(int64(i + 1)),
-			CopySource: aws.String(store.Bucket + "/" + partialId),
+			CopySource: aws.String(store.Bucket + "/" + *store.keyWithPrefix(partialId)),
 		})
 		if err != nil {
 			errs = append(errs, err)
@@ -629,7 +818,7 @@ func (upload *s3Upload) ConcatUploads(ctx context.Context, partialUploads []hand
 	return upload.FinishUpload(ctx)
 }

-func (upload s3Upload) DeclareLength(ctx context.Context, length int64) error {
+func (upload *s3Upload) DeclareLength(ctx context.Context, length int64) error {
 	info, err := upload.GetInfo(ctx)
 	if err != nil {
 		return err
@@ -678,7 +867,7 @@ func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, upload
 	}
 	defer incompleteUploadObject.Body.Close()

-	partFile, err := ioutil.TempFile("", "tusd-s3-tmp-")
+	partFile, err := ioutil.TempFile(store.TemporaryDirectory, "tusd-s3-tmp-")
 	if err != nil {
 		return nil, 0, err
 	}
@@ -702,7 +891,7 @@ func (store S3Store) downloadIncompletePartForUpload(ctx context.Context, upload
 func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId string) (*s3.GetObjectOutput, error) {
 	obj, err := store.Service.GetObjectWithContext(ctx, &s3.GetObjectInput{
 		Bucket: aws.String(store.Bucket),
-		Key:    store.keyWithPrefix(uploadId + ".part"),
+		Key:    store.metadataKeyWithPrefix(uploadId + ".part"),
 	})

 	if err != nil && (isAwsError(err, s3.ErrCodeNoSuchKey) || isAwsError(err, "NotFound") || isAwsError(err, "AccessDenied")) {
@@ -712,11 +901,13 @@ func (store S3Store) getIncompletePartForUpload(ctx context.Context, uploadId st
 	return obj, err
 }

-func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, r io.ReadSeeker) error {
+func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId string, file *os.File) error {
+	defer cleanUpTempFile(file)
+
 	_, err := store.Service.PutObjectWithContext(ctx, &s3.PutObjectInput{
 		Bucket: aws.String(store.Bucket),
-		Key:    store.keyWithPrefix(uploadId + ".part"),
-		Body:   r,
+		Key:    store.metadataKeyWithPrefix(uploadId + ".part"),
+		Body:   file,
 	})
 	return err
 }
@@ -724,7 +915,7 @@ func (store S3Store) putIncompletePartForUpload(ctx context.Context, uploadId st
 func (store S3Store) deleteIncompletePartForUpload(ctx context.Context, uploadId string) error {
 	_, err := store.Service.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
 		Bucket: aws.String(store.Bucket),
-		Key:    store.keyWithPrefix(uploadId + ".part"),
+		Key:    store.metadataKeyWithPrefix(uploadId + ".part"),
 	})
 	return err
 }
@@ -751,12 +942,12 @@ func isAwsError(err error, code string) bool {

 func (store S3Store) calcOptimalPartSize(size int64) (optimalPartSize int64, err error) {
 	switch {
-	// When upload is smaller or equal MinPartSize, we upload in just one part.
-	case size <= store.MinPartSize:
-		optimalPartSize = store.MinPartSize
-	// Does the upload fit in MaxMultipartParts parts or less with MinPartSize.
-	case size <= store.MinPartSize*store.MaxMultipartParts:
-		optimalPartSize = store.MinPartSize
+	// When upload is smaller or equal to PreferredPartSize, we upload in just one part.
+	case size <= store.PreferredPartSize:
+		optimalPartSize = store.PreferredPartSize
+	// Does the upload fit in MaxMultipartParts parts or less with PreferredPartSize.
+	case size <= store.PreferredPartSize*store.MaxMultipartParts:
+		optimalPartSize = store.PreferredPartSize
 	// Prerequisite: Be aware, that the result of an integer division (x/y) is
 	// ALWAYS rounded DOWN, as there are no digits behind the comma.
 	// In order to find out, whether we have an exact result or a rounded down
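With the defaults New sets earlier in this diff (PreferredPartSize of 50 MiB, MaxPartSize of 5 GiB, MaxMultipartParts of 10000), the two branches shown above work out roughly as follows; the numbers are only an illustration of the visible cases, not additional behaviour:

	50 MiB x 1     = 50 MiB   -> uploads up to 50 MiB use a single preferred-size part
	50 MiB x 10000 ~ 488 GiB  -> uploads up to roughly this size keep the preferred 50 MiB part size
	beyond that, the part size is derived from size / MaxMultipartParts (rounded up per the
	integer-division note above), bounded by MaxPartSize and MaxObjectSize.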
@@ -801,3 +992,15 @@ func (store S3Store) keyWithPrefix(key string) *string {

 	return aws.String(prefix + key)
 }
+
+func (store S3Store) metadataKeyWithPrefix(key string) *string {
+	prefix := store.MetadataObjectPrefix
+	if prefix == "" {
+		prefix = store.ObjectPrefix
+	}
+	if prefix != "" && !strings.HasSuffix(prefix, "/") {
+		prefix += "/"
+	}
+
+	return aws.String(prefix + key)
+}
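To make the new metadataKeyWithPrefix concrete: for a hypothetical upload id "abc" and the prefixes used in the tests further below (ObjectPrefix "my/uploaded/files", MetadataObjectPrefix "my/metadata"), the object keys resolve as follows; the id itself is made up for illustration:

	data object:     my/uploaded/files/abc
	info object:     my/metadata/abc.info
	incomplete part: my/metadata/abc.part

If MetadataObjectPrefix is left empty, the .info and .part keys fall back to ObjectPrefix, which matches the previous behaviour.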
@@ -0,0 +1,64 @@
package s3store

import (
	"io"
	"io/ioutil"
	"os"
)

// s3PartProducer converts a stream of bytes from the reader into a stream of files on disk
type s3PartProducer struct {
	store *S3Store
	files chan<- *os.File
	done  chan struct{}
	err   error
	r     io.Reader
}

func (spp *s3PartProducer) produce(partSize int64) {
	for {
		file, err := spp.nextPart(partSize)
		if err != nil {
			spp.err = err
			close(spp.files)
			return
		}
		if file == nil {
			close(spp.files)
			return
		}
		select {
		case spp.files <- file:
		case <-spp.done:
			close(spp.files)
			return
		}
	}
}

func (spp *s3PartProducer) nextPart(size int64) (*os.File, error) {
	// Create a temporary file to store the part
	file, err := ioutil.TempFile(spp.store.TemporaryDirectory, "tusd-s3-tmp-")
	if err != nil {
		return nil, err
	}

	limitedReader := io.LimitReader(spp.r, size)
	n, err := io.Copy(file, limitedReader)
	if err != nil {
		return nil, err
	}

	// If the entire request body is read and no more data is available,
	// io.Copy returns 0 since it is unable to read any bytes. In that
	// case, we can close the s3PartProducer.
	if n == 0 {
		cleanUpTempFile(file)
		return nil, nil
	}

	// Seek to the beginning of the file
	file.Seek(0, 0)

	return file, nil
}
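The only consumer of s3PartProducer is the WriteChunk rewrite earlier in this diff. The sketch below isolates that consumption pattern inside package s3store; consumeParts and processPart are hypothetical names, processPart is assumed not to clean up the file itself, and the defer ordering is chosen so that closing doneChan (which runs first) stops the producer before the draining defer waits for fileChan to be closed.

// Hypothetical helper, mirroring how WriteChunk drives the producer.
func consumeParts(store *S3Store, src io.Reader, partSize int64, processPart func(*os.File) error) error {
	fileChan := make(chan *os.File, store.MaxBufferedParts)
	doneChan := make(chan struct{})

	// Runs last: remove any temp files still buffered in the channel.
	defer func() {
		for file := range fileChan {
			cleanUpTempFile(file)
		}
	}()
	// Runs first: tell the producer to stop and close fileChan.
	defer close(doneChan)

	producer := s3PartProducer{store: store, done: doneChan, files: fileChan, r: src}
	go producer.produce(partSize)

	for file := range fileChan {
		err := processPart(file)
		cleanUpTempFile(file)
		if err != nil {
			return err
		}
	}

	// produce records a read error here before closing fileChan.
	return producer.err
}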
@@ -0,0 +1,159 @@
package s3store

import (
	"errors"
	"os"
	"strings"
	"testing"
	"time"
)

type InfiniteZeroReader struct{}

func (izr InfiniteZeroReader) Read(b []byte) (int, error) {
	b[0] = 0
	return 1, nil
}

type ErrorReader struct{}

func (ErrorReader) Read(b []byte) (int, error) {
	return 0, errors.New("error from ErrorReader")
}

func TestPartProducerConsumesEntireReaderWithoutError(t *testing.T) {
	fileChan := make(chan *os.File)
	doneChan := make(chan struct{})
	expectedStr := "test"
	r := strings.NewReader(expectedStr)
	pp := s3PartProducer{
		store: &S3Store{},
		done:  doneChan,
		files: fileChan,
		r:     r,
	}
	go pp.produce(1)

	actualStr := ""
	b := make([]byte, 1)
	for f := range fileChan {
		n, err := f.Read(b)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if n != 1 {
			t.Fatalf("incorrect number of bytes read: wanted %d, got %d", 1, n)
		}
		actualStr += string(b)

		os.Remove(f.Name())
		f.Close()
	}

	if actualStr != expectedStr {
		t.Errorf("incorrect string read from channel: wanted %s, got %s", expectedStr, actualStr)
	}

	if pp.err != nil {
		t.Errorf("unexpected error from part producer: %s", pp.err)
	}
}

func TestPartProducerExitsWhenDoneChannelIsClosed(t *testing.T) {
	fileChan := make(chan *os.File)
	doneChan := make(chan struct{})
	pp := s3PartProducer{
		store: &S3Store{},
		done:  doneChan,
		files: fileChan,
		r:     InfiniteZeroReader{},
	}

	completedChan := make(chan struct{})
	go func() {
		pp.produce(10)
		completedChan <- struct{}{}
	}()

	close(doneChan)

	select {
	case <-completedChan:
		// producer exited cleanly
	case <-time.After(2 * time.Second):
		t.Error("timed out waiting for producer to exit")
	}

	safelyDrainChannelOrFail(fileChan, t)
}

func TestPartProducerExitsWhenDoneChannelIsClosedBeforeAnyPartIsSent(t *testing.T) {
	fileChan := make(chan *os.File)
	doneChan := make(chan struct{})
	pp := s3PartProducer{
		store: &S3Store{},
		done:  doneChan,
		files: fileChan,
		r:     InfiniteZeroReader{},
	}

	close(doneChan)

	completedChan := make(chan struct{})
	go func() {
		pp.produce(10)
		completedChan <- struct{}{}
	}()

	select {
	case <-completedChan:
		// producer exited cleanly
	case <-time.After(2 * time.Second):
		t.Error("timed out waiting for producer to exit")
	}

	safelyDrainChannelOrFail(fileChan, t)
}

func TestPartProducerExitsWhenUnableToReadFromFile(t *testing.T) {
	fileChan := make(chan *os.File)
	doneChan := make(chan struct{})
	pp := s3PartProducer{
		store: &S3Store{},
		done:  doneChan,
		files: fileChan,
		r:     ErrorReader{},
	}

	completedChan := make(chan struct{})
	go func() {
		pp.produce(10)
		completedChan <- struct{}{}
	}()

	select {
	case <-completedChan:
		// producer exited cleanly
	case <-time.After(2 * time.Second):
		t.Error("timed out waiting for producer to exit")
	}

	safelyDrainChannelOrFail(fileChan, t)

	if pp.err == nil {
		t.Error("expected an error but didn't get one")
	}
}

func safelyDrainChannelOrFail(c chan *os.File, t *testing.T) {
	// At this point, we've signaled that the producer should exit, but it may write a few files
	// into the channel before closing it and exiting. Make sure that we get a nil value
	// eventually.
	for i := 0; i < 100; i++ {
		if f := <-c; f == nil {
			return
		}
	}

	t.Fatal("timed out waiting for channel to drain")
}
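While iterating on this change, the producer tests above can be run in isolation with the standard Go tooling; the package path below is inferred from the import paths earlier in this diff and may differ:

	go test -run TestPartProducer ./pkg/s3store/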
@ -4,15 +4,17 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/tus/tusd/pkg/handler"
|
"github.com/tus/tusd/pkg/handler"
|
||||||
)
|
)
|
||||||
|
@@ -120,6 +122,121 @@ func TestNewUploadWithObjectPrefix(t *testing.T) {
 	assert.NotNil(upload)
 }
 
+func TestNewUploadWithMetadataObjectPrefix(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	s3obj := NewMockS3API(mockCtrl)
+	store := New("bucket", s3obj)
+	store.ObjectPrefix = "my/uploaded/files"
+	store.MetadataObjectPrefix = "my/metadata"
+
+	assert.Equal("bucket", store.Bucket)
+	assert.Equal(s3obj, store.Service)
+
+	s1 := "hello"
+	s2 := "menü"
+
+	gomock.InOrder(
+		s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("my/uploaded/files/uploadId"),
+			Metadata: map[string]*string{
+				"foo": &s1,
+				"bar": &s2,
+			},
+		}).Return(&s3.CreateMultipartUploadOutput{
+			UploadId: aws.String("multipartId"),
+		}, nil),
+		s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("my/metadata/uploadId.info"),
+			Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"SizeIsDeferred":false,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`)),
+			ContentLength: aws.Int64(int64(253)),
+		}),
+	)
+
+	info := handler.FileInfo{
+		ID: "uploadId",
+		Size: 500,
+		MetaData: map[string]string{
+			"foo": "hello",
+			"bar": "menü",
+		},
+	}
+
+	upload, err := store.NewUpload(context.Background(), info)
+	assert.Nil(err)
+	assert.NotNil(upload)
+}
+
+func TestEmptyUpload(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	s3obj := NewMockS3API(mockCtrl)
+	store := New("bucket", s3obj)
+
+	gomock.InOrder(
+		s3obj.EXPECT().CreateMultipartUploadWithContext(context.Background(), &s3.CreateMultipartUploadInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			Metadata: map[string]*string{},
+		}).Return(&s3.CreateMultipartUploadOutput{
+			UploadId: aws.String("multipartId"),
+		}, nil),
+		s3obj.EXPECT().PutObjectWithContext(context.Background(), &s3.PutObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId.info"),
+			Body: bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":0,"SizeIsDeferred":false,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"uploadId","Type":"s3store"}}`)),
+			ContentLength: aws.Int64(int64(208)),
+		}),
+		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+			PartNumberMarker: aws.Int64(0),
+		}).Return(&s3.ListPartsOutput{
+			Parts: []*s3.Part{},
+		}, nil),
+		s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+			PartNumber: aws.Int64(1),
+			Body: bytes.NewReader([]byte("")),
+		})).Return(&s3.UploadPartOutput{
+			ETag: aws.String("etag"),
+		}, nil),
+		s3obj.EXPECT().CompleteMultipartUploadWithContext(context.Background(), &s3.CompleteMultipartUploadInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+			MultipartUpload: &s3.CompletedMultipartUpload{
+				Parts: []*s3.CompletedPart{
+					{
+						ETag: aws.String("etag"),
+						PartNumber: aws.Int64(1),
+					},
+				},
+			},
+		}).Return(nil, nil),
+	)
+
+	info := handler.FileInfo{
+		ID: "uploadId",
+		Size: 0,
+	}
+
+	upload, err := store.NewUpload(context.Background(), info)
+	assert.Nil(err)
+	assert.NotNil(upload)
+	err = upload.FinishUpload(context.Background())
+	assert.Nil(err)
+}
+
 func TestNewUploadLargerMaxObjectSize(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
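The tests added above exercise the new MetadataObjectPrefix option, which keeps the .info and .part helper objects under a different key prefix than the upload data itself. As a rough sketch of how a server might enable it (the handler wiring below follows the usual tusd composer pattern and is an assumption for illustration, not part of this diff; bucket name and region are placeholders):

package main

import (
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/tus/tusd/pkg/handler"
	"github.com/tus/tusd/pkg/s3store"
)

func main() {
	// Assumption: credentials and region come from the environment/shared config.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))

	store := s3store.New("my-bucket", s3.New(sess))
	store.ObjectPrefix = "my/uploaded/files"   // upload data lives here
	store.MetadataObjectPrefix = "my/metadata" // .info and .part objects live here

	composer := handler.NewStoreComposer()
	store.UseIn(composer)

	h, err := handler.NewHandler(handler.Config{
		BasePath:      "/files/",
		StoreComposer: composer,
	})
	if err != nil {
		panic(err)
	}

	http.Handle("/files/", http.StripPrefix("/files/", h))
	http.ListenAndServe(":8080", nil)
}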
@@ -227,6 +344,72 @@ func TestGetInfo(t *testing.T) {
 	assert.Equal("my/uploaded/files/uploadId", info.Storage["Key"])
 }
 
+func TestGetInfoWithMetadataObjectPrefix(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	s3obj := NewMockS3API(mockCtrl)
+	store := New("bucket", s3obj)
+	store.MetadataObjectPrefix = "my/metadata"
+
+	gomock.InOrder(
+		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("my/metadata/uploadId.info"),
+		}).Return(&s3.GetObjectOutput{
+			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId+multipartId","Size":500,"Offset":0,"MetaData":{"bar":"menü","foo":"hello"},"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":{"Bucket":"bucket","Key":"my/uploaded/files/uploadId","Type":"s3store"}}`))),
+		}, nil),
+		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+			PartNumberMarker: aws.Int64(0),
+		}).Return(&s3.ListPartsOutput{
+			Parts: []*s3.Part{
+				{
+					Size: aws.Int64(100),
+				},
+				{
+					Size: aws.Int64(200),
+				},
+			},
+			NextPartNumberMarker: aws.Int64(2),
+			IsTruncated: aws.Bool(true),
+		}, nil),
+		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+			PartNumberMarker: aws.Int64(2),
+		}).Return(&s3.ListPartsOutput{
+			Parts: []*s3.Part{
+				{
+					Size: aws.Int64(100),
+				},
+			},
+		}, nil),
+		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("my/metadata/uploadId.part"),
+		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
+	)
+
+	upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
+	assert.Nil(err)
+
+	info, err := upload.GetInfo(context.Background())
+	assert.Nil(err)
+	assert.Equal(int64(500), info.Size)
+	assert.Equal(int64(400), info.Offset)
+	assert.Equal("uploadId+multipartId", info.ID)
+	assert.Equal("hello", info.MetaData["foo"])
+	assert.Equal("menü", info.MetaData["bar"])
+	assert.Equal("s3store", info.Storage["Type"])
+	assert.Equal("bucket", info.Storage["Bucket"])
+	assert.Equal("my/uploaded/files/uploadId", info.Storage["Key"])
+}
+
 func TestGetInfoWithIncompletePart(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
@@ -421,6 +604,9 @@ func TestDeclareLength(t *testing.T) {
 
 	err = store.AsLengthDeclarableUpload(upload).DeclareLength(context.Background(), 500)
 	assert.Nil(err)
+	info, err := upload.GetInfo(context.Background())
+	assert.Nil(err)
+	assert.Equal(int64(500), info.Size)
 }
 
 func TestFinishUpload(t *testing.T) {
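The three added assertions check that a declared length becomes visible through GetInfo afterwards. A minimal sketch of that deferred-length flow against the store API used in this hunk (names taken from the test; the helper itself is illustrative only):

// declareAndCheck declares the final size of an upload whose length was
// deferred and reads it back, mirroring the assertions added above.
func declareAndCheck(ctx context.Context, store S3Store, id string) (int64, error) {
	upload, err := store.GetUpload(ctx, id)
	if err != nil {
		return 0, err
	}
	if err := store.AsLengthDeclarableUpload(upload).DeclareLength(ctx, 500); err != nil {
		return 0, err
	}
	info, err := upload.GetInfo(ctx)
	if err != nil {
		return 0, err
	}
	return info.Size, nil
}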
@@ -506,54 +692,41 @@ func TestWriteChunk(t *testing.T) {
 	store := New("bucket", s3obj)
 	store.MaxPartSize = 8
 	store.MinPartSize = 4
+	store.PreferredPartSize = 4
 	store.MaxMultipartParts = 10000
 	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
 
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.info"),
+	}).Return(&s3.GetObjectOutput{
+		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
+	}, nil)
+	s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumberMarker: aws.Int64(0),
+	}).Return(&s3.ListPartsOutput{
+		Parts: []*s3.Part{
+			{
+				Size: aws.Int64(100),
+			},
+			{
+				Size: aws.Int64(200),
+			},
+		},
+	}, nil).Times(2)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil))
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
+
 	gomock.InOrder(
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.info"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(100),
-				},
-				{
-					Size: aws.Int64(200),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(100),
-				},
-				{
-					Size: aws.Int64(200),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
 		s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
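The rewrite above moves the .info, ListParts and .part expectations out of gomock.InOrder and registers them as plain expectations with .Times(2) where a call repeats. A plausible reading is that the store no longer issues these lookups in a fixed order, so the tests cannot pin one; that reasoning is an assumption, the diff itself does not state it. A small sketch of the two gomock styles, reusing this package's generated MockS3API (the mock methods are invoked directly so the controller sees every expectation satisfied):

// Sketch only: ordered vs. unordered gomock expectations.
func sketchExpectationStyles(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	s3obj := NewMockS3API(mockCtrl)

	// Unordered: may happen at any point, but ListParts exactly twice.
	s3obj.EXPECT().ListPartsWithContext(gomock.Any(), gomock.Any()).
		Return(&s3.ListPartsOutput{}, nil).Times(2)

	// Ordered: GetObject must strictly precede UploadPart.
	gomock.InOrder(
		s3obj.EXPECT().GetObjectWithContext(gomock.Any(), gomock.Any()).
			Return(&s3.GetObjectOutput{}, nil),
		s3obj.EXPECT().UploadPartWithContext(gomock.Any(), gomock.Any()).
			Return(&s3.UploadPartOutput{}, nil),
	)

	// Exercise the mock so Finish() does not report missing calls.
	s3obj.ListPartsWithContext(context.Background(), &s3.ListPartsInput{})
	s3obj.GetObjectWithContext(context.Background(), &s3.GetObjectInput{})
	s3obj.ListPartsWithContext(context.Background(), &s3.ListPartsInput{})
	s3obj.UploadPartWithContext(context.Background(), &s3.UploadPartInput{})
}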
@@ -590,88 +763,6 @@ func TestWriteChunk(t *testing.T) {
 	assert.Equal(int64(14), bytesRead)
 }
 
-// TestWriteChunkWithUnexpectedEOF ensures that WriteChunk does not error out
-// if the io.Reader returns an io.ErrUnexpectedEOF. This happens when a HTTP
-// PATCH request gets interrupted.
-func TestWriteChunkWithUnexpectedEOF(t *testing.T) {
-	mockCtrl := gomock.NewController(t)
-	defer mockCtrl.Finish()
-	assert := assert.New(t)
-
-	s3obj := NewMockS3API(mockCtrl)
-	store := New("bucket", s3obj)
-	store.MaxPartSize = 500
-	store.MinPartSize = 100
-	store.MaxMultipartParts = 10000
-	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
-
-	gomock.InOrder(
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.info"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(100),
-				},
-				{
-					Size: aws.Int64(200),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil)),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(100),
-				},
-				{
-					Size: aws.Int64(200),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
-		s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-			Body: bytes.NewReader([]byte("1234567890ABCD")),
-		})).Return(nil, nil),
-	)
-
-	reader, writer := io.Pipe()
-
-	go func() {
-		writer.Write([]byte("1234567890ABCD"))
-		writer.CloseWithError(io.ErrUnexpectedEOF)
-	}()
-
-	upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
-	assert.Nil(err)
-
-	bytesRead, err := upload.WriteChunk(context.Background(), 300, reader)
-	assert.Nil(err)
-	assert.Equal(int64(14), bytesRead)
-}
-
 func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
@@ -680,47 +771,33 @@ func TestWriteChunkWriteIncompletePartBecauseTooSmall(t *testing.T) {
 	s3obj := NewMockS3API(mockCtrl)
 	store := New("bucket", s3obj)
 
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.info"),
+	}).Return(&s3.GetObjectOutput{
+		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
+	}, nil)
+	s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumberMarker: aws.Int64(0),
+	}).Return(&s3.ListPartsOutput{
+		Parts: []*s3.Part{
+			{
+				Size: aws.Int64(100),
+			},
+			{
+				Size: aws.Int64(200),
+			},
+		},
+	}, nil).Times(2)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil))
+
 	gomock.InOrder(
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.info"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(100),
-				},
-				{
-					Size: aws.Int64(200),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist", nil)),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(100),
-				},
-				{
-					Size: aws.Int64(200),
-				},
-			},
-		}, nil),
 		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId.part"),
@@ -749,46 +826,42 @@ func TestWriteChunkPrependsIncompletePart(t *testing.T) {
 	store := New("bucket", s3obj)
 	store.MaxPartSize = 8
 	store.MinPartSize = 4
+	store.PreferredPartSize = 4
 	store.MaxMultipartParts = 10000
 	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
 
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.info"),
+	}).Return(&s3.GetObjectOutput{
+		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
+	}, nil)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{
+		ContentLength: aws.Int64(3),
+		Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
+	}, nil)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{
+		ContentLength: aws.Int64(3),
+		Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
+	}, nil)
+	s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
+		Bucket: aws.String(store.Bucket),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.DeleteObjectOutput{}, nil)
+	s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumberMarker: aws.Int64(0),
+	}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil).Times(2)
+
 	gomock.InOrder(
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.info"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":5,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{
-			ContentLength: aws.Int64(3),
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{
-			ContentLength: aws.Int64(3),
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
-		}, nil),
-		s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
-			Bucket: aws.String(store.Bucket),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.DeleteObjectOutput{}, nil),
 		s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -822,46 +895,35 @@ func TestWriteChunkPrependsIncompletePartAndWritesANewIncompletePart(t *testing.T) {
 	store := New("bucket", s3obj)
 	store.MaxPartSize = 8
 	store.MinPartSize = 4
+	store.PreferredPartSize = 4
 	store.MaxMultipartParts = 10000
 	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
 
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.info"),
+	}).Return(&s3.GetObjectOutput{
+		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":10,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
+	}, nil)
+	s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumberMarker: aws.Int64(0),
+	}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil).Times(2)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{
+		ContentLength: aws.Int64(3),
+		Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
+	}, nil).Times(2)
+	s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
+		Bucket: aws.String(store.Bucket),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.DeleteObjectOutput{}, nil)
+
 	gomock.InOrder(
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.info"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":10,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{
-			ContentLength: aws.Int64(3),
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{Parts: []*s3.Part{}}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte("123"))),
-			ContentLength: aws.Int64(3),
-		}, nil),
-		s3obj.EXPECT().DeleteObjectWithContext(context.Background(), &s3.DeleteObjectInput{
-			Bucket: aws.String(store.Bucket),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.DeleteObjectOutput{}, nil),
 		s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
 			Bucket: aws.String("bucket"),
 			Key: aws.String("uploadId"),
@@ -893,59 +955,42 @@ func TestWriteChunkAllowTooSmallLast(t *testing.T) {
 	store := New("bucket", s3obj)
 	store.MinPartSize = 20
 
-	gomock.InOrder(
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.info"),
-		}).Return(&s3.GetObjectOutput{
-			Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
-		}, nil),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(400),
-				},
-				{
-					Size: aws.Int64(90),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil)),
-		s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumberMarker: aws.Int64(0),
-		}).Return(&s3.ListPartsOutput{
-			Parts: []*s3.Part{
-				{
-					Size: aws.Int64(400),
-				},
-				{
-					Size: aws.Int64(90),
-				},
-			},
-		}, nil),
-		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId.part"),
-		}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil)),
-		s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
-			Bucket: aws.String("bucket"),
-			Key: aws.String("uploadId"),
-			UploadId: aws.String("multipartId"),
-			PartNumber: aws.Int64(3),
-			Body: bytes.NewReader([]byte("1234567890")),
-		})).Return(nil, nil),
-	)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.info"),
+	}).Return(&s3.GetObjectOutput{
+		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
+	}, nil)
+	s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumberMarker: aws.Int64(0),
+	}).Return(&s3.ListPartsOutput{
+		Parts: []*s3.Part{
+			{
+				Size: aws.Int64(400),
+			},
+			{
+				Size: aws.Int64(90),
+			},
+		},
+	}, nil).Times(2)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("AccessDenied", "Access Denied.", nil))
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
+	s3obj.EXPECT().UploadPartWithContext(context.Background(), NewUploadPartInputMatcher(&s3.UploadPartInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumber: aws.Int64(3),
+		Body: bytes.NewReader([]byte("1234567890")),
+	})).Return(nil, nil)
 
 	upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
 	assert.Nil(err)
@@ -1047,13 +1092,14 @@ func TestTerminateWithErrors(t *testing.T) {
 	assert.Equal("Multiple errors occurred:\n\tAWS S3 Error (hello) for object uploadId: it's me.\n", err.Error())
 }
 
-func TestConcatUploads(t *testing.T) {
+func TestConcatUploadsUsingMultipart(t *testing.T) {
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
 	assert := assert.New(t)
 
 	s3obj := NewMockS3API(mockCtrl)
 	store := New("bucket", s3obj)
+	store.MinPartSize = 100
 
 	s3obj.EXPECT().UploadPartCopyWithContext(context.Background(), &s3.UploadPartCopyInput{
 		Bucket: aws.String("bucket"),
@@ -1135,6 +1181,11 @@ func TestConcatUploads(t *testing.T) {
 	uploadC, err := store.GetUpload(context.Background(), "ccc+CCC")
 	assert.Nil(err)
 
+	// All uploads have a size larger than the MinPartSize, so a S3 Multipart Upload is used for concatenation.
+	uploadA.(*s3Upload).info = &handler.FileInfo{Size: 500}
+	uploadB.(*s3Upload).info = &handler.FileInfo{Size: 500}
+	uploadC.(*s3Upload).info = &handler.FileInfo{Size: 500}
+
 	err = store.AsConcatableUpload(upload).ConcatUploads(context.Background(), []handler.Upload{
 		uploadA,
 		uploadB,
@@ -1142,3 +1193,167 @@ func TestConcatUploads(t *testing.T) {
 	})
 	assert.Nil(err)
 }
+
+func TestConcatUploadsUsingDownload(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	s3obj := NewMockS3API(mockCtrl)
+	store := New("bucket", s3obj)
+	store.MinPartSize = 100
+
+	gomock.InOrder(
+		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("aaa"),
+		}).Return(&s3.GetObjectOutput{
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("aaa"))),
+		}, nil),
+		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("bbb"),
+		}).Return(&s3.GetObjectOutput{
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("bbbb"))),
+		}, nil),
+		s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("ccc"),
+		}).Return(&s3.GetObjectOutput{
+			Body: ioutil.NopCloser(bytes.NewReader([]byte("ccccc"))),
+		}, nil),
+		s3obj.EXPECT().PutObjectWithContext(context.Background(), NewPutObjectInputMatcher(&s3.PutObjectInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			Body: bytes.NewReader([]byte("aaabbbbccccc")),
+		})),
+		s3obj.EXPECT().AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
+			Bucket: aws.String("bucket"),
+			Key: aws.String("uploadId"),
+			UploadId: aws.String("multipartId"),
+		}).Return(nil, nil),
+	)
+
+	upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
+	assert.Nil(err)
+
+	uploadA, err := store.GetUpload(context.Background(), "aaa+AAA")
+	assert.Nil(err)
+	uploadB, err := store.GetUpload(context.Background(), "bbb+BBB")
+	assert.Nil(err)
+	uploadC, err := store.GetUpload(context.Background(), "ccc+CCC")
+	assert.Nil(err)
+
+	// All uploads have a size smaller than the MinPartSize, so the files are downloaded for concatenation.
+	uploadA.(*s3Upload).info = &handler.FileInfo{Size: 3}
+	uploadB.(*s3Upload).info = &handler.FileInfo{Size: 4}
+	uploadC.(*s3Upload).info = &handler.FileInfo{Size: 5}
+
+	err = store.AsConcatableUpload(upload).ConcatUploads(context.Background(), []handler.Upload{
+		uploadA,
+		uploadB,
+		uploadC,
+	})
+	assert.Nil(err)
+
+	// Wait a short delay until the call to AbortMultipartUploadWithContext also occurs.
+	<-time.After(10 * time.Millisecond)
+}
+
+type s3APIWithTempFileAssertion struct {
+	*MockS3API
+	assert *assert.Assertions
+	tempDir string
+}
+
+func (s s3APIWithTempFileAssertion) UploadPartWithContext(context.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) {
+	assert := s.assert
+
+	// Make sure that only the two temporary files from tusd are in here.
+	files, err := ioutil.ReadDir(s.tempDir)
+	assert.Nil(err)
+	for _, file := range files {
+		assert.True(strings.HasPrefix(file.Name(), "tusd-s3-tmp-"))
+	}
+	assert.GreaterOrEqual(len(files), 1)
+	assert.LessOrEqual(len(files), 3)
+
+	return nil, fmt.Errorf("not now")
+}
+
+// This test ensures that the S3Store will cleanup all files that it creates during
+// a call to WriteChunk, even if an error occurs during that invocation.
+// Here, we provide 14 bytes to WriteChunk and since the PartSize is set to 10,
+// it will split the input into two parts (10 bytes and 4 bytes).
+// Inside the first call to UploadPartWithContext, we assert that the temporary files
+// for both parts have been created and we return an error.
+// In the end, we assert that the error bubbled up and that all temporary files have
+// been cleaned up.
+func TestWriteChunkCleansUpTempFiles(t *testing.T) {
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	assert := assert.New(t)
+
+	// Create a temporary directory, so no files get mixed in.
+	tempDir, err := ioutil.TempDir("", "tusd-s3-cleanup-tests-")
+	assert.Nil(err)
+
+	s3obj := NewMockS3API(mockCtrl)
+	s3api := s3APIWithTempFileAssertion{
+		MockS3API: s3obj,
+		assert: assert,
+		tempDir: tempDir,
+	}
+	store := New("bucket", s3api)
+	store.MaxPartSize = 10
+	store.MinPartSize = 10
+	store.PreferredPartSize = 10
+	store.MaxMultipartParts = 10000
+	store.MaxObjectSize = 5 * 1024 * 1024 * 1024 * 1024
+	store.TemporaryDirectory = tempDir
+
+	// The usual S3 calls for retrieving the upload
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.info"),
+	}).Return(&s3.GetObjectOutput{
+		Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"ID":"uploadId","Size":500,"Offset":0,"MetaData":null,"IsPartial":false,"IsFinal":false,"PartialUploads":null,"Storage":null}`))),
+	}, nil)
+	s3obj.EXPECT().ListPartsWithContext(context.Background(), &s3.ListPartsInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId"),
+		UploadId: aws.String("multipartId"),
+		PartNumberMarker: aws.Int64(0),
+	}).Return(&s3.ListPartsOutput{
+		Parts: []*s3.Part{
+			{
+				Size: aws.Int64(100),
+			},
+			{
+				Size: aws.Int64(200),
+			},
+		},
+	}, nil).Times(2)
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "Not found", nil))
+	s3obj.EXPECT().GetObjectWithContext(context.Background(), &s3.GetObjectInput{
+		Bucket: aws.String("bucket"),
+		Key: aws.String("uploadId.part"),
+	}).Return(&s3.GetObjectOutput{}, awserr.New("NoSuchKey", "The specified key does not exist.", nil))
+
+	// No calls to s3obj.EXPECT().UploadPartWithContext since that is handled by s3APIWithTempFileAssertion
+
+	upload, err := store.GetUpload(context.Background(), "uploadId+multipartId")
+	assert.Nil(err)
+
+	bytesRead, err := upload.WriteChunk(context.Background(), 300, bytes.NewReader([]byte("1234567890ABCD")))
+	assert.NotNil(err)
+	assert.Equal(err.Error(), "not now")
+	assert.Equal(int64(0), bytesRead)
+
+	files, err := ioutil.ReadDir(tempDir)
+	assert.Nil(err)
+	assert.Equal(len(files), 0)
+}
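The comment block on TestWriteChunkCleansUpTempFiles above rests on a small piece of arithmetic: 14 bytes written with a part size of 10 are buffered as two temporary files of 10 and 4 bytes. A minimal illustration of that split (not s3store's actual buffering code, just the arithmetic the comment describes):

// splitIntoParts returns the part sizes a fixed part size produces for a given
// total; splitIntoParts(14, 10) yields [10 4], matching the test's comment.
func splitIntoParts(total, partSize int64) []int64 {
	var parts []int64
	for total > 0 {
		n := partSize
		if total < partSize {
			n = total
		}
		parts = append(parts, n)
		total -= n
	}
	return parts
}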
@@ -9,16 +9,19 @@ source "${__dir}/build_funcs.sh"
 compile linux 386
 compile linux amd64
 compile linux arm
-compile darwin 386
+compile linux arm64
 compile darwin amd64
+compile darwin arm64
 compile windows 386 .exe
 compile windows amd64 .exe
 
 maketar linux 386
 maketar linux amd64
 maketar linux arm
-makezip darwin 386
+maketar linux arm64
 makezip darwin amd64
+makezip darwin arm64
 makezip windows 386 .exe
 makezip windows amd64 .exe
 makedep amd64
+makedep arm64
@@ -30,6 +30,7 @@ function makezip {
 
 	local dir="tusd_${os}_${arch}"
 	zip "$dir.zip" "$dir/tusd$ext" LICENSE.txt README.md
+	sha256sum "$dir.zip" > "$dir.zip.sha256"
 }
 
 function maketar {
@@ -40,6 +41,7 @@ function maketar {
 
 	local dir="tusd_${os}_${arch}"
 	tar -czf "$dir.tar.gz" "$dir/tusd" LICENSE.txt README.md
+	sha256sum "$dir.tar.gz" > "$dir.tar.gz.sha256"
 }
 
 function makedep {
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-set -o pipefail
-set -o errexit
-set -o nounset
-# set -o xtrace
-
-# Set magic variables for current FILE & DIR
-__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-__root="$(cd "$(dirname "${__dir}")" && pwd)"
-
-curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
-chmod +x ./kubectl
-sudo mv ./kubectl /usr/local/bin/kubectl
-
-#Store the new image in docker hub
-docker build --quiet -t tusproject/tusd:latest -t tusproject/tusd:$TRAVIS_COMMIT ${__root};
-docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD";
-docker push tusproject/tusd:$TRAVIS_COMMIT;
-docker push tusproject/tusd:latest;
-
-
-echo "Create directory..."
-mkdir ${HOME}/.kube
-echo "Writing KUBECONFIG to file..."
-echo $KUBECONFIGVAR | python -m base64 -d > ${HOME}/.kube/config
-echo "KUBECONFIG file written"
-
-sleep 10s # This cost me some precious debugging time.
-kubectl apply -f "${__root}/infra/kube/tusd-kube.yaml"
-
-
-kubectl set image deployment/tusd --namespace=tus tusd=docker.io/tusproject/tusd:$TRAVIS_COMMIT
-
-kubectl get pods --namespace=tus
-kubectl get service --namespace=tus
-kubectl get deployment --namespace=tus
-
-
-function cleanup {
-    printf "Cleaning up...\n"
-    rm -f ${HOME}/.kube/config
-    printf "Cleaning done."
-}
-
-trap cleanup EXIT
@@ -7,7 +7,7 @@
 #
 
 cat <<-EOH
-# This file is generated via https://github.com/tus/tusd/blob/master/generate-docker-library.sh
+# This file is generated via https://github.com/tus/tusd/blob/main/generate-docker-library.sh
 Maintainers: tus.io (@tus), Thomas A. Hirsch (@thirsch)
 GitRepo: https://github.com/tus/tusd.git
 EOH
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-go test ./pkg/...
-go vet ./pkg/...