FEATURE: Dynamic chunk size with uppy (#22061)
When we get to really big files, it's better not to have thousands of small chunks, since we don't have resume functionality if the upload fails. Better to try uploading fewer chunks even if those chunks are bigger. For example, with this change a 20GB file goes from roughly 4000 chunks at the default 5MB size to roughly 1000 chunks at the new 20MB size. Still a lot, but perhaps more manageable. This is somewhat experimental -- if we still don't see improvements we can always change back.
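To sanity-check that arithmetic, here is a minimal standalone sketch (plain Node.js, not part of the commit; the 20GB file size and both chunk sizes are taken from the message above):

// Chunk-count arithmetic from the commit message, runnable under Node.js.
const MB = 1024 * 1024;
const GB = 1024 * MB;

const fileSize = 20 * GB;
console.log(Math.ceil(fileSize / (5 * MB)));  // 4096 chunks at the old default of 5MB
console.log(Math.ceil(fileSize / (20 * MB))); // 1024 chunks at the new 20MB size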
parent fca6c1836c
commit 7e0fcd1b42
@@ -5,6 +5,7 @@ import { Promise } from "rsvp";
 import { ajax } from "discourse/lib/ajax";
 import AwsS3Multipart from "@uppy/aws-s3-multipart";
 const RETRY_DELAYS = [0, 1000, 3000, 5000];
+const MB = 1024 * 1024;
 
 export default Mixin.create({
   _useS3MultipartUploads() {
@@ -21,6 +22,20 @@ export default Mixin.create({
       limit: 10,
       retryDelays: RETRY_DELAYS,
 
+      // When we get to really big files, it's better to not have thousands
+      // of small chunks, since we don't have a resume functionality if the
+      // upload fails. Better to try upload less chunks even if those chunks
+      // are bigger.
+      getChunkSize(file) {
+        if (file.size >= 500 * MB) {
+          return 20 * MB;
+        } else if (file.size >= 100 * MB) {
+          return 10 * MB;
+        } else {
+          return 5 * MB;
+        }
+      },
+
       createMultipartUpload: this._createMultipartUpload,
       prepareUploadParts: this._prepareUploadParts,
       completeMultipartUpload: this._completeMultipartUpload,
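For context, here is a minimal sketch of how a getChunkSize option like the one above plugs into @uppy/aws-s3-multipart (uppy 2.x-era API, matching the prepareUploadParts option seen in the diff; the handler stubs are hypothetical placeholders -- the real Discourse mixin wires createMultipartUpload and friends to its own methods, as the diff shows):

import Uppy from "@uppy/core";
import AwsS3Multipart from "@uppy/aws-s3-multipart";

const MB = 1024 * 1024;

const uppy = new Uppy();
uppy.use(AwsS3Multipart, {
  limit: 10,
  retryDelays: [0, 1000, 3000, 5000],
  // Same tiering as the diff: the bigger the file, the bigger the chunk,
  // so a 20GB upload needs ~1000 parts instead of ~4000.
  getChunkSize(file) {
    if (file.size >= 500 * MB) {
      return 20 * MB;
    } else if (file.size >= 100 * MB) {
      return 10 * MB;
    }
    return 5 * MB;
  },
  // Hypothetical stubs; Discourse points these at its own S3 endpoints.
  createMultipartUpload: async (file) => ({ uploadId: "...", key: "..." }),
  prepareUploadParts: async (file, partData) => ({ presignedUrls: {} }),
  completeMultipartUpload: async (file, data) => ({ location: "..." }),
});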