From 7e0fcd1b42a3496df1487dd2b7e9fe3eaed91dd2 Mon Sep 17 00:00:00 2001 From: Martin Brennan Date: Mon, 12 Jun 2023 17:47:29 +1000 Subject: [PATCH] FEATURE: Dynamic chunk size with uppy (#22061) When we get to really big files, it's better to not have thousands of small chunks, since we don't have resume functionality if the upload fails. Better to try to upload fewer chunks even if those chunks are bigger. For example, with this change a 20GB file would go from 4000 chunks of the default 5MB to 1000 chunks of the new 20MB size. Still a lot, but perhaps more manageable. This is somewhat experimental -- if we still don't see improvements we can always change back. --- .../discourse/app/mixins/uppy-s3-multipart.js | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/app/assets/javascripts/discourse/app/mixins/uppy-s3-multipart.js b/app/assets/javascripts/discourse/app/mixins/uppy-s3-multipart.js index 0de515d2a79..5c905dd3e9e 100644 --- a/app/assets/javascripts/discourse/app/mixins/uppy-s3-multipart.js +++ b/app/assets/javascripts/discourse/app/mixins/uppy-s3-multipart.js @@ -5,6 +5,7 @@ import { Promise } from "rsvp"; import { ajax } from "discourse/lib/ajax"; import AwsS3Multipart from "@uppy/aws-s3-multipart"; const RETRY_DELAYS = [0, 1000, 3000, 5000]; +const MB = 1024 * 1024; export default Mixin.create({ _useS3MultipartUploads() { @@ -21,6 +22,20 @@ limit: 10, retryDelays: RETRY_DELAYS, + // When we get to really big files, it's better to not have thousands + // of small chunks, since we don't have resume functionality if the + // upload fails. Better to try to upload fewer chunks even if those + // chunks are bigger. 
+ getChunkSize(file) { + if (file.size >= 500 * MB) { + return 20 * MB; + } else if (file.size >= 100 * MB) { + return 10 * MB; + } else { + return 5 * MB; + } + }, + createMultipartUpload: this._createMultipartUpload, prepareUploadParts: this._prepareUploadParts, completeMultipartUpload: this._completeMultipartUpload,