2019-05-03 06:17:27 +08:00
|
|
|
# frozen_string_literal: true
|
|
|
|
|
2017-06-13 19:27:05 +08:00
|
|
|
require "mini_mime"
|
2017-05-11 06:16:57 +08:00
|
|
|
|
2013-02-06 03:16:51 +08:00
|
|
|
class UploadsController < ApplicationController
  # Viewing uploads (:show, :show_short, :show_secure) is public; all other
  # actions require a logged-in user.
  requires_login except: [:show, :show_short, :show_secure]

  # The show actions serve raw files, so skip the JSON preload / XHR checks
  # and do not bounce anonymous viewers to the login page.
  skip_before_action :preload_json, :check_xhr, :redirect_to_login_if_required, only: [:show, :show_short, :show_secure]

  protect_from_forgery except: :show

  before_action :is_asset_path, :apply_cdn_headers, only: [:show, :show_short, :show_secure]

  # The direct/external upload endpoints only make sense when an external
  # (e.g. S3) store is configured.
  before_action :external_store_check, only: [
    :show_secure,
    :generate_presigned_put,
    :complete_external_upload,
    :create_multipart,
    :batch_presign_multipart_parts,
    :abort_multipart,
    :complete_multipart
  ]
  # Direct-to-S3 uploads must additionally be enabled via site settings.
  before_action :direct_s3_uploads_check, only: [
    :generate_presigned_put,
    :complete_external_upload,
    :create_multipart,
    :batch_presign_multipart_parts,
    :abort_multipart,
    :complete_multipart
  ]
  before_action :can_upload_external?, only: [:create_multipart, :generate_presigned_put]

  # Subtracted from the presigned URL lifetime when setting the redirect's
  # cache duration, so a cached redirect never outlives the signed URL.
  SECURE_REDIRECT_GRACE_SECONDS = 5

  # Per-user, per-minute rate limits for the direct S3 upload endpoints.
  PRESIGNED_PUT_RATE_LIMIT_PER_MINUTE = 10
  CREATE_MULTIPART_RATE_LIMIT_PER_MINUTE = 10
  COMPLETE_MULTIPART_RATE_LIMIT_PER_MINUTE = 10
  BATCH_PRESIGN_RATE_LIMIT_PER_MINUTE = 10
|
2020-05-18 22:00:41 +08:00
|
|
|
|
2013-02-06 03:16:51 +08:00
|
|
|
  # Creates an upload from an uploaded file (or URL param). Validation and
  # the actual upload creation happen asynchronously inside `hijack`.
  def create
    # capture current user for block later on
    # (the hijack block runs outside the controller context, so
    # current_user would not be available inside it)
    me = current_user

    params.permit(:type, :upload_type)
    if params[:type].blank? && params[:upload_type].blank?
      raise Discourse::InvalidParameters
    end
    # 50 characters ought to be enough for the upload type
    # NOTE(review): [0..50] actually keeps up to 51 characters — confirm
    # whether the off-by-one versus the comment is intentional
    type = (params[:upload_type].presence || params[:type].presence).parameterize(separator: "_")[0..50]

    # Avatar uploads are rejected for non-admins when SSO overrides avatars
    # or the user's trust level / staff status does not permit them.
    if type == "avatar" && !me.admin? && (SiteSetting.discourse_connect_overrides_avatar || !TrustLevelAndStaffAndDisabledSetting.matches?(SiteSetting.allow_uploaded_avatars, me))
      return render json: failed_json, status: 422
    end

    url = params[:url]
    file = params[:file] || params[:files]&.first
    pasted = params[:pasted] == "true"
    for_private_message = params[:for_private_message] == "true"
    for_site_setting = params[:for_site_setting] == "true"
    is_api = is_api?
    retain_hours = params[:retain_hours].to_i

    # note, atm hijack is processed in its own context and has no access to controller
    # longer term we may change this
    hijack do
      begin
        info = UploadsController.create_upload(
          current_user: me,
          file: file,
          url: url,
          type: type,
          for_private_message: for_private_message,
          for_site_setting: for_site_setting,
          pasted: pasted,
          is_api: is_api,
          retain_hours: retain_hours
        )
      rescue => e
        # surface only the first line of the error message to the client
        render json: failed_json.merge(message: e.message&.split("\n")&.first), status: 422
      else
        # 200 only when a real Upload was produced; anything else is a failure
        render json: UploadsController.serialize_upload(info), status: Upload === info ? 200 : 422
      end
    end
  end
|
2013-06-05 06:34:53 +08:00
|
|
|
|
2017-08-23 04:40:01 +08:00
|
|
|
def lookup_urls
|
|
|
|
params.permit(short_urls: [])
|
|
|
|
uploads = []
|
|
|
|
|
|
|
|
if (params[:short_urls] && params[:short_urls].length > 0)
|
2019-05-29 09:00:25 +08:00
|
|
|
PrettyText::Helpers.lookup_upload_urls(params[:short_urls]).each do |short_url, paths|
|
|
|
|
uploads << {
|
|
|
|
short_url: short_url,
|
|
|
|
url: paths[:url],
|
|
|
|
short_path: paths[:short_path]
|
|
|
|
}
|
2017-08-23 04:40:01 +08:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
render json: uploads.to_json
|
|
|
|
end
|
|
|
|
|
2013-09-07 01:18:42 +08:00
|
|
|
  # Serves an upload by sha1 (or by id + request path), switching to the
  # multisite DB named in params[:site].
  def show
    # do not serve uploads requested via XHR to prevent XSS
    return xhr_not_allowed if request.xhr?

    return render_404 if !RailsMultisite::ConnectionManagement.has_db?(params[:site])

    RailsMultisite::ConnectionManagement.with_connection(params[:site]) do |db|
      return render_404 if SiteSetting.prevent_anons_from_downloading_files && current_user.nil?

      if upload = Upload.find_by(sha1: params[:sha]) || Upload.find_by(id: params[:id], url: request.env["PATH_INFO"])
        # When the default store is external, only serve files that are
        # genuinely local uploads — otherwise 404.
        unless Discourse.store.internal?
          local_store = FileStore::LocalStore.new
          return render_404 unless local_store.has_been_uploaded?(upload.url)
        end

        send_file_local_upload(upload)
      else
        render_404
      end
    end
  end
|
2019-01-30 05:47:25 +08:00
|
|
|
|
2019-05-29 09:00:25 +08:00
|
|
|
def show_short
|
2019-06-26 22:02:55 +08:00
|
|
|
# do not serve uploads requested via XHR to prevent XSS
|
2019-06-27 17:13:44 +08:00
|
|
|
return xhr_not_allowed if request.xhr?
|
2019-06-26 22:02:55 +08:00
|
|
|
|
2019-05-29 09:00:25 +08:00
|
|
|
if SiteSetting.prevent_anons_from_downloading_files && current_user.nil?
|
|
|
|
return render_404
|
|
|
|
end
|
2019-05-28 23:18:21 +08:00
|
|
|
|
2019-05-29 09:00:25 +08:00
|
|
|
sha1 = Upload.sha1_from_base62_encoded(params[:base62])
|
|
|
|
|
|
|
|
if upload = Upload.find_by(sha1: sha1)
|
2020-03-16 09:54:14 +08:00
|
|
|
if upload.secure? && SiteSetting.secure_media?
|
|
|
|
return handle_secure_upload_request(upload)
|
|
|
|
end
|
2020-01-16 11:50:27 +08:00
|
|
|
|
2019-05-29 09:00:25 +08:00
|
|
|
if Discourse.store.internal?
|
|
|
|
send_file_local_upload(upload)
|
2014-04-15 04:55:57 +08:00
|
|
|
else
|
2020-09-29 10:12:03 +08:00
|
|
|
redirect_to Discourse.store.url_for(upload, force_download: force_download?)
|
2014-04-15 04:55:57 +08:00
|
|
|
end
|
2019-05-29 09:00:25 +08:00
|
|
|
else
|
|
|
|
render_404
|
2014-03-25 07:37:31 +08:00
|
|
|
end
|
2013-09-07 01:18:42 +08:00
|
|
|
end
|
|
|
|
|
2019-11-18 09:25:42 +08:00
|
|
|
  # Serves a secure (access-controlled) upload stored on the external store,
  # looked up by the sha1 embedded in the requested path.
  def show_secure
    # do not serve uploads requested via XHR to prevent XSS
    return xhr_not_allowed if request.xhr?

    path_with_ext = "#{params[:path]}.#{params[:extension]}"

    sha1 = File.basename(path_with_ext, File.extname(path_with_ext))
    # this takes care of optimized image requests
    # (optimized image filenames are "<sha1>_<...>")
    sha1 = sha1.partition("_").first if sha1.include?("_")

    upload = Upload.find_by(sha1: sha1)
    return render_404 if upload.blank?

    return render_404 if SiteSetting.prevent_anons_from_downloading_files && current_user.nil?
    return handle_secure_upload_request(upload, path_with_ext) if SiteSetting.secure_media?

    # we don't want to 404 here if secure media gets disabled
    # because all posts with secure uploads will show broken media
    # until rebaked, which could take some time
    #
    # if the upload is still secure, that means the ACL is probably still
    # private, so we don't want to go to the CDN url just yet otherwise we
    # will get a 403. if the upload is not secure we assume the ACL is public
    signed_secure_url = Discourse.store.signed_url_for_path(path_with_ext)
    redirect_to upload.secure? ? signed_secure_url : Discourse.store.cdn_url(upload.url)
  end
|
|
|
|
|
2020-03-16 09:54:14 +08:00
|
|
|
  # Authorizes the current user against the upload's access-control post
  # (or requires a logged-in user when there is none), then redirects to a
  # short-lived presigned URL for the file.
  def handle_secure_upload_request(upload, path_with_ext = nil)
    if upload.access_control_post_id.present?
      raise Discourse::InvalidAccess if !guardian.can_see?(upload.access_control_post)
    else
      return render_404 if current_user.nil?
    end

    # defaults to public: false, so only cached by the client browser
    # cache slightly less than the presigned URL lifetime so a cached
    # redirect never points at an already-expired URL
    cache_seconds = S3Helper::DOWNLOAD_URL_EXPIRES_AFTER_SECONDS - SECURE_REDIRECT_GRACE_SECONDS
    expires_in cache_seconds.seconds

    # url_for figures out the full URL, handling multisite DBs,
    # and will return a presigned URL for the upload
    if path_with_ext.blank?
      return redirect_to Discourse.store.url_for(upload, force_download: force_download?)
    end

    # an explicit path was requested (e.g. an optimized image) — sign that
    # exact path instead
    redirect_to Discourse.store.signed_url_for_path(
      path_with_ext,
      expires_in: S3Helper::DOWNLOAD_URL_EXPIRES_AFTER_SECONDS,
      force_download: force_download?
    )
  end
|
|
|
|
|
2019-02-21 10:13:37 +08:00
|
|
|
def metadata
|
|
|
|
params.require(:url)
|
|
|
|
upload = Upload.get_from_url(params[:url])
|
|
|
|
raise Discourse::NotFound unless upload
|
|
|
|
|
|
|
|
render json: {
|
|
|
|
original_filename: upload.original_filename,
|
|
|
|
width: upload.width,
|
|
|
|
height: upload.height,
|
|
|
|
human_filesize: upload.human_filesize
|
|
|
|
}
|
|
|
|
end
|
|
|
|
|
2021-07-28 06:42:25 +08:00
|
|
|
  # Creates an ExternalUploadStub and returns a presigned PUT URL so the
  # client can upload the file directly to the external (S3) store.
  # Rate-limited per user.
  def generate_presigned_put
    RateLimiter.new(
      current_user, "generate-presigned-put-upload-stub", PRESIGNED_PUT_RATE_LIMIT_PER_MINUTE, 1.minute
    ).performed!

    file_name = params.require(:file_name)
    file_size = params.require(:file_size).to_i
    type = params.require(:type)

    # Reject early if the declared size already exceeds the limit for this
    # file type; the error message shows a humanized max size (e.g. "512 MB").
    if file_size_too_big?(file_name, file_size)
      return render_json_error(
        I18n.t("upload.attachments.too_large_humanized", max_size: ActiveSupport::NumberHelper.number_to_human_size(SiteSetting.max_attachment_size_kb.kilobytes)),
        status: 422
      )
    end

    external_upload_data = ExternalUploadManager.create_direct_upload(
      current_user: current_user,
      file_name: file_name,
      file_size: file_size,
      upload_type: type,
      # only whitelisted metadata keys are passed through to the store
      metadata: parse_allowed_metadata(params[:metadata])
    )

    render json: external_upload_data
  end
|
|
|
|
|
|
|
|
def complete_external_upload
|
|
|
|
unique_identifier = params.require(:unique_identifier)
|
|
|
|
external_upload_stub = ExternalUploadStub.find_by(
|
|
|
|
unique_identifier: unique_identifier, created_by: current_user
|
|
|
|
)
|
|
|
|
return render_404 if external_upload_stub.blank?
|
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-25 06:46:54 +08:00
|
|
|
complete_external_upload_via_manager(external_upload_stub)
|
|
|
|
end
|
2021-07-28 06:42:25 +08:00
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-25 06:46:54 +08:00
|
|
|
# Promotes a completed direct-to-S3 upload (tracked by an ExternalUploadStub)
# into a real Upload record via ExternalUploadManager. Renders the serialized
# upload with status 200 on success, or a 422 JSON error on failure.
#
# Runs inside `hijack` because the promotion can involve slow S3 work that
# should not tie up a web worker.
def complete_external_upload_via_manager(external_upload_stub)
  opts = {
    for_private_message: params[:for_private_message]&.to_s == "true",
    for_site_setting: params[:for_site_setting]&.to_s == "true",
    pasted: params[:pasted]&.to_s == "true",
  }

  external_upload_manager = ExternalUploadManager.new(external_upload_stub, opts)

  hijack do
    begin
      upload = external_upload_manager.promote_to_upload!

      if upload.errors.empty?
        external_upload_stub.destroy!
        render json: UploadsController.serialize_upload(upload), status: 200
      else
        render_json_error(upload.errors.to_hash.values.flatten, status: 422)
      end
    rescue ExternalUploadManager::SizeMismatchError,
           ExternalUploadManager::ChecksumMismatchError,
           ExternalUploadManager::CannotPromoteError,
           ExternalUploadManager::DownloadFailedError,
           Aws::S3::Errors::NotFound => err
      # The `when` order mirrors the rescue order of the individual handlers
      # this consolidates, so the translation picked for each error class is
      # unchanged.
      translation_key =
        case err
        when ExternalUploadManager::SizeMismatchError then "upload.size_mismatch_failure"
        when ExternalUploadManager::ChecksumMismatchError then "upload.checksum_mismatch_failure"
        when ExternalUploadManager::CannotPromoteError then "upload.cannot_promote_failure"
        else "upload.download_failure"
        end

      render_json_error(
        debug_upload_error(err, translation_key, additional_detail: err.message),
        status: 422
      )
    rescue => err
      Discourse.warn_exception(
        err, message: "Complete external upload failed unexpectedly for user #{current_user.id}"
      )

      render_json_error(I18n.t("upload.failed"), status: 422)
    end
  end
end
|
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-25 06:46:54 +08:00
|
|
|
# Starts a direct-to-S3 multipart upload for the current user.
#
# Validates the declared file size against the site limit, then asks
# ExternalUploadManager to create the multipart upload in S3 together with an
# ExternalUploadStub tracking record. Renders the presign data as JSON, or a
# 422 error when the file is too large or S3 rejects the request.
def create_multipart
  RateLimiter.new(
    current_user, "create-multipart-upload", CREATE_MULTIPART_RATE_LIMIT_PER_MINUTE, 1.minute
  ).performed!

  file_name = params.require(:file_name)
  file_size = params.require(:file_size).to_i
  upload_type = params.require(:upload_type)

  if file_size_too_big?(file_name, file_size)
    return render_json_error(
      I18n.t(
        "upload.attachments.too_large_humanized",
        max_size: ActiveSupport::NumberHelper.number_to_human_size(SiteSetting.max_attachment_size_kb.kilobytes)
      ),
      status: 422
    )
  end

  begin
    multipart_upload_data = ExternalUploadManager.create_direct_multipart_upload(
      current_user: current_user,
      file_name: file_name,
      file_size: file_size,
      upload_type: upload_type,
      metadata: parse_allowed_metadata(params[:metadata])
    )
  rescue Aws::S3::Errors::ServiceError => err
    # NOTE(review): "mutlipart" is a typo, but this i18n key must stay in
    # sync with the locale files, so it is preserved as-is.
    return render_json_error(
      debug_upload_error(err, "upload.create_mutlipart_failure", additional_detail: err.message),
      status: 422
    )
  end

  render json: multipart_upload_data
end
|
|
|
|
|
|
|
|
# Generates presigned S3 URLs for a batch of multipart upload parts.
#
# The client sends the part numbers it is about to upload plus the unique
# identifier of its ExternalUploadStub. Renders 404 when the stub does not
# belong to the current user or the multipart upload no longer exists in S3.
def batch_presign_multipart_parts
  part_numbers = params.require(:part_numbers)
  unique_identifier = params.require(:unique_identifier)

  RateLimiter.new(
    current_user, "batch-presign", BATCH_PRESIGN_RATE_LIMIT_PER_MINUTE, 1.minute
  ).performed!

  # Coerce and bounds-check every requested part number up front.
  part_numbers = part_numbers.map { |part_number| validate_part_number(part_number) }

  external_upload_stub = ExternalUploadStub.find_by(
    unique_identifier: unique_identifier, created_by: current_user
  )
  return render_404 if external_upload_stub.blank?

  return render_404 if !multipart_upload_exists?(external_upload_stub)

  presigned_urls = part_numbers.each_with_object({}) do |part_number, urls|
    urls[part_number] = Discourse.store.presign_multipart_part(
      upload_id: external_upload_stub.external_upload_identifier,
      key: external_upload_stub.key,
      part_number: part_number
    )
  end

  render json: { presigned_urls: presigned_urls }
end
|
|
|
|
|
|
|
|
# Coerces +part_number+ to an Integer and ensures it falls within the valid
# S3 multipart part range (1..10000). Returns the coerced integer, raising
# Discourse::InvalidParameters otherwise.
def validate_part_number(part_number)
  number = part_number.to_i
  unless number.between?(1, 10000)
    raise Discourse::InvalidParameters.new(
      "Each part number should be between 1 and 10000"
    )
  end
  number
end
|
|
|
|
|
|
|
|
# Returns true when the multipart upload tracked by +external_upload_stub+
# still exists in S3, false when S3 reports NoSuchUpload (e.g. it was aborted
# or already completed). The list call's result is discarded — only whether
# it succeeds matters.
def multipart_upload_exists?(external_upload_stub)
  Discourse.store.list_multipart_parts(
    upload_id: external_upload_stub.external_upload_identifier, key: external_upload_stub.key
  )
  true
rescue Aws::S3::Errors::NoSuchUpload => err
  debug_upload_error(err, "upload.external_upload_not_found", { additional_detail: "path: #{external_upload_stub.key}" })
  false
end
|
|
|
|
|
|
|
|
# Aborts an in-progress S3 multipart upload and removes its tracking stub.
#
# Responds with success_json when the stub is already gone (an earlier error
# may have cleaned it up), and 404 when the stub belongs to another user.
def abort_multipart
  external_upload_identifier = params.require(:external_upload_identifier)

  stub = ExternalUploadStub.find_by(
    external_upload_identifier: external_upload_identifier
  )

  # The stub could have already been deleted by an earlier error via
  # ExternalUploadManager, so we consider this a great success if the
  # stub is already gone.
  return render json: success_json if stub.blank?

  return render_404 if stub.created_by_id != current_user.id

  begin
    Discourse.store.abort_multipart(
      upload_id: stub.external_upload_identifier,
      key: stub.key
    )
  rescue Aws::S3::Errors::ServiceError => err
    # NOTE(review): "mutlipart" is a typo, but this i18n key must stay in
    # sync with the locale files, so it is preserved as-is.
    return render_json_error(
      debug_upload_error(err, "upload.abort_mutlipart_failure", additional_detail: "external upload stub id: #{stub.id}"),
      status: 422
    )
  end

  stub.destroy!

  render json: success_json
end
|
|
|
|
|
|
|
|
# Completes a direct-to-S3 multipart upload, then promotes the uploaded file
# into a real Upload record via complete_external_upload_via_manager.
#
# Expects the stub's unique identifier plus the full list of uploaded parts
# (each with part_number and etag). Renders 404 when the stub or the S3
# multipart upload is missing, 422 when S3 rejects the completion.
def complete_multipart
  unique_identifier = params.require(:unique_identifier)
  parts = params.require(:parts)

  RateLimiter.new(
    current_user, "complete-multipart-upload", COMPLETE_MULTIPART_RATE_LIMIT_PER_MINUTE, 1.minute
  ).performed!

  external_upload_stub = ExternalUploadStub.find_by(
    unique_identifier: unique_identifier, created_by: current_user
  )
  return render_404 if external_upload_stub.blank?

  return render_404 if !multipart_upload_exists?(external_upload_stub)

  # Rebuild each part as a plain Hash (rather than ActionController::Parameters)
  # and sort by part number, validating every part along the way.
  parts = parts.map do |part|
    part_number = validate_part_number(part[:part_number])
    etag = part[:etag]

    raise Discourse::InvalidParameters.new("All parts must have an etag and a valid part number") if etag.blank?

    { part_number: part_number, etag: etag }
  end.sort_by { |part| part[:part_number] }

  begin
    # NOTE(review): "mutlipart" is a typo, but this i18n key must stay in
    # sync with the locale files, so it is preserved as-is.
    Discourse.store.complete_multipart(
      upload_id: external_upload_stub.external_upload_identifier,
      key: external_upload_stub.key,
      parts: parts
    )
  rescue Aws::S3::Errors::ServiceError => err
    return render_json_error(
      debug_upload_error(err, "upload.complete_mutlipart_failure", additional_detail: "external upload stub id: #{external_upload_stub.id}"),
      status: 422
    )
  end

  complete_external_upload_via_manager(external_upload_stub)
end
|
|
|
|
|
2014-05-14 08:51:09 +08:00
|
|
|
protected
|
|
|
|
|
2020-09-29 10:12:03 +08:00
|
|
|
  # Whether the client explicitly requested a forced download (as opposed
  # to inline display) via the ?dl=1 query parameter.
  def force_download?
    params[:dl] == "1"
  end
|
|
|
|
|
2019-06-27 17:13:44 +08:00
|
|
|
def xhr_not_allowed
|
|
|
|
raise Discourse::InvalidParameters.new("XHR not allowed")
|
|
|
|
end
|
|
|
|
|
2017-11-27 09:43:18 +08:00
|
|
|
  # Aborts the request with Discourse's standard not-found handling.
  def render_404
    raise Discourse::NotFound
  end
|
|
|
|
|
|
|
|
def self.serialize_upload(data)
|
2017-08-23 04:40:01 +08:00
|
|
|
# as_json.as_json is not a typo... as_json in AM serializer returns keys as symbols, we need them
|
|
|
|
# as strings here
|
|
|
|
serialized = UploadSerializer.new(data, root: nil).as_json.as_json if Upload === data
|
|
|
|
serialized ||= (data || {}).as_json
|
|
|
|
end
|
|
|
|
|
2018-11-14 15:03:02 +08:00
|
|
|
def self.create_upload(current_user:,
|
|
|
|
file:,
|
|
|
|
url:,
|
|
|
|
type:,
|
|
|
|
for_private_message:,
|
|
|
|
for_site_setting:,
|
|
|
|
pasted:,
|
|
|
|
is_api:,
|
|
|
|
retain_hours:)
|
|
|
|
|
2017-05-11 06:16:57 +08:00
|
|
|
if file.nil?
|
2017-11-27 09:43:18 +08:00
|
|
|
if url.present? && is_api
|
2017-05-11 06:16:57 +08:00
|
|
|
maximum_upload_size = [SiteSetting.max_image_size_kb, SiteSetting.max_attachment_size_kb].max.kilobytes
|
2017-05-25 01:42:52 +08:00
|
|
|
tempfile = FileHelper.download(
|
|
|
|
url,
|
2019-05-28 08:28:57 +08:00
|
|
|
follow_redirect: true,
|
2017-05-25 01:42:52 +08:00
|
|
|
max_file_size: maximum_upload_size,
|
|
|
|
tmp_file_name: "discourse-upload-#{type}"
|
|
|
|
) rescue nil
|
2017-05-11 06:16:57 +08:00
|
|
|
filename = File.basename(URI.parse(url).path)
|
2015-08-13 00:33:13 +08:00
|
|
|
end
|
2017-05-11 06:16:57 +08:00
|
|
|
else
|
|
|
|
tempfile = file.tempfile
|
|
|
|
filename = file.original_filename
|
|
|
|
end
|
2015-08-13 00:33:13 +08:00
|
|
|
|
2017-05-11 06:16:57 +08:00
|
|
|
return { errors: [I18n.t("upload.file_missing")] } if tempfile.nil?
|
2015-06-15 22:12:15 +08:00
|
|
|
|
2017-06-23 18:13:48 +08:00
|
|
|
opts = {
|
|
|
|
type: type,
|
|
|
|
for_private_message: for_private_message,
|
2018-11-14 15:03:02 +08:00
|
|
|
for_site_setting: for_site_setting,
|
2017-06-23 18:13:48 +08:00
|
|
|
pasted: pasted,
|
|
|
|
}
|
2017-06-13 04:41:29 +08:00
|
|
|
|
|
|
|
upload = UploadCreator.new(tempfile, filename, opts).create_for(current_user.id)
|
2015-06-15 22:12:15 +08:00
|
|
|
|
2017-05-11 06:16:57 +08:00
|
|
|
if upload.errors.empty? && current_user.admin?
|
|
|
|
upload.update_columns(retain_hours: retain_hours) if retain_hours > 0
|
2015-06-15 22:12:15 +08:00
|
|
|
end
|
|
|
|
|
2019-04-30 14:58:18 +08:00
|
|
|
upload.errors.empty? ? upload : { errors: upload.errors.to_hash.values.flatten }
|
2017-05-11 06:16:57 +08:00
|
|
|
ensure
|
2018-03-28 16:20:08 +08:00
|
|
|
tempfile&.close!
|
2016-06-20 18:35:07 +08:00
|
|
|
end
|
|
|
|
|
2019-05-29 09:00:25 +08:00
|
|
|
private
|
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-25 06:46:54 +08:00
|
|
|
def external_store_check
|
|
|
|
return render_404 if !Discourse.store.external?
|
|
|
|
end
|
|
|
|
|
|
|
|
def direct_s3_uploads_check
|
|
|
|
return render_404 if !SiteSetting.enable_direct_s3_uploads
|
|
|
|
end
|
|
|
|
|
|
|
|
def can_upload_external?
|
|
|
|
raise Discourse::InvalidAccess if !guardian.can_upload_external?
|
|
|
|
end
|
|
|
|
|
|
|
|
# We can pre-emptively check size for attachments, but not for images
|
|
|
|
# as they may be further reduced in size by UploadCreator (at this point
|
|
|
|
# they may have already been reduced in size by preprocessors)
|
|
|
|
def file_size_too_big?(file_name, file_size)
|
|
|
|
!FileHelper.is_supported_image?(file_name) && file_size >= SiteSetting.max_attachment_size_kb.kilobytes
|
|
|
|
end
|
|
|
|
|
2019-05-29 09:00:25 +08:00
|
|
|
def send_file_local_upload(upload)
|
|
|
|
opts = {
|
|
|
|
filename: upload.original_filename,
|
|
|
|
content_type: MiniMime.lookup_by_filename(upload.original_filename)&.content_type
|
|
|
|
}
|
|
|
|
|
2020-07-09 11:31:48 +08:00
|
|
|
if !FileHelper.is_inline_image?(upload.original_filename)
|
2019-05-29 09:00:25 +08:00
|
|
|
opts[:disposition] = "attachment"
|
2019-12-11 21:21:41 +08:00
|
|
|
elsif params[:inline]
|
|
|
|
opts[:disposition] = "inline"
|
2019-05-29 09:00:25 +08:00
|
|
|
end
|
|
|
|
|
|
|
|
file_path = Discourse.store.path_for(upload)
|
|
|
|
return render_404 unless file_path
|
|
|
|
|
|
|
|
send_file(file_path, opts)
|
|
|
|
end
|
|
|
|
|
FEATURE: Uppy direct S3 multipart uploads in composer (#14051)
This pull request introduces the endpoints required, and the JavaScript functionality in the `ComposerUppyUpload` mixin, for direct S3 multipart uploads. There are four new endpoints in the uploads controller:
* `create-multipart.json` - Creates the multipart upload in S3 along with an `ExternalUploadStub` record, storing information about the file in the same way as `generate-presigned-put.json` does for regular direct S3 uploads
* `batch-presign-multipart-parts.json` - Takes a list of part numbers and the unique identifier for an `ExternalUploadStub` record, and generates the presigned URLs for those parts if the multipart upload still exists and if the user has permission to access that upload
* `complete-multipart.json` - Completes the multipart upload in S3. Needs the full list of part numbers and their associated ETags which are returned when the part is uploaded to the presigned URL above. Only works if the user has permission to access the associated `ExternalUploadStub` record and the multipart upload still exists.
After we confirm the upload is complete in S3, we go through the regular `UploadCreator` flow, the same as `complete-external-upload.json`, and promote the temporary upload S3 into a full `Upload` record, moving it to its final destination.
* `abort-multipart.json` - Aborts the multipart upload on S3 and destroys the `ExternalUploadStub` record if the user has permission to access that upload.
Also added are a few new columns to `ExternalUploadStub`:
* multipart - Whether or not this is a multipart upload
* external_upload_identifier - The "upload ID" for an S3 multipart upload
* filesize - The size of the file when the `create-multipart.json` or `generate-presigned-put.json` is called. This is used for validation.
When the user completes a direct S3 upload, either regular or multipart, we take the `filesize` that was captured when the `ExternalUploadStub` was first created and compare it with the final `Content-Length` size of the file where it is stored in S3. Then, if the two do not match, we throw an error, delete the file on S3, and ban the user from uploading files for N (default 5) minutes. This would only happen if the user uploads a different file than what they first specified, or in the case of multipart uploads uploaded larger chunks than needed. This is done to prevent abuse of S3 storage by bad actors.
Also included in this PR is an update to vendor/uppy.js. This has been built locally from the latest uppy source at https://github.com/transloadit/uppy/commit/d613b849a6591083f8a0968aa8d66537e231bbcd. This must be done so that I can get my multipart upload changes into Discourse. When the Uppy team cuts a proper release, we can bump the package.json versions instead.
2021-08-25 06:46:54 +08:00
|
|
|
def debug_upload_error(err, translation_key, translation_params = {})
|
2021-07-28 06:42:25 +08:00
|
|
|
return if !SiteSetting.enable_upload_debug_mode
|
2021-09-22 06:43:02 +08:00
|
|
|
message = I18n.t(translation_key, translation_params)
|
|
|
|
Discourse.warn_exception(err, message: message)
|
|
|
|
Rails.env.development? ? message : I18n.t("upload.failed")
|
2021-07-28 06:42:25 +08:00
|
|
|
end
|
2021-08-27 07:50:23 +08:00
|
|
|
|
|
|
|
# don't want people posting arbitrary S3 metadata so we just take the
|
|
|
|
# one we need. all of these will be converted to x-amz-meta- metadata
|
|
|
|
# fields in S3 so it's best to use dashes in the names for consistency
|
|
|
|
#
|
|
|
|
# this metadata is baked into the presigned url and is not altered when
|
|
|
|
# sending the PUT from the clientside to the presigned url
|
|
|
|
def parse_allowed_metadata(metadata)
|
|
|
|
return if metadata.blank?
|
|
|
|
metadata.permit("sha1-checksum").to_h
|
|
|
|
end
|
2013-02-06 03:16:51 +08:00
|
|
|
end
|