From 473c112dae830b06a3c2b2b4e5bedc3ccd9247f3 Mon Sep 17 00:00:00 2001
From: Claire
Date: Tue, 25 Nov 2025 11:18:34 +0100
Subject: [PATCH] Increase HTTP read timeout for expensive S3 batch delete
 operation (#37004)

---
 app/lib/attachment_batch.rb | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/app/lib/attachment_batch.rb b/app/lib/attachment_batch.rb
index 374abfac49..1443a1ec60 100644
--- a/app/lib/attachment_batch.rb
+++ b/app/lib/attachment_batch.rb
@@ -112,10 +112,12 @@ class AttachmentBatch
     keys.each_slice(LIMIT) do |keys_slice|
       logger.debug { "Deleting #{keys_slice.size} objects" }
 
-      bucket.delete_objects(delete: {
-        objects: keys_slice.map { |key| { key: key } },
-        quiet: true,
-      })
+      with_overridden_timeout(bucket.client, 120) do
+        bucket.delete_objects(delete: {
+          objects: keys_slice.map { |key| { key: key } },
+          quiet: true,
+        })
+      end
     rescue => e
       retries += 1
 
@@ -134,6 +136,20 @@ class AttachmentBatch
     @bucket ||= records.first.public_send(@attachment_names.first).s3_bucket
   end
 
+  # Currently, the aws-sdk-s3 gem does not offer a way to cleanly override the timeout
+  # per-request. So we change the client's config instead. As this client will likely
+  # be re-used for other jobs, restore its original configuration in an `ensure` block.
+  def with_overridden_timeout(s3_client, longer_read_timeout)
+    original_timeout = s3_client.config.http_read_timeout
+    s3_client.config.http_read_timeout = [original_timeout, longer_read_timeout].max
+
+    begin
+      yield
+    ensure
+      s3_client.config.http_read_timeout = original_timeout
+    end
+  end
+
   def nullified_attributes
     @attachment_names.flat_map { |attachment_name| NULLABLE_ATTRIBUTES.map { |attribute| "#{attachment_name}_#{attribute}" } & klass.column_names }.index_with(nil)
   end
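
Reviewer note: a minimal standalone sketch (not part of the patch) of the raise-then-restore pattern the new with_overridden_timeout helper relies on. FakeConfig and FakeClient below are hypothetical stand-ins for Aws::S3::Client's config object, assumed here only so the example runs without the aws-sdk-s3 gem or AWS credentials.

    # Illustrative sketch of the ensure-restore timeout override.
    # FakeConfig/FakeClient are hypothetical stand-ins for Aws::S3::Client.
    FakeConfig = Struct.new(:http_read_timeout)
    FakeClient = Struct.new(:config)

    def with_overridden_timeout(s3_client, longer_read_timeout)
      original_timeout = s3_client.config.http_read_timeout
      # Never lower an already-long timeout; only raise it when needed.
      s3_client.config.http_read_timeout = [original_timeout, longer_read_timeout].max

      begin
        yield
      ensure
        # The client is shared with other jobs, so restore its original
        # setting even if the block raises.
        s3_client.config.http_read_timeout = original_timeout
      end
    end

    client = FakeClient.new(FakeConfig.new(60))

    with_overridden_timeout(client, 120) do
      puts client.config.http_read_timeout # => 120 during the expensive batch delete
    end

    puts client.config.http_read_timeout   # => 60, restored afterwards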