Increase HTTP read timeout for expensive S3 batch delete operation (#37004)

Claire
2025-11-25 11:18:34 +01:00
parent 821e735524
commit 473c112dae


@@ -112,10 +112,12 @@ class AttachmentBatch
keys.each_slice(LIMIT) do |keys_slice|
  logger.debug { "Deleting #{keys_slice.size} objects" }
  with_overridden_timeout(bucket.client, 120) do
    bucket.delete_objects(delete: {
      objects: keys_slice.map { |key| { key: key } },
      quiet: true,
    })
  end
rescue => e
  retries += 1
@@ -134,6 +136,20 @@ class AttachmentBatch
  @bucket ||= records.first.public_send(@attachment_names.first).s3_bucket
end

# Currently, the aws-sdk-s3 gem does not offer a way to cleanly override the timeout
# per-request. So we change the client's config instead. As this client will likely
# be re-used for other jobs, restore its original configuration in an `ensure` block.
def with_overridden_timeout(s3_client, longer_read_timeout)
  original_timeout = s3_client.config.http_read_timeout
  s3_client.config.http_read_timeout = [original_timeout, longer_read_timeout].max

  begin
    yield
  ensure
    s3_client.config.http_read_timeout = original_timeout
  end
end

def nullified_attributes
  @attachment_names.flat_map { |attachment_name| NULLABLE_ATTRIBUTES.map { |attribute| "#{attachment_name}_#{attribute}" } & klass.column_names }.index_with(nil)
end
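
For context, here is a minimal standalone sketch of the pattern this commit introduces: temporarily raising an aws-sdk-s3 client's `http_read_timeout` around a slow batch delete, then restoring the original value even if the call raises. The region, bucket name, and object keys below are placeholders for illustration and are not part of the original change; only the `with_overridden_timeout` helper mirrors the code added in this diff.

```ruby
require "aws-sdk-s3"

# Hypothetical client and bucket setup, for illustration only.
client = Aws::S3::Client.new(region: "us-east-1")
bucket = Aws::S3::Bucket.new("example-bucket", client: client)

# Raise the client's HTTP read timeout for the duration of the block,
# restoring the previous value afterwards so the shared client is unaffected.
def with_overridden_timeout(s3_client, longer_read_timeout)
  original_timeout = s3_client.config.http_read_timeout
  s3_client.config.http_read_timeout = [original_timeout, longer_read_timeout].max
  yield
ensure
  s3_client.config.http_read_timeout = original_timeout
end

keys = %w(media/1.png media/2.png) # hypothetical object keys

# A large batch delete can exceed the client's default read timeout,
# so run it with the temporarily raised limit.
with_overridden_timeout(bucket.client, 120) do
  bucket.delete_objects(delete: {
    objects: keys.map { |key| { key: key } },
    quiet: true,
  })
end
```

Because the override is applied to the shared client's config rather than per-request, the `ensure` block is what keeps the longer timeout from leaking into unrelated jobs that reuse the same client.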