Revert "Increase HTTP read timeout for expensive S3 batch delete operation (#36971)" (#36996)

Claire
2025-11-24 14:33:37 +01:00
committed by GitHub
parent 57bfe863f3
commit 96d5e57351


@@ -112,17 +112,10 @@ class AttachmentBatch
     keys.each_slice(LIMIT) do |keys_slice|
       logger.debug { "Deleting #{keys_slice.size} objects" }
-      bucket.delete_objects(
-        {
-          delete: {
-            objects: keys_slice.map { |key| { key: key } },
-            quiet: true,
-          },
-        },
-        {
-          http_read_timeout: [Paperclip::Attachment.default_options[:s3_options][:http_read_timeout], 120].max,
-        }
-      )
+      bucket.delete_objects(delete: {
+        objects: keys_slice.map { |key| { key: key } },
+        quiet: true,
+      })
     rescue => e
       retries += 1
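
For context, a minimal sketch (not part of this commit) of how a longer socket read timeout can be applied when the S3 client is constructed rather than per delete_objects call; Paperclip forwards its :s3_options hash to the AWS SDK client, which is where http_read_timeout is normally set. The region, bucket name, and object keys below are hypothetical placeholders.

# Hedged sketch, assuming aws-sdk-s3: set the read timeout on the client itself
# instead of passing a per-call option to Bucket#delete_objects.
require 'aws-sdk-s3'

client = Aws::S3::Client.new(
  region: 'us-east-1',     # assumed region
  http_read_timeout: 120   # seconds to wait on a socket read before failing
)
bucket = Aws::S3::Bucket.new('example-bucket', client: client)

keys = %w(media/1.png media/2.png)       # hypothetical object keys
keys.each_slice(1_000) do |keys_slice|   # S3 DeleteObjects accepts at most 1000 keys per request
  bucket.delete_objects(delete: {
    objects: keys_slice.map { |key| { key: key } },
    quiet: true, # report only per-key errors, not successful deletions
  })
end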