Increase HTTP read timeout for expensive S3 batch delete operation (#37004)
@@ -112,10 +112,12 @@ class AttachmentBatch
     keys.each_slice(LIMIT) do |keys_slice|
       logger.debug { "Deleting #{keys_slice.size} objects" }
 
-      bucket.delete_objects(delete: {
-        objects: keys_slice.map { |key| { key: key } },
-        quiet: true,
-      })
+      with_overridden_timeout(bucket.client, 120) do
+        bucket.delete_objects(delete: {
+          objects: keys_slice.map { |key| { key: key } },
+          quiet: true,
+        })
+      end
     rescue => e
       retries += 1
 
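For readers skimming the hunk above: the keys are sliced because S3's DeleteObjects API accepts a bounded number of keys per request, so each slice becomes one request payload. A minimal standalone sketch of that batching, not part of the commit — the key names and counts are made up, and LIMIT is assumed here to match S3's documented cap of 1,000 keys per DeleteObjects request:

# Sketch only: LIMIT is assumed to mirror S3's DeleteObjects cap of
# 1,000 keys per request; the key names below are illustrative.
LIMIT = 1_000

keys = Array.new(2_500) { |i| "media_attachments/files/#{i}/original.png" }

keys.each_slice(LIMIT) do |keys_slice|
  # Each slice becomes the payload of a single DeleteObjects request,
  # shaped like the `delete:` hash in the diff above.
  payload = { objects: keys_slice.map { |key| { key: key } }, quiet: true }
  puts "one request deleting #{payload[:objects].size} objects"
end
# Prints three lines: 1000, 1000, then 500 objects.

The `quiet: true` flag asks S3 to report only failed deletions in the response instead of echoing every deleted key.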
@@ -134,6 +136,20 @@ class AttachmentBatch
     @bucket ||= records.first.public_send(@attachment_names.first).s3_bucket
   end
 
+  # Currently, the aws-sdk-s3 gem does not offer a way to cleanly override the timeout
+  # per-request. So we change the client's config instead. As this client will likely
+  # be re-used for other jobs, restore its original configuration in an `ensure` block.
+  def with_overridden_timeout(s3_client, longer_read_timeout)
+    original_timeout = s3_client.config.http_read_timeout
+    s3_client.config.http_read_timeout = [original_timeout, longer_read_timeout].max
+
+    begin
+      yield
+    ensure
+      s3_client.config.http_read_timeout = original_timeout
+    end
+  end
+
   def nullified_attributes
     @attachment_names.flat_map { |attachment_name| NULLABLE_ATTRIBUTES.map { |attribute| "#{attachment_name}_#{attribute}" } & klass.column_names }.index_with(nil)
   end
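To see the override-then-restore pattern the new helper's comment describes, here is a minimal runnable sketch that exercises it in isolation. It is not part of the commit: FakeClient and its OpenStruct config are hypothetical stand-ins for the aws-sdk-s3 client, exposing only the `config.http_read_timeout` attribute that the real helper touches.

# Sketch only: FakeClient is a hypothetical stand-in for the aws-sdk-s3
# client; the helper body matches the one added in the diff above.
require 'ostruct'

FakeClient = Struct.new(:config)

def with_overridden_timeout(s3_client, longer_read_timeout)
  original_timeout = s3_client.config.http_read_timeout
  # Only ever raise the timeout; never shorten one that is already longer.
  s3_client.config.http_read_timeout = [original_timeout, longer_read_timeout].max

  begin
    yield
  ensure
    # Runs even when the block raises, so a shared, re-used client never
    # keeps the longer timeout after the expensive operation finishes.
    s3_client.config.http_read_timeout = original_timeout
  end
end

client = FakeClient.new(OpenStruct.new(http_read_timeout: 60))

begin
  with_overridden_timeout(client, 120) do
    puts client.config.http_read_timeout # => 120 while the block runs
    raise 'simulated S3 failure'
  end
rescue RuntimeError
  # The error still propagates to the caller (feeding the retry logic in
  # the first hunk); only the timeout restoration is guaranteed.
end

puts client.config.http_read_timeout # => 60, restored by `ensure`

Note that mutating a shared client's config this way is not thread-safe if other threads use the same client concurrently; the commit limits the exposure by restoring the original value as soon as the block exits.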