From bad33880c38517d1eaa5f421008cab3758030abe Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Wed, 9 Apr 2025 18:42:45 +0530
Subject: [PATCH 1/8] Added x-amz-expiration, a missing HTTP header, in the
 response of object GET/PUT/HEAD

Signed-off-by: Aayush Chouhan
---
 src/api/bucket_api.js                       |  1 +
 src/endpoint/s3/s3_rest.js                  |  2 +
 src/sdk/object_sdk.js                       | 10 +++
 src/server/system_services/bucket_server.js |  1 +
 src/util/http_utils.js                      | 78 +++++++++++++++++++++
 5 files changed, 92 insertions(+)

diff --git a/src/api/bucket_api.js b/src/api/bucket_api.js
index 5634313fc4..d5ddbf5b0b 100644
--- a/src/api/bucket_api.js
+++ b/src/api/bucket_api.js
@@ -1215,6 +1215,7 @@ module.exports = {
                     $ref: 'common_api#/definitions/bucket_policy'
                 },
                 replication_policy_id: { objectid: true },
+                lifecycle_configuration_rules: { $ref: 'common_api#/definitions/bucket_lifecycle_configuration' },
             }
         },
diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js
index eff8e68923..ce5740a163 100755
--- a/src/endpoint/s3/s3_rest.js
+++ b/src/endpoint/s3/s3_rest.js
@@ -139,6 +139,8 @@ async function handle_request(req, res) {
     authenticate_request(req);
     await authorize_request(req);
 
+    await http_utils.set_expiration_header(req, res);
+
     dbg.log1('S3 REQUEST', req.method, req.originalUrl, 'op', op_name, 'request_id', req.request_id, req.headers);
     usage_report.s3_usage_info.total_calls += 1;
     usage_report.s3_usage_info[op_name] = (usage_report.s3_usage_info[op_name] || 0) + 1;
diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js
index 787a2a0572..1a3b7fbdda 100644
--- a/src/sdk/object_sdk.js
+++ b/src/sdk/object_sdk.js
@@ -224,6 +224,16 @@ class ObjectSDK {
         return policy_info;
     }
 
+    async read_bucket_lifecycle_config_info(name) {
+        try {
+            const { bucket } = await bucket_namespace_cache.get_with_cache({ sdk: this, name });
+            return bucket.bucket_info.lifecycle_configuration_rules;
+        } catch (error) {
+            if (error.rpc_code === 'NO_SUCH_BUCKET') return undefined;
+            throw error;
+        }
+    }
+
     async read_bucket_usage_info(name) {
         const { bucket } = await bucket_namespace_cache.get_with_cache({ sdk: this, name });
         return bucket.bucket_info.data;
diff --git a/src/server/system_services/bucket_server.js b/src/server/system_services/bucket_server.js
index 268006beff..234e1fa05e 100644
--- a/src/server/system_services/bucket_server.js
+++ b/src/server/system_services/bucket_server.js
@@ -1654,6 +1654,7 @@ function get_bucket_info({
         website: bucket.website,
         s3_policy: bucket.s3_policy,
         replication_policy_id: bucket.replication_policy_id,
+        lifecycle_configuration_rules: bucket.lifecycle_configuration_rules,
     };
 
     const metrics = _calc_metrics({ bucket, nodes_aggregate_pool, hosts_aggregate_pool, tiering_pools_status, info });
diff --git a/src/util/http_utils.js b/src/util/http_utils.js
index 387db25a5a..bcc1a4639e 100644
--- a/src/util/http_utils.js
+++ b/src/util/http_utils.js
@@ -23,6 +23,7 @@ const net_utils = require('./net_utils');
 const time_utils = require('./time_utils');
 const cloud_utils = require('./cloud_utils');
 const ssl_utils = require('../util/ssl_utils');
+const s3_utils = require('../endpoint/s3/s3_utils');
 const RpcError = require('../rpc/rpc_error');
 const S3Error = require('../endpoint/s3/s3_errors').S3Error;
 
@@ -664,6 +665,82 @@ function set_amz_headers(req, res) {
     res.setHeader('x-amz-id-2', req.request_id);
 }
 
+const s3_error_options = {
+    ErrorClass: S3Error,
+    error_missing_content_length: S3Error.MissingContentLength
+};
+/**
+ * @param {Object} req
+ * @param {http.ServerResponse} res
+ */
+async function set_expiration_header(req, res) {
+    if (req.method === 'HEAD' || req.method === 'GET' || req.method === 'PUT') {
+        const rules = req.params.bucket && await req.object_sdk.read_bucket_lifecycle_config_info(req.params.bucket);
+        const object_md = {
+            bucket: req.params.bucket,
+            key: req.params.key,
+            size: req.headers['x-amz-decoded-content-length'] || req.headers['content-length'] ? parse_content_length(req, s3_error_options) : undefined,
+            tagging: req.body && req.body.Tagging ? s3_utils.parse_body_tagging_xml(req) : undefined,
+        };
+
+        if (object_md.key && rules?.length > 0) { // updating x-amz-expiration if object key is present
+            for (const rule of rules) {
+                if (rule?.status !== 'Enabled') continue;
+
+                const filter = rule?.filter || {};
+
+                if (filter.prefix && !object_md?.key.startsWith(filter.prefix)) continue;
+
+                if (filter.object_size_greater_than && object_md?.size <= filter.object_size_greater_than) continue;
+                if (filter.object_size_less_than && object_md?.size >= filter.object_size_less_than) continue;
+
+                if (filter.tagging && Array.isArray(filter.tagging)) {
+                    const obj_tags = object_md?.tagging || [];
+
+                    const matches_all_tags = filter.tagging.every(filter_tag =>
+                        obj_tags.some(obj_tag => obj_tag.key === filter_tag.key && obj_tag.value === filter_tag.value)
+                    );
+
+                    if (!matches_all_tags) continue;
+                }
+
+                const expiration_head = parse_expiration_header(rule?.expiration, rule?.id);
+                if (expiration_head) {
+                    dbg.log0('set x_amz_expiration header from applied rule: ', rule);
+                    res.setHeader('x-amz-expiration', expiration_head);
+                    break; // apply only for first matching rule
+                }
+            }
+        }
+    }
+}
+
+/**
+ * parse_expiration_header converts an expiration rule (either with `date` or `days`)
+ * into an s3 style `x-amz-expiration` header value
+ *
+ * @param {Object} expiration - expiration object from lifecycle config
+ * @param {string} rule_id - id of the lifecycle rule
+ * @returns {string|undefined}
+ *
+ * Example output:
+ *   expiry-date="Thu, 10 Apr 2025 00:00:00 GMT", rule-id="rule_id"
+ */
+function parse_expiration_header(expiration, rule_id) {
+    if (!expiration || (!expiration.date && !expiration.days)) return undefined;
+
+    const expiration_date = expiration.date ?
+        new Date(expiration.date) :
+        new Date(Date.UTC(
+            new Date().getUTCFullYear(),
+            new Date().getUTCMonth(),
+            new Date().getUTCDate() + expiration.days
+        ));
+
+    return `expiry-date="${expiration_date.toUTCString()}", rule-id="${rule_id}"`;
+}
+
+
 /**
  * @typedef {{
  *  allow_origin: string;
@@ -945,6 +1022,7 @@ exports.set_keep_alive_whitespace_interval = set_keep_alive_whitespace_interval;
 exports.parse_xml_to_js = parse_xml_to_js;
 exports.check_headers = check_headers;
 exports.set_amz_headers = set_amz_headers;
+exports.set_expiration_header = set_expiration_header;
 exports.set_cors_headers = set_cors_headers;
 exports.set_cors_headers_s3 = set_cors_headers_s3;
 exports.set_cors_headers_sts = set_cors_headers_sts;
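The header value assembled above follows the AWS convention: an expiry-date plus the id of the rule that was applied. A minimal standalone sketch of the days-based arithmetic used by parse_expiration_header, with a hypothetical rule id and days value that are not part of the patch:

// Standalone sketch: compute an x-amz-expiration value for a days-based rule,
// mirroring the patch's approach of resolving `days` relative to the current UTC date.
function example_expiration_value(days, rule_id) {
    const now = new Date();
    const expiry = new Date(Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate() + days));
    return `expiry-date="${expiry.toUTCString()}", rule-id="${rule_id}"`;
}

console.log(example_expiration_value(30, 'expire-logs'));
// e.g. expiry-date="Fri, 09 May 2025 00:00:00 GMT", rule-id="expire-logs"
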
From 00b7b02eb7d4acf8811b26c927c1908ab9dd250b Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Wed, 30 Apr 2025 18:36:26 +0530
Subject: [PATCH 2/8] Added fixes as per the review comments - set the
 expiration header from the object GET/HEAD/PUT ops instead of s3_rest

Signed-off-by: Aayush Chouhan
---
 src/endpoint/s3/ops/s3_get_object.js  |  1 +
 src/endpoint/s3/ops/s3_head_object.js |  2 +
 src/endpoint/s3/ops/s3_put_object.js  |  2 +
 src/endpoint/s3/s3_rest.js            |  2 -
 src/util/http_utils.js                | 64 +++++++++++++--------------
 5 files changed, 37 insertions(+), 34 deletions(-)

diff --git a/src/endpoint/s3/ops/s3_get_object.js b/src/endpoint/s3/ops/s3_get_object.js
index 6a183fa336..3f8f215a13 100644
--- a/src/endpoint/s3/ops/s3_get_object.js
+++ b/src/endpoint/s3/ops/s3_get_object.js
@@ -39,6 +39,7 @@ async function get_object(req, res) {
 
     const object_md = await req.object_sdk.read_object_md(md_params);
 
+    http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
     s3_utils.set_response_object_md(res, object_md);
     s3_utils.set_encryption_response_headers(req, res, object_md.encryption);
     if (object_md.storage_class === s3_utils.STORAGE_CLASS_GLACIER) {
diff --git a/src/endpoint/s3/ops/s3_head_object.js b/src/endpoint/s3/ops/s3_head_object.js
index 1cd3543c3a..47b2ec58dd 100644
--- a/src/endpoint/s3/ops/s3_head_object.js
+++ b/src/endpoint/s3/ops/s3_head_object.js
@@ -24,6 +24,8 @@ async function head_object(req, res) {
     if (req.query.get_from_cache !== undefined) {
         params.get_from_cache = true;
     }
+    http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
+
     const object_md = await req.object_sdk.read_object_md(params);
 
     s3_utils.set_response_object_md(res, object_md);
diff --git a/src/endpoint/s3/ops/s3_put_object.js b/src/endpoint/s3/ops/s3_put_object.js
index d8bbdddd1e..c054abe999 100644
--- a/src/endpoint/s3/ops/s3_put_object.js
+++ b/src/endpoint/s3/ops/s3_put_object.js
@@ -32,6 +32,8 @@ async function put_object(req, res) {
         sha256_b64: req.content_sha256_buf && req.content_sha256_buf.toString('base64'),
     };
 
+    http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
+
    dbg.log0('PUT OBJECT', req.params.bucket, req.params.key,
        req.headers['x-amz-copy-source'] || '', encryption || '');
     //for copy, use correct s3_event_method. otherwise, just use default (req.method)
diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js
index ce5740a163..eff8e68923 100755
--- a/src/endpoint/s3/s3_rest.js
+++ b/src/endpoint/s3/s3_rest.js
@@ -139,8 +139,6 @@ async function handle_request(req, res) {
     authenticate_request(req);
     await authorize_request(req);
 
-    await http_utils.set_expiration_header(req, res);
-
     dbg.log1('S3 REQUEST', req.method, req.originalUrl, 'op', op_name, 'request_id', req.request_id, req.headers);
     usage_report.s3_usage_info.total_calls += 1;
     usage_report.s3_usage_info[op_name] = (usage_report.s3_usage_info[op_name] || 0) + 1;
diff --git a/src/util/http_utils.js b/src/util/http_utils.js
index bcc1a4639e..324b258c8f 100644
--- a/src/util/http_utils.js
+++ b/src/util/http_utils.js
@@ -665,51 +665,51 @@ function set_amz_headers(req, res) {
     res.setHeader('x-amz-id-2', req.request_id);
 }
 
-const s3_error_options = {
-    ErrorClass: S3Error,
-    error_missing_content_length: S3Error.MissingContentLength
-};
 /**
+ * set_expiration_header sets the `x-amz-expiration` response header for GET, PUT, or HEAD object requests
+ * if the object matches any enabled bucket lifecycle rule
+ *
  * @param {Object} req
  * @param {http.ServerResponse} res
  */
 async function set_expiration_header(req, res) {
-    if (req.method === 'HEAD' || req.method === 'GET' || req.method === 'PUT') {
-        const rules = req.params.bucket && await req.object_sdk.read_bucket_lifecycle_config_info(req.params.bucket);
-        const object_md = {
-            bucket: req.params.bucket,
-            key: req.params.key,
-            size: req.headers['x-amz-decoded-content-length'] || req.headers['content-length'] ? parse_content_length(req, s3_error_options) : undefined,
-            tagging: req.body && req.body.Tagging ? s3_utils.parse_body_tagging_xml(req) : undefined,
-        };
+    const rules = req.params.bucket && await req.object_sdk.read_bucket_lifecycle_config_info(req.params.bucket);
+    const object_md = {
+        bucket: req.params.bucket,
+        key: req.params.key,
+        size: req.headers['x-amz-decoded-content-length'] || req.headers['content-length'] ? parse_content_length(req, {
+            ErrorClass: S3Error,
+            error_missing_content_length: S3Error.MissingContentLength
+        }) : undefined,
+        tagging: req.body && req.body.Tagging ? s3_utils.parse_body_tagging_xml(req) : undefined,
+    };
 
-        if (object_md.key && rules?.length > 0) { // updating x-amz-expiration if object key is present
-            for (const rule of rules) {
-                if (rule?.status !== 'Enabled') continue;
+    if (object_md.key && rules?.length > 0) {
+        for (const rule of rules) {
+            if (rule?.status !== 'Enabled') continue;
 
-                const filter = rule?.filter || {};
+            const filter = rule?.filter || {};
 
-                if (filter.prefix && !object_md?.key.startsWith(filter.prefix)) continue;
+            if (filter.prefix && !object_md?.key.startsWith(filter.prefix)) continue;
 
-                if (filter.object_size_greater_than && object_md?.size <= filter.object_size_greater_than) continue;
-                if (filter.object_size_less_than && object_md?.size >= filter.object_size_less_than) continue;
+            if (filter.object_size_greater_than && object_md?.size <= filter.object_size_greater_than) continue;
+            if (filter.object_size_less_than && object_md?.size >= filter.object_size_less_than) continue;
 
-                if (filter.tagging && Array.isArray(filter.tagging)) {
-                    const obj_tags = object_md?.tagging || [];
+            if (filter.tagging && Array.isArray(filter.tagging)) {
+                const obj_tags = object_md?.tagging || [];
 
-                    const matches_all_tags = filter.tagging.every(filter_tag =>
-                        obj_tags.some(obj_tag => obj_tag.key === filter_tag.key && obj_tag.value === filter_tag.value)
-                    );
+                const matches_all_tags = filter.tagging.every(filter_tag =>
+                    obj_tags.some(obj_tag => obj_tag.key === filter_tag.key && obj_tag.value === filter_tag.value)
+                );
 
-                    if (!matches_all_tags) continue;
-                }
+                if (!matches_all_tags) continue;
+            }
 
-                const expiration_head = parse_expiration_header(rule?.expiration, rule?.id);
-                if (expiration_head) {
-                    dbg.log0('set x_amz_expiration header from applied rule: ', rule);
-                    res.setHeader('x-amz-expiration', expiration_head);
-                    break; // apply only for first matching rule
-                }
+            const expiration_header = parse_expiration_header(rule?.expiration, rule?.id);
+            if (expiration_header) {
+                dbg.log1('set x_amz_expiration header from applied rule: ', rule);
+                res.setHeader('x-amz-expiration', expiration_header);
+                break; // apply only for first matching rule
             }
         }
     }
 }
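At this stage the code still applies the first rule that matches, in list order. A standalone sketch of that first-match semantics, assuming a simplified rule shape ({ status, filter: { prefix }, expiration, id }) and an object_md that carries only a key:

// Standalone sketch of the first-match loop: return the first enabled rule
// whose prefix filter matches the object key, or undefined if none match.
function example_first_matching_rule(rules, object_md) {
    for (const rule of rules) {
        if (rule?.status !== 'Enabled') continue;
        const filter = rule?.filter || {};
        if (filter.prefix && !object_md.key.startsWith(filter.prefix)) continue;
        return rule;
    }
}

const rule = example_first_matching_rule(
    [{ status: 'Enabled', filter: { prefix: 'logs/' }, expiration: { days: 7 }, id: 'expire-logs' }],
    { key: 'logs/2025/04/30.log' }
);
// rule.id === 'expire-logs'
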
From 2b55c63b7662d92dbf0906458c59a29122093bd9 Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Wed, 30 Apr 2025 18:43:28 +0530
Subject: [PATCH 3/8] Enabled the ceph s3 lifecycle expiration header tests -
 removed them from the pending/black lists

Signed-off-by: Aayush Chouhan
---
 .../ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt | 3 ---
 .../ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt    | 3 ---
 2 files changed, 6 deletions(-)

diff --git a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
index e098992817..9c27e2befa 100644
--- a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
+++ b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
@@ -363,9 +363,6 @@ s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioned_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_noncur_tags1
 s3tests_boto3/functional/test_s3.py::test_lifecycle_set_date
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_put
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_head
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_tags_head
 s3tests_boto3/functional/test_s3.py::test_lifecycle_transition_set_invalid_date
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_newer_noncurrent
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_size_gt
diff --git a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt
index 8f1d7b4db2..dd5af5df9e 100644
--- a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt
+++ b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt
@@ -35,9 +35,6 @@ s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioned_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_noncur_tags1
 s3tests_boto3/functional/test_s3.py::test_lifecycle_set_date
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_put
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_head
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_tags_head
 s3tests_boto3/functional/test_s3.py::test_lifecycle_transition_set_invalid_date
 s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_c_s3
 s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_c_kms
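With the ceph s3 tests enabled, a client can also observe the header directly. A minimal sketch, assuming an @aws-sdk/client-s3 v3 client pointed at a NooBaa endpoint and a bucket that already has a days-based lifecycle rule; the endpoint, bucket, and key below are placeholders, not values from the patch:

// Sketch: HEAD an object and read the lifecycle expiration reported by the server.
// The SDK surfaces the x-amz-expiration header as `Expiration` on the response.
const { S3Client, HeadObjectCommand } = require('@aws-sdk/client-s3');

async function example_check_expiration() {
    const s3 = new S3Client({
        endpoint: 'http://localhost:6001', // placeholder NooBaa endpoint
        region: 'us-east-1',
        forcePathStyle: true,
    });
    const res = await s3.send(new HeadObjectCommand({ Bucket: 'lifecycle-bucket', Key: 'logs/app.log' }));
    console.log(res.Expiration);
    // e.g. expiry-date="Fri, 09 May 2025 00:00:00 GMT", rule-id="expire-logs"
}
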
From 3ed1e56a8645751881b64544f5caad935489b8eb Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Wed, 7 May 2025 20:13:07 +0530
Subject: [PATCH 4/8] Updated the rule-matching logic to select the most
 specific lifecycle rule

Signed-off-by: Aayush Chouhan
---
 src/api/bucket_api.js                       |   1 -
 src/endpoint/s3/ops/s3_get_object.js        |   2 +-
 src/endpoint/s3/ops/s3_head_object.js       |   3 +-
 src/endpoint/s3/ops/s3_put_object.js        |   3 +-
 src/sdk/object_sdk.js                       |  10 --
 src/server/system_services/bucket_server.js |   1 -
 src/util/http_utils.js                      | 105 +++++++++++++++-----
 7 files changed, 81 insertions(+), 44 deletions(-)

diff --git a/src/api/bucket_api.js b/src/api/bucket_api.js
index d5ddbf5b0b..5634313fc4 100644
--- a/src/api/bucket_api.js
+++ b/src/api/bucket_api.js
@@ -1215,7 +1215,6 @@ module.exports = {
                     $ref: 'common_api#/definitions/bucket_policy'
                 },
                 replication_policy_id: { objectid: true },
-                lifecycle_configuration_rules: { $ref: 'common_api#/definitions/bucket_lifecycle_configuration' },
             }
         },
diff --git a/src/endpoint/s3/ops/s3_get_object.js b/src/endpoint/s3/ops/s3_get_object.js
index 3f8f215a13..693b996234 100644
--- a/src/endpoint/s3/ops/s3_get_object.js
+++ b/src/endpoint/s3/ops/s3_get_object.js
@@ -39,7 +39,6 @@ async function get_object(req, res) {
 
     const object_md = await req.object_sdk.read_object_md(md_params);
 
-    http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
     s3_utils.set_response_object_md(res, object_md);
     s3_utils.set_encryption_response_headers(req, res, object_md.encryption);
     if (object_md.storage_class === s3_utils.STORAGE_CLASS_GLACIER) {
@@ -50,6 +49,7 @@ async function get_object(req, res) {
         }
     }
     http_utils.set_response_headers_from_request(req, res);
+    await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle
     const obj_size = object_md.size;
     const params = {
         object_md,
diff --git a/src/endpoint/s3/ops/s3_head_object.js b/src/endpoint/s3/ops/s3_head_object.js
index 47b2ec58dd..005526bddc 100644
--- a/src/endpoint/s3/ops/s3_head_object.js
+++ b/src/endpoint/s3/ops/s3_head_object.js
@@ -24,13 +24,12 @@ async function head_object(req, res) {
     if (req.query.get_from_cache !== undefined) {
         params.get_from_cache = true;
     }
-    http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
-
     const object_md = await req.object_sdk.read_object_md(params);
 
     s3_utils.set_response_object_md(res, object_md);
     s3_utils.set_encryption_response_headers(req, res, object_md.encryption);
     http_utils.set_response_headers_from_request(req, res);
+    await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle
 }
 
 module.exports = {
diff --git a/src/endpoint/s3/ops/s3_put_object.js b/src/endpoint/s3/ops/s3_put_object.js
index c054abe999..55c5b00e64 100644
--- a/src/endpoint/s3/ops/s3_put_object.js
+++ b/src/endpoint/s3/ops/s3_put_object.js
@@ -32,8 +32,6 @@ async function put_object(req, res) {
         sha256_b64: req.content_sha256_buf && req.content_sha256_buf.toString('base64'),
     };
 
-    http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
-
    dbg.log0('PUT OBJECT', req.params.bucket, req.params.key,
        req.headers['x-amz-copy-source'] || '', encryption || '');
     //for copy, use correct s3_event_method. otherwise, just use default (req.method)
@@ -82,6 +80,7 @@ async function put_object(req, res) {
         };
     }
     res.setHeader('ETag', `"${reply.etag}"`);
+    await http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
 
     if (reply.seq) {
         res.seq = reply.seq;
diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js
index 1a3b7fbdda..787a2a0572 100644
--- a/src/sdk/object_sdk.js
+++ b/src/sdk/object_sdk.js
@@ -224,16 +224,6 @@ class ObjectSDK {
         return policy_info;
     }
 
-    async read_bucket_lifecycle_config_info(name) {
-        try {
-            const { bucket } = await bucket_namespace_cache.get_with_cache({ sdk: this, name });
-            return bucket.bucket_info.lifecycle_configuration_rules;
-        } catch (error) {
-            if (error.rpc_code === 'NO_SUCH_BUCKET') return undefined;
-            throw error;
-        }
-    }
-
     async read_bucket_usage_info(name) {
         const { bucket } = await bucket_namespace_cache.get_with_cache({ sdk: this, name });
         return bucket.bucket_info.data;
diff --git a/src/server/system_services/bucket_server.js b/src/server/system_services/bucket_server.js
index 234e1fa05e..268006beff 100644
--- a/src/server/system_services/bucket_server.js
+++ b/src/server/system_services/bucket_server.js
@@ -1654,7 +1654,6 @@ function get_bucket_info({
         website: bucket.website,
         s3_policy: bucket.s3_policy,
         replication_policy_id: bucket.replication_policy_id,
-        lifecycle_configuration_rules: bucket.lifecycle_configuration_rules,
     };
 
     const metrics = _calc_metrics({ bucket, nodes_aggregate_pool, hosts_aggregate_pool, tiering_pools_status, info });
diff --git a/src/util/http_utils.js b/src/util/http_utils.js
index 324b258c8f..06c4e5c009 100644
--- a/src/util/http_utils.js
+++ b/src/util/http_utils.js
@@ -671,20 +671,53 @@ function set_amz_headers(req, res) {
  *
  * @param {Object} req
  * @param {http.ServerResponse} res
+ * @param {Object} object_md
  */
-async function set_expiration_header(req, res) {
-    const rules = req.params.bucket && await req.object_sdk.read_bucket_lifecycle_config_info(req.params.bucket);
-    const object_md = {
-        bucket: req.params.bucket,
-        key: req.params.key,
-        size: req.headers['x-amz-decoded-content-length'] || req.headers['content-length'] ? parse_content_length(req, {
-            ErrorClass: S3Error,
-            error_missing_content_length: S3Error.MissingContentLength
-        }) : undefined,
-        tagging: req.body && req.body.Tagging ? s3_utils.parse_body_tagging_xml(req) : undefined,
-    };
+async function set_expiration_header(req, res, object_md) {
+    const rules = req.params.bucket && await req.object_sdk.get_bucket_lifecycle_configuration_rules({ name: req.params.bucket });
+    if (!object_md) { // calculating object_md for putObject
+        object_md = {
+            bucket: req.params.bucket,
+            key: req.params.key,
+            create_time: new Date().getTime(),
+            size: req.headers['x-amz-decoded-content-length'] || req.headers['content-length'] ? parse_content_length(req, {
+                ErrorClass: S3Error,
+                error_missing_content_length: S3Error.MissingContentLength
+            }) : undefined,
+            tagging: req.body && req.body.Tagging ? s3_utils.parse_body_tagging_xml(req) : undefined,
+        };
+    }
 
-    if (object_md.key && rules?.length > 0) {
+    const matched_rule = get_lifecycle_rule_for_object(rules, object_md);
+    if (matched_rule) {
+        const expiration_header = parse_expiration_header(matched_rule, object_md.create_time);
+        if (expiration_header) {
+            dbg.log1('set x_amz_expiration header from applied rule: ', matched_rule);
+            res.setHeader('x-amz-expiration', expiration_header);
+        }
+    }
+}
+
+/**
+ * get_lifecycle_rule_for_object determines the most specific matching lifecycle rule for the given object metadata
+ *
+ * priority is based on:
+ * - longest matching prefix
+ * - most matching tags
+ * - narrowest object size range
+ *
+ * @param {Array} rules
+ * @param {Object} object_md
+ * @returns {Object|undefined}
+ */
+function get_lifecycle_rule_for_object(rules, object_md) {
+    let matched_rule;
+    let rule_priority = {
+        prefix_len: -1,
+        tag_count: -1,
+        size_span: Infinity,
+    };
+    if (object_md?.key && rules?.length > 0) {
         for (const rule of rules) {
             if (rule?.status !== 'Enabled') continue;
 
@@ -695,47 +728,65 @@
             const filter = rule?.filter || {};
 
             if (filter.prefix && !object_md?.key.startsWith(filter.prefix)) continue;
 
             if (filter.object_size_greater_than && object_md?.size <= filter.object_size_greater_than) continue;
             if (filter.object_size_less_than && object_md?.size >= filter.object_size_less_than) continue;
 
-            if (filter.tagging && Array.isArray(filter.tagging)) {
+            if (filter.tags && Array.isArray(filter.tags)) {
                 const obj_tags = object_md?.tagging || [];
 
-                const matches_all_tags = filter.tagging.every(filter_tag =>
+                const matches_all_tags = filter.tags.every(filter_tag =>
                     obj_tags.some(obj_tag => obj_tag.key === filter_tag.key && obj_tag.value === filter_tag.value)
                 );
 
                 if (!matches_all_tags) continue;
             }
 
-            const expiration_header = parse_expiration_header(rule?.expiration, rule?.id);
-            if (expiration_header) {
-                dbg.log1('set x_amz_expiration header from applied rule: ', rule);
-                res.setHeader('x-amz-expiration', expiration_header);
-                break; // apply only for first matching rule
-            }
+            const priority = {
+                prefix_len: (filter?.prefix || '').length,
+                tag_count: Array.isArray(filter?.tags) ? filter?.tags.length : 0,
+                size_span: (filter?.object_size_less_than ?? Infinity) - (filter?.object_size_greater_than ?? 0)
+            };
+
+            // compare prefix length
+            const is_more_specific_prefix = priority.prefix_len > rule_priority.prefix_len;
+
+            // compare tag count (if prefixes are equal)
+            const is_more_specific_tags = priority.prefix_len === rule_priority.prefix_len &&
+                priority.tag_count > rule_priority.tag_count;
+
+            // compare size span (if prefixes and tags are equal)
+            const is_more_specific_size = priority.prefix_len === rule_priority.prefix_len &&
+                priority.tag_count === rule_priority.tag_count &&
+                priority.size_span < rule_priority.size_span;
+
+            if (is_more_specific_prefix || is_more_specific_tags || is_more_specific_size) {
+                matched_rule = rule;
+                rule_priority = priority;
             }
         }
     }
+    return matched_rule;
 }
 
 /**
  * parse_expiration_header converts an expiration rule (either with `date` or `days`)
  * into an s3 style `x-amz-expiration` header value
  *
- * @param {Object} expiration - expiration object from lifecycle config
- * @param {string} rule_id - id of the lifecycle rule
+ * @param {Object} rule
+ * @param {Object} create_time
  * @returns {string|undefined}
  *
  * Example output:
  *   expiry-date="Thu, 10 Apr 2025 00:00:00 GMT", rule-id="rule_id"
  */
-function parse_expiration_header(expiration, rule_id) {
+function parse_expiration_header(rule, create_time) {
+    const expiration = rule.expiration;
+    const rule_id = rule.id;
+
     if (!expiration || (!expiration.date && !expiration.days)) return undefined;
 
     const expiration_date = expiration.date ?
         new Date(expiration.date) :
-        new Date(Date.UTC(
-            new Date().getUTCFullYear(),
-            new Date().getUTCMonth(),
-            new Date().getUTCDate() + expiration.days
-        ));
+        new Date(create_time + expiration.days * 24 * 60 * 60 * 1000);
+
+    expiration_date.setUTCHours(0, 0, 0, 0); // adjust expiration to midnight UTC
 
     return `expiry-date="${expiration_date.toUTCString()}", rule-id="${rule_id}"`;
 }
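A standalone sketch of the specificity ordering introduced above, assuming the same simplified rule shape; given two overlapping prefix rules, the longer prefix wins, with tag count and size span as tie-breakers (the rule ids here are illustrative only):

// Sketch: pick the more specific of two matching rules using the patch's
// priority order - longer prefix first, then more tags, then narrower size span.
function example_more_specific(a, b) {
    const len = f => (f?.prefix || '').length;
    const tags = f => (Array.isArray(f?.tags) ? f.tags.length : 0);
    const span = f => (f?.object_size_less_than ?? Infinity) - (f?.object_size_greater_than ?? 0);
    if (len(a.filter) !== len(b.filter)) return len(a.filter) > len(b.filter) ? a : b;
    if (tags(a.filter) !== tags(b.filter)) return tags(a.filter) > tags(b.filter) ? a : b;
    return span(a.filter) < span(b.filter) ? a : b;
}

const winner = example_more_specific(
    { id: 'all-logs', filter: { prefix: 'logs/' } },
    { id: 'app-logs', filter: { prefix: 'logs/app/' } }
);
// winner.id === 'app-logs'
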
From df730425e74621c54bbe18d5794e02e0897dbaea Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Mon, 12 May 2025 13:22:58 +0530
Subject: [PATCH 5/8] Moved filter code to lifecycle_utils.js and re-added the
 ceph-s3 tests to the pending/black lists

Signed-off-by: Aayush Chouhan
---
 src/endpoint/s3/ops/s3_put_object.js          |   9 +-
 src/manage_nsfs/nc_lifecycle.js               |  65 +-------
 .../nsfs_s3_tests_black_list.txt              |   3 +
 .../s3-tests-lists/s3_tests_pending_list.txt  |   3 +
 .../jest_tests/test_nc_lifecycle.test.js      |  93 ++++++-----
 src/util/http_utils.js                        | 116 +-------------
 src/util/lifecycle_utils.js                   | 147 ++++++++++++++++++
 7 files changed, 216 insertions(+), 220 deletions(-)

diff --git a/src/endpoint/s3/ops/s3_put_object.js b/src/endpoint/s3/ops/s3_put_object.js
index 55c5b00e64..17389e05c7 100644
--- a/src/endpoint/s3/ops/s3_put_object.js
+++ b/src/endpoint/s3/ops/s3_put_object.js
@@ -80,7 +80,14 @@ async function put_object(req, res) {
         };
     }
     res.setHeader('ETag', `"${reply.etag}"`);
-    await http_utils.set_expiration_header(req, res); // setting expiration header for bucket lifecycle
+
+    const object_info = {
+        key: req.params.key,
+        create_time: new Date().getTime(),
+        size: size,
+        tagging: tagging,
+    };
+    await http_utils.set_expiration_header(req, res, object_info); // setting expiration header for bucket lifecycle
 
     if (reply.seq) {
         res.seq = reply.seq;
diff --git a/src/manage_nsfs/nc_lifecycle.js b/src/manage_nsfs/nc_lifecycle.js
index f679aab3c3..83a1ccc0cb 100644
--- a/src/manage_nsfs/nc_lifecycle.js
+++ b/src/manage_nsfs/nc_lifecycle.js
@@ -271,7 +271,7 @@ class NCLifecycle {
 
         if (candidates.delete_candidates?.length > 0) {
             const expiration = lifecycle_rule.expiration ? this._get_expiration_time(lifecycle_rule.expiration) : 0;
-            const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration});
+            const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration});
             dbg.log0('process_rule: calling delete_multiple_objects, num of objects to be deleted', candidates.delete_candidates.length);
             const delete_res = await this._call_op_and_update_status({
                 bucket_name,
@@ -478,7 +478,7 @@ class NCLifecycle {
         if (rule_state.is_finished) return [];
         const expiration = this._get_expiration_time(lifecycle_rule.expiration);
         if (expiration < 0) return [];
-        const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration});
+        const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration});
 
         const filtered_objects = [];
         // TODO list_objects does not accept a filter and works in batch sizes of 1000. should handle batching
@@ -537,7 +537,7 @@ class NCLifecycle {
         const versions_list = params.versions_list;
         const candidates = [];
         const expiration = lifecycle_rule.expiration?.days ? this._get_expiration_time(lifecycle_rule.expiration) : 0;
-        const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration});
+        const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration});
         for (let i = 0; i < versions_list.objects.length - 1; i++) {
             if (this.filter_expired_delete_marker(versions_list.objects[i], versions_list.objects[i + 1], filter_func)) {
                 candidates.push(versions_list.objects[i]);
@@ -640,7 +640,7 @@ class NCLifecycle {
         }
 
         const versions_list = params.versions_list;
-        const filter_func = this._build_lifecycle_filter({filter: lifecycle_rule.filter, expiration: 0});
+        const filter_func = lifecycle_utils.build_lifecycle_filter({filter: lifecycle_rule.filter, expiration: 0});
         const num_newer_versions = lifecycle_rule.noncurrent_version_expiration.newer_noncurrent_versions;
         const num_non_current_days = lifecycle_rule.noncurrent_version_expiration.noncurrent_days;
         const delete_candidates = [];
@@ -674,7 +674,7 @@ class NCLifecycle {
         const expiration = lifecycle_rule.abort_incomplete_multipart_upload.days_after_initiation;
         const res = [];
 
-        const filter_func = this._build_lifecycle_filter({filter, expiration});
+        const filter_func = lifecycle_utils.build_lifecycle_filter({filter, expiration});
         let dir_handle;
         //TODO this is almost identical to list_uploads except for error handling and support for pagination. should modify list-upload and use it in here instead
         try {
@@ -720,29 +720,6 @@ class NCLifecycle {
     ///////// FILTER HELPERS ////////
     ////////////////////////////////////
 
-    /**
-     * @typedef {{
-     *     filter: Object
-     *     expiration: Number
-     * }} filter_params
-     *
-     * @param {filter_params} params
-     * @returns
-     */
-    _build_lifecycle_filter(params) {
-        /**
-         * @param {Object} object_info
-         */
-        return function(object_info) {
-            if (params.filter?.prefix && !object_info.key.startsWith(params.filter.prefix)) return false;
-            if (params.expiration && object_info.age < params.expiration) return false;
-            if (params.filter?.tags && !_file_contain_tags(object_info, params.filter.tags)) return false;
-            if (params.filter?.object_size_greater_than && object_info.size < params.filter.object_size_greater_than) return false;
-            if (params.filter?.object_size_less_than && object_info.size > params.filter.object_size_less_than) return false;
-            return true;
-        };
-    }
-
     /**
      * get the expiration time in days of an object
      * if rule is set with date, then rule is applied for all objects after that date
@@ -1468,38 +1445,6 @@ class NCLifecycle {
     }
 }
 
-//////////////////
-// TAGS HELPERS //
-//////////////////
-
-/**
- * checks if tag query_tag is in the list tag_set
- * @param {Object} query_tag
- * @param {Array} tag_set
- */
-function _list_contain_tag(query_tag, tag_set) {
-    for (const t of tag_set) {
-        if (t.key === query_tag.key && t.value === query_tag.value) return true;
-    }
-    return false;
-}
-
-/**
- * checks if object has all the tags in filter_tags
- * @param {Object} object_info
- * @param {Array} filter_tags
- * @returns
- */
-function _file_contain_tags(object_info, filter_tags) {
-    if (object_info.tags === undefined) return false;
-    for (const tag of filter_tags) {
-        if (!_list_contain_tag(tag, object_info.tags)) {
-            return false;
-        }
-    }
-    return true;
-}
-
 // EXPORTS
 exports.NCLifecycle = NCLifecycle;
 exports.ILM_POLICIES_TMP_DIR = ILM_POLICIES_TMP_DIR;
diff --git a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
index 9c27e2befa..e098992817 100644
--- a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
+++ b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
@@ -363,6 +363,9 @@ s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioned_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_noncur_tags1
 s3tests_boto3/functional/test_s3.py::test_lifecycle_set_date
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_put
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_head
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_tags_head
 s3tests_boto3/functional/test_s3.py::test_lifecycle_transition_set_invalid_date
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_newer_noncurrent
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_size_gt
diff --git a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt
index dd5af5df9e..8f1d7b4db2 100644
--- a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt
+++ b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/s3_tests_pending_list.txt
@@ -35,6 +35,9 @@ s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioned_tags2
 s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_noncur_tags1
 s3tests_boto3/functional/test_s3.py::test_lifecycle_set_date
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_put
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_head
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_tags_head
 s3tests_boto3/functional/test_s3.py::test_lifecycle_transition_set_invalid_date
 s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_c_s3
 s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_c_kms
diff --git a/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js b/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js
index cc2c5ce4d2..a61c36e77f 100644
--- a/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js
+++ b/src/test/unit_tests/jest_tests/test_nc_lifecycle.test.js
@@ -9,10 +9,9 @@
 const path = require('path');
 const crypto = require('crypto');
 const config = require('../../../../config');
 const fs_utils = require('../../../util/fs_utils');
-const { ConfigFS } = require('../../../sdk/config_fs');
 const NamespaceFS = require('../../../sdk/namespace_fs');
 const buffer_utils = require('../../../util/buffer_utils');
-const { NCLifecycle } = require('../../../manage_nsfs/nc_lifecycle');
+const lifecycle_utils = require('../../../../src/util/lifecycle_utils');
 const endpoint_stats_collector = require('../../../sdk/endpoint_stats_collector');
 const { TMP_PATH, set_nc_config_dir_in_config, TEST_TIMEOUT } = require('../../system_tests/test_utils');
@@ -21,9 +20,7 @@ const config_root = path.join(TMP_PATH, 'config_root_nc_lifecycle');
 const root_path = path.join(TMP_PATH, 'root_path_nc_lifecycle/');
 const bucket_name = 'lifecycle_bucket';
 const bucket_path = path.join(root_path, bucket_name);
-const config_fs = new ConfigFS(config_root);
 const dummy_object_sdk = make_dummy_object_sdk();
-const nc_lifecycle = new NCLifecycle(config_fs);
 const key = 'obj1.txt';
 const data = crypto.randomBytes(100);
 
@@ -90,7 +87,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should fail on wrong prefix - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -98,7 +95,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should fail on wrong object_size_less_than - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -106,7 +103,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should fail on wrong object_size_greater_than - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -114,7 +111,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should fail on wrong tags - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -122,7 +119,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should fail on expiration - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -133,7 +130,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should pass on wrong prefix - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
     });
@@ -141,7 +138,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should pass on wrong object_size_less_than - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
     });
@@ -149,7 +146,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should pass on wrong object_size_greater_than - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
     });
@@ -158,7 +155,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const tagging = [{ key: 'a', value: 'b' }];
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer, tagging }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
     });
@@ -166,7 +163,7 @@ describe('delete_multiple_objects + filter', () => {
     it('delete_multiple_objects - filter should pass on expiration - versioning DISABLED bucket', async () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
     });
@@ -178,7 +175,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -187,7 +184,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -196,7 +193,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -205,7 +202,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -214,7 +211,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
     });
@@ -226,7 +223,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { should_create_a_delete_marker: true });
     });
@@ -235,7 +232,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { should_create_a_delete_marker: true });
     });
@@ -244,7 +241,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { should_create_a_delete_marker: true });
     });
@@ -254,7 +251,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const tagging = [{ key: 'a', value: 'b' }];
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer, tagging }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { should_create_a_delete_marker: true });
     });
@@ -263,7 +260,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 });
         const delete_res = await nsfs.delete_multiple_objects({ objects: [{ key }], filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { should_create_a_delete_marker: true });
     });
@@ -275,7 +272,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -285,7 +282,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -295,7 +292,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -305,7 +302,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -315,7 +312,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -329,7 +326,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
@@ -339,7 +336,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
@@ -349,7 +346,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
@@ -361,7 +358,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const tagging = [{ key: 'a', value: 'b' }];
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer, tagging },
            dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
@@ -371,7 +368,7 @@ describe('delete_multiple_objects + filter', () => {
         nsfs.versioning = 'ENABLED';
         const data_buffer = buffer_utils.buffer_to_read_stream(data);
         const upload_res = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res);
@@ -386,7 +383,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -398,7 +395,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 99 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -410,7 +407,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -422,7 +419,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b'}] }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -434,7 +431,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deletion_failed(delete_res);
@@ -449,7 +446,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'ob' }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id});
@@ -461,7 +458,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_less_than: 101 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id});
@@ -473,7 +470,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 1 }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id});
@@ -486,7 +483,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: tagging }, expiration: 0 });
         const objects_to_delete = [{ key, version_id: upload_res1.version_id }];
         const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk);
         await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id});
@@ -498,7 +495,7 @@ describe('delete_multiple_objects + filter', () => {
         const data_buffer2 = buffer_utils.buffer_to_read_stream(data);
         const upload_res1 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk);
         const upload_res2 = await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer2 }, dummy_object_sdk);
-        const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 0 });
+        const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 0 });
         const objects_to_delete = [{ key, version_id:
upload_res1.version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deleted(delete_res, { new_latest_version: upload_res2.version_id}); @@ -513,7 +510,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { prefix: 'd' }, expiration: 0 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res, { latest_delete_marker: true }); @@ -525,7 +522,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { object_size_greater_than: 101 }, expiration: 0 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res, { latest_delete_marker: true }); @@ -537,7 +534,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b' }] }, expiration: 0 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ filter: { tags: [{ key: 'a', value: 'b' }] }, expiration: 0 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res, { latest_delete_marker: true }); @@ -549,7 +546,7 @@ describe('delete_multiple_objects + filter', () => { await nsfs.upload_object({ bucket: bucket_name, key: key, source_stream: data_buffer1 }, dummy_object_sdk); const delete_res1 = await nsfs.delete_object({ bucket: bucket_name, key: key }, dummy_object_sdk); expect(delete_res1.created_delete_marker).toBe(true); - const filter_func = nc_lifecycle._build_lifecycle_filter({ expiration: 5 }); + const filter_func = lifecycle_utils.build_lifecycle_filter({ expiration: 5 }); const objects_to_delete = [{ key, version_id: delete_res1.created_version_id }]; const delete_res2 = await nsfs.delete_multiple_objects({ objects: objects_to_delete, filter_func }, dummy_object_sdk); await assert_object_deletion_failed(delete_res2, { latest_delete_marker: true 
diff --git a/src/util/http_utils.js b/src/util/http_utils.js
index 06c4e5c009..670f4783da 100644
--- a/src/util/http_utils.js
+++ b/src/util/http_utils.js
@@ -23,7 +23,7 @@ const net_utils = require('./net_utils');
 const time_utils = require('./time_utils');
 const cloud_utils = require('./cloud_utils');
 const ssl_utils = require('../util/ssl_utils');
-const s3_utils = require('../endpoint/s3/s3_utils');
+const lifecycle_utils = require('../../src/util/lifecycle_utils');
 const RpcError = require('../rpc/rpc_error');
 const S3Error = require('../endpoint/s3/s3_errors').S3Error;
 
@@ -671,26 +671,14 @@ function set_amz_headers(req, res) {
  *
  * @param {Object} req
  * @param {http.ServerResponse} res
- * @param {Object} object_md
+ * @param {Object} object_info
  */
-async function set_expiration_header(req, res, object_md) {
+async function set_expiration_header(req, res, object_info) {
     const rules = req.params.bucket && await req.object_sdk.get_bucket_lifecycle_configuration_rules({ name: req.params.bucket });
-    if (!object_md) { // calculating object_md for putObject
-        object_md = {
-            bucket: req.params.bucket,
-            key: req.params.key,
-            create_time: new Date().getTime(),
-            size: req.headers['x-amz-decoded-content-length'] || req.headers['content-length'] ? parse_content_length(req, {
-                ErrorClass: S3Error,
-                error_missing_content_length: S3Error.MissingContentLength
-            }) : undefined,
-            tagging: req.body && req.body.Tagging ? s3_utils.parse_body_tagging_xml(req) : undefined,
-        };
-    }
-    const matched_rule = get_lifecycle_rule_for_object(rules, object_md);
+    const matched_rule = lifecycle_utils.get_lifecycle_rule_for_object(rules, object_info);
     if (matched_rule) {
-        const expiration_header = parse_expiration_header(matched_rule, object_md.create_time);
+        const expiration_header = lifecycle_utils.build_expiration_header(matched_rule, object_info.create_time);
         if (expiration_header) {
             dbg.log1('set x_amz_expiration header from applied rule: ', matched_rule);
             res.setHeader('x-amz-expiration', expiration_header);
@@ -698,100 +686,6 @@
     }
 }
 
-/**
- * get_lifecycle_rule_for_object determines the most specific matching lifecycle rule for the given object metadata
- *
- * priority is based on:
- * - longest matching prefix
- * - most matching tags
- * - narrowest object size range
- *
- * @param {Array} rules
- * @param {Object} object_md
- * @returns {Object|undefined}
- */
-function get_lifecycle_rule_for_object(rules, object_md) {
-    let matched_rule;
-    let rule_priority = {
-        prefix_len: -1,
-        tag_count: -1,
-        size_span: Infinity,
-    };
-    if (object_md?.key && rules?.length > 0) {
-        for (const rule of rules) {
-            if (rule?.status !== 'Enabled') continue;
-
-            const filter = rule?.filter || {};
-
-            if (filter.prefix && !object_md?.key.startsWith(filter.prefix)) continue;
-
-            if (filter.object_size_greater_than && object_md?.size <= filter.object_size_greater_than) continue;
-            if (filter.object_size_less_than && object_md?.size >= filter.object_size_less_than) continue;
-
-            if (filter.tags && Array.isArray(filter.tags)) {
-                const obj_tags = object_md?.tagging || [];
-
-                const matches_all_tags = filter.tags.every(filter_tag =>
-                    obj_tags.some(obj_tag => obj_tag.key === filter_tag.key && obj_tag.value === filter_tag.value)
-                );
-
-                if (!matches_all_tags) continue;
-            }
-
-            const priority = {
-                prefix_len: (filter?.prefix || '').length,
-                tag_count: Array.isArray(filter?.tags) ? filter?.tags.length : 0,
-                size_span: (filter?.object_size_less_than ?? Infinity) - (filter?.object_size_greater_than ?? 0)
-            };
-
-            // compare prefix length
-            const is_more_specific_prefix = priority.prefix_len > rule_priority.prefix_len;
-
-            // compare tag count (if prefixes are equal)
-            const is_more_specific_tags = priority.prefix_len === rule_priority.prefix_len &&
-                priority.tag_count > rule_priority.tag_count;
-
-            // compare size span (if prefixes and tags are equal)
-            const is_more_specific_size = priority.prefix_len === rule_priority.prefix_len &&
-                priority.tag_count === rule_priority.tag_count &&
-                priority.size_span < rule_priority.size_span;
-
-            if (is_more_specific_prefix || is_more_specific_tags || is_more_specific_size) {
-                matched_rule = rule;
-                rule_priority = priority;
-            }
-        }
-    }
-    return matched_rule;
-}
-
-/**
- * parse_expiration_header converts an expiration rule (either with `date` or `days`)
- * into an s3 style `x-amz-expiration` header value
- *
- * @param {Object} rule
- * @param {Object} create_time
- * @returns {string|undefined}
- *
- * Example output:
- *   expiry-date="Thu, 10 Apr 2025 00:00:00 GMT", rule-id="rule_id"
- */
-function parse_expiration_header(rule, create_time) {
-    const expiration = rule.expiration;
-    const rule_id = rule.id;
-
-    if (!expiration || (!expiration.date && !expiration.days)) return undefined;
-
-    const expiration_date = expiration.date ?
-        new Date(expiration.date) :
-        new Date(create_time + expiration.days * 24 * 60 * 60 * 1000);
-
-    expiration_date.setUTCHours(0, 0, 0, 0); // adjust expiration to midnight UTC
-
-    return `expiry-date="${expiration_date.toUTCString()}", rule-id="${rule_id}"`;
-}
-
-
 /**
  * @typedef {{
  *  allow_origin: string;
diff --git a/src/util/lifecycle_utils.js b/src/util/lifecycle_utils.js
index dc7d22434c..142e4daa45 100644
--- a/src/util/lifecycle_utils.js
+++ b/src/util/lifecycle_utils.js
@@ -69,7 +69,154 @@ function file_matches_filter({obj_info, filter_func = undefined}) {
     return true;
 }
 
+/**
+ * get_lifecycle_rule_for_object determines the most specific matching lifecycle rule for the given object metadata
+ *
+ * priority is based on:
+ * - longest matching prefix
+ * - most matching tags
+ * - narrowest object size range
+ *
+ * @param {Array} rules
+ * @param {Object} object_info
+ * @returns {Object|undefined}
+ */
+function get_lifecycle_rule_for_object(rules, object_info) {
+    let matched_rule;
+    let rule_priority = {
+        prefix_len: -1,
+        tag_count: -1,
+        size_span: Infinity,
+    };
+    if (object_info?.key && rules?.length > 0) {
+        for (const rule of rules) {
+            if (rule?.status !== 'Enabled') continue;
+
+            const filter = rule?.filter || {};
+
+            const filter_func = build_lifecycle_filter(filter);
+
+            if (!filter_func(object_info)) { continue; }
+
+            const priority = {
+                prefix_len: (filter?.prefix || '').length,
+                tag_count: Array.isArray(filter?.tags) ? filter?.tags.length : 0,
+                size_span: (filter?.object_size_less_than ?? Infinity) - (filter?.object_size_greater_than ?? 0)
+            };
+
+            // compare prefix length
+            const is_more_specific_prefix = priority.prefix_len > rule_priority.prefix_len;
+
+            // compare tag count (if prefixes are equal)
+            const is_more_specific_tags = priority.prefix_len === rule_priority.prefix_len &&
+                priority.tag_count > rule_priority.tag_count;
+
+            // compare size span (if prefixes and tags are equal)
+            const is_more_specific_size = priority.prefix_len === rule_priority.prefix_len &&
+                priority.tag_count === rule_priority.tag_count &&
+                priority.size_span < rule_priority.size_span;
+
+            if (is_more_specific_prefix || is_more_specific_tags || is_more_specific_size) {
+                matched_rule = rule;
+                rule_priority = priority;
+            }
+        }
+    }
+    return matched_rule;
+}
+
+/**
+ * build_expiration_header converts an expiration rule (either with `date` or `days`)
+ * into an s3 style `x-amz-expiration` header value
+ *
+ * @param {Object} rule
+ * @param {number} create_time
+ * @returns {string|undefined}
+ *
+ * Example output:
+ *   expiry-date="Thu, 10 Apr 2025 00:00:00 GMT", rule-id="rule_id"
+ */
+function build_expiration_header(rule, create_time) {
+    const expiration = rule.expiration;
+    const rule_id = rule.id;
+
+    if (!expiration || (!expiration.date && !expiration.days)) return undefined;
+
+    const expiration_date = expiration.date ?
+        new Date(expiration.date) :
+        new Date(create_time + expiration.days * 24 * 60 * 60 * 1000);
+
+    expiration_date.setUTCHours(0, 0, 0, 0); // adjust expiration to midnight UTC
+
+    return `expiry-date="${expiration_date.toUTCString()}", rule-id="${rule_id}"`;
+}
+
+/////////////////////
+// FILTERS HELPERS //
+/////////////////////
+
+/**
+ * @typedef {{
+ *      filter: Object
+ *      expiration: Number
+ * }} filter_params
+ *
+ * builds lifecycle filter function
+ *
+ * @param {filter_params} params
+ * @returns
+ */
+function build_lifecycle_filter(params) {
+    /**
+     * @param {Object} object_info
+     */
+    return function(object_info) {
+        if (params.filter?.prefix && !object_info.key.startsWith(params.filter.prefix)) return false;
+        if (params.expiration && object_info.age < params.expiration) return false;
+        if (params.filter?.tags && !file_contain_tags(object_info, params.filter.tags)) return false;
+        if (params.filter?.object_size_greater_than && object_info.size < params.filter.object_size_greater_than) return false;
+        if (params.filter?.object_size_less_than && object_info.size > params.filter.object_size_less_than) return false;
+        return true;
+    };
+}
+
+//////////////////
+// TAGS HELPERS //
+//////////////////
+
+/**
+ * checks if tag query_tag is in the list tag_set
+ * @param {Object} query_tag
+ * @param {Array} tag_set
+ */
+function list_contain_tag(query_tag, tag_set) {
+    for (const t of tag_set) {
+        if (t.key === query_tag.key && t.value === query_tag.value) return true;
+    }
+    return false;
+}
+
+/**
+ * checks if object has all the tags in filter_tags
+ * @param {Object} object_info
+ * @param {Array} filter_tags
+ * @returns
+ */
+function file_contain_tags(object_info, filter_tags) {
+    const object_tags = object_info.tags || object_info.tagging;
+    if (!object_tags) return false;
+    for (const tag of filter_tags) {
+        if (!list_contain_tag(tag, object_tags)) {
+            return false;
+        }
+    }
+    return true;
+}
+
 exports.get_latest_nc_lifecycle_run_status = get_latest_nc_lifecycle_run_status;
 exports.file_matches_filter = file_matches_filter;
 exports.get_lifecycle_object_info_for_filter = get_lifecycle_object_info_for_filter;
 exports.get_file_age_days = get_file_age_days;
+exports.get_lifecycle_rule_for_object = get_lifecycle_rule_for_object;
+exports.build_expiration_header = build_expiration_header;
+exports.build_lifecycle_filter = build_lifecycle_filter;

From 5dd314fbaaaed48930f775d8d221fb188d838459 Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Mon, 12 May 2025 20:21:50 +0530
Subject: [PATCH 6/8] Created new functions for priority

Signed-off-by: Aayush Chouhan
---
 src/util/lifecycle_utils.js | 88 +++++++++++++++++++++++--------------
 1 file changed, 56 insertions(+), 32 deletions(-)

diff --git a/src/util/lifecycle_utils.js b/src/util/lifecycle_utils.js
index 142e4daa45..4226c4e4a3 100644
--- a/src/util/lifecycle_utils.js
+++ b/src/util/lifecycle_utils.js
@@ -72,56 +72,37 @@ function file_matches_filter({obj_info, filter_func = undefined}) {
 /**
  * get_lifecycle_rule_for_object determines the most specific matching lifecycle rule for the given object metadata
  *
- * priority is based on:
- * - longest matching prefix
- * - most matching tags
- * - narrowest object size range
- *
  * @param {Array} rules
  * @param {Object} object_info
  * @returns {Object|undefined}
  */
 function get_lifecycle_rule_for_object(rules, object_info) {
+    if (!object_info?.key || !Array.isArray(rules) || rules.length < 1) return;
+
     let matched_rule;
-    let rule_priority = {
+    let curr_priority = {
         prefix_len: -1,
         tag_count: -1,
         size_span: Infinity,
     };
-    if (object_info?.key && rules?.length > 0) {
-        for (const rule of rules) {
-            if (rule?.status !== 'Enabled') continue;
-
-            const filter = rule?.filter || {};
-
-            const filter_func = build_lifecycle_filter(filter);
-
-            if (!filter_func(object_info)) { continue; }
-
-            const priority = {
-                prefix_len: (filter?.prefix || '').length,
-                tag_count: Array.isArray(filter?.tags) ? filter?.tags.length : 0,
-                size_span: (filter?.object_size_less_than ?? Infinity) - (filter?.object_size_greater_than ?? 0)
-            };
-
-            // compare prefix length
-            const is_more_specific_prefix = priority.prefix_len > rule_priority.prefix_len;
-
-            // compare tag count (if prefixes are equal)
-            const is_more_specific_tags = priority.prefix_len === rule_priority.prefix_len &&
-                priority.tag_count > rule_priority.tag_count;
-
-            // compare size span (if prefixes and tags are equal)
-            const is_more_specific_size = priority.prefix_len === rule_priority.prefix_len &&
-                priority.tag_count === rule_priority.tag_count &&
-                priority.size_span < rule_priority.size_span;
-
-            if (is_more_specific_prefix || is_more_specific_tags || is_more_specific_size) {
-                matched_rule = rule;
-                rule_priority = priority;
-            }
+    for (const rule of rules) {
+        if (rule?.status !== 'Enabled') continue;
+
+        const filter = rule?.filter || {};
+
+        const filter_func = build_lifecycle_filter(filter);
+
+        if (!filter_func(object_info)) { continue; }
+
+        const new_priority = get_rule_priority(filter);
+
+        if (compare_rule_priority(curr_priority, new_priority)) {
+            matched_rule = rule;
+            curr_priority = new_priority;
         }
     }
+
     return matched_rule;
 }
@@ -180,6 +161,49 @@ function build_lifecycle_filter(params) {
     };
 }
 
+/**
+ * get_rule_priority calculates the priority of a lifecycle rule's filter
+ *
+ * @param {Object} filter
+ * @returns {Object} priority object
+ */
+function get_rule_priority(filter) {
+    return {
+        prefix_len: (filter?.prefix || '').length,
+        tag_count: Array.isArray(filter?.tags) ? filter.tags.length : 0,
+        size_span: (filter?.object_size_less_than ?? Infinity) - (filter?.object_size_greater_than ?? 0)
+    };
+}
+
+/**
+ * compare_rule_priority determines if a new rule has higher priority
+ *
+ * priority is based on:
+ * - longest matching prefix
+ * - most matching tags
+ * - narrowest object size range
+ *
+ * @param {Object} curr_priority
+ * @param {Object} new_priority
+ * @returns {boolean}
+ */
+function compare_rule_priority(curr_priority, new_priority) {
+    // compare prefix length
+    if (new_priority.prefix_len > curr_priority.prefix_len) return true;
+
+    if (new_priority.prefix_len === curr_priority.prefix_len) {
+        // compare tag count (if prefixes are equal)
+        if (new_priority.tag_count > curr_priority.tag_count) return true;
+
+        if (new_priority.tag_count === curr_priority.tag_count) {
+            // compare size span (if prefixes and tags are equal)
+            if (new_priority.size_span < curr_priority.size_span) return true;
+        }
+    }
+
+    return false;
+}
+
 //////////////////
 // TAGS HELPERS //
 //////////////////

From aa4238e21b5760b03841d52782259104eb780fe2 Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Mon, 12 May 2025 23:32:44 +0530
Subject: [PATCH 7/8] Added unit tests

Signed-off-by: Aayush Chouhan
---
 src/test/unit_tests/test_lifecycle.js | 117 ++++++++++++++++++++++++++
 1 file changed, 117 insertions(+)

diff --git a/src/test/unit_tests/test_lifecycle.js b/src/test/unit_tests/test_lifecycle.js
index 512e790fef..efd0e619a6 100644
--- a/src/test/unit_tests/test_lifecycle.js
+++ b/src/test/unit_tests/test_lifecycle.js
@@ -799,6 +799,123 @@ mocha.describe('lifecycle', () => {
         }
     });
 
+    mocha.describe('bucket-lifecycle-expiration-header', function() {
+        const bucket = Bucket;
+
+        const run_expiration_test = async ({ rules, expected_id, expected_days, key, tagging = undefined, size = 1000}) => {
+            const putLifecycleParams = {
+                Bucket: bucket,
+                LifecycleConfiguration: { Rules: rules }
+            };
+            await s3.putBucketLifecycleConfiguration(putLifecycleParams);
+
+            const putObjectParams = {
+                Bucket: bucket,
+                Key: key,
+                Body: 'x'.repeat(size) // default 1KB if size not specified
+            };
+            if (tagging) {
+                putObjectParams.Tagging = tagging;
+            }
+            const start_time = new Date();
+            let res = await s3.putObject(putObjectParams);
+            assert.ok(res.Expiration, 'expiration header missing in putObject response');
+
+            res = await s3.headObject({ Bucket: bucket, Key: key });
+            assert.ok(res.Expiration, 'expiration header missing in headObject response');
+
+            const valid = validate_expiration_header(res.Expiration, start_time, expected_id, expected_days);
+            assert.ok(valid, `expected rule ${expected_id} to match`);
+        };
+
+        function generate_rule(id, prefix, tags, size_gt, size_lt, expiration_days) {
+            const filter = {};
+            if (prefix) filter.Prefix = prefix;
+            if (tags.length) filter.Tags = tags;
+            if (size_gt !== undefined) filter.ObjectSizeGreaterThan = size_gt;
+            if (size_lt !== undefined) filter.ObjectSizeLessThan = size_lt;
+
+            return {
+                ID: id,
+                Status: 'Enabled',
+                Filter: filter,
+                Expiration: { Days: expiration_days },
+            };
+        }
+
+        function validate_expiration_header(expiration_header, start_time, expected_rule_id, delta_days) {
+            const match = expiration_header.match(/expiry-date="(.+)", rule-id="(.+)"/);
+            if (!match) return false;
+            console.log("match: ", match);
+
+            const [, expiry_str, rule_id] = match;
+            const expiration_date = new Date(expiry_str);
+            const start = new Date(start_time);
+            start_time.setUTCHours(0, 0, 0, 0); // adjusting to midnight UTC otherwise the tests will fail - similar to ceph-s3 tests
+
+            const days_diff = Math.floor((expiration_date.getTime() - start.getTime()) / (24 * 60 * 60 * 1000));
+
+            return days_diff === delta_days && rule_id === expected_rule_id;
+        }
+
+        mocha.it('should select rule with longest prefix', async () => {
+            const rules = [
+                generate_rule('short-prefix', 'test1/', [], undefined, undefined, 10),
+                generate_rule('long-prefix', 'test1/logs/', [], undefined, undefined, 17),
+            ];
+            await run_expiration_test({
+                rules,
+                key: 'test1/logs/file.txt',
+                expected_id: 'long-prefix',
+                expected_days: 17
+            });
+        });
+
+        mocha.it('should select rule with more tags when prefix is the same', async () => {
+            const rules = [
+                generate_rule('one-tag', 'test2/', [{ Key: 'env', Value: 'prod' }], undefined, undefined, 5),
+                generate_rule('two-tags', 'test2/', [
+                    { Key: 'env', Value: 'prod' },
+                    { Key: 'team', Value: 'backend' }
+                ], undefined, undefined, 9),
+            ];
+            await run_expiration_test({
+                rules,
+                key: 'test2/file2.txt',
+                tagging: 'env=prod&team=backend',
+                expected_id: 'two-tags',
+                expected_days: 9
+            });
+        });
+
+        mocha.it('should select rule with narrower size span when prefix and tags match', async () => {
+            const rules = [
+                generate_rule('wide-range', 'test3/', [], 100, 10000, 4),
+                generate_rule('narrow-range', 'test3/', [], 1000, 5000, 6),
+            ];
+            await run_expiration_test({
+                rules,
+                key: 'test3/file3.txt',
+                size: 1500,
+                expected_id: 'narrow-range',
+                expected_days: 6
+            });
+        });
+
+        mocha.it('should fall back to first matching rule if all filters are equal', async () => {
+            const rules = [
+                generate_rule('rule-a', 'test4/', [], 0, 10000, 7),
+                generate_rule('rule-b', 'test4/', [], 0, 10000, 11),
+            ];
+            await run_expiration_test({
+                rules,
+                key: 'test4/file4.txt',
+                expected_id: 'rule-a',
+                expected_days: 7
+            });
+        });
+    });
+
     function readable_buffer(data, split = 1, finish = 'end') {
         const max = Math.ceil(data.length / split);
         let pos = 0;

From f26854eaf481a5e25b4382d7b597a103fb1ce095 Mon Sep 17 00:00:00 2001
From: Aayush Chouhan
Date: Tue, 13 May 2025 15:33:28 +0530
Subject: [PATCH 8/8] Fixed lifecycle filter usage, versioned reads, and tests

Signed-off-by: Aayush Chouhan
---
 src/endpoint/s3/ops/s3_get_object.js  |  2 +-
 src/endpoint/s3/ops/s3_head_object.js |  2 +-
 src/test/unit_tests/test_lifecycle.js | 18 ++++++++++--------
 src/util/lifecycle_utils.js           | 10 +++-------
 4 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/src/endpoint/s3/ops/s3_get_object.js b/src/endpoint/s3/ops/s3_get_object.js
index 693b996234..69c2391817 100644
--- a/src/endpoint/s3/ops/s3_get_object.js
+++ b/src/endpoint/s3/ops/s3_get_object.js
@@ -49,7 +49,7 @@ async function get_object(req, res) {
         }
     }
     http_utils.set_response_headers_from_request(req, res);
-    await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle
+    if (!version_id) await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle
     const obj_size = object_md.size;
     const params = {
         object_md,
diff --git a/src/endpoint/s3/ops/s3_head_object.js b/src/endpoint/s3/ops/s3_head_object.js
index 005526bddc..c34db17d6a 100644
--- a/src/endpoint/s3/ops/s3_head_object.js
+++ b/src/endpoint/s3/ops/s3_head_object.js
@@ -29,7 +29,7 @@ async function head_object(req, res) {
     s3_utils.set_response_object_md(res, object_md);
     s3_utils.set_encryption_response_headers(req, res, object_md.encryption);
     http_utils.set_response_headers_from_request(req, res);
-    await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle
+    if (!params.version_id) await http_utils.set_expiration_header(req, res, object_md); // setting expiration header for bucket lifecycle
 }
 
 module.exports = {
diff --git a/src/test/unit_tests/test_lifecycle.js b/src/test/unit_tests/test_lifecycle.js
index efd0e619a6..3a5127480e 100644
--- a/src/test/unit_tests/test_lifecycle.js
+++ b/src/test/unit_tests/test_lifecycle.js
@@ -829,11 +829,13 @@ mocha.describe('lifecycle', () => {
         };
 
         function generate_rule(id, prefix, tags, size_gt, size_lt, expiration_days) {
-            const filter = {};
-            if (prefix) filter.Prefix = prefix;
-            if (tags.length) filter.Tags = tags;
-            if (size_gt !== undefined) filter.ObjectSizeGreaterThan = size_gt;
-            if (size_lt !== undefined) filter.ObjectSizeLessThan = size_lt;
+            const filters = {};
+            if (prefix) filters.Prefix = prefix;
+            if (Array.isArray(tags) && tags.length) filters.Tags = tags;
+            if (size_gt !== undefined) filters.ObjectSizeGreaterThan = size_gt;
+            if (size_lt !== undefined) filters.ObjectSizeLessThan = size_lt;
+
+            const filter = Object.keys(filters).length > 1 ? { And: filters } : filters;
 
             return {
                 ID: id,
                 Status: 'Enabled',
                 Filter: filter,
                 Expiration: { Days: expiration_days },
             };
@@ -849,11 +851,11 @@ mocha.describe('lifecycle', () => {
             console.log("match: ", match);
 
             const [, expiry_str, rule_id] = match;
-            const expiration_date = new Date(expiry_str);
+            const expiration = new Date(expiry_str);
             const start = new Date(start_time);
-            start_time.setUTCHours(0, 0, 0, 0); // adjusting to midnight UTC otherwise the tests will fail - similar to ceph-s3 tests
+            start.setUTCHours(0, 0, 0, 0); // adjusting to midnight UTC otherwise the tests will fail - fix for ceph-s3 tests
 
-            const days_diff = Math.floor((expiration_date.getTime() - start.getTime()) / (24 * 60 * 60 * 1000));
+            const days_diff = Math.floor((expiration.getTime() - start.getTime()) / (24 * 60 * 60 * 1000));
 
             return days_diff === delta_days && rule_id === expected_rule_id;
         }
diff --git a/src/util/lifecycle_utils.js b/src/util/lifecycle_utils.js
index 4226c4e4a3..239ea8116e 100644
--- a/src/util/lifecycle_utils.js
+++ b/src/util/lifecycle_utils.js
@@ -89,20 +89,16 @@ function get_lifecycle_rule_for_object(rules, object_info) {
     for (const rule of rules) {
         if (rule?.status !== 'Enabled') continue;
 
-        const filter = rule?.filter || {};
+        const filter_func = build_lifecycle_filter(rule);
+        if (!filter_func(object_info)) continue;
 
-        const filter_func = build_lifecycle_filter(filter);
-
-        if (!filter_func(object_info)) { continue; }
-
-        const new_priority = get_rule_priority(filter);
+        const new_priority = get_rule_priority(rule.filter);
 
         if (compare_rule_priority(curr_priority, new_priority)) {
             matched_rule = rule;
             curr_priority = new_priority;
         }
     }
-
     return matched_rule;
 }
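
Editor's note: to make the final behavior of this series concrete, here is a minimal standalone sketch of how a matched lifecycle rule becomes an x-amz-expiration value. It inlines simplified stand-ins for get_rule_priority, compare_rule_priority, and build_expiration_header from src/util/lifecycle_utils.js (the real module also delegates tag and size matching to build_lifecycle_filter); the sample rules, object key, and timestamps are hypothetical and this is an illustration, not the module itself.

'use strict';

// Simplified stand-in: more specific = longer prefix, more tags, narrower size span.
function get_rule_priority(filter = {}) {
    return {
        prefix_len: (filter.prefix || '').length,
        tag_count: Array.isArray(filter.tags) ? filter.tags.length : 0,
        size_span: (filter.object_size_less_than ?? Infinity) - (filter.object_size_greater_than ?? 0),
    };
}

// Simplified stand-in: true when `next` beats `curr` - prefix first, then tags, then size span.
function compare_rule_priority(curr, next) {
    if (next.prefix_len !== curr.prefix_len) return next.prefix_len > curr.prefix_len;
    if (next.tag_count !== curr.tag_count) return next.tag_count > curr.tag_count;
    return next.size_span < curr.size_span;
}

// Simplified stand-in: renders the header from either an absolute date or a day count.
function build_expiration_header(rule, create_time) {
    const { expiration, id } = rule;
    if (!expiration || (!expiration.date && !expiration.days)) return undefined;
    const expiry = expiration.date ?
        new Date(expiration.date) :
        new Date(create_time + expiration.days * 24 * 60 * 60 * 1000);
    expiry.setUTCHours(0, 0, 0, 0); // the header always points at midnight UTC, as in the patches
    return `expiry-date="${expiry.toUTCString()}", rule-id="${id}"`;
}

// Hypothetical bucket config: both enabled rules match 'logs/app.log' by prefix,
// so the longer prefix ('logs/') must win the priority comparison.
const object_info = { key: 'logs/app.log', create_time: Date.now() };
const rules = [
    { id: 'all-objects', status: 'Enabled', filter: { prefix: '' }, expiration: { days: 30 } },
    { id: 'logs-only', status: 'Enabled', filter: { prefix: 'logs/' }, expiration: { days: 7 } },
];

let matched_rule;
let curr_priority = { prefix_len: -1, tag_count: -1, size_span: Infinity };
for (const rule of rules) {
    if (rule.status !== 'Enabled') continue;
    if (rule.filter.prefix && !object_info.key.startsWith(rule.filter.prefix)) continue;
    const new_priority = get_rule_priority(rule.filter);
    if (compare_rule_priority(curr_priority, new_priority)) {
        matched_rule = rule;
        curr_priority = new_priority;
    }
}

console.log(build_expiration_header(matched_rule, object_info.create_time));
// => expiry-date="Thu, 22 May 2025 00:00:00 GMT", rule-id="logs-only" (date depends on run time)

Run under Node.js, the sketch prints an expiry-date rounded down to midnight UTC with rule-id="logs-only", the same shape the new unit tests assert on through the SDK's Expiration response field.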