diff --git a/constants.js b/constants.js
index 728574b88c..044530490f 100644
--- a/constants.js
+++ b/constants.js
@@ -177,8 +177,7 @@ const constants = {
     assumedRoleArnResourceType: 'assumed-role',
     // Session name of the backbeat lifecycle assumed role session.
     backbeatLifecycleSessionName: 'backbeat-lifecycle',
-    // Backends that support metadata batching
-    supportsBatchingMethods: ['mongodb'],
+    multiObjectDeleteConcurrency: 50,
 };
 
 module.exports = constants;
diff --git a/lib/Config.js b/lib/Config.js
index 5b2b28675b..9fe40bf3b6 100644
--- a/lib/Config.js
+++ b/lib/Config.js
@@ -17,6 +17,7 @@ const {
     azureAccountNameRegex, base64Regex,
 } = require('../constants');
 const { utapiVersion } = require('utapi');
 const { versioning } = require('arsenal');
+const constants = require('../constants');
 
 const versionIdUtils = versioning.VersionID;
@@ -1320,6 +1321,13 @@ class Config extends EventEmitter {
         }
         this.lifecycleRoleName = config.lifecycleRoleName || null;
 
+        this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
+        if (!isNaN(config.multiObjectDeleteConcurrency)) {
+            const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
+            if (extractedNumber > 0 && extractedNumber < 1000 && extractedNumber % 1 === 0) {
+                this.multiObjectDeleteConcurrency = extractedNumber;
+            }
+        }
     }
 
     _configureBackends() {
diff --git a/lib/api/apiUtils/object/deleteObject.js b/lib/api/apiUtils/object/deleteObject.js
new file mode 100644
index 0000000000..214c5f2bc2
--- /dev/null
+++ b/lib/api/apiUtils/object/deleteObject.js
@@ -0,0 +1,18 @@
+/**
+ * _bucketRequiresOplogUpdate - checks whether deleting an object from the bucket requires an oplog update
+ * @param {BucketInfo} bucket - bucket object
+ * @return {boolean} whether objects require oplog updates on deletion, or not
+ */
+function _bucketRequiresOplogUpdate(bucket) {
+    // Default behavior is to require an oplog update
+    if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
+        return true;
+    }
+    // If the bucket has lifecycle configuration or notification configuration
+    // set, we also require an oplog update
+    return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
+}
+
+module.exports = {
+    _bucketRequiresOplogUpdate,
+};
diff --git a/lib/api/multiObjectDelete.js b/lib/api/multiObjectDelete.js
index 6fdb08b355..a9ce79bc04 100644
--- a/lib/api/multiObjectDelete.js
+++ b/lib/api/multiObjectDelete.js
@@ -17,14 +17,16 @@ const { preprocessingVersioningDelete }
     = require('./apiUtils/object/versioning');
 const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
 const monitoring = require('../utilities/metrics');
-const { metadataGetObject, metadataGetObjects } = require('../metadata/metadataUtils');
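+// kept as a module object rather than destructured, so that the unit tests
+// can stub metadataGetObject/metadataGetObjects on the module with sinon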
+const metadataUtils = require('../metadata/metadataUtils');
 const { config } = require('../Config');
 const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
     = require('./apiUtils/object/objectLockHelpers');
 const requestUtils = policies.requestUtils;
 const { data } = require('../data/wrapper');
 const logger = require('../utilities/logger');
-const constants = require('../../constants');
+const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
 
 const versionIdUtils = versioning.VersionID;
 
@@ -170,21 +170,17 @@
 }
 
 /**
- * processObjectVersion - process object version to be deleted
+ * decodeObjectVersion - decode object version to be deleted
  * @param {object} entry - entry from data model
- * @param {string} bucketName - bucket name
- * @param {function} next - callback to call with error or decoded version
- * @return {undefined}
+ * @return {array} - [error, decoded version id] tuple
  **/
-function processObjectVersion(entry, bucketName) {
+function decodeObjectVersion(entry) {
     let decodedVersionId;
     if (entry.versionId) {
         decodedVersionId = entry.versionId === 'null' ?
             'null' : versionIdUtils.decode(entry.versionId);
     }
     if (decodedVersionId instanceof Error) {
-        monitoring.promMetrics('DELETE', bucketName, 404,
-            'multiObjectDelete');
         return [errors.NoSuchVersion];
     }
     return [null, decodedVersionId];
@@ -198,29 +195,34 @@
  * @param {string} bucketName - bucket name
  * @param {string []} inPlay - list of object keys still in play
  * @param {object} log - logger object
+ * @param {function} callback - callback to call with the error or the cache of object metadata
  * @return {undefined}
  */
-function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log) {
-    const backendSupportsBatching = constants.supportsBatchingMethods.includes(config.backends.metadata);
+function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback) {
     // If the backend supports batching, we want to optimize the API latency by
     // first getting all the objects metadata, stored in memory, for later use
     // in the API. This approach does not change the API architecture, but
     // transplants an additional piece of code that can greatly improve the API
     // latency when the database supports batching.
-    if (backendSupportsBatching) {
-        const objectKeys = Object.values(inPlay).map(entry => {
-            const [err, versionId] = processObjectVersion(entry, bucketName);
-            if (err) {
-                return null;
-            }
-            return {
-                versionId,
-                inPlay: entry,
-            };
-        });
-        return callback => metadataGetObjects(bucketName, objectKeys, log, callback);
-    }
-    return callback => callback();
+    const objectKeys = Object.values(inPlay).map(entry => {
+        const [err, versionId] = decodeObjectVersion(entry);
+        if (err) {
+            return null;
+        }
+        return {
+            versionId,
+            inPlay: entry,
+        };
+    });
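+    // a single batched metadata GET saves one round-trip per object on
+    // backends that support batching (e.g. MongoDB)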
+    return metadataUtils.metadataGetObjects(bucketName, objectKeys, log, (err, cache) => {
+        // This optional step is read-only, so any error can be safely ignored
+        if (err) {
+            return callback(null, {});
+        }
+        return callback(null, cache);
+    });
 }
 
 /**
@@ -249,47 +249,41 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
     const skipError = new Error('skip');
     const objectLockedError = new Error('object locked');
     let deleteFromStorage = [];
-    const initialStep = initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log);
 
     return async.waterfall([
-        callback => initialStep((err, cache) => callback(err, cache)),
-        (cache, callback) => async.forEachLimit(inPlay, 50, (entry, moveOn) => {
+        callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback),
+        (cache, callback) => async.forEachLimit(inPlay, config.multiObjectDeleteConcurrency, (entry, moveOn) => {
             async.waterfall([
-                callback => callback(...processObjectVersion(entry, bucketName)),
+                callback => callback(...decodeObjectVersion(entry)),
                 // for obj deletes, no need to check acl's at object level
                 // (authority is at the bucket level for obj deletes)
-                (versionId, callback) => metadataGetObject(bucketName, entry.key,
-                    versionId, log, (err, objMD) => {
-                        // if general error from metadata return error
-                        if (err) {
-                            monitoring.promMetrics('DELETE', bucketName, err.code,
-                                'multiObjectDelete');
-                            return callback(err);
-                        }
-                        if (!objMD) {
-                            const verCfg = bucket.getVersioningConfiguration();
-                            // To adhere to AWS behavior, create a delete marker
-                            // if trying to delete an object that does not exist
-                            // when versioning has been configured
-                            if (verCfg && !entry.versionId) {
-                                log.debug('trying to delete specific version ' +
-                                    'that does not exist');
-                                return callback(null, objMD, versionId);
-                            }
-                            // otherwise if particular key does not exist, AWS
-                            // returns success for key so add to successfullyDeleted
-                            // list and move on
-                            successfullyDeleted.push({ entry });
-                            return callback(skipError);
-                        }
-                        if (versionId && objMD.location &&
-                            Array.isArray(objMD.location) && objMD.location[0]) {
-                            // we need this information for data deletes to AWS
-                            // eslint-disable-next-line no-param-reassign
-                            objMD.location[0].deleteVersion = true;
-                        }
-                        return callback(null, objMD, versionId);
-                    }, cache ? cache[`${entry.key}${versionId}`] : null),
+                (versionId, callback) => metadataUtils.metadataGetObject(bucketName, entry.key,
+                    versionId, cache, log, (err, objMD) => callback(err, objMD, versionId)),
+                (objMD, versionId, callback) => {
+                    if (!objMD) {
+                        const verCfg = bucket.getVersioningConfiguration();
+                        // To adhere to AWS behavior, create a delete marker
+                        // if trying to delete an object that does not exist
+                        // when versioning has been configured
+                        if (verCfg && !entry.versionId) {
+                            log.debug('object does not exist, creating a ' +
+                                'delete marker since versioning is configured');
+                            return callback(null, objMD, versionId);
+                        }
+                        // otherwise if particular key does not exist, AWS
+                        // returns success for key so add to successfullyDeleted
+                        // list and move on
+                        successfullyDeleted.push({ entry });
+                        return callback(skipError);
+                    }
+                    if (versionId && objMD.location &&
+                        Array.isArray(objMD.location) && objMD.location[0]) {
+                        // we need this information for data deletes to AWS
+                        // eslint-disable-next-line no-param-reassign
+                        objMD.location[0].deleteVersion = true;
+                    }
+                    return callback(null, objMD, versionId);
+                },
                 (objMD, versionId, callback) => {
                     // AWS only returns an object lock error if a version id
                     // is specified, else continue to create a delete marker
@@ -338,16 +332,15 @@
                 const deleteInfo = {};
                 if (options && options.deleteData) {
                     deleteInfo.deleted = true;
-                    if ((bucket.getLifecycleConfiguration && !bucket.getLifecycleConfiguration())
-                        && (bucket.getNotificationConfiguration && !bucket.getNotificationConfiguration())) {
-                        options.shouldOnlyDelete = true;
+                    if (!_bucketRequiresOplogUpdate(bucket)) {
+                        options.doesNotNeedOpogUpdate = true;
                     }
                     if (objMD.uploadId) {
                         // eslint-disable-next-line
                         options.replayId = objMD.uploadId;
                     }
                     return services.deleteObject(bucketName, objMD,
-                        entry.key, options, log, (err, toDelete) => {
+                        entry.key, options, true, log, (err, toDelete) => {
                             if (err) {
                                 return callback(err);
                             }
@@ -355,7 +348,7 @@
                                 deleteFromStorage = deleteFromStorage.concat(toDelete);
                             }
                             return callback(null, objMD, deleteInfo);
-                        }, true);
+                        });
                     }
                     deleteInfo.newDeleteMarker = true;
                     // This call will create a delete-marker
@@ -428,7 +421,15 @@
                 return onDone();
             });
         }),
-    ], next);
+    ], (err, ...results) => {
+        // if any step failed, update the metrics before returning the error
+        if (err) {
+            monitoring.promMetrics('DELETE', bucketName, err.code,
+                'multiObjectDelete');
+            return next(err);
+        }
+        return next(null, ...results);
+    });
 }
 
 /**
@@ -654,6 +655,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
 module.exports = {
     getObjMetadataAndDelete,
     multiObjectDelete,
-    processObjectVersion,
+    decodeObjectVersion,
     initializeMultiObjectDeleteWithBatchingSupport,
 };
diff --git a/lib/api/objectDelete.js b/lib/api/objectDelete.js
index beee7c64e0..68eed7e977 100644
--- a/lib/api/objectDelete.js
+++ b/lib/api/objectDelete.js
@@ -13,6 +13,7 @@ const monitoring = require('../utilities/metrics');
 const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
     = require('./apiUtils/object/objectLockHelpers');
 const { config } = require('../Config');
+const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
 
 const versionIdUtils = versioning.VersionID;
 const objectLockedError = new Error('object locked');
@@ -155,8 +156,12 @@ function objectDelete(authInfo, request, log, cb) {
                     delOptions.replayId = objectMD.uploadId;
                 }
 
+                if (!_bucketRequiresOplogUpdate(bucketMD)) {
+                    delOptions.doesNotNeedOpogUpdate = true;
+                }
+
                 return services.deleteObject(bucketName, objectMD, objectKey,
-                    delOptions, log, (err, delResult) => next(err, bucketMD,
+                    delOptions, false, log, (err, delResult) => next(err, bucketMD,
                         objectMD, delResult, deleteInfo));
             }
             // putting a new delete marker
diff --git a/lib/metadata/metadataUtils.js b/lib/metadata/metadataUtils.js
index 4bb416e5e2..ef93f24b23 100644
--- a/lib/metadata/metadataUtils.js
+++ b/lib/metadata/metadataUtils.js
@@ -60,17 +60,19 @@ function getNullVersionFromMaster(bucketName, objectKey, log, cb) {
  * @param {string} bucketName - name of bucket
  * @param {string} objectKey - name of object key
  * @param {string} [versionId] - version of object to retrieve
+ * @param {object} cachedDocuments - cached object metadata documents, keyed
+ * by object key, used instead of fetching from the metadata backend
  * @param {RequestLogger} log - request logger
 * @param {function} cb - callback
- * @param {object} cachedDocument - cached version of the document used for
- * abstraction purposes
 * @return {undefined} - and call callback with err, bucket md and object md
 */
-function metadataGetObject(bucketName, objectKey, versionId, log, cb, cachedDocument = null) {
+function metadataGetObject(bucketName, objectKey, versionId, cachedDocuments, log, cb) {
     // versionId may be 'null', which asks metadata to fetch the null key specifically
     const options = { versionId, getDeleteMarker: true };
-    if (cachedDocument) {
-        return cb(null, cachedDocument);
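+    // a cache hit serves the pre-fetched document and skips the round-trip
+    // to the metadata backend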
+    if (cachedDocuments && cachedDocuments[objectKey]) {
+        return cb(null, cachedDocuments[objectKey]);
     }
     return metadata.getObjectMD(bucketName, objectKey, options, log,
         (err, objMD) => {
diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js
index d017007720..560e593bd9 100644
--- a/lib/routes/routeBackbeat.js
+++ b/lib/routes/routeBackbeat.js
@@ -909,7 +909,7 @@ function putObjectTagging(request, response, log, callback) {
     // retrieve it from metadata here.
     if (dataStoreVersionId === '') {
         return metadataGetObject(sourceBucket, request.objectKey,
-            sourceVersionId, log, (err, objMD) => {
+            sourceVersionId, null, log, (err, objMD) => {
                 if (err) {
                     return callback(err);
                 }
@@ -941,7 +941,7 @@ function deleteObjectTagging(request, response, log, callback) {
     // retrieve it from metadata here.
     if (dataStoreVersionId === '') {
         return metadataGetObject(sourceBucket, request.objectKey,
-            sourceVersionId, log, (err, objMD) => {
+            sourceVersionId, null, log, (err, objMD) => {
                 if (err) {
                     return callback(err);
                 }
diff --git a/lib/services.js b/lib/services.js
index 23268107b2..c6103f288d 100644
--- a/lib/services.js
+++ b/lib/services.js
@@ -284,14 +284,13 @@ const services = {
      * @param {string} objectKey - object key name
      * @param {object} options - other instructions, such as { versionId } to
      * delete a specific version of the object
+     * @param {boolean} deferLocationDeletion - true if the object's data
+     * locations should not be deleted from storage but returned to the caller
      * @param {Log} log - logger instance
      * @param {function} cb - callback from async.waterfall in objectGet
-     * @param {boolean} deferDeletion - true if the object should not be removed
-     *                                  from the storage, but be returned
-     *                                  instead.
      * @return {undefined}
      */
-    deleteObject(bucketName, objectMD, objectKey, options, log, cb, deferDeletion) {
+    deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, cb) {
         log.trace('deleting object from bucket');
         assert.strictEqual(typeof bucketName, 'string');
         assert.strictEqual(typeof objectMD, 'object');
@@ -310,14 +309,19 @@
             return cb(null, res);
         }
 
-        const locations = Array.isArray(objectMD.location)
-            ? objectMD.location : [objectMD.location];
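+        // deferred locations are accumulated by the caller (multiObjectDelete)
+        // for batch deletion from storage once the metadata updates complete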
+        if (deferLocationDeletion) {
+            return cb(null, Array.isArray(objectMD.location)
+                ? objectMD.location : [objectMD.location]);
+        }
 
-        if (deferDeletion) {
-            return cb(null, locations);
+        if (!Array.isArray(objectMD.location)) {
+            data.delete(objectMD.location, deleteLog);
+            return cb(null, res);
         }
 
-        return data.batchDelete(locations, null, null, deleteLog, err => {
+        return data.batchDelete(objectMD.location, null, null, deleteLog, err => {
             if (err) {
                 return cb(err);
             }
diff --git a/package.json b/package.json
index efa65cd705..9fd1887610 100644
--- a/package.json
+++ b/package.json
@@ -20,7 +20,7 @@
   "homepage": "https://github.com/scality/S3#readme",
   "dependencies": {
     "@hapi/joi": "^17.1.0",
-    "arsenal": "git+https://github.com/scality/arsenal#bfb80c43355b13939c53b44461b41b8b5ac183e6",
+    "arsenal": "git+https://github.com/scality/arsenal#075373b766b44007f3d88b92cc06bdfc7edbb7e9",
     "async": "~2.5.0",
     "aws-sdk": "2.905.0",
     "azure-storage": "^2.1.0",
diff --git a/tests/unit/api/multiObjectDelete.js b/tests/unit/api/multiObjectDelete.js
index 79f21829e3..c3dd768ded 100644
--- a/tests/unit/api/multiObjectDelete.js
+++ b/tests/unit/api/multiObjectDelete.js
@@ -1,15 +1,16 @@
 const assert = require('assert');
 const { errors } = require('arsenal');
 
-const { processObjectVersion, getObjMetadataAndDelete, initializeMultiObjectDeleteWithBatchingSupport }
+const { decodeObjectVersion, getObjMetadataAndDelete, initializeMultiObjectDeleteWithBatchingSupport }
     = require('../../../lib/api/multiObjectDelete');
+const multiObjectDelete = require('../../../lib/api/multiObjectDelete');
 const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers');
 const { ds } = require('arsenal').storage.data.inMemory.datastore;
 const { metadata } = require('arsenal').storage.metadata.inMemory.metadata;
 const DummyRequest = require('../DummyRequest');
 const { bucketPut } = require('../../../lib/api/bucketPut');
 const objectPut = require('../../../lib/api/objectPut');
-const constants = require('../../../constants');
+const sinon = require('sinon');
 
 const log = new DummyRequestLogger();
 const canonicalID = 'accessKey1';
 const authInfo = makeAuthInfo(canonicalID);
@@ -19,6 +20,8 @@ const postBody = Buffer.from('I am a body', 'utf8');
 const contentLength = 2 * postBody.length;
 const objectKey1 = 'objectName1';
 const objectKey2 = 'objectName2';
+const metadataUtils = require('../../../lib/metadata/metadataUtils');
+const services = require('../../../lib/services');
 const testBucketPutRequest = new DummyRequest({
     bucketName,
     namespace,
@@ -68,6 +71,10 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => {
         });
     });
 
+    afterEach(() => {
+        sinon.restore();
+    });
+
     it('should successfully get object metadata and then ' +
     'delete metadata and data', done => {
         getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket,
@@ -184,48 +191,140 @@
                 done();
             });
     });
-});
 
-describe('initializeMultiObjectDeleteWithBatchingSupport', () => {
-    it('should return a call to the batching method if the backend supports it', done => {
-        constants.supportsBatchingMethods.push('mem');
-        const returnedCallback = initializeMultiObjectDeleteWithBatchingSupport(bucketName, [], log);
-        returnedCallback(err => {
-            assert.strictEqual(err.NotImplemented, true);
-            constants.supportsBatchingMethods.splice(-1, 1);
-            return done();
-        });
-    });
-
-    it('should not return a call to the batching method if the backend does not support it', done => {
-        const returnedCallback = initializeMultiObjectDeleteWithBatchingSupport(bucketName, [], log);
-        returnedCallback((err, cache) => {
-            assert.strictEqual(err, undefined);
-            assert.strictEqual(cache, undefined);
+    it('should properly batch delete data even if there are errors in other objects', done => {
+        const deleteObjectStub = sinon.stub(services, 'deleteObject');
+        deleteObjectStub.onCall(0).callsArgWith(6, errors.InternalError);
+        deleteObjectStub.onCall(1).callsArgWith(6, null);
+
+        getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket,
+            true, [], [{ key: objectKey1 }, { key: objectKey2 }], log,
+            (err, quietSetting, errorResults, numOfObjects,
+                successfullyDeleted, totalContentLengthDeleted) => {
+                assert.ifError(err);
+                assert.strictEqual(quietSetting, true);
+                assert.deepStrictEqual(errorResults, [
+                    {
+                        entry: {
+                            key: objectKey1,
+                        },
+                        error: errors.InternalError,
+                    },
+                ]);
+                assert.strictEqual(numOfObjects, 1);
+                assert.strictEqual(totalContentLengthDeleted, contentLength / 2);
+                // expect both objects to still be in memory, as the deletion was stubbed
+                assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true);
+                assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true);
+                // ensure that only object 2 is in the list of successful deletions
+                assert.strictEqual(successfullyDeleted.length, 1);
+                assert.deepStrictEqual(successfullyDeleted[0].entry.key, objectKey2);
                 return done();
             });
     });
 });
 
-describe('processObjectVersion function helper', () => {
-    const bucketName = 'bucketName';
+describe('initializeMultiObjectDeleteWithBatchingSupport', () => {
+    let bucketName;
+    let inPlay;
+    let log;
+    let callback;
+
+    beforeEach(() => {
+        bucketName = 'myBucket';
+        inPlay = { one: 'object1', two: 'object2' };
+        log = {};
+        callback = sinon.spy();
+    });
+
+    afterEach(() => {
+        sinon.restore();
+    });
+
+    it('should not throw if the decodeObjectVersion function fails', done => {
+        const metadataGetObjectsStub = sinon.stub(metadataUtils, 'metadataGetObjects').yields(null, {});
+        sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([new Error('decode error')]);
+
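+        // must not throw even though every entry fails to decode; the
+        // callback should still receive an empty cache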
+        initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback);
+
+        assert.strictEqual(metadataGetObjectsStub.callCount, 1);
+        sinon.assert.calledOnce(callback);
+        assert.strictEqual(callback.getCall(0).args[0], null);
+        assert.deepStrictEqual(callback.getCall(0).args[1], {});
+        done();
+    });
+
+    it('should call the batching method if the backend supports it', done => {
+        const metadataGetObjectsStub = sinon.stub(metadataUtils, 'metadataGetObjects').yields(null, {});
+        const objectVersion = 'someVersionId';
+        sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]);
+
+        initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback);
+
+        assert.strictEqual(metadataGetObjectsStub.callCount, 1);
+        sinon.assert.calledOnce(callback);
+        assert.strictEqual(callback.getCall(0).args[0], null);
+        done();
+    });
+
+    it('should not return an error if the metadataGetObjects function fails', done => {
+        const metadataGetObjectsStub =
+            sinon.stub(metadataUtils, 'metadataGetObjects').yields(new Error('metadata error'), null);
+        const objectVersion = 'someVersionId';
+        sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]);
+
+        initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback);
+
+        assert.strictEqual(metadataGetObjectsStub.callCount, 1);
+        sinon.assert.calledOnce(callback);
+        assert.strictEqual(callback.getCall(0).args[0] instanceof Error, false);
+        assert.deepStrictEqual(callback.getCall(0).args[1], {});
+        done();
+    });
+
+    it('should populate the cache when the backend supports it', done => {
+        const expectedOutput = {
+            one: {
+                value: 'object1',
+            },
+            two: {
+                value: 'object2',
+            },
+        };
+        const metadataGetObjectsStub = sinon.stub(metadataUtils, 'metadataGetObjects').yields(null, expectedOutput);
+        const objectVersion = 'someVersionId';
+        sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]);
+
+        initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback);
+
+        assert.strictEqual(metadataGetObjectsStub.callCount, 1);
+        sinon.assert.calledOnce(callback);
+        assert.strictEqual(callback.getCall(0).args[0], null);
+        assert.deepStrictEqual(callback.getCall(0).args[1], expectedOutput);
+        done();
+    });
+});
+
+describe('decodeObjectVersion function helper', () => {
     it('should throw error for invalid version IDs', () => {
-        const ret = processObjectVersion({
+        const ret = decodeObjectVersion({
             versionId: '\0',
-        }, bucketName);
+        });
         assert(ret[0].is.NoSuchVersion);
     });
 
     it('should return "null" for null versionId', () => {
-        const ret = processObjectVersion({
+        const ret = decodeObjectVersion({
             versionId: 'null',
-        }, bucketName);
+        });
         assert.strictEqual(ret[0], null);
         assert.strictEqual(ret[1], 'null');
     });
 
     it('should return null error on success', () => {
-        const ret = processObjectVersion({}, bucketName);
+        const ret = decodeObjectVersion({});
         assert.ifError(ret[0]);
         assert.deepStrictEqual(ret[1], undefined);
     });
diff --git a/tests/unit/api/objectDelete.js b/tests/unit/api/objectDelete.js
index b1ae2e77c6..5da43260eb 100644
--- a/tests/unit/api/objectDelete.js
+++ b/tests/unit/api/objectDelete.js
@@ -139,7 +139,8 @@ describe('objectDelete API', () => {
                 any, any, any, {
                     deleteData: true,
                     replayId: testUploadId,
-                }, any, any);
+                    doesNotNeedOpogUpdate: true,
+                }, any, any, any);
                 done();
             });
     });
diff --git a/tests/unit/metadata/metadataUtils.spec.js b/tests/unit/metadata/metadataUtils.spec.js
index 5d3aa8768f..ef83c607bb 100644
--- a/tests/unit/metadata/metadataUtils.spec.js
+++ b/tests/unit/metadata/metadataUtils.spec.js
@@ -112,20 +112,23 @@ describe('metadataGetObject', () => {
     });
 
     it('should return the cached document if provided', done => {
-        const cachedDoc = { key: 'objectKey1', versionId: 'versionId1' };
-
-        metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, log, (err, result) => {
+        const cachedDoc = {
+            [objectKey.inPlay.key]: {
+                key: 'objectKey1', versionId: 'versionId1',
+            },
+        };
+        metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, cachedDoc, log, (err, result) => {
             assert.ifError(err);
-            assert.deepStrictEqual(result, cachedDoc);
+            assert.deepStrictEqual(result, cachedDoc[objectKey.inPlay.key]);
             done();
-        }, cachedDoc);
+        });
     });
 
     it('should return error if metadata.getObjectMD fails', done => {
         const error = new Error('Failed to get object metadata');
         sandbox.stub(metadata, 'getObjectMD').yields(error);
 
-        metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, log, err => {
+        metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, null, log, err => {
             assert(err);
             assert.strictEqual(err, error);
             done();
@@ -136,7 +139,7 @@ describe('metadataGetObject', () => {
         const metadataObj = { doc: { key: 'objectKey1', versionId: 'versionId1' } };
         sandbox.stub(metadata, 'getObjectMD').yields(null, metadataObj);
 
-        metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, log, (err, result) => {
+        metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, null, log, (err, result) => {
             assert.ifError(err);
             assert.deepStrictEqual(result, metadataObj);
             done();
diff --git a/tests/utilities/objectLock-util.js b/tests/utilities/objectLock-util.js
index e7a1778a4f..92a223e6cd 100644
--- a/tests/utilities/objectLock-util.js
+++ b/tests/utilities/objectLock-util.js
@@ -12,7 +12,7 @@ const log = new DummyRequestLogger();
 function changeObjectLock(objects, newConfig, cb) {
     async.each(objects, (object, next) => {
         const { bucket, key, versionId } = object;
-        metadataGetObject(bucket, key, versionIdUtils.decode(versionId), log, (err, objMD) => {
+        metadataGetObject(bucket, key, versionIdUtils.decode(versionId), null, log, (err, objMD) => {
            assert.ifError(err);
            // set newConfig as empty string to remove object lock
            /* eslint-disable no-param-reassign */
diff --git a/yarn.lock b/yarn.lock
index c75f59c7dc..b03abec906 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -448,9 +448,9 @@ arraybuffer.slice@~0.0.7:
   optionalDependencies:
     ioctl "^2.0.2"
 
-"arsenal@git+https://github.com/scality/arsenal#bfb80c43355b13939c53b44461b41b8b5ac183e6":
+"arsenal@git+https://github.com/scality/arsenal#075373b766b44007f3d88b92cc06bdfc7edbb7e9":
   version "7.70.6"
-  resolved "git+https://github.com/scality/arsenal#bfb80c43355b13939c53b44461b41b8b5ac183e6"
+  resolved "git+https://github.com/scality/arsenal#075373b766b44007f3d88b92cc06bdfc7edbb7e9"
   dependencies:
     "@types/async" "^3.2.12"
     "@types/utf8" "^3.0.1"