diff --git a/constants.js b/constants.js
index 2c8fad30ef..36e15f29a8 100644
--- a/constants.js
+++ b/constants.js
@@ -205,6 +205,7 @@ const constants = {
         NON_CURRENT_TYPE: 'noncurrent',
         ORPHAN_DM_TYPE: 'orphan',
     },
+    multiObjectDeleteConcurrency: 50,
 };
 
 module.exports = constants;
diff --git a/lib/Config.js b/lib/Config.js
index 78a24d5c2e..ca6aacfd22 100644
--- a/lib/Config.js
+++ b/lib/Config.js
@@ -17,7 +17,7 @@ const {
     azureAccountNameRegex, base64Regex,
     allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
 } = require('../constants');
 const { utapiVersion } = require('utapi');
-
+const constants = require('../constants');
 
 // config paths
 const configSearchPaths = [
@@ -1562,6 +1562,17 @@ class Config extends EventEmitter {
 
         // Version of the configuration we're running under
         this.overlayVersion = config.overlayVersion || 0;
+
+        this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
+        const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
+        if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
+            this.multiObjectDeleteConcurrency = extractedNumber;
+        }
+
+        this.multiObjectDeleteEnableOptimizations = true;
+        if (config.multiObjectDeleteEnableOptimizations === false) {
+            this.multiObjectDeleteEnableOptimizations = false;
+        }
     }
 
     _getAuthData() {
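The two settings above are optional tuning knobs read from the service
configuration. A hypothetical config.json excerpt (key names are taken from the
Config.js hunk above; their top-level placement is an assumption):

    {
        "multiObjectDeleteConcurrency": 100,
        "multiObjectDeleteEnableOptimizations": true
    }

multiObjectDeleteConcurrency only takes effect for integer values strictly
between 0 and 1000; anything else falls back to the constant default of 50.
The optimizations stay enabled unless the key is explicitly set to false.
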
diff --git a/lib/api/apiUtils/object/deleteObject.js b/lib/api/apiUtils/object/deleteObject.js
new file mode 100644
index 0000000000..214c5f2bc2
--- /dev/null
+++ b/lib/api/apiUtils/object/deleteObject.js
@@ -0,0 +1,18 @@
+/**
+ * _bucketRequiresOplogUpdate - whether deleting an object from this bucket requires an oplog update
+ * @param {BucketInfo} bucket - bucket object
+ * @return {boolean} whether objects require oplog updates on deletion, or not
+ */
+function _bucketRequiresOplogUpdate(bucket) {
+    // Default behavior is to require an oplog update
+    if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
+        return true;
+    }
+    // If the bucket has lifecycle configuration or notification configuration
+    // set, we also require an oplog update
+    return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
+}
+
+module.exports = {
+    _bucketRequiresOplogUpdate,
+};
diff --git a/lib/api/multiObjectDelete.js b/lib/api/multiObjectDelete.js
index db7f359350..c972e6af39 100644
--- a/lib/api/multiObjectDelete.js
+++ b/lib/api/multiObjectDelete.js
@@ -17,12 +17,15 @@ const { preprocessingVersioningDelete }
     = require('./apiUtils/object/versioning');
 const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
 const monitoring = require('../utilities/monitoringHandler');
-const { metadataGetObject } = require('../metadata/metadataUtils');
+const metadataUtils = require('../metadata/metadataUtils');
 const { config } = require('../Config');
 const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
 const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
     = require('./apiUtils/object/objectLockHelpers');
 const requestUtils = policies.requestUtils;
+const { data } = require('../data/wrapper');
+const logger = require('../utilities/logger');
+const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
 
 const versionIdUtils = versioning.VersionID;
 
@@ -167,6 +170,63 @@ function _parseXml(xmlToParse, next) {
     });
 }
 
+/**
+ * decodeObjectVersion - decode object version to be deleted
+ * @param {object} entry - entry from data model
+ * @return {array} - [error, decodedVersionId] tuple; error is null on
+ * success, and decodedVersionId is undefined when the entry has no version
+ */
+function decodeObjectVersion(entry) {
+    let decodedVersionId;
+    if (entry.versionId) {
+        decodedVersionId = entry.versionId === 'null' ?
+            'null' : versionIdUtils.decode(entry.versionId);
+    }
+    if (decodedVersionId instanceof Error) {
+        return [errors.NoSuchVersion];
+    }
+    return [null, decodedVersionId];
+}
+
+/**
+ * Initialization function for the MultiObjectDelete API that will, based on the
+ * current metadata backend, assess if metadata READ batching is supported. If
+ * yes, the initialization step will call the metadataGetObjects function from
+ * the MetadataWrapper.
+ * @param {string} bucketName - bucket name
+ * @param {object[]} inPlay - list of object entries still in play
+ * @param {object} log - logger object
+ * @param {function} callback - callback to call with error or list of objects
+ * @return {undefined}
+ */
+function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback) {
+    if (config.multiObjectDeleteEnableOptimizations === false) {
+        return callback(null, {});
+    }
+    // If the backend supports batching, we want to optimize the API latency by
+    // first getting all the objects metadata, stored in memory, for later use
+    // in the API. This approach does not change the API architecture, but
+    // transplants an additional piece of code that can greatly improve the API
+    // latency when the database supports batching.
+    const objectKeys = Object.values(inPlay).map(entry => {
+        const [err, versionId] = decodeObjectVersion(entry);
+        if (err) {
+            return null;
+        }
+        return {
+            versionId,
+            inPlay: entry,
+        };
+    });
+    return metadataUtils.metadataGetObjects(bucketName, objectKeys, log, (err, cache) => {
+        // This optional step is read-only, so any error can be safely ignored
+        if (err) {
+            return callback(null, {});
+        }
+        return callback(null, cache);
+    });
+}
+
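Unlike the callback-style code it replaces, decodeObjectVersion returns an
[error, value] tuple, so callers can spread the result straight into a
waterfall callback. A minimal sketch of the contract, consistent with the unit
tests at the bottom of this diff (the entries are hypothetical):

    const [err1, v1] = decodeObjectVersion({});                    // [null, undefined]
    const [err2, v2] = decodeObjectVersion({ versionId: 'null' }); // [null, 'null']
    const [err3] = decodeObjectVersion({ versionId: '\0' });       // [errors.NoSuchVersion]

    // Spread into a waterfall step, as getObjMetadataAndDelete does below:
    // callback(...decodeObjectVersion(entry));
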
 /**
  * gets object metadata and deletes object
  * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@@ -192,34 +252,18 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
     let numOfObjectsRemoved = 0;
     const skipError = new Error('skip');
     const objectLockedError = new Error('object locked');
+    let deleteFromStorage = [];
 
-    // doing 5 requests at a time. note that the data wrapper
-    // will do 5 parallel requests to data backend to delete parts
-    return async.forEachLimit(inPlay, 5, (entry, moveOn) => {
-        async.waterfall([
-            callback => {
-                let decodedVersionId;
-                if (entry.versionId) {
-                    decodedVersionId = entry.versionId === 'null' ?
-                        'null' : versionIdUtils.decode(entry.versionId);
-                }
-                if (decodedVersionId instanceof Error) {
-                    monitoring.promMetrics('DELETE', bucketName, 404,
-                        'multiObjectDelete');
-                    return callback(errors.NoSuchVersion);
-                }
-                return callback(null, decodedVersionId);
-            },
-            // for obj deletes, no need to check acl's at object level
-            // (authority is at the bucket level for obj deletes)
-            (versionId, callback) => metadataGetObject(bucketName, entry.key,
-                versionId, log, (err, objMD) => {
-                    // if general error from metadata return error
-                    if (err) {
-                        monitoring.promMetrics('DELETE', bucketName, err.code,
-                            'multiObjectDelete');
-                        return callback(err);
-                    }
+    return async.waterfall([
+        callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback),
+        (cache, callback) => async.forEachLimit(inPlay, config.multiObjectDeleteConcurrency, (entry, moveOn) => {
+            async.waterfall([
+                callback => callback(...decodeObjectVersion(entry)),
+                // for obj deletes, no need to check acl's at object level
+                // (authority is at the bucket level for obj deletes)
+                (versionId, callback) => metadataUtils.metadataGetObject(bucketName, entry.key,
+                    versionId, cache, log, (err, objMD) => callback(err, objMD, versionId)),
+                (objMD, versionId, callback) => {
                     if (!objMD) {
                         const verCfg = bucket.getVersioningConfiguration();
                         // To adhere to AWS behavior, create a delete marker
@@ -227,7 +271,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
                         // when versioning has been configured
                         if (verCfg && !entry.versionId) {
                             log.debug('trying to delete specific version ' +
-                                ' that does not exist');
+                                'that does not exist');
                             return callback(null, objMD, versionId);
                         }
                         // otherwise if particular key does not exist, AWS
@@ -243,113 +287,160 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
                         objMD.location[0].deleteVersion = true;
                     }
                     return callback(null, objMD, versionId);
-                }),
-            (objMD, versionId, callback) => {
-                // AWS only returns an object lock error if a version id
-                // is specified, else continue to create a delete marker
-                if (!versionId || !bucket.isObjectLockEnabled()) {
-                    return callback(null, null, objMD, versionId);
-                }
-                const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
-                if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
-                    return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
-                        if (error && error.is.AccessDenied) {
-                            log.debug('user does not have BypassGovernanceRetention and object is locked', { error });
-                            return callback(objectLockedError);
+                },
+                (objMD, versionId, callback) => {
+                    // AWS only returns an object lock error if a version id
+                    // is specified, else continue to create a delete marker
+                    if (!versionId || !bucket.isObjectLockEnabled()) {
+                        return callback(null, null, objMD, versionId);
+                    }
+                    const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
+                    if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
+                        return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
+                            if (error && error.is.AccessDenied) {
+                                log.debug('user does not have BypassGovernanceRetention and object is locked',
+                                    { error });
+                                return callback(objectLockedError);
+                            }
+                            if (error) {
+                                return callback(error);
+                            }
+                            return callback(null, hasGovernanceBypass, objMD, versionId);
+                        });
+                    }
+                    return callback(null, hasGovernanceBypass, objMD, versionId);
+                },
+                (hasGovernanceBypass, objMD, versionId, callback) => {
+                    // AWS only returns an object lock error if a version id
+                    // is specified, else continue to create a delete marker
+                    if (!versionId || !bucket.isObjectLockEnabled()) {
+                        return callback(null, objMD, versionId);
+                    }
+                    const objLockInfo = new ObjectLockInfo({
+                        mode: objMD.retentionMode,
+                        date: objMD.retentionDate,
+                        legalHold: objMD.legalHold || false,
+                    });
+
+                    // If the object cannot be deleted, raise an error
+                    if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
+                        log.debug('trying to delete locked object');
+                        return callback(objectLockedError);
+                    }
+
+                    return callback(null, objMD, versionId);
+                },
+                (objMD, versionId, callback) => {
+                    const options = preprocessingVersioningDelete(
+                        bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
+                    const deleteInfo = {};
+                    if (options && options.deleteData) {
+                        deleteInfo.deleted = true;
+                        if (!_bucketRequiresOplogUpdate(bucket)) {
+                            options.doesNotNeedOpogUpdate = true;
                         }
-                        if (error) {
-                            return callback(error);
                         }
+                        if (objMD.uploadId) {
+                            // eslint-disable-next-line
+                            options.replayId = objMD.uploadId;
                         }
-                        return callback(null, hasGovernanceBypass, objMD, versionId);
-                    });
+                        return services.deleteObject(bucketName, objMD,
+                            entry.key, options, config.multiObjectDeleteEnableOptimizations, log, (err, toDelete) => {
+                                if (err) {
+                                    return callback(err);
+                                }
+                                if (toDelete) {
+                                    deleteFromStorage = deleteFromStorage.concat(toDelete);
+                                }
+                                return callback(null, objMD, deleteInfo);
+                            });
+                    }
+                    deleteInfo.newDeleteMarker = true;
+                    // This call will create a delete-marker
+                    return createAndStoreObject(bucketName, bucket, entry.key,
+                        objMD, authInfo, canonicalID, null, request,
+                        deleteInfo.newDeleteMarker, null, log, (err, result) =>
+                            callback(err, objMD, deleteInfo, result.versionId));
+                },
+            ], (err, objMD, deleteInfo, versionId) => {
+                if (err === skipError) {
+                    return moveOn();
+                } else if (err === objectLockedError) {
+                    errorResults.push({ entry, error: errors.AccessDenied, objectLocked: true });
+                    return moveOn();
+                } else if (err) {
+                    log.error('error deleting object', { error: err, entry });
+                    errorResults.push({ entry, error: err });
+                    return moveOn();
                 }
-                return callback(null, hasGovernanceBypass, objMD, versionId);
-            },
-            (hasGovernanceBypass, objMD, versionId, callback) => {
-                // AWS only returns an object lock error if a version id
-                // is specified, else continue to create a delete marker
-                if (!versionId || !bucket.isObjectLockEnabled()) {
-                    return callback(null, objMD, versionId);
                 }
+                if (deleteInfo.deleted && objMD['content-length']) {
+                    numOfObjectsRemoved++;
+                    totalContentLengthDeleted += objMD['content-length'];
+                }
+                let isDeleteMarker;
+                let deleteMarkerVersionId;
+                // - If trying to delete an object that does not exist (if a new
+                // delete marker was created)
+                // - Or if an object exists but no version was specified
+                // return DeleteMarkerVersionId equals the versionID of the marker
+                // you just generated and DeleteMarker tag equals true
+                if (deleteInfo.newDeleteMarker) {
+                    isDeleteMarker = true;
+                    deleteMarkerVersionId = versionIdUtils.encode(
+                        versionId, config.versionIdEncodingType);
+                    // In this case we are putting a new object (i.e., the delete
+                    // marker), so we decrement the numOfObjectsRemoved value.
+                    numOfObjectsRemoved--;
+                    // If trying to delete a delete marker, DeleteMarkerVersionId equals
+                    // deleteMarker's versionID and DeleteMarker equals true
+                } else if (objMD && objMD.isDeleteMarker) {
+                    isDeleteMarker = true;
+                    deleteMarkerVersionId = entry.versionId;
                 }
-                const objLockInfo = new ObjectLockInfo({
-                    mode: objMD.retentionMode,
-                    date: objMD.retentionDate,
-                    legalHold: objMD.legalHold || false,
+                successfullyDeleted.push({
+                    entry, isDeleteMarker,
+                    deleteMarkerVersionId,
                 });
+                return moveOn();
+            });
+        },
+        // end of forEach func
+        err => {
+            // Batch delete all objects
+            const onDone = () => callback(err, quietSetting, errorResults, numOfObjectsRemoved,
+                successfullyDeleted, totalContentLengthDeleted, bucket);
 
-                // If the object can not be deleted raise an error
-                if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
-                    log.debug('trying to delete locked object');
-                    return callback(objectLockedError);
+            if (err && deleteFromStorage.length === 0) {
+                log.trace('no objects to delete from data backend');
+                return onDone();
             }
+            // If error but we have objects in the list, delete them to ensure
+            // consistent state.
+            log.trace('deleting objects from data backend');
 
-                return callback(null, objMD, versionId);
-            },
-            (objMD, versionId, callback) => {
-                const options = preprocessingVersioningDelete(
-                    bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
-                const deleteInfo = {};
-                if (options && options.deleteData) {
-                    deleteInfo.deleted = true;
-                    if (objMD.uploadId) {
-                        // eslint-disable-next-line
-                        options.replayId = objMD.uploadId;
-                    }
-                    return services.deleteObject(bucketName, objMD,
-                        entry.key, options, log, err =>
-                            callback(err, objMD, deleteInfo));
+            // Split the array into chunks
+            const chunks = [];
+            while (deleteFromStorage.length > 0) {
+                chunks.push(deleteFromStorage.splice(0, config.multiObjectDeleteConcurrency));
             }
-                deleteInfo.newDeleteMarker = true;
-                // This call will create a delete-marker
-                return createAndStoreObject(bucketName, bucket, entry.key,
-                    objMD, authInfo, canonicalID, null, request,
-                    deleteInfo.newDeleteMarker, null, log, (err, result) =>
-                        callback(err, objMD, deleteInfo, result.versionId));
-            },
-        ], (err, objMD, deleteInfo, versionId) => {
-            if (err === skipError) {
-                return moveOn();
-            } else if (err === objectLockedError) {
-                errorResults.push({ entry, error: errors.AccessDenied, objectLocked: true });
-                return moveOn();
-            } else if (err) {
-                log.error('error deleting object', { error: err, entry });
-                errorResults.push({ entry, error: err });
-                return moveOn();
-            }
-            if (deleteInfo.deleted && objMD['content-length']) {
-                numOfObjectsRemoved++;
-                totalContentLengthDeleted += objMD['content-length'];
-            }
-            let isDeleteMarker;
-            let deleteMarkerVersionId;
-            // - If trying to delete an object that does not exist (if a new
-            // delete marker was created)
-            // - Or if an object exists but no version was specified
-            // return DeleteMarkerVersionId equals the versionID of the marker
-            // you just generated and DeleteMarker tag equals true
-            if (deleteInfo.newDeleteMarker) {
-                isDeleteMarker = true;
-                deleteMarkerVersionId = versionIdUtils.encode(versionId);
-                // In this case we are putting a new object (i.e., the delete
-                // marker), so we decrement the numOfObjectsRemoved value.
-                numOfObjectsRemoved--;
-                // If trying to delete a delete marker, DeleteMarkerVersionId equals
-                // deleteMarker's versionID and DeleteMarker equals true
-            } else if (objMD && objMD.isDeleteMarker) {
-                isDeleteMarker = true;
-                deleteMarkerVersionId = entry.versionId;
-            }
-            successfullyDeleted.push({ entry, isDeleteMarker,
-                deleteMarkerVersionId });
-            return moveOn();
-        });
-    },
-    // end of forEach func
-    err => {
-        log.trace('finished deleting objects', { numOfObjectsRemoved });
-        return next(err, quietSetting, errorResults, numOfObjectsRemoved,
-            successfullyDeleted, totalContentLengthDeleted, bucket);
+
+            return async.each(chunks, (chunk, done) => data.batchDelete(chunk, null, null,
+                logger.newRequestLoggerFromSerializedUids(log.getSerializedUids()), done),
+            err => {
+                if (err) {
+                    log.error('error deleting objects from data backend', { error: err });
+                    return onDone(err);
+                }
+                return onDone();
+            });
+        }),
+    ], (err, ...results) => {
+        // if a general error occurred (metadata or data backend), return it
+        if (err) {
+            monitoring.promMetrics('DELETE', bucketName, err.code,
+                'multiObjectDelete');
+            return next(err);
+        }
+        return next(null, ...results);
     });
 }
 
@@ -576,4 +667,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
 module.exports = {
     getObjMetadataAndDelete,
     multiObjectDelete,
+    decodeObjectVersion,
+    initializeMultiObjectDeleteWithBatchingSupport,
 };
diff --git a/lib/api/objectDelete.js b/lib/api/objectDelete.js
index 02dd038f25..a325a1258c 100644
--- a/lib/api/objectDelete.js
+++ b/lib/api/objectDelete.js
@@ -14,6 +14,7 @@ const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
     = require('./apiUtils/object/objectLockHelpers');
 const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
 const { config } = require('../Config');
+const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
 
 const versionIdUtils = versioning.VersionID;
 const objectLockedError = new Error('object locked');
@@ -191,8 +192,12 @@ function objectDelete(authInfo, request, log, cb) {
             delOptions.replayId = objectMD.uploadId;
         }
 
+        if (!_bucketRequiresOplogUpdate(bucketMD)) {
+            delOptions.doesNotNeedOpogUpdate = true;
+        }
+
         return services.deleteObject(bucketName, objectMD, objectKey,
-            delOptions, log, (err, delResult) => next(err, bucketMD,
+            delOptions, false, log, (err, delResult) => next(err, bucketMD,
                 objectMD, delResult, deleteInfo));
     }
     // putting a new delete marker
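Before the metadataUtils changes below, a note on the cache contract:
metadataGetObjects produces a plain object mapping `${doc.key}${versionId}` to
the metadata document (see the result.objectKey1versionId1 assertions in the
unit tests further down), and metadataGetObject consults this map before
falling back to an individual metadata.getObjectMD round trip. A sketch with
hypothetical values:

    const cache = {
        // keyed by `${doc.key}${versionId}`
        objectKey1versionId1: { key: 'objectKey1', 'content-length': 11 },
        objectKey2versionId2: { key: 'objectKey2', 'content-length': 11 },
    };

Any error while building the cache is swallowed and simply leaves it empty, so
the batched read stays a pure optimization.
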
diff --git a/lib/metadata/metadataUtils.js b/lib/metadata/metadataUtils.js
index d931d5d5c9..ef93f24b23 100644
--- a/lib/metadata/metadataUtils.js
+++ b/lib/metadata/metadataUtils.js
@@ -60,13 +60,18 @@ function getNullVersionFromMaster(bucketName, objectKey, log, cb) {
  * @param {string} bucketName - name of bucket
  * @param {string} objectKey - name of object key
  * @param {string} [versionId] - version of object to retrieve
+ * @param {object} [cachedDocuments] - metadata documents pre-fetched in bulk,
+ * to be consulted before issuing an individual metadata request
  * @param {RequestLogger} log - request logger
 * @param {function} cb - callback
 * @return {undefined} - and call callback with err, bucket md and object md
 */
-function metadataGetObject(bucketName, objectKey, versionId, log, cb) {
+function metadataGetObject(bucketName, objectKey, versionId, cachedDocuments, log, cb) {
     // versionId may be 'null', which asks metadata to fetch the null key specifically
     const options = { versionId, getDeleteMarker: true };
+    if (cachedDocuments && cachedDocuments[objectKey]) {
+        return cb(null, cachedDocuments[objectKey]);
+    }
     return metadata.getObjectMD(bucketName, objectKey, options, log,
         (err, objMD) => {
             if (err) {
@@ -84,6 +89,40 @@
     });
 }
 
+/** metadataGetObjects - retrieves specified objects or versions from metadata.
+ * This method uses cursors, hence is only compatible with a MongoDB backend.
+ * @param {string} bucketName - name of bucket
+ * @param {object[]} objectsKeys - objects to retrieve: { inPlay: { key }, versionId } entries
+ * @param {RequestLogger} log - request logger
+ * @param {function} cb - callback
+ * @return {undefined} - and call callback with err and a map of documents keyed by key + versionId
+ */
+function metadataGetObjects(bucketName, objectsKeys, log, cb) {
+    const options = { getDeleteMarker: true };
+    const objects = objectsKeys.map(objectKey => ({
+        key: objectKey ? objectKey.inPlay.key : null,
+        params: options,
+        versionId: objectKey ? objectKey.versionId : null,
+    }));
+
+    // Returned objects follow the format { key, doc, versionId },
+    // which batching requires to properly map the documents
+    return metadata.getObjectsMD(bucketName, objects, log, (err, objMds) => {
+        if (err) {
+            log.debug('error getting objects MD from metadata', { error: err });
+            return cb(err);
+        }
+
+        const result = {};
+        objMds.forEach(objMd => {
+            if (objMd.doc) {
+                result[`${objMd.doc.key}${objMd.versionId}`] = objMd.doc;
+            }
+        });
+
+        return cb(null, result);
+    });
+}
 /**
  * Validate that a bucket is accessible and authorized to the user,
  * return a specific error code otherwise
@@ -215,6 +254,7 @@
 module.exports = {
     validateBucket,
     metadataGetObject,
+    metadataGetObjects,
     metadataValidateBucketAndObj,
     metadataValidateBucket,
 };
diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js
index 89355f6f12..8182ec3e14 100644
--- a/lib/routes/routeBackbeat.js
+++ b/lib/routes/routeBackbeat.js
@@ -937,7 +937,7 @@ function putObjectTagging(request, response, log, callback) {
     // retrieve it from metadata here.
     if (dataStoreVersionId === '') {
         return metadataGetObject(sourceBucket, request.objectKey,
-            sourceVersionId, log, (err, objMD) => {
+            sourceVersionId, null, log, (err, objMD) => {
                 if (err) {
                     return callback(err);
                 }
@@ -969,7 +969,7 @@ function deleteObjectTagging(request, response, log, callback) {
     // retrieve it from metadata here.
     if (dataStoreVersionId === '') {
         return metadataGetObject(sourceBucket, request.objectKey,
-            sourceVersionId, log, (err, objMD) => {
+            sourceVersionId, null, log, (err, objMD) => {
                 if (err) {
                     return callback(err);
                 }
diff --git a/lib/services.js b/lib/services.js
index 1573648b88..e7ab8268ce 100644
--- a/lib/services.js
+++ b/lib/services.js
@@ -306,11 +306,13 @@ const services = {
      * @param {string} objectKey - object key name
      * @param {object} options - other instructions, such as { versionId } to
      * delete a specific version of the object
+     * @param {boolean} deferLocationDeletion - true to skip deleting the
+     * object's data locations and return them to the caller instead
* @param {Log} log - logger instance * @param {function} cb - callback from async.waterfall in objectGet * @return {undefined} */ - deleteObject(bucketName, objectMD, objectKey, options, log, cb) { + deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, cb) { log.trace('deleting object from bucket'); assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof objectMD, 'object'); @@ -327,12 +329,19 @@ const services = { log.getSerializedUids()); if (objectMD.location === null) { return cb(null, res); - } else if (!Array.isArray(objectMD.location)) { + } + + if (deferLocationDeletion) { + return cb(null, Array.isArray(objectMD.location) + ? objectMD.location : [objectMD.location]); + } + + if (!Array.isArray(objectMD.location)) { data.delete(objectMD.location, deleteLog); return cb(null, res); } - return data.batchDelete(objectMD.location, null, null, - deleteLog, err => { + + return data.batchDelete(objectMD.location, null, null, deleteLog, err => { if (err) { return cb(err); } diff --git a/package.json b/package.json index 68edde3feb..f1464344f8 100644 --- a/package.json +++ b/package.json @@ -21,7 +21,7 @@ "dependencies": { "@azure/storage-blob": "^12.12.0", "@hapi/joi": "^17.1.0", - "arsenal": "git+https://github.com/scality/arsenal#8.1.98", + "arsenal": "git+https://github.com/scality/arsenal#8.1.104", "async": "~2.5.0", "aws-sdk": "2.905.0", "bucketclient": "scality/bucketclient#8.1.9", diff --git a/tests/unit/api/multiObjectDelete.js b/tests/unit/api/multiObjectDelete.js index f9bc9831de..470aab29a0 100644 --- a/tests/unit/api/multiObjectDelete.js +++ b/tests/unit/api/multiObjectDelete.js @@ -1,17 +1,19 @@ const assert = require('assert'); const { errors, storage } = require('arsenal'); -const { getObjMetadataAndDelete } +const { decodeObjectVersion, getObjMetadataAndDelete, initializeMultiObjectDeleteWithBatchingSupport } = require('../../../lib/api/multiObjectDelete'); +const multiObjectDelete = require('../../../lib/api/multiObjectDelete'); const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers'); const DummyRequest = require('../DummyRequest'); const { bucketPut } = require('../../../lib/api/bucketPut'); const objectPut = require('../../../lib/api/objectPut'); +const log = new DummyRequestLogger(); const { metadata } = storage.metadata.inMemory.metadata; const { ds } = storage.data.inMemory.datastore; +const sinon = require('sinon'); -const log = new DummyRequestLogger(); const canonicalID = 'accessKey1'; const authInfo = makeAuthInfo(canonicalID); const namespace = 'default'; @@ -20,6 +22,8 @@ const postBody = Buffer.from('I am a body', 'utf8'); const contentLength = 2 * postBody.length; const objectKey1 = 'objectName1'; const objectKey2 = 'objectName2'; +const metadataUtils = require('../../../lib/metadata/metadataUtils'); +const services = require('../../../lib/services'); const testBucketPutRequest = new DummyRequest({ bucketName, namespace, @@ -69,6 +73,10 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { }); }); + afterEach(() => { + sinon.restore(); + }); + it('should successfully get object metadata and then ' + 'delete metadata and data', done => { getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, @@ -185,4 +193,139 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => { done(); }); }); + + it('should properly batch delete data even if there are errors in other objects', done => { + const deleteObjectStub = sinon.stub(services, 
'deleteObject'); + deleteObjectStub.onCall(0).callsArgWith(6, errors.InternalError); + deleteObjectStub.onCall(1).callsArgWith(6, null); + + getObjMetadataAndDelete(authInfo, 'foo', request, bucketName, bucket, + true, [], [{ key: objectKey1 }, { key: objectKey2 }], log, + (err, quietSetting, errorResults, numOfObjects, + successfullyDeleted, totalContentLengthDeleted) => { + assert.ifError(err); + assert.strictEqual(quietSetting, true); + assert.deepStrictEqual(errorResults, [ + { + entry: { + key: objectKey1, + }, + error: errors.InternalError, + }, + ]); + assert.strictEqual(numOfObjects, 1); + assert.strictEqual(totalContentLengthDeleted, contentLength / 2); + // Expect still in memory as we stubbed the function + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey1), true); + assert.strictEqual(metadata.keyMaps.get(bucketName).has(objectKey2), true); + // ensure object 2 only is in the list of successful deletions + assert.strictEqual(successfullyDeleted.length, 1); + assert.deepStrictEqual(successfullyDeleted[0].entry.key, objectKey2); + return done(); + }); + }); +}); + +describe('initializeMultiObjectDeleteWithBatchingSupport', () => { + let bucketName; + let inPlay; + let log; + let callback; + + beforeEach(() => { + bucketName = 'myBucket'; + inPlay = { one: 'object1', two: 'object2' }; + log = {}; + callback = sinon.spy(); + }); + + afterEach(() => { + sinon.restore(); + }); + + it('should not throw if the decodeObjectVersion function fails', done => { + const metadataGetObjectsStub = sinon.stub(metadataUtils, 'metadataGetObjects').yields(null, {}); + sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([new Error('decode error')]); + + initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback); + + assert.strictEqual(metadataGetObjectsStub.callCount, 1); + sinon.assert.calledOnce(callback); + assert.strictEqual(callback.getCall(0).args[0], null); + assert.deepStrictEqual(callback.getCall(0).args[1], {}); + done(); + }); + + it('should call the batching method if the backend supports it', done => { + const metadataGetObjectsStub = sinon.stub(metadataUtils, 'metadataGetObjects').yields(null, {}); + const objectVersion = 'someVersionId'; + sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]); + + initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback); + + assert.strictEqual(metadataGetObjectsStub.callCount, 1); + sinon.assert.calledOnce(callback); + assert.strictEqual(callback.getCall(0).args[0], null); + done(); + }); + + it('should not return an error if the metadataGetObjects function fails', done => { + const metadataGetObjectsStub = + sinon.stub(metadataUtils, 'metadataGetObjects').yields(new Error('metadata error'), null); + const objectVersion = 'someVersionId'; + sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]); + + initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback); + + assert.strictEqual(metadataGetObjectsStub.callCount, 1); + sinon.assert.calledOnce(callback); + assert.strictEqual(callback.getCall(0).args[0] instanceof Error, false); + assert.deepStrictEqual(callback.getCall(0).args[1], {}); + done(); + }); + + it('should populate the cache when the backend supports it', done => { + const expectedOutput = { + one: { + value: 'object1', + }, + two: { + value: 'object2', + }, + }; + const metadataGetObjectsStub = sinon.stub(metadataUtils, 'metadataGetObjects').yields(null, expectedOutput); + const 
objectVersion = 'someVersionId'; + sinon.stub(multiObjectDelete, 'decodeObjectVersion').returns([null, objectVersion]); + + initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback); + + assert.strictEqual(metadataGetObjectsStub.callCount, 1); + sinon.assert.calledOnce(callback); + assert.strictEqual(callback.getCall(0).args[0], null); + assert.deepStrictEqual(callback.getCall(0).args[1], expectedOutput); + done(); + }); +}); + +describe('decodeObjectVersion function helper', () => { + it('should throw error for invalid version IDs', () => { + const ret = decodeObjectVersion({ + versionId: '\0', + }); + assert(ret[0].is.NoSuchVersion); + }); + + it('should return "null" for null versionId', () => { + const ret = decodeObjectVersion({ + versionId: 'null', + }); + assert.strictEqual(ret[0], null); + assert.strictEqual(ret[1], 'null'); + }); + + it('should return null error on success', () => { + const ret = decodeObjectVersion({}); + assert.ifError(ret[0]); + assert.deepStrictEqual(ret[1], undefined); + }); }); diff --git a/tests/unit/api/objectDelete.js b/tests/unit/api/objectDelete.js index b5090aec96..bd8d84a610 100644 --- a/tests/unit/api/objectDelete.js +++ b/tests/unit/api/objectDelete.js @@ -142,7 +142,8 @@ describe('objectDelete API', () => { any, any, any, { deleteData: true, replayId: testUploadId, - }, any, any); + doesNotNeedOpogUpdate: true, + }, any, any, any); done(); }); }); diff --git a/tests/unit/metadata/metadataUtils.spec.js b/tests/unit/metadata/metadataUtils.spec.js index 574c90e41e..ef83c607bb 100644 --- a/tests/unit/metadata/metadataUtils.spec.js +++ b/tests/unit/metadata/metadataUtils.spec.js @@ -1,4 +1,5 @@ const assert = require('assert'); +const sinon = require('sinon'); const { models } = require('arsenal'); const { BucketInfo } = models; @@ -13,7 +14,8 @@ const bucket = new BucketInfo('niftyBucket', ownerCanonicalId, authInfo.getAccountDisplayName(), creationDate); const log = new DummyRequestLogger(); -const { validateBucket } = require('../../../lib/metadata/metadataUtils'); +const { validateBucket, metadataGetObjects, metadataGetObject } = require('../../../lib/metadata/metadataUtils'); +const metadata = require('../../../lib/metadata/wrapper'); describe('validateBucket', () => { it('action bucketPutPolicy by bucket owner', () => { @@ -53,3 +55,94 @@ describe('validateBucket', () => { assert(validationResult.is.AccessDenied); }); }); + +describe('metadataGetObjects', () => { + let sandbox; + const objectsKeys = [ + { inPlay: { key: 'objectKey1' }, versionId: 'versionId1' }, + { inPlay: { key: 'objectKey2' }, versionId: 'versionId2' }, + ]; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should return error if metadata.getObjectsMD fails', done => { + const error = new Error('Failed to get object metadata'); + sandbox.stub(metadata, 'getObjectsMD').yields(error); + + metadataGetObjects('bucketName', objectsKeys, log, err => { + assert(err); + assert.strictEqual(err, error); + done(); + }); + }); + + it('should return object metadata if successful', done => { + const metadataObjs = [ + { doc: { key: 'objectKey1' }, versionId: 'versionId1' }, + { doc: { key: 'objectKey2' }, versionId: 'versionId2' }, + ]; + sandbox.stub(metadata, 'getObjectsMD').yields(null, metadataObjs); + + metadataGetObjects('bucketName', objectsKeys, log, (err, result) => { + assert.ifError(err); + assert(result); + assert.strictEqual(result.objectKey1versionId1, metadataObjs[0].doc); + 
assert.strictEqual(result.objectKey2versionId2, metadataObjs[1].doc); + done(); + }); + }); +}); + +describe('metadataGetObject', () => { + let sandbox; + const objectKey = { inPlay: { key: 'objectKey1' }, versionId: 'versionId1' }; + + beforeEach(() => { + sandbox = sinon.createSandbox(); + }); + + afterEach(() => { + sandbox.restore(); + }); + + it('should return the cached document if provided', done => { + const cachedDoc = { + [objectKey.inPlay.key]: { + key: 'objectKey1', versionId: 'versionId1', + }, + }; + metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, cachedDoc, log, (err, result) => { + assert.ifError(err); + assert.deepStrictEqual(result, cachedDoc[objectKey.inPlay.key]); + done(); + }); + }); + + it('should return error if metadata.getObjectMD fails', done => { + const error = new Error('Failed to get object metadata'); + sandbox.stub(metadata, 'getObjectMD').yields(error); + + metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, null, log, err => { + assert(err); + assert.strictEqual(err, error); + done(); + }); + }); + + it('should return object metadata if successful', done => { + const metadataObj = { doc: { key: 'objectKey1', versionId: 'versionId1' } }; + sandbox.stub(metadata, 'getObjectMD').yields(null, metadataObj); + + metadataGetObject('bucketName', objectKey.inPlay.key, objectKey.versionId, null, log, (err, result) => { + assert.ifError(err); + assert.deepStrictEqual(result, metadataObj); + done(); + }); + }); +}); diff --git a/tests/utilities/objectLock-util.js b/tests/utilities/objectLock-util.js index e7a1778a4f..92a223e6cd 100644 --- a/tests/utilities/objectLock-util.js +++ b/tests/utilities/objectLock-util.js @@ -12,7 +12,7 @@ const log = new DummyRequestLogger(); function changeObjectLock(objects, newConfig, cb) { async.each(objects, (object, next) => { const { bucket, key, versionId } = object; - metadataGetObject(bucket, key, versionIdUtils.decode(versionId), log, (err, objMD) => { + metadataGetObject(bucket, key, versionIdUtils.decode(versionId), null, log, (err, objMD) => { assert.ifError(err); // set newConfig as empty string to remove object lock /* eslint-disable no-param-reassign */ diff --git a/yarn.lock b/yarn.lock index 225ed07357..d8f30fba0a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -357,6 +357,11 @@ resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.2.tgz#5981a8db18b56ba38ef0efb7d995b12aa7b51918" integrity sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ== +"@socket.io/component-emitter@~3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz#96116f2a912e0c02817345b3c10751069920d553" + integrity sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg== + "@tootallnate/once@1": version "1.1.2" resolved "https://registry.yarnpkg.com/@tootallnate/once/-/once-1.1.2.tgz#ccb91445360179a04e7fe6aff78c00ffc1eeaf82" @@ -372,6 +377,18 @@ resolved "https://registry.yarnpkg.com/@types/async/-/async-3.2.15.tgz#26d4768fdda0e466f18d6c9918ca28cc89a4e1fe" integrity sha512-PAmPfzvFA31mRoqZyTVsgJMsvbynR429UTTxhmfsUCrWGh3/fxOrzqBtaTPJsn4UtzTv4Vb0+/O7CARWb69N4g== +"@types/cookie@^0.4.1": + version "0.4.1" + resolved "https://registry.yarnpkg.com/@types/cookie/-/cookie-0.4.1.tgz#bfd02c1f2224567676c1545199f87c3a861d878d" + integrity sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q== + 
+"@types/cors@^2.8.12": + version "2.8.13" + resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.13.tgz#b8ade22ba455a1b8cb3b5d3f35910fd204f84f94" + integrity sha512-RG8AStHlUiV5ysZQKq97copd2UmVYw3/pRMLefISZ3S1hK104Cwm7iLQ3fTKx+lsUH2CE8FlLaYeEA2LSeqYUA== + dependencies: + "@types/node" "*" + "@types/json5@^0.0.29": version "0.0.29" resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee" @@ -390,6 +407,11 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-18.11.15.tgz#de0e1fbd2b22b962d45971431e2ae696643d3f5d" integrity sha512-VkhBbVo2+2oozlkdHXLrb3zjsRkpdnaU2bXmX8Wgle3PUi569eLRaHGlgETQHR7lLL1w7GiG3h9SnePhxNDecw== +"@types/node@>=10.0.0": + version "20.4.2" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.4.2.tgz#129cc9ae69f93824f92fac653eebfb4812ab4af9" + integrity sha512-Dd0BYtWgnWJKwO1jkmTrzofjK2QXXcai0dmtzvIBhcA+RsG5h8R3xlyta0kGOZRNfL9GuRtb1knmPEhQrePCEw== + "@types/tunnel@^0.0.3": version "0.0.3" resolved "https://registry.yarnpkg.com/@types/tunnel/-/tunnel-0.0.3.tgz#f109e730b072b3136347561fc558c9358bb8c6e9" @@ -707,9 +729,9 @@ arraybuffer.slice@~0.0.7: optionalDependencies: ioctl "^2.0.2" -"arsenal@git+https://github.com/scality/arsenal#8.1.98": - version "8.1.97" - resolved "git+https://github.com/scality/arsenal#3f7229eebe378a0f4852d2c25b9ac33c027fa7eb" +"arsenal@git+https://github.com/scality/arsenal#8.1.104": + version "8.1.104" + resolved "git+https://github.com/scality/arsenal#8716fee67d77f78e79497c75c62d9e4fa5b9a80e" dependencies: "@azure/identity" "^3.1.1" "@azure/storage-blob" "^12.12.0" @@ -726,7 +748,7 @@ arraybuffer.slice@~0.0.7: bson "4.0.0" debug "~4.1.0" diskusage "^1.1.1" - fcntl "github:scality/node-fcntl#0.2.0" + fcntl "github:scality/node-fcntl#0.2.2" hdclient scality/hdclient#1.1.5 httpagent scality/httpagent#1.0.6 https-proxy-agent "^2.2.0" @@ -739,8 +761,8 @@ arraybuffer.slice@~0.0.7: node-forge "^1.3.0" prom-client "14.2.0" simple-glob "^0.2.0" - socket.io "2.4.1" - socket.io-client "2.4.0" + socket.io "~4.6.1" + socket.io-client "~4.6.1" sproxydclient "git+https://github.com/scality/sproxydclient#8.0.9" utf8 "3.0.0" uuid "^3.0.1" @@ -965,7 +987,7 @@ base64-js@^1.0.2, base64-js@^1.3.1: resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== -base64id@2.0.0: +base64id@2.0.0, base64id@~2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/base64id/-/base64id-2.0.0.tgz#2770ac6bc47d312af97a8bf9a634342e0cd25cb6" integrity sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog== @@ -1399,6 +1421,14 @@ core-util-is@~1.0.0: resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== +cors@~2.8.5: + version "2.8.5" + resolved "https://registry.yarnpkg.com/cors/-/cors-2.8.5.tgz#eac11da51592dd86b9f06f6e7ac293b3df875d29" + integrity sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g== + dependencies: + object-assign "^4" + vary "^1" + cron-parser@^2.11.0, cron-parser@^2.15.0, cron-parser@^2.18.0: version "2.18.0" resolved "https://registry.yarnpkg.com/cron-parser/-/cron-parser-2.18.0.tgz#de1bb0ad528c815548371993f81a54e5a089edcf" @@ -1471,7 +1501,7 @@ debug@2.6.9, debug@^2.2.0, 
debug@^2.6.8, debug@^2.6.9: dependencies: ms "2.0.0" -debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.3: +debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.3, debug@~4.3.1, debug@~4.3.2: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== @@ -1684,6 +1714,17 @@ engine.io-client@~3.5.0: xmlhttprequest-ssl "~1.6.2" yeast "0.1.2" +engine.io-client@~6.4.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-6.4.0.tgz#88cd3082609ca86d7d3c12f0e746d12db4f47c91" + integrity sha512-GyKPDyoEha+XZ7iEqam49vz6auPnNJ9ZBfy89f+rMMas8AuiMWOZ9PVzu8xb9ZC6rafUqiGHSCfu22ih66E+1g== + dependencies: + "@socket.io/component-emitter" "~3.1.0" + debug "~4.3.1" + engine.io-parser "~5.0.3" + ws "~8.11.0" + xmlhttprequest-ssl "~2.0.0" + engine.io-parser@~2.2.0: version "2.2.1" resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-2.2.1.tgz#57ce5611d9370ee94f99641b589f94c97e4f5da7" @@ -1695,6 +1736,11 @@ engine.io-parser@~2.2.0: blob "0.0.5" has-binary2 "~1.0.2" +engine.io-parser@~5.0.3: + version "5.0.7" + resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-5.0.7.tgz#ed5eae76c71f398284c578ab6deafd3ba7e4e4f6" + integrity sha512-P+jDFbvK6lE3n1OL+q9KuzdOFWkkZ/cMV9gol/SbVfpyqfvrfrFTOFJ6fQm2VC3PZHlU3QPhVwmbsCnauHF2MQ== + engine.io@~3.5.0: version "3.5.0" resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-3.5.0.tgz#9d6b985c8a39b1fe87cd91eb014de0552259821b" @@ -1707,6 +1753,22 @@ engine.io@~3.5.0: engine.io-parser "~2.2.0" ws "~7.4.2" +engine.io@~6.4.2: + version "6.4.2" + resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-6.4.2.tgz#ffeaf68f69b1364b0286badddf15ff633476473f" + integrity sha512-FKn/3oMiJjrOEOeUub2WCox6JhxBXq/Zn3fZOMCBxKnNYtsdKjxhl7yR3fZhM9PV+rdE75SU5SYMc+2PGzo+Tg== + dependencies: + "@types/cookie" "^0.4.1" + "@types/cors" "^2.8.12" + "@types/node" ">=10.0.0" + accepts "~1.3.4" + base64id "2.0.0" + cookie "~0.4.1" + cors "~2.8.5" + debug "~4.3.1" + engine.io-parser "~5.0.3" + ws "~8.11.0" + entities@~2.0.0: version "2.0.3" resolved "https://registry.yarnpkg.com/entities/-/entities-2.0.3.tgz#5c487e5742ab93c15abb5da22759b8590ec03b7f" @@ -2132,6 +2194,14 @@ fastq@^1.6.0: nan "^2.3.2" node-gyp "^8.0.0" +"fcntl@github:scality/node-fcntl#0.2.2": + version "0.2.1" + resolved "https://codeload.github.com/scality/node-fcntl/tar.gz/b1335ca204c6265cedc50c26020c4d63aabe920e" + dependencies: + bindings "^1.1.1" + nan "^2.3.2" + node-gyp "^8.0.0" + fecha@^4.2.0: version "4.2.3" resolved "https://registry.yarnpkg.com/fecha/-/fecha-4.2.3.tgz#4d9ccdbc61e8629b259fdca67e65891448d569fd" @@ -4269,6 +4339,11 @@ oauth-sign@~0.9.0: resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== +object-assign@^4: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + object-inspect@^1.12.0, object-inspect@^1.9.0: version "1.12.2" resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.2.tgz#c0641f26394532f28ab8d796ab954e43c009a8ea" @@ 
-5062,6 +5137,13 @@ socket.io-adapter@~1.1.0: resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-1.1.2.tgz#ab3f0d6f66b8fc7fca3959ab5991f82221789be9" integrity sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g== +socket.io-adapter@~2.5.2: + version "2.5.2" + resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-2.5.2.tgz#5de9477c9182fdc171cd8c8364b9a8894ec75d12" + integrity sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA== + dependencies: + ws "~8.11.0" + socket.io-client@2.4.0: version "2.4.0" resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-2.4.0.tgz#aafb5d594a3c55a34355562fc8aea22ed9119a35" @@ -5079,6 +5161,16 @@ socket.io-client@2.4.0: socket.io-parser "~3.3.0" to-array "0.1.4" +socket.io-client@~4.6.1: + version "4.6.2" + resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-4.6.2.tgz#2bfde952e74625d54e622718a7cb1d591ee62fd6" + integrity sha512-OwWrMbbA8wSqhBAR0yoPK6EdQLERQAYjXb3A0zLpgxfM1ZGLKoxHx8gVmCHA6pcclRX5oA/zvQf7bghAS11jRA== + dependencies: + "@socket.io/component-emitter" "~3.1.0" + debug "~4.3.2" + engine.io-client "~6.4.0" + socket.io-parser "~4.2.4" + socket.io-parser@~3.3.0: version "3.3.2" resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-3.3.2.tgz#ef872009d0adcf704f2fbe830191a14752ad50b6" @@ -5097,6 +5189,14 @@ socket.io-parser@~3.4.0: debug "~4.1.0" isarray "2.0.1" +socket.io-parser@~4.2.4: + version "4.2.4" + resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-4.2.4.tgz#c806966cf7270601e47469ddeec30fbdfda44c83" + integrity sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew== + dependencies: + "@socket.io/component-emitter" "~3.1.0" + debug "~4.3.1" + socket.io@2.4.1: version "2.4.1" resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-2.4.1.tgz#95ad861c9a52369d7f1a68acf0d4a1b16da451d2" @@ -5109,6 +5209,18 @@ socket.io@2.4.1: socket.io-client "2.4.0" socket.io-parser "~3.4.0" +socket.io@~4.6.1: + version "4.6.2" + resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-4.6.2.tgz#d597db077d4df9cbbdfaa7a9ed8ccc3d49439786" + integrity sha512-Vp+lSks5k0dewYTfwgPT9UeGGd+ht7sCpB7p0e83VgO4X/AHYWhXITMrNk/pg8syY2bpx23ptClCQuHhqi2BgQ== + dependencies: + accepts "~1.3.4" + base64id "~2.0.0" + debug "~4.3.2" + engine.io "~6.4.2" + socket.io-adapter "~2.5.2" + socket.io-parser "~4.2.4" + socks-proxy-agent@^6.0.0: version "6.2.1" resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz#2687a31f9d7185e38d530bef1944fe1f1496d6ce" @@ -5788,7 +5900,7 @@ validator@^13.0.0, validator@^13.6.0: resolved "https://registry.yarnpkg.com/validator/-/validator-13.7.0.tgz#4f9658ba13ba8f3d82ee881d3516489ea85c0857" integrity sha512-nYXQLCBkpJ8X6ltALua9dRrZDHVYxjJ1wgskNt1lH9fzGjs3tgojGSCBjmEPwkWS1y29+DrizMTW19Pr9uB2nw== -vary@~1.1.2: +vary@^1, vary@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== @@ -5969,6 +6081,11 @@ ws@~7.4.2: resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== +ws@~8.11.0: + version "8.11.0" + resolved 
"https://registry.yarnpkg.com/ws/-/ws-8.11.0.tgz#6a0d36b8edfd9f96d8b25683db2f8d7de6e8e143" + integrity sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg== + xml2js@0.4.19: version "0.4.19" resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.4.19.tgz#686c20f213209e94abf0d1bcf1efaa291c7827a7" @@ -6005,6 +6122,11 @@ xmlhttprequest-ssl@~1.6.2: resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.3.tgz#03b713873b01659dfa2c1c5d056065b27ddc2de6" integrity sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q== +xmlhttprequest-ssl@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-2.0.0.tgz#91360c86b914e67f44dce769180027c0da618c67" + integrity sha512-QKxVRxiRACQcVuQEYFsI1hhkrMlrXHPegbbd1yn9UHOmRxY+si12nQYzri3vbzt8VdTTRviqcKxcyllFas5z2A== + xtend@^4.0.2, xtend@~4.0.0: version "4.0.2" resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"