From 354ceeb6aa68a3ebbd6ff85b753256313251e1ea Mon Sep 17 00:00:00 2001 From: shirady <57721533+shirady@users.noreply.github.com> Date: Mon, 18 Mar 2024 13:27:32 +0200 Subject: [PATCH 1/5] NSFS | NC | Manage NSFS | Create bucket - add check if owner of bucket has rw_access to underlying path 1. Add a check if the owner of the bucket has rw_access to the underlying path when creating or updating a bucket. 2. Move the functionality of verify_bucket_owner to validate_bucket_args before we check the access of the underlying path (so we will have the account's details - to reduce fetching the config file again). 3. Add a new error: InaccessibleStoragePath. 4. Edit the existing tests to permit read and write access to the bucket owner. 5. Minor style changes and renaming in test_nc_nsfs_bucket_cli.test.js. Signed-off-by: shirady <57721533+shirady@users.noreply.github.com> (cherry picked from commit b55879998b144fadd1eb5c22225cdba271483184) --- src/cmd/manage_nsfs.js | 43 ++--- src/manage_nsfs/manage_nsfs_cli_errors.js | 5 + .../test_nc_nsfs_bucket_cli.test.js | 160 +++++++++++++++++- src/test/unit_tests/nc_coretest.js | 20 ++- src/test/unit_tests/test_bucketspace.js | 32 +++- src/test/unit_tests/test_nc_nsfs_cli.js | 85 ++++++---- 6 files changed, 282 insertions(+), 63 deletions(-) diff --git a/src/cmd/manage_nsfs.js b/src/cmd/manage_nsfs.js index 2353011761..c0e09df77f 100644 --- a/src/cmd/manage_nsfs.js +++ b/src/cmd/manage_nsfs.js @@ -203,13 +203,11 @@ function get_symlink_config_file_path(config_type_path, file_name) { async function add_bucket(data) { await validate_bucket_args(data, ACTIONS.ADD); - const account_id = await verify_bucket_owner(data.bucket_owner, ACTIONS.ADD); const fs_context = native_fs_utils.get_process_fs_context(config_root_backend); const bucket_conf_path = get_config_file_path(buckets_dir_path, data.name); const exists = await native_fs_utils.is_path_exists(fs_context, bucket_conf_path); if (exists) 
throw_cli_error(ManageCLIError.BucketAlreadyExists, data.name, { bucket: data.name }); data._id = mongo_utils.mongoObjectId(); - data.owner_account = account_id; const data_json = JSON.stringify(data); // We take an object that was stringify // (it unwraps ths sensitive strings, creation_date to string and removes undefined parameters) @@ -219,19 +217,16 @@ async function add_bucket(data) { write_stdout_response(ManageCLIResponse.BucketCreated, data_json, { bucket: data.name }); } -/** verify_bucket_owner will check if the bucket_owner has an account - * bucket_owner is the account name in the account schema - * after it finds one, it returns the account id, otherwise it would throw an error - * (in case the action is add bucket it also checks that the owner has allow_bucket_creation) - * @param {string} bucket_owner account name - * @param {string} action +/** + * get_bucket_owner_account will return the account of the bucket_owner + * otherwise it would throw an error + * @param {string} bucket_owner */ -async function verify_bucket_owner(bucket_owner, action) { - // check if bucket owner exists +async function get_bucket_owner_account(bucket_owner) { const account_config_path = get_config_file_path(accounts_dir_path, bucket_owner); - let account; try { - account = await get_config_data(account_config_path); + const account = await get_config_data(account_config_path); + return account; } catch (err) { if (err.code === 'ENOENT') { const detail_msg = `bucket owner ${bucket_owner} does not exists`; @@ -239,13 +234,6 @@ async function verify_bucket_owner(bucket_owner, action) { } throw err; } - // check if bucket owner has the permission to create bucket (for bucket add only) - if (action === ACTIONS.ADD && !account.allow_bucket_creation) { - const detail_msg = `${bucket_owner} account not allowed to create new buckets. 
` + - `Please make sure to have a valid new_buckets_path and enable the flag allow_bucket_creation`; - throw_cli_error(ManageCLIError.BucketCreationNotAllowed, detail_msg); - } - return account._id; } async function get_bucket_status(data) { @@ -263,7 +251,6 @@ async function get_bucket_status(data) { async function update_bucket(data) { await validate_bucket_args(data, ACTIONS.UPDATE); - await verify_bucket_owner(data.bucket_owner, ACTIONS.UPDATE); const fs_context = native_fs_utils.get_process_fs_context(config_root_backend); @@ -760,7 +747,7 @@ function get_access_keys(action, user_input) { async function validate_bucket_args(data, action) { if (action === ACTIONS.DELETE || action === ACTIONS.STATUS) { if (_.isUndefined(data.name)) throw_cli_error(ManageCLIError.MissingBucketNameFlag); - } else { + } else { // action === ACTIONS.ADD || action === ACTIONS.UPDATE if (_.isUndefined(data.name)) throw_cli_error(ManageCLIError.MissingBucketNameFlag); try { native_fs_utils.validate_bucket_creation({ name: data.name }); @@ -787,6 +774,20 @@ async function validate_bucket_args(data, action) { if (!exists) { throw_cli_error(ManageCLIError.InvalidStoragePath, data.path); } + const account = await get_bucket_owner_account(data.bucket_owner); + const account_fs_context = await native_fs_utils.get_fs_context(account.nsfs_account_config, data.fs_backend); + const accessible = await native_fs_utils.is_dir_rw_accessible(account_fs_context, data.path); + if (!accessible) { + throw_cli_error(ManageCLIError.InaccessibleStoragePath, data.path); + } + if (action === ACTIONS.ADD) { + if (!account.allow_bucket_creation) { + const detail_msg = `${data.bucket_owner} account not allowed to create new buckets. 
` + + `Please make sure to have a valid new_buckets_path and enable the flag allow_bucket_creation`; + throw_cli_error(ManageCLIError.BucketCreationNotAllowed, detail_msg); + } + data.owner_account = account._id; // TODO move this assignment to better place + } if (data.s3_policy) { try { await bucket_policy_utils.validate_s3_policy(data.s3_policy, data.name, diff --git a/src/manage_nsfs/manage_nsfs_cli_errors.js b/src/manage_nsfs/manage_nsfs_cli_errors.js index e6fe0c53e6..ad25615afa 100644 --- a/src/manage_nsfs/manage_nsfs_cli_errors.js +++ b/src/manage_nsfs/manage_nsfs_cli_errors.js @@ -360,6 +360,11 @@ ManageCLIError.MalformedPolicy = Object.freeze({ http_code: 400, }); +ManageCLIError.InaccessibleStoragePath = Object.freeze({ + code: 'InaccessibleStoragePath', + message: 'Bucket owner should have read & write access to the specified bucket storage path', + http_code: 400, +}); ManageCLIError.BucketNotEmpty = Object.freeze({ code: 'BucketNotEmpty', diff --git a/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js b/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js index e881942f4b..8a9697fd5c 100644 --- a/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js +++ b/src/test/unit_tests/jest_tests/test_nc_nsfs_bucket_cli.test.js @@ -41,7 +41,7 @@ describe('manage nsfs cli bucket flow', () => { const bucket_defaults = { name: 'bucket1', - owner: 'account1', + owner: account_defaults.name, path: bucket_storage_path, }; @@ -64,7 +64,7 @@ describe('manage nsfs cli bucket flow', () => { await fs_utils.folder_delete(`${root_path}`); }); - it('cli create bucket invalid option type (path as number)', async () => { + it('should fail - cli create bucket - invalid option type (path as number)', async () => { const action = ACTIONS.ADD; const path_invalid = 4e34; // invalid should be string represents a path const bucket_options = { config_root, ...bucket_defaults, path: path_invalid }; @@ -72,7 +72,7 @@ describe('manage nsfs cli bucket flow', 
() => { expect(JSON.parse(res.stdout).error.message).toBe(ManageCLIError.InvalidArgumentType.message); }); - it('cli create bucket invalid option type (path as string)', async () => { + it('should fail - cli create bucket - invalid option type (path as string)', async () => { const action = ACTIONS.ADD; const path_invalid = 'aaa'; // invalid should be string represents a path const bucket_options = { config_root, ...bucket_defaults, path: path_invalid }; @@ -80,6 +80,47 @@ describe('manage nsfs cli bucket flow', () => { expect(JSON.parse(res.stdout).error.message).toBe(ManageCLIError.InvalidStoragePath.message); }); + it('should fail - cli create bucket - owner does not have any permission to path', async () => { + const action = ACTIONS.ADD; + const bucket_options = { config_root, ...bucket_defaults}; + await fs_utils.create_fresh_path(bucket_options.path); + await fs_utils.file_must_exist(bucket_options.path); + await set_path_permissions_and_owner(bucket_options.path, account_defaults, 0o077); + const res = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + expect(JSON.parse(res.stdout).error.code).toBe(ManageCLIError.InaccessibleStoragePath.code); + }); + + it('should fail - cli create bucket - owner does not have write permission to path', async () => { + const action = ACTIONS.ADD; + const bucket_options = { config_root, ...bucket_defaults}; + await fs_utils.create_fresh_path(bucket_options.path); + await fs_utils.file_must_exist(bucket_options.path); + await set_path_permissions_and_owner(bucket_options.path, account_defaults, 0o477); + const res = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + expect(JSON.parse(res.stdout).error.code).toBe(ManageCLIError.InaccessibleStoragePath.code); + }); + + it('should fail - cli create bucket - owner does not have read permission to path', async () => { + const action = ACTIONS.ADD; + const bucket_options = { config_root, ...bucket_defaults}; + await 
fs_utils.create_fresh_path(bucket_options.path); + await fs_utils.file_must_exist(bucket_options.path); + await set_path_permissions_and_owner(bucket_options.path, account_defaults, 0o277); + const res = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + expect(JSON.parse(res.stdout).error.code).toBe(ManageCLIError.InaccessibleStoragePath.code); + }); + + it('cli create bucket - account can access path', async () => { + const action = ACTIONS.ADD; + const bucket_options = { config_root, ...bucket_defaults}; + await fs_utils.create_fresh_path(bucket_options.path); + await fs_utils.file_must_exist(bucket_options.path); + await set_path_permissions_and_owner(bucket_options.path, account_defaults, 0o700); + await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + const bucket = await read_config_file(config_root, CONFIG_SUBDIRS.BUCKETS, bucket_defaults.name); + assert_bucket(bucket, bucket_options); + }); + }); @@ -124,6 +165,7 @@ describe('manage nsfs cli bucket flow', () => { const bucket_options = { config_root, ...bucket_defaults }; await fs_utils.create_fresh_path(bucket_path); await fs_utils.file_must_exist(bucket_path); + await set_path_permissions_and_owner(bucket_path, account_options, 0o700); const resp = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); const bucket_resp = JSON.parse(resp); expect(bucket_resp.response.reply._id).not.toBeNull(); @@ -240,6 +282,11 @@ describe('cli create bucket using from_file', () => { await fs_utils.file_must_exist(account_path); await set_path_permissions_and_owner(account_path, account_options, 0o700); await exec_manage_cli(TYPES.ACCOUNT, action, account_options); + // give permission on bucket path to bucket owner + const { path: bucket_path } = bucket_defaults; + await fs_utils.create_fresh_path(bucket_path); + await fs_utils.file_must_exist(bucket_path); + await set_path_permissions_and_owner(bucket_path, account_options, 0o700); }); afterEach(async () => { @@ -371,6 +418,113 @@ 
describe('cli create bucket using from_file', () => { }); +describe('cli update bucket', () => { + const config_root = path.join(tmp_fs_path, 'config_root_manage_nsfs3'); + const root_path = path.join(tmp_fs_path, 'root_path_manage_nsfs3/'); + const bucket_storage_path = path.join(tmp_fs_path, 'root_path_manage_nsfs3', 'bucket1'); + + const account_name = 'account_test'; + const account_name2 = 'account_test_update2'; + + const account_defaults = { + name: account_name, + new_buckets_path: `${root_path}new_buckets_path_4/`, + uid: 1001, + gid: 1001, + }; + + const account_defaults2 = { + name: account_name2, + new_buckets_path: `${root_path}new_buckets_path_user41/`, + uid: 1002, + gid: 1002, + }; + + const bucket_defaults = { + name: 'bucket1', + owner: account_name, + path: bucket_storage_path, + }; + + beforeEach(async () => { + await fs_utils.create_fresh_path(`${config_root}/${CONFIG_SUBDIRS.BUCKETS}`); + await fs_utils.create_fresh_path(root_path); + await fs_utils.create_fresh_path(bucket_storage_path); + const action = ACTIONS.ADD; + // account add 1 + let { new_buckets_path: account_path } = account_defaults; + let account_options = { config_root, ...account_defaults }; + await fs_utils.create_fresh_path(account_path); + await fs_utils.file_must_exist(account_path); + await set_path_permissions_and_owner(account_path, account_options, 0o700); + await exec_manage_cli(TYPES.ACCOUNT, action, account_options); + + // account add 2 + account_path = account_defaults2.new_buckets_path; + account_options = { config_root, ...account_defaults2 }; + await fs_utils.create_fresh_path(account_path); + await fs_utils.file_must_exist(account_path); + await set_path_permissions_and_owner(account_path, account_options, 0o700); + await exec_manage_cli(TYPES.ACCOUNT, action, account_options); + + // bucket add + const { path: bucket_path } = bucket_defaults; + const bucket_options = { config_root, ...bucket_defaults }; + await fs_utils.create_fresh_path(bucket_path); + await 
fs_utils.file_must_exist(bucket_path); + await set_path_permissions_and_owner(bucket_path, account_defaults, 0o700); + const resp = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + const bucket_resp = JSON.parse(resp); + expect(bucket_resp.response.reply._id).not.toBeNull(); + }); + + afterEach(async () => { + await fs_utils.folder_delete(`${config_root}`); + await fs_utils.folder_delete(`${root_path}`); + }); + + it('should fail - cli update bucket owner - owner does not have any permission to path', async () => { + const action = ACTIONS.UPDATE; + const bucket_options = { config_root, name: bucket_defaults.name, owner: account_defaults2.name}; + await fs_utils.create_fresh_path(bucket_defaults.path); + await fs_utils.file_must_exist(bucket_defaults.path); + await set_path_permissions_and_owner(bucket_defaults.path, account_defaults2, 0o077); + const res = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + expect(JSON.parse(res.stdout).error.code).toBe(ManageCLIError.InaccessibleStoragePath.code); + }); + + it('should fail - cli update bucket owner - owner does not have write permission to path', async () => { + const action = ACTIONS.UPDATE; + const bucket_options = { config_root, name: bucket_defaults.name, owner: account_defaults2.name}; + await fs_utils.create_fresh_path(bucket_defaults.path); + await fs_utils.file_must_exist(bucket_defaults.path); + await set_path_permissions_and_owner(bucket_defaults.path, account_defaults2, 0o477); + const res = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + expect(JSON.parse(res.stdout).error.code).toBe(ManageCLIError.InaccessibleStoragePath.code); + }); + + it('should fail - cli update bucket owner - owner does not have read permission to path', async () => { + const action = ACTIONS.UPDATE; + const bucket_options = { config_root, name: bucket_defaults.name, owner: account_defaults2.name}; + await fs_utils.create_fresh_path(bucket_defaults.path); + await 
fs_utils.file_must_exist(bucket_defaults.path); + await set_path_permissions_and_owner(bucket_defaults.path, account_defaults2, 0o277); + const res = await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + expect(JSON.parse(res.stdout).error.code).toBe(ManageCLIError.InaccessibleStoragePath.code); + }); + + it('cli update bucket owner - account can access path', async () => { + const action = ACTIONS.UPDATE; + const bucket_options = { config_root, name: bucket_defaults.name, owner: account_defaults2.name}; + await fs_utils.create_fresh_path(bucket_defaults.path); + await fs_utils.file_must_exist(bucket_defaults.path); + await set_path_permissions_and_owner(bucket_defaults.path, account_defaults2, 0o700); + await exec_manage_cli(TYPES.BUCKET, action, bucket_options); + const bucket = await read_config_file(config_root, CONFIG_SUBDIRS.BUCKETS, bucket_defaults.name); + expect(bucket.bucket_owner).toBe(account_defaults2.name); + }); +}); + /** * exec_manage_cli will get the flags for the cli and runs the cli with it's flags * @param {string} type diff --git a/src/test/unit_tests/nc_coretest.js b/src/test/unit_tests/nc_coretest.js index ea9488edd8..bba152c8d4 100644 --- a/src/test/unit_tests/nc_coretest.js +++ b/src/test/unit_tests/nc_coretest.js @@ -161,12 +161,7 @@ async function config_dir_teardown() { */ async function admin_account_creation() { await announce('admin_account_creation'); - const cli_account_options = { - name: NC_CORETEST, - new_buckets_path: NC_CORETEST_STORAGE_PATH, - uid: 200, - gid: 200 - }; + const cli_account_options = get_admin_account_details(); await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.ADD, cli_account_options); } @@ -228,6 +223,18 @@ const get_name_by_email = email => { return name; }; +/** + * get_admin_account_details returns the account details that we use in NC core tests + */ +function get_admin_account_details() { + const cli_account_options = { + name: NC_CORETEST, + new_buckets_path: NC_CORETEST_STORAGE_PATH, + uid: 200, + 
gid: 200 + }; + return cli_account_options; +} //////////////////////////////////// //////// ACCOUNT MANAGE CMDS /////// @@ -459,3 +466,4 @@ exports.get_dbg_level = get_dbg_level; exports.rpc_client = rpc_cli_funcs_to_manage_nsfs_cli_cmds; exports.get_http_address = get_http_address; exports.get_https_address = get_https_address; +exports.get_admin_account_details = get_admin_account_details; diff --git a/src/test/unit_tests/test_bucketspace.js b/src/test/unit_tests/test_bucketspace.js index a69cc8202b..a866d6ee0e 100644 --- a/src/test/unit_tests/test_bucketspace.js +++ b/src/test/unit_tests/test_bucketspace.js @@ -14,6 +14,7 @@ const assert = require('assert'); const config = require('../../../config'); const fs_utils = require('../../util/fs_utils'); const { stat, open } = require('../../util/nb_native')().fs; +const test_utils = require('../system_tests/test_utils'); const { get_process_fs_context } = require('../../util/native_fs_utils'); const ManageCLIError = require('../../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError; const { TMP_PATH, get_coretest_path, invalid_nsfs_root_permissions, @@ -24,7 +25,7 @@ const { NodeHttpHandler } = require("@smithy/node-http-handler"); const coretest_path = get_coretest_path(); const coretest = require(coretest_path); -const { rpc_client, EMAIL, PASSWORD, SYSTEM } = coretest; +const { rpc_client, EMAIL, PASSWORD, SYSTEM, get_admin_account_details } = coretest; coretest.setup({}); const inspect = (x, max_arr = 5) => util.inspect(x, { colors: true, depth: null, maxArrayLength: max_arr }); @@ -36,6 +37,7 @@ const new_account_params = { }; const first_bucket = 'first.bucket'; +const is_nc_coretest = process.env.NC_CORETEST === 'true'; const encoded_xattr = 'unconfined_u%3Aobject_r%3Auser_home_t%3As0%00'; const decoded_xattr = 'unconfined_u:object_r:user_home_t:s0\x00'; @@ -106,6 +108,11 @@ mocha.describe('bucket operations - namespace_fs', function() { } }); const obj_nsr = { resource: nsr, path: bucket_path }; + // give 
read and write permission to owner + if (is_nc_coretest) { + const { uid, gid } = get_admin_account_details(); + await test_utils.set_path_permissions_and_owner(src_bucket_path, { uid, gid }, 0o700); + } await rpc_client.bucket.create_bucket({ name: bucket_name, namespace: { @@ -134,6 +141,11 @@ mocha.describe('bucket operations - namespace_fs', function() { mocha.it('export other dir as bucket - and update bucket path to original bucket path', async function() { const obj_nsr = { resource: nsr, path: bucket_path }; const other_obj_nsr = { resource: nsr, path: other_bucket_path }; + // give read and write permission to owner + if (is_nc_coretest) { + const { uid, gid } = get_admin_account_details(); + await test_utils.set_path_permissions_and_owner(src1_bucket_path, { uid, gid }, 0o700); + } await rpc_client.bucket.create_bucket({ name: bucket_name + '-other1', namespace: { @@ -174,7 +186,14 @@ mocha.describe('bucket operations - namespace_fs', function() { const res = await s3_owner.listBuckets({}); console.log(inspect(res)); - const list_ok = bucket_in_list([first_bucket], [bucket_name], res.Buckets); + let list_ok; + if (is_nc_coretest) { + // in order to create a bucket in manage nsfs we need to give permission to the owner + // hence we will have the gid and uid of the bucket_name + list_ok = bucket_in_list([first_bucket], [], res.Buckets); + } else { + list_ok = bucket_in_list([first_bucket], [bucket_name], res.Buckets); + } assert.ok(list_ok); }); mocha.it('create account 1 with uid, gid - wrong uid', async function() { @@ -957,6 +976,11 @@ mocha.describe('list objects - namespace_fs', async function() { } }); const obj_nsr = { resource: namespace_resource_name, path: bucket_path }; + // give read and write permission to owner + if (is_nc_coretest) { + const { uid, gid } = get_admin_account_details(); + await test_utils.set_path_permissions_and_owner(full_path, { uid, gid }, 0o700); + } await rpc_client.bucket.create_bucket({ name: bucket_name, namespace: { 
@@ -1560,6 +1584,10 @@ mocha.describe('list buckets - namespace_fs', async function() { await fs.promises.chown(cli_bucket_path, uid, gid); } const obj_nsr = { resource: dummy_nsr, path: bucket }; + // give read and write permission to owner + if (is_nc_coretest) { + await test_utils.set_path_permissions_and_owner(cli_bucket_path, account_info, 0o700); + } const create_bucket_options = { name: bucket, namespace: { diff --git a/src/test/unit_tests/test_nc_nsfs_cli.js b/src/test/unit_tests/test_nc_nsfs_cli.js index f5f5476a78..fb36f15160 100644 --- a/src/test/unit_tests/test_nc_nsfs_cli.js +++ b/src/test/unit_tests/test_nc_nsfs_cli.js @@ -1,6 +1,7 @@ /* Copyright (C) 2016 NooBaa */ +/*eslint max-lines-per-function: ["error", 1200]*/ +/*eslint max-statements: ["error", 90]*/ 'use strict'; -/* eslint-disable max-lines-per-function */ const _ = require('lodash'); const path = require('path'); @@ -46,10 +47,12 @@ mocha.describe('manage_nsfs cli', function() { mocha.describe('cli bucket flow ', async function() { const type = TYPES.BUCKET; const account_name = 'user1'; + const account_name2 = 'user2'; const name = 'bucket1'; const bucket_on_gpfs = 'bucketgpfs1'; const owner = account_name; // in a different variable for readability const bucket_path = `${root_path}${name}/`; + const bucket_on_gpfs_path = `${root_path}${bucket_on_gpfs}/`; const bucket_with_policy = 'bucket-with-policy'; const bucket_policy = generate_s3_policy('*', bucket_with_policy, ['s3:*']).policy; const bucket1_policy = generate_s3_policy('*', name, ['s3:*']).policy; @@ -58,9 +61,26 @@ mocha.describe('manage_nsfs cli', function() { let add_res; let bucket_options = { config_root, name, owner, path: bucket_path }; - const gpfs_bucket_options = { config_root, name: bucket_on_gpfs, owner, path: bucket_path, fs_backend: 'GPFS' }; + const gpfs_bucket_options = { config_root, name: bucket_on_gpfs, owner, path: bucket_on_gpfs_path, fs_backend: 'GPFS' }; const bucket_with_policy_options = { ...bucket_options, 
bucket_policy: bucket_policy, name: bucket_with_policy }; + const new_buckets_path1 = `${root_path}new_buckets_path_user1111/`; + const new_buckets_path2 = `${root_path}new_buckets_path_user2222/`; + const account_options1 = { + config_root: config_root, + name: account_name, + new_buckets_path: new_buckets_path1, + uid: 1111, + gid: 1111, + }; + const account_options2 = { + config_root: config_root, + name: account_name2, + new_buckets_path: new_buckets_path2, + uid: 2222, + gid: 2222, + }; + mocha.before(async () => { await P.all(_.map([CONFIG_SUBDIRS.ACCOUNTS, CONFIG_SUBDIRS.BUCKETS, CONFIG_SUBDIRS.ACCESS_KEYS], async dir => fs_utils.create_fresh_path(`${config_root}/${dir}`))); @@ -84,38 +104,17 @@ mocha.describe('manage_nsfs cli', function() { }); mocha.it('cli create account for bucket (bucket create requirement to have a bucket owner)', async function() { - const account_name2 = 'user2'; - const new_buckets_path1 = `${root_path}new_buckets_path_user1111/`; - const new_buckets_path2 = `${root_path}new_buckets_path_user2222/`; - const uid1 = 1111; - const uid2 = 2222; - const gid1 = 1111; - const gid2 = 2222; const action = ACTIONS.ADD; - // create account 'user1' 'user1@noobaa.io' - const account_options1 = { - config_root: config_root, - name: account_name, - new_buckets_path: new_buckets_path1, - uid: uid1, - gid: gid1, - }; + // create account 'user1' await fs_utils.create_fresh_path(new_buckets_path1); await fs_utils.file_must_exist(new_buckets_path1); - await set_path_permissions_and_owner(new_buckets_path1, { uid: uid1, gid: gid1 }, 0o700); + await set_path_permissions_and_owner(new_buckets_path1, { uid: account_options1.uid, gid: account_options1.gid }, 0o700); await exec_manage_cli(TYPES.ACCOUNT, action, account_options1); - // create account 'user2' 'user2@noobaa.io' - const account_options2 = { - config_root: config_root, - name: account_name2, - new_buckets_path: new_buckets_path2, - uid: uid2, - gid: gid2, - }; + // create account 'user2' await 
fs_utils.create_fresh_path(new_buckets_path2); await fs_utils.file_must_exist(new_buckets_path2); - await set_path_permissions_and_owner(new_buckets_path2, { uid: uid2, gid: gid2 }, 0o700); + await set_path_permissions_and_owner(new_buckets_path2, { uid: account_options2.uid, gid: account_options2.gid }, 0o700); await exec_manage_cli(TYPES.ACCOUNT, action, account_options2); }); @@ -127,7 +126,6 @@ mocha.describe('manage_nsfs cli', function() { const action = ACTIONS.ADD; // create account 'user3' // without new_buckets_path property - // (currently this is the way to set allow_bucket_creation with false value) const account_options = { config_root: config_root, name: account_name_for_account_cannot_create_bucket, @@ -143,6 +141,9 @@ mocha.describe('manage_nsfs cli', function() { owner: account_name_for_account_cannot_create_bucket, path: bucket_path }; + await fs_utils.create_fresh_path(bucket_path); + await fs_utils.file_must_exist(bucket_path); + await set_path_permissions_and_owner(bucket_path, account_options, 0o700); await exec_manage_cli(type, action, { ...bucket_options_with_owner_of_account_cannot_create_bucket }); assert.fail('should have failed with not allowed to create new buckets'); } catch (err) { @@ -155,6 +156,14 @@ mocha.describe('manage_nsfs cli', function() { try { await fs_utils.create_fresh_path(bucket_path); await fs_utils.file_must_exist(bucket_path); + const account = await read_config_file(config_root, CONFIG_SUBDIRS.ACCOUNTS, account_name); + const account_options = { + gid: account.nsfs_account_config.gid, + uid: account.nsfs_account_config.uid, + user: account.nsfs_account_config.distinguished_name, + new_buckets_path: account.nsfs_account_config.mew_buckets_path, + }; + await set_path_permissions_and_owner(bucket_path, account_options, 0o700); await exec_manage_cli(type, action, { ...bucket_options, bucket_policy: invalid_bucket_policy }); assert.fail('should have failed with invalid bucket policy'); } catch (err) { @@ -343,7 +352,15 
@@ mocha.describe('manage_nsfs cli', function() { mocha.it('cli bucket update owner', async function() { const action = ACTIONS.UPDATE; - const update_options = { config_root, owner: 'user2', name }; + const account = await read_config_file(config_root, CONFIG_SUBDIRS.ACCOUNTS, account_name2); + const account_options = { + gid: account.nsfs_account_config.gid, + uid: account.nsfs_account_config.uid, + user: account.nsfs_account_config.distinguished_name, + new_buckets_path: account.nsfs_account_config.new_buckets_path, + }; + await set_path_permissions_and_owner(bucket_path, account_options, 0o700); + const update_options = { config_root, name, owner: account_name2}; const update_res = await exec_manage_cli(type, action, update_options); bucket_options = { ...bucket_options, ...update_options }; const bucket = await read_config_file(config_root, CONFIG_SUBDIRS.BUCKETS, name); @@ -432,6 +449,9 @@ mocha.describe('manage_nsfs cli', function() { mocha.it('cli bucket create on GPFS', async function() { const action = ACTIONS.ADD; + await fs_utils.create_fresh_path(bucket_on_gpfs_path); + await fs_utils.file_must_exist(bucket_on_gpfs_path); + await set_path_permissions_and_owner(bucket_on_gpfs_path, account_options1, 0o700); const bucket_status = await exec_manage_cli(type, action, gpfs_bucket_options); assert_response(action, type, bucket_status, gpfs_bucket_options); const bucket = await read_config_file(config_root, CONFIG_SUBDIRS.BUCKETS, gpfs_bucket_options.name); @@ -439,9 +459,12 @@ mocha.describe('manage_nsfs cli', function() { await assert_config_file_permissions(config_root, CONFIG_SUBDIRS.BUCKETS, gpfs_bucket_options.name); }); - mocha.it('cli bucket update owner', async function() { + mocha.it('cli bucket update owner on GPFS', async function() { const action = ACTIONS.UPDATE; - gpfs_bucket_options.owner = 'user2'; + await fs_utils.create_fresh_path(bucket_on_gpfs_path); + await fs_utils.file_must_exist(bucket_on_gpfs_path); + gpfs_bucket_options.owner = 
account_name2; + await set_path_permissions_and_owner(bucket_on_gpfs_path, account_options2, 0o700); const bucket_status = await exec_manage_cli(type, action, gpfs_bucket_options); assert_response(action, type, bucket_status, gpfs_bucket_options); const bucket = await read_config_file(config_root, CONFIG_SUBDIRS.BUCKETS, gpfs_bucket_options.name); From d0edc4276440dbc8f5e05f276953139caedcf445 Mon Sep 17 00:00:00 2001 From: Romy <35330373+romayalon@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:18:29 +0300 Subject: [PATCH 2/5] add missing dasd disks Signed-off-by: Romy <35330373+romayalon@users.noreply.github.com> (cherry picked from commit 6ce452bacb971ba26b933c0b4f45b15cd3864535) --- src/util/nb_native.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/nb_native.js b/src/util/nb_native.js index d0e9061694..6811cae5e1 100644 --- a/src/util/nb_native.js +++ b/src/util/nb_native.js @@ -120,7 +120,7 @@ async function generate_entropy(loop_cond) { const count = 32; let disk; let disk_size; - for (disk of ['/dev/sda', '/dev/vda', '/dev/xvda']) { + for (disk of ['/dev/sda', '/dev/vda', '/dev/xvda', '/dev/dasda']) { try { const res = await async_exec(`blockdev --getsize64 ${disk}`); disk_size = res.stdout; From 0fd8610d6a32b829de5fba85dd33d9476948df19 Mon Sep 17 00:00:00 2001 From: Romy <35330373+romayalon@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:34:32 +0300 Subject: [PATCH 3/5] NSFS | NC | whitelist ip fix Signed-off-by: Romy <35330373+romayalon@users.noreply.github.com> NC - fix server ip whitelist validation Signed-off-by: Romy <35330373+romayalon@users.noreply.github.com> (cherry picked from commit 167ed0d8c7c004b5cf3c87effd3163746b6249de) --- config.js | 3 +- ...NonContainerizedDeveloperCustomizations.md | 2 +- src/endpoint/s3/s3_rest.js | 2 +- .../schemas/nsfs_config_schema.js | 2 +- src/test/system_tests/test_utils.js | 20 +++++ src/test/unit_tests/nc_coretest.js | 6 +- src/test/unit_tests/test_bucketspace.js | 89 
++++++++++++++----- .../unit_tests/test_bucketspace_versioning.js | 50 ++++------- src/util/http_utils.js | 12 +-- 9 files changed, 120 insertions(+), 66 deletions(-) diff --git a/config.js b/config.js index 5f947b8ab4..1f76a10912 100644 --- a/config.js +++ b/config.js @@ -815,6 +815,7 @@ config.NSFS_LOW_FREE_SPACE_PERCENT_UNLEASH = 0.10; // NSFS NON CONTAINERIZED // //////////////////////////// +config.NC_RELOAD_CONFIG_INTERVAL = 10 * 1000; config.NSFS_NC_CONF_DIR_REDIRECT_FILE = 'config_dir_redirect'; config.NSFS_NC_DEFAULT_CONF_DIR = '/etc/noobaa.conf.d'; config.NSFS_NC_CONF_DIR = process.env.NSFS_NC_CONF_DIR || ''; @@ -1059,7 +1060,7 @@ function reload_nsfs_nc_config() { try { const config_path = path.join(config.NSFS_NC_CONF_DIR, 'config.json'); fs.watchFile(config_path, { - interval: 10 * 1000 + interval: config.NC_RELOAD_CONFIG_INTERVAL }, () => { delete require.cache[config_path]; try { diff --git a/docs/dev_guide/NonContainerizedDeveloperCustomizations.md b/docs/dev_guide/NonContainerizedDeveloperCustomizations.md index 01016fb051..54bd10c556 100644 --- a/docs/dev_guide/NonContainerizedDeveloperCustomizations.md +++ b/docs/dev_guide/NonContainerizedDeveloperCustomizations.md @@ -234,7 +234,7 @@ Example: ``` ## 13. Whitelist IPs - -**Description -** List of whitelist IPs. Access is restricted to these IPs only. If there are no IPs mentioned all IPs are allowed. +**Description -** List of whitelist IPs. Access is restricted to these server IPs only. If there are no IPs mentioned all IPs are allowed. 
**Configuration Key -** S3_SERVER_IP_WHITELIST diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js index cdea3b95b6..5f61a5d43e 100755 --- a/src/endpoint/s3/s3_rest.js +++ b/src/endpoint/s3/s3_rest.js @@ -74,7 +74,7 @@ async function s3_rest(req, res) { async function handle_request(req, res) { - http_utils.validate_nsfs_whitelist(req); + http_utils.validate_server_ip_whitelist(req); http_utils.set_amz_headers(req, res); http_utils.set_cors_headers_s3(req, res); diff --git a/src/server/system_services/schemas/nsfs_config_schema.js b/src/server/system_services/schemas/nsfs_config_schema.js index 1cdf24c493..68760a57d6 100644 --- a/src/server/system_services/schemas/nsfs_config_schema.js +++ b/src/server/system_services/schemas/nsfs_config_schema.js @@ -94,7 +94,7 @@ const nsfs_node_config_schema = { S3_SERVER_IP_WHITELIST: { type: 'array', default: [], - description: 'List of whitelisted IPs for S3 access, Allow access from all the IPs if list is empty.' + description: 'Whitelist of server IPs for S3 access, Allow access to all the IPs if list is empty.' 
}, NSFS_DIR_CACHE_MAX_DIR_SIZE: { type: 'number', diff --git a/src/test/system_tests/test_utils.js b/src/test/system_tests/test_utils.js index 975c346671..706a88eb94 100644 --- a/src/test/system_tests/test_utils.js +++ b/src/test/system_tests/test_utils.js @@ -3,10 +3,13 @@ const fs = require('fs'); const _ = require('lodash'); +const http = require('http'); const P = require('../../util/promise'); const os_utils = require('../../util/os_utils'); const native_fs_utils = require('../../util/native_fs_utils'); const config = require('../../../config'); +const { S3 } = require('@aws-sdk/client-s3'); +const { NodeHttpHandler } = require("@smithy/node-http-handler"); /** * TMP_PATH is a path to the tmp path based on the process platform @@ -328,12 +331,29 @@ function set_nc_config_dir_in_config(config_root) { config.NSFS_NC_CONF_DIR = config_root; } + +function generate_s3_client(access_key, secret_key, endpoint) { + return new S3({ + forcePathStyle: true, + region: config.DEFAULT_REGION, + requestHandler: new NodeHttpHandler({ + httpAgent: new http.Agent({ keepAlive: false }) + }), + credentials: { + accessKeyId: access_key, + secretAccessKey: secret_key, + }, + endpoint + }); +} + exports.blocks_exist_on_cloud = blocks_exist_on_cloud; exports.create_hosts_pool = create_hosts_pool; exports.delete_hosts_pool = delete_hosts_pool; exports.empty_and_delete_buckets = empty_and_delete_buckets; exports.disable_accounts_s3_access = disable_accounts_s3_access; exports.generate_s3_policy = generate_s3_policy; +exports.generate_s3_client = generate_s3_client; exports.invalid_nsfs_root_permissions = invalid_nsfs_root_permissions; exports.get_coretest_path = get_coretest_path; exports.exec_manage_cli = exec_manage_cli; diff --git a/src/test/unit_tests/nc_coretest.js b/src/test/unit_tests/nc_coretest.js index bba152c8d4..3d7eeb97dd 100644 --- a/src/test/unit_tests/nc_coretest.js +++ b/src/test/unit_tests/nc_coretest.js @@ -139,7 +139,11 @@ async function config_dir_setup() { await 
fs.promises.mkdir(config.NSFS_NC_DEFAULT_CONF_DIR, { recursive: true }); await fs.promises.mkdir(NC_CORETEST_CONFIG_DIR_PATH, { recursive: true }); await fs.promises.writeFile(NC_CORETEST_REDIRECT_FILE_PATH, NC_CORETEST_CONFIG_DIR_PATH); - await fs.promises.writeFile(CONFIG_FILE_PATH, JSON.stringify({ ALLOW_HTTP: true, OBJECT_SDK_BUCKET_CACHE_EXPIRY_MS: 1 })); + await fs.promises.writeFile(CONFIG_FILE_PATH, JSON.stringify({ + ALLOW_HTTP: true, + OBJECT_SDK_BUCKET_CACHE_EXPIRY_MS: 1, + NC_RELOAD_CONFIG_INTERVAL: 1 + })); await fs.promises.mkdir(FIRST_BUCKET_PATH, { recursive: true }); } diff --git a/src/test/unit_tests/test_bucketspace.js b/src/test/unit_tests/test_bucketspace.js index a866d6ee0e..3f353743fd 100644 --- a/src/test/unit_tests/test_bucketspace.js +++ b/src/test/unit_tests/test_bucketspace.js @@ -16,9 +16,12 @@ const fs_utils = require('../../util/fs_utils'); const { stat, open } = require('../../util/nb_native')().fs; const test_utils = require('../system_tests/test_utils'); const { get_process_fs_context } = require('../../util/native_fs_utils'); +const { TYPES } = require('../../manage_nsfs/manage_nsfs_constants'); const ManageCLIError = require('../../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError; const { TMP_PATH, get_coretest_path, invalid_nsfs_root_permissions, - generate_s3_policy, create_fs_user_by_platform, delete_fs_user_by_platform } = require('../system_tests/test_utils'); + generate_s3_policy, create_fs_user_by_platform, delete_fs_user_by_platform, + generate_s3_client, exec_manage_cli } = require('../system_tests/test_utils'); + const { S3 } = require('@aws-sdk/client-s3'); const { NodeHttpHandler } = require("@smithy/node-http-handler"); @@ -27,7 +30,7 @@ const coretest_path = get_coretest_path(); const coretest = require(coretest_path); const { rpc_client, EMAIL, PASSWORD, SYSTEM, get_admin_account_details } = coretest; coretest.setup({}); - +let CORETEST_ENDPOINT; const inspect = (x, max_arr = 5) => util.inspect(x, { colors: 
true, depth: null, maxArrayLength: max_arr }); const DEFAULT_FS_CONFIG = get_process_fs_context(); @@ -86,6 +89,7 @@ mocha.describe('bucket operations - namespace_fs', function() { if (process.env.NC_CORETEST) { await create_fs_user_by_platform(no_permissions_dn, 'new_password', 123123123, 123123123); } + CORETEST_ENDPOINT = coretest.get_http_address(); }); mocha.after(async () => { await fs_utils.folder_delete(tmp_fs_root); @@ -513,7 +517,7 @@ mocha.describe('bucket operations - namespace_fs', function() { mocha.it('put object object with wrong uid gid', async function() { const { access_key, secret_key } = account_wrong_uid.access_keys[0]; - const s3_client_wrong_uid = generate_s3_client(access_key.unwrap(), secret_key.unwrap()); + const s3_client_wrong_uid = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT); const bucket = bucket_name + '-s3'; const key = 'ob1.txt'; try { @@ -527,7 +531,7 @@ mocha.describe('bucket operations - namespace_fs', function() { mocha.it('putObject - regular object - head/get object - key/ - should fail with 404', async function() { const { access_key, secret_key } = account_correct_uid.access_keys[0]; - const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap()); + const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT); const bucket = bucket_name + '-s3'; const key = 'reg_obj.txt'; const invalid_dir_key = `${key}/`; @@ -554,7 +558,7 @@ mocha.describe('bucket operations - namespace_fs', function() { mocha.it('putObject/head object - xattr invalid URI', async function() { const { access_key, secret_key } = account_correct_uid.access_keys[0]; - const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap()); + const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT); const bucket = bucket_name + '-s3'; const key = 'obj_dot_xattr.txt'; const xattr = { 'key1.2.3': encoded_xattr }; @@ -572,7 
+576,7 @@ mocha.describe('bucket operations - namespace_fs', function() { //const s3_creds_aws_sdk_v2 = get_aws_sdk_v2_base_params(account_correct_uid, s3_endpoint_address); //const s3 = new AWS.S3(s3_creds_aws_sdk_v2); const { access_key, secret_key } = account_correct_uid.access_keys[0]; - const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap()); + const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT); const key = 'fs_obj_dot_xattr.txt'; const bucket = bucket_name + '-s3'; const PATH = path.join(s3_new_buckets_path, bucket, key); @@ -592,7 +596,7 @@ mocha.describe('bucket operations - namespace_fs', function() { mocha.it('PUT file directly to FS/head object - xattr encoded invalid URI', async function() { const { access_key, secret_key } = account_correct_uid.access_keys[0]; - const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap()); + const s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT); //get_s3_v2_client(account_correct_uid, s3_endpoint_address); const key = 'fs_obj_dot_xattr.txt'; const bucket = bucket_name + '-s3'; @@ -991,11 +995,12 @@ mocha.describe('list objects - namespace_fs', async function() { const s3_policy = generate_s3_policy('*', bucket_name, ['s3:*']); await rpc_client.bucket.put_bucket_policy({ name: s3_policy.params.bucket, policy: s3_policy.policy }); + CORETEST_ENDPOINT = coretest.get_http_address(); for (const account_name of Object.keys(accounts)) { const { uid, gid } = accounts[account_name]; const account = await generate_nsfs_account({ uid, gid, account_name, default_resource: namespace_resource_name }); - const s3_client = generate_s3_client(account.access_key, account.secret_key); + const s3_client = generate_s3_client(account.access_key, account.secret_key, CORETEST_ENDPOINT); accounts[account_name].s3_client = s3_client; } }); @@ -1567,12 +1572,12 @@ mocha.describe('list buckets - namespace_fs', async 
function() { fs_root_path: tmp_fs_root, } }); - + CORETEST_ENDPOINT = coretest.get_http_address(); for (const account_name of Object.keys(accounts)) { const { create_bucket_via, bucket, account_info } = accounts[account_name]; const { uid, gid } = account_info; const account = await generate_nsfs_account({ uid, gid, account_name, new_buckets_path }); - const s3_client = generate_s3_client(account.access_key, account.secret_key); + const s3_client = generate_s3_client(account.access_key, account.secret_key, CORETEST_ENDPOINT); accounts[account_name].s3_client = s3_client; if (create_bucket_via === 'CLI') { @@ -1702,20 +1707,58 @@ mocha.describe('list buckets - namespace_fs', async function() { }); }); -function generate_s3_client(access_key, secret_key) { - return new S3({ - forcePathStyle: true, - region: config.DEFAULT_REGION, - requestHandler: new NodeHttpHandler({ - httpAgent: new http.Agent({ keepAlive: false }) - }), - credentials: { - accessKeyId: access_key, - secretAccessKey: secret_key, - }, - endpoint: coretest.get_http_address() +mocha.describe('s3 whitelist flow', async function() { + this.timeout(50000); // eslint-disable-line no-invalid-this + const nsr = 'server_ip_whitelist_nsr'; + const account_name = 'server_ip_whitelist_account'; + const fs_path = path.join(TMP_PATH, 'server_ip_whitelist_tests'); + + let s3_client; + mocha.before(async function() { + if (!process.env.NC_CORETEST) this.skip(); // eslint-disable-line no-invalid-this + await fs.promises.mkdir(fs_path, { recursive: true }); + await rpc_client.pool.create_namespace_resource({ name: nsr, nsfs_config: { fs_root_path: fs_path }}); + const nsfs_account_config = { + uid: process.getuid(), gid: process.getgid(), new_buckets_path: '/', nsfs_only: true + }; + const account_params = { ...new_account_params, email: `${account_name}@noobaa.io`, name: account_name, default_resource: nsr, nsfs_account_config }; + const res = await rpc_client.account.create_account(account_params); + const { 
access_key, secret_key } = res.access_keys[0]; + CORETEST_ENDPOINT = coretest.get_http_address(); + s3_client = generate_s3_client(access_key.unwrap(), secret_key.unwrap(), CORETEST_ENDPOINT); + }); -} + + mocha.after(async function() { + if (!process.env.NC_CORETEST) return; + await rpc_client.account.delete_account({ email: `${account_name}@noobaa.io` }); + await exec_manage_cli(TYPES.IP_WHITELIST, '', { ips: JSON.stringify([]) }); + }); + + mocha.it('whitelist ips empty - s3 request should succeed', async function() { + const buckets_list = await s3_client.listBuckets({}); + bucket_in_list([first_bucket], [], buckets_list.Buckets); + }); + + mocha.it('server ip is in whitelist ips - s3 request should succeed', async function() { + const ips = ['::1', '127.0.0.1']; + await exec_manage_cli(TYPES.IP_WHITELIST, '', { ips: JSON.stringify(ips) }); + const buckets_list = await s3_client.listBuckets({}); + bucket_in_list([first_bucket], [], buckets_list.Buckets); + }); + + mocha.it('server ip is not in whitelist ips - s3 request should fail', async function() { + const ips = ['162.168.1.2']; + await exec_manage_cli(TYPES.IP_WHITELIST, '', { ips: JSON.stringify(ips) }); + try { + await s3_client.listBuckets({}); + assert.fail('test should have failed with AccessDenied - server ip is not in whitelist ips'); + } catch (err) { + assert.equal(err.Code, 'AccessDenied'); + } + }); +}); + async function generate_nsfs_account(options = {}) { const { uid, gid, new_buckets_path, nsfs_only, admin, default_resource, account_name } = options; diff --git a/src/test/unit_tests/test_bucketspace_versioning.js b/src/test/unit_tests/test_bucketspace_versioning.js index 116f3ddac1..8ce706c02a 100644 --- a/src/test/unit_tests/test_bucketspace_versioning.js +++ b/src/test/unit_tests/test_bucketspace_versioning.js @@ -5,18 +5,14 @@ const fs = require('fs'); const path = require('path'); -const http = require('http'); const mocha = require('mocha'); -const { S3 } = 
require('@aws-sdk/client-s3'); -const { NodeHttpHandler } = require('@smithy/node-http-handler'); -const config = require('../../../config'); const assert = require('assert'); const coretest = require('./coretest'); const { rpc_client, EMAIL } = coretest; const fs_utils = require('../../util/fs_utils'); const nb_native = require('../../util/nb_native'); const size_utils = require('../../util/size_utils'); -const { TMP_PATH, invalid_nsfs_root_permissions } = require('../system_tests/test_utils'); +const { TMP_PATH, invalid_nsfs_root_permissions, generate_s3_client } = require('../system_tests/test_utils'); const { get_process_fs_context } = require('../../util/native_fs_utils'); coretest.setup({}); @@ -28,6 +24,7 @@ const XATTR_DELETE_MARKER = XATTR_INTERNAL_NOOBAA_PREFIX + 'delete_marker'; const NULL_VERSION_ID = 'null'; const DEFAULT_FS_CONFIG = get_process_fs_context(); +let CORETEST_ENDPOINT; mocha.describe('bucketspace namespace_fs - versioning', function() { const nsr = 'versioned-nsr'; @@ -120,9 +117,10 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { } ] }; + CORETEST_ENDPOINT = coretest.get_http_address(); // create accounts let res = await generate_nsfs_account({ admin: true }); - s3_admin = generate_s3_client(res.access_key, res.secret_key); + s3_admin = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await s3_admin.putBucketPolicy({ Bucket: bucket_name, Policy: JSON.stringify(policy) @@ -139,11 +137,11 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { }); res = await generate_nsfs_account({ uid: 5, gid: 5 }); - s3_uid5 = generate_s3_client(res.access_key, res.secret_key); + s3_uid5 = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); accounts.push(res.email); res = await generate_nsfs_account(); - s3_uid6 = generate_s3_client(res.access_key, res.secret_key); + s3_uid6 = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); accounts.push(res.email); 
}); @@ -881,7 +879,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.describe('delete object - regular version - versioning enabled', async function() { mocha.before(async function() { const res = await generate_nsfs_account({ default_resource: nsr }); - account_with_access = generate_s3_client(res.access_key, res.secret_key); + account_with_access = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await account_with_access.createBucket({ Bucket: delete_object_test_bucket_reg }); await put_allow_all_bucket_policy(s3_admin, delete_object_test_bucket_reg); await account_with_access.createBucket({ Bucket: delete_object_test_bucket_null }); @@ -1190,7 +1188,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { let account_with_access; mocha.before(async function() { const res = await generate_nsfs_account({ default_resource: nsr }); - account_with_access = generate_s3_client(res.access_key, res.secret_key); + account_with_access = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await account_with_access.createBucket({ Bucket: delete_object_test_bucket_reg }); await put_allow_all_bucket_policy(s3_admin, delete_object_test_bucket_reg); await account_with_access.createBucket({ Bucket: delete_object_test_bucket_null }); @@ -1567,7 +1565,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.before(async function() { const res = await generate_nsfs_account({ default_resource: nsr }); - account_with_access = generate_s3_client(res.access_key, res.secret_key); + account_with_access = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await account_with_access.createBucket({ Bucket: delete_multi_object_test_bucket }); await put_allow_all_bucket_policy(s3_admin, delete_multi_object_test_bucket); }); @@ -1829,7 +1827,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.before(async function() { const res = await 
generate_nsfs_account({ default_resource: nsr }); - account_with_access = generate_s3_client(res.access_key, res.secret_key); + account_with_access = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await account_with_access.createBucket({ Bucket: delete_multi_object_test_bucket }); await put_allow_all_bucket_policy(s3_admin, delete_multi_object_test_bucket); }); @@ -2104,7 +2102,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { mocha.before(async function() { const res = await generate_nsfs_account({ default_resource: nsr }); - account_with_access = generate_s3_client(res.access_key, res.secret_key); + account_with_access = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await account_with_access.createBucket({ Bucket: list_object_versions_test_bucket }); await put_allow_all_bucket_policy(s3_admin, list_object_versions_test_bucket); }); @@ -2242,14 +2240,14 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { }; // create accounts let res = await generate_nsfs_account({ admin: true }); - s3_admin = generate_s3_client(res.access_key, res.secret_key); + s3_admin = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await s3_admin.putBucketPolicy({ Bucket: bucket_name, Policy: JSON.stringify(policy) }); // create nsfs account res = await generate_nsfs_account(); - s3_client = generate_s3_client(res.access_key, res.secret_key); + s3_client = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); accounts.push(res.email); await s3_client.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }); const bucket_ver = await s3_client.getBucketVersioning({ Bucket: bucket_name }); @@ -2355,7 +2353,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { }; // create accounts let res = await generate_nsfs_account({ admin: true }); - s3_admin = generate_s3_client(res.access_key, 
res.secret_key); + s3_admin = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await s3_admin.putBucketPolicy({ Bucket: bucket_name, Policy: JSON.stringify(policy) @@ -2366,7 +2364,7 @@ mocha.describe('bucketspace namespace_fs - versioning', function() { }); // create nsfs account res = await generate_nsfs_account(); - s3_client = generate_s3_client(res.access_key, res.secret_key); + s3_client = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); accounts.push(res.email); // create a file in version disabled bucket await create_object(`${disabled_full_path}/${dis_version_key}`, dis_version_body, 'null'); @@ -2793,20 +2791,6 @@ async function put_allow_all_bucket_policy(s3_client, bucket) { }); } -function generate_s3_client(access_key, secret_key) { - return new S3({ - forcePathStyle: true, - region: config.DEFAULT_REGION, - requestHandler: new NodeHttpHandler({ - httpAgent: new http.Agent({ keepAlive: false }) - }), - credentials: { - accessKeyId: access_key, - secretAccessKey: secret_key, - }, - endpoint: coretest.get_http_address() - }); -} async function generate_nsfs_account(options = {}) { const { uid, gid, new_buckets_path, nsfs_only, admin, default_resource } = options; @@ -2914,14 +2898,14 @@ mocha.describe('List-objects', function() { }; // create accounts let res = await generate_nsfs_account({ admin: true }); - s3_admin = generate_s3_client(res.access_key, res.secret_key); + s3_admin = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); await s3_admin.putBucketPolicy({ Bucket: bucket_name, Policy: JSON.stringify(policy) }); // create nsfs account res = await generate_nsfs_account(); - s3_client = generate_s3_client(res.access_key, res.secret_key); + s3_client = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); accounts.push(res.email); await s3_client.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }); const 
bucket_ver = await s3_client.getBucketVersioning({ Bucket: bucket_name }); diff --git a/src/util/http_utils.js b/src/util/http_utils.js index e253162c22..6936e4708b 100644 --- a/src/util/http_utils.js +++ b/src/util/http_utils.js @@ -673,16 +673,18 @@ function authorize_session_token(req, options) { throw new options.ErrorClass(options.error_invalid_token); } } -function validate_nsfs_whitelist(req) { + +function validate_server_ip_whitelist(req) { // remove prefix for V4 IPs for whitelist validation - const client_ip = parse_client_ip(req).replace(/^::ffff:/, ''); + // TODO: replace the equality check with net.BlockList() usage + const server_ip = req.connection.localAddress.replace(/^::ffff:/, ''); if (config.S3_SERVER_IP_WHITELIST.length === 0) return; for (const whitelist_ip of config.S3_SERVER_IP_WHITELIST) { - if (client_ip === whitelist_ip) { + if (server_ip === whitelist_ip) { return; } } - dbg.error(`Whitelist ip validation failed for ip : ${client_ip}`); + dbg.error(`Whitelist ip validation failed for ip : ${server_ip}`); throw new S3Error(S3Error.AccessDenied); } @@ -711,4 +713,4 @@ exports.set_cors_headers_sts = set_cors_headers_sts; exports.parse_content_length = parse_content_length; exports.authorize_session_token = authorize_session_token; exports.get_agent_by_endpoint = get_agent_by_endpoint; -exports.validate_nsfs_whitelist = validate_nsfs_whitelist; +exports.validate_server_ip_whitelist = validate_server_ip_whitelist; From 05d6c49da81709da2c2add10ccb6165105803520 Mon Sep 17 00:00:00 2001 From: Romy <35330373+romayalon@users.noreply.github.com> Date: Thu, 25 Apr 2024 18:02:08 +0300 Subject: [PATCH 4/5] virtual hosts configuration Signed-off-by: Romy <35330373+romayalon@users.noreply.github.com> fix code review Signed-off-by: Romy <35330373+romayalon@users.noreply.github.com> (cherry picked from commit f3b303af6448e93976794154f26d44a28d6d9753) --- config.js | 1 + .../NonContainerizedDeveloperCustomizations.md | 17 +++++++++++++++++ 
src/endpoint/endpoint.js | 2 +- .../schemas/nsfs_config_schema.js | 4 ++++ ...est_nc_nsfs_config_schema_validation.test.js | 16 ++++++++++++++++ 5 files changed, 39 insertions(+), 1 deletion(-) diff --git a/config.js b/config.js index 1f76a10912..032d9cb7c2 100644 --- a/config.js +++ b/config.js @@ -831,6 +831,7 @@ config.BASE_MODE_CONFIG_FILE = 0o600; config.BASE_MODE_CONFIG_DIR = 0o700; config.S3_SERVER_IP_WHITELIST = []; +config.VIRTUAL_HOSTS = process.env.VIRTUAL_HOSTS || ''; config.NC_HEALTH_ENDPOINT_RETRY_COUNT = 3; config.NC_HEALTH_ENDPOINT_RETRY_DELAY = 10; diff --git a/docs/dev_guide/NonContainerizedDeveloperCustomizations.md b/docs/dev_guide/NonContainerizedDeveloperCustomizations.md index 54bd10c556..f8631eb75d 100644 --- a/docs/dev_guide/NonContainerizedDeveloperCustomizations.md +++ b/docs/dev_guide/NonContainerizedDeveloperCustomizations.md @@ -405,6 +405,23 @@ Example: 3. systemctl restart noobaa ``` +## 23. Set Virtual hosts - +**Description -** This flag will set the virtual hosts used by NooBaa, service restart required. Set the virtual hosts as string of domains separated by spaces. + +**Configuration Key -** VIRTUAL_HOSTS + +**Type -** string + +**Default -** '' +**Steps -** +``` +1. Open /path/to/config_dir/config.json file. +2. Set the config key - +Example: +"VIRTUAL_HOSTS": 'my.virtual.host.io' +3. 
systemctl restart noobaa_nsfs +``` + ## Config.json example ``` > cat /path/to/config_dir/config.json diff --git a/src/endpoint/endpoint.js b/src/endpoint/endpoint.js index c09485c22a..4fbef3ac7f 100755 --- a/src/endpoint/endpoint.js +++ b/src/endpoint/endpoint.js @@ -101,7 +101,7 @@ async function main(options = {}) { const endpoint_group_id = process.env.ENDPOINT_GROUP_ID || 'default-endpoint-group'; const virtual_hosts = Object.freeze( - (process.env.VIRTUAL_HOSTS || '') + config.VIRTUAL_HOSTS .split(' ') .filter(suffix => net_utils.is_fqdn(suffix)) .sort() diff --git a/src/server/system_services/schemas/nsfs_config_schema.js b/src/server/system_services/schemas/nsfs_config_schema.js index 68760a57d6..6c77637817 100644 --- a/src/server/system_services/schemas/nsfs_config_schema.js +++ b/src/server/system_services/schemas/nsfs_config_schema.js @@ -127,6 +127,10 @@ const nsfs_node_config_schema = { NC_MASTER_KEYS_PUT_EXECUTABLE: { type: 'string', description: 'This flag will set the location of the executable script for updating the master keys file used by NooBa.' + }, + VIRTUAL_HOSTS: { + type: 'string', + description: 'This flag will set the virtual hosts, service restart required, Set the virtual hosts as string of domains separated by spaces.' 
} } }; diff --git a/src/test/unit_tests/jest_tests/test_nc_nsfs_config_schema_validation.test.js b/src/test/unit_tests/jest_tests/test_nc_nsfs_config_schema_validation.test.js index 394df9489f..35f2c0729c 100644 --- a/src/test/unit_tests/jest_tests/test_nc_nsfs_config_schema_validation.test.js +++ b/src/test/unit_tests/jest_tests/test_nc_nsfs_config_schema_validation.test.js @@ -170,6 +170,22 @@ describe('schema validation NC NSFS config', () => { assert_validation(config_data, reason, message); }); + it('nsfs_config invalid config virtual hosts', () => { + const config_data = { + VIRTUAL_HOSTS: 5, // number instead of string + }; + const reason = 'Test should have failed because of wrong type ' + + 'VIRTUAL_HOSTS with number (instead of string)'; + const message = 'must be string'; + assert_validation(config_data, reason, message); + }); + + it('nsfs_config valid config virtual hosts', () => { + const config_data = { + VIRTUAL_HOSTS: 'my.virtual.domain1 my.virtual.domain2' + }; + nsfs_schema_utils.validate_nsfs_config_schema(config_data); + }); }); }); From 95df37900db55f597cee6862b2a2bba2054591ae Mon Sep 17 00:00:00 2001 From: Romy <35330373+romayalon@users.noreply.github.com> Date: Thu, 2 May 2024 16:11:20 +0300 Subject: [PATCH 5/5] bump version to 5.15.3 Signed-off-by: Romy <35330373+romayalon@users.noreply.github.com> --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 8fcba107e7..a8e68429da 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "noobaa-core", - "version": "5.15.2", + "version": "5.15.3", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "noobaa-core", - "version": "5.15.2", + "version": "5.15.3", "hasInstallScript": true, "license": "SEE LICENSE IN LICENSE", "dependencies": { diff --git a/package.json b/package.json index 130c335143..64ecba9f6c 100644 --- a/package.json +++ b/package.json @@ 
-1,6 +1,6 @@ { "name": "noobaa-core", - "version": "5.15.2", + "version": "5.15.3", "license": "SEE LICENSE IN LICENSE", "description": "", "homepage": "https://github.com/noobaa/noobaa-core",