Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NC | Backport to stage_5.15.3 #8004

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion config.js
Original file line number Diff line number Diff line change
Expand Up @@ -815,6 +815,7 @@ config.NSFS_LOW_FREE_SPACE_PERCENT_UNLEASH = 0.10;
// NSFS NON CONTAINERIZED //
////////////////////////////

config.NC_RELOAD_CONFIG_INTERVAL = 10 * 1000;
config.NSFS_NC_CONF_DIR_REDIRECT_FILE = 'config_dir_redirect';
config.NSFS_NC_DEFAULT_CONF_DIR = '/etc/noobaa.conf.d';
config.NSFS_NC_CONF_DIR = process.env.NSFS_NC_CONF_DIR || '';
Expand All @@ -830,6 +831,7 @@ config.BASE_MODE_CONFIG_FILE = 0o600;
config.BASE_MODE_CONFIG_DIR = 0o700;

config.S3_SERVER_IP_WHITELIST = [];
config.VIRTUAL_HOSTS = process.env.VIRTUAL_HOSTS || '';

config.NC_HEALTH_ENDPOINT_RETRY_COUNT = 3;
config.NC_HEALTH_ENDPOINT_RETRY_DELAY = 10;
Expand Down Expand Up @@ -1059,7 +1061,7 @@ function reload_nsfs_nc_config() {
try {
const config_path = path.join(config.NSFS_NC_CONF_DIR, 'config.json');
fs.watchFile(config_path, {
interval: 10 * 1000
interval: config.NC_RELOAD_CONFIG_INTERVAL
}, () => {
delete require.cache[config_path];
try {
Expand Down
19 changes: 18 additions & 1 deletion docs/dev_guide/NonContainerizedDeveloperCustomizations.md
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,7 @@ Example:
```

## 13. Whitelist IPs -
**Description -** List of whitelist IPs. Access is restricted to these IPs only. If there are no IPs mentioned all IPs are allowed.
**Description -** Whitelist of server IPs. Access is restricted to these server IPs only. If no IPs are listed, all IPs are allowed.

**Configuration Key -** S3_SERVER_IP_WHITELIST

Expand Down Expand Up @@ -405,6 +405,23 @@ Example:
3. systemctl restart noobaa
```

## 23. Set Virtual hosts -
**Description -** This flag will set the virtual hosts used by NooBaa; a service restart is required. Set the virtual hosts as a string of domains separated by spaces.

**Configuration Key -** VIRTUAL_HOSTS

**Type -** string

**Default -** ''

**Steps -**
```
1. Open /path/to/config_dir/config.json file.
2. Set the config key -
Example:
"VIRTUAL_HOSTS": "my.virtual.host.io"
3. systemctl restart noobaa_nsfs
```

## Config.json example
```
> cat /path/to/config_dir/config.json
Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "noobaa-core",
"version": "5.15.2",
"version": "5.15.3",
"license": "SEE LICENSE IN LICENSE",
"description": "",
"homepage": "https://github.com/noobaa/noobaa-core",
Expand Down
43 changes: 22 additions & 21 deletions src/cmd/manage_nsfs.js
Original file line number Diff line number Diff line change
Expand Up @@ -203,13 +203,11 @@ function get_symlink_config_file_path(config_type_path, file_name) {

async function add_bucket(data) {
await validate_bucket_args(data, ACTIONS.ADD);
const account_id = await verify_bucket_owner(data.bucket_owner, ACTIONS.ADD);
const fs_context = native_fs_utils.get_process_fs_context(config_root_backend);
const bucket_conf_path = get_config_file_path(buckets_dir_path, data.name);
const exists = await native_fs_utils.is_path_exists(fs_context, bucket_conf_path);
if (exists) throw_cli_error(ManageCLIError.BucketAlreadyExists, data.name, { bucket: data.name });
data._id = mongo_utils.mongoObjectId();
data.owner_account = account_id;
const data_json = JSON.stringify(data);
// We take an object that was stringify
// (it unwraps ths sensitive strings, creation_date to string and removes undefined parameters)
Expand All @@ -219,33 +217,23 @@ async function add_bucket(data) {
write_stdout_response(ManageCLIResponse.BucketCreated, data_json, { bucket: data.name });
}

/** verify_bucket_owner will check if the bucket_owner has an account
* bucket_owner is the account name in the account schema
* after it finds one, it returns the account id, otherwise it would throw an error
* (in case the action is add bucket it also checks that the owner has allow_bucket_creation)
* @param {string} bucket_owner account name
* @param {string} action
/**
* get_bucket_owner_account will return the account of the bucket_owner
* otherwise it would throw an error
* @param {string} bucket_owner
*/
async function verify_bucket_owner(bucket_owner, action) {
// check if bucket owner exists
async function get_bucket_owner_account(bucket_owner) {
const account_config_path = get_config_file_path(accounts_dir_path, bucket_owner);
let account;
try {
account = await get_config_data(account_config_path);
const account = await get_config_data(account_config_path);
return account;
} catch (err) {
if (err.code === 'ENOENT') {
const detail_msg = `bucket owner ${bucket_owner} does not exists`;
throw_cli_error(ManageCLIError.BucketSetForbiddenNoBucketOwner, detail_msg, {bucket_owner: bucket_owner});
}
throw err;
}
// check if bucket owner has the permission to create bucket (for bucket add only)
if (action === ACTIONS.ADD && !account.allow_bucket_creation) {
const detail_msg = `${bucket_owner} account not allowed to create new buckets. ` +
`Please make sure to have a valid new_buckets_path and enable the flag allow_bucket_creation`;
throw_cli_error(ManageCLIError.BucketCreationNotAllowed, detail_msg);
}
return account._id;
}

async function get_bucket_status(data) {
Expand All @@ -263,7 +251,6 @@ async function get_bucket_status(data) {

async function update_bucket(data) {
await validate_bucket_args(data, ACTIONS.UPDATE);
await verify_bucket_owner(data.bucket_owner, ACTIONS.UPDATE);

const fs_context = native_fs_utils.get_process_fs_context(config_root_backend);

Expand Down Expand Up @@ -760,7 +747,7 @@ function get_access_keys(action, user_input) {
async function validate_bucket_args(data, action) {
if (action === ACTIONS.DELETE || action === ACTIONS.STATUS) {
if (_.isUndefined(data.name)) throw_cli_error(ManageCLIError.MissingBucketNameFlag);
} else {
} else { // action === ACTIONS.ADD || action === ACTIONS.UPDATE
if (_.isUndefined(data.name)) throw_cli_error(ManageCLIError.MissingBucketNameFlag);
try {
native_fs_utils.validate_bucket_creation({ name: data.name });
Expand All @@ -787,6 +774,20 @@ async function validate_bucket_args(data, action) {
if (!exists) {
throw_cli_error(ManageCLIError.InvalidStoragePath, data.path);
}
const account = await get_bucket_owner_account(data.bucket_owner);
const account_fs_context = await native_fs_utils.get_fs_context(account.nsfs_account_config, data.fs_backend);
const accessible = await native_fs_utils.is_dir_rw_accessible(account_fs_context, data.path);
if (!accessible) {
throw_cli_error(ManageCLIError.InaccessibleStoragePath, data.path);
}
if (action === ACTIONS.ADD) {
if (!account.allow_bucket_creation) {
const detail_msg = `${data.bucket_owner} account not allowed to create new buckets. ` +
`Please make sure to have a valid new_buckets_path and enable the flag allow_bucket_creation`;
throw_cli_error(ManageCLIError.BucketCreationNotAllowed, detail_msg);
}
data.owner_account = account._id; // TODO move this assignment to better place
}
if (data.s3_policy) {
try {
await bucket_policy_utils.validate_s3_policy(data.s3_policy, data.name,
Expand Down
2 changes: 1 addition & 1 deletion src/endpoint/endpoint.js
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ async function main(options = {}) {
const endpoint_group_id = process.env.ENDPOINT_GROUP_ID || 'default-endpoint-group';

const virtual_hosts = Object.freeze(
(process.env.VIRTUAL_HOSTS || '')
config.VIRTUAL_HOSTS
.split(' ')
.filter(suffix => net_utils.is_fqdn(suffix))
.sort()
Expand Down
2 changes: 1 addition & 1 deletion src/endpoint/s3/s3_rest.js
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ async function s3_rest(req, res) {

async function handle_request(req, res) {

http_utils.validate_nsfs_whitelist(req);
http_utils.validate_server_ip_whitelist(req);
http_utils.set_amz_headers(req, res);
http_utils.set_cors_headers_s3(req, res);

Expand Down
5 changes: 5 additions & 0 deletions src/manage_nsfs/manage_nsfs_cli_errors.js
Original file line number Diff line number Diff line change
Expand Up @@ -360,6 +360,11 @@ ManageCLIError.MalformedPolicy = Object.freeze({
http_code: 400,
});

ManageCLIError.InaccessibleStoragePath = Object.freeze({
code: 'InaccessibleStoragePath',
message: 'Bucket owner should have read & write access to the specified bucket storage path',
http_code: 400,
});

ManageCLIError.BucketNotEmpty = Object.freeze({
code: 'BucketNotEmpty',
Expand Down
6 changes: 5 additions & 1 deletion src/server/system_services/schemas/nsfs_config_schema.js
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ const nsfs_node_config_schema = {
S3_SERVER_IP_WHITELIST: {
type: 'array',
default: [],
description: 'List of whitelisted IPs for S3 access, Allow access from all the IPs if list is empty.'
description: 'Whitelist of server IPs for S3 access. Access is allowed from all IPs if the list is empty.'
},
NSFS_DIR_CACHE_MAX_DIR_SIZE: {
type: 'number',
Expand Down Expand Up @@ -127,6 +127,10 @@ const nsfs_node_config_schema = {
NC_MASTER_KEYS_PUT_EXECUTABLE: {
type: 'string',
description: 'This flag will set the location of the executable script for updating the master keys file used by NooBaa.'
},
VIRTUAL_HOSTS: {
type: 'string',
description: 'This flag will set the virtual hosts; service restart required. Set the virtual hosts as a string of domains separated by spaces.'
}
}
};
Expand Down
20 changes: 20 additions & 0 deletions src/test/system_tests/test_utils.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,13 @@

const fs = require('fs');
const _ = require('lodash');
const http = require('http');
const P = require('../../util/promise');
const os_utils = require('../../util/os_utils');
const native_fs_utils = require('../../util/native_fs_utils');
const config = require('../../../config');
const { S3 } = require('@aws-sdk/client-s3');
const { NodeHttpHandler } = require("@smithy/node-http-handler");

/**
* TMP_PATH is a path to the tmp path based on the process platform
Expand Down Expand Up @@ -328,12 +331,29 @@ function set_nc_config_dir_in_config(config_root) {
config.NSFS_NC_CONF_DIR = config_root;
}


function generate_s3_client(access_key, secret_key, endpoint) {
return new S3({
forcePathStyle: true,
region: config.DEFAULT_REGION,
requestHandler: new NodeHttpHandler({
httpAgent: new http.Agent({ keepAlive: false })
}),
credentials: {
accessKeyId: access_key,
secretAccessKey: secret_key,
},
endpoint
});
}

exports.blocks_exist_on_cloud = blocks_exist_on_cloud;
exports.create_hosts_pool = create_hosts_pool;
exports.delete_hosts_pool = delete_hosts_pool;
exports.empty_and_delete_buckets = empty_and_delete_buckets;
exports.disable_accounts_s3_access = disable_accounts_s3_access;
exports.generate_s3_policy = generate_s3_policy;
exports.generate_s3_client = generate_s3_client;
exports.invalid_nsfs_root_permissions = invalid_nsfs_root_permissions;
exports.get_coretest_path = get_coretest_path;
exports.exec_manage_cli = exec_manage_cli;
Expand Down
Loading
Loading