diff --git a/.github/workflows/nginx.yaml b/.github/workflows/nginx.yaml index d3c58c8c..469db12f 100644 --- a/.github/workflows/nginx.yaml +++ b/.github/workflows/nginx.yaml @@ -52,6 +52,7 @@ env: load_module ${{ github.workspace }}/nginx/objs/ngx_http_async_module.so; load_module ${{ github.workspace }}/nginx/objs/ngx_http_awssigv4_module.so; load_module ${{ github.workspace }}/nginx/objs/ngx_http_curl_module.so; + load_module ${{ github.workspace }}/nginx/objs/ngx_http_shared_dict_module.so; load_module ${{ github.workspace }}/nginx/objs/ngx_http_upstream_custom_module.so; OPENSSL_VERSION: '3.0.16' diff --git a/Cargo.lock b/Cargo.lock index f4c405a1..9c408ef8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,6 +26,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -495,9 +501,9 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -561,6 +567,8 @@ dependencies = [ name = "ngx" version = "0.5.0" dependencies = [ + "allocator-api2", + "lock_api", "nginx-sys", "target-triple", ] diff --git a/Cargo.toml b/Cargo.toml index 014cff92..0daa1bd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,15 +25,20 @@ repository.workspace = true rust-version.workspace = true [dependencies] +allocator-api2 = { version = "0.2.21", default-features = false } +lock_api = "0.4.13" nginx-sys = { path = "nginx-sys", default-features=false, version = "0.5.0"} [features] default = 
["vendored","std"] # Enables the components using memory allocation. # If no `std` flag, `alloc` crate is internally used instead. This flag is mainly for `no_std` build. -alloc = [] +alloc = ["allocator-api2/alloc"] # Enables the components using `std` crate. -std = ["alloc"] +std = [ + "alloc", + "allocator-api2/std" +] # Build our own copy of the NGINX by default. # This could be disabled with `--no-default-features` to minimize the dependency # tree when building against an existing copy of the NGINX with the diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 3e199861..9006aba7 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -51,6 +51,11 @@ name = "async" path = "async.rs" crate-type = ["cdylib"] +[[example]] +name = "shared_dict" +path = "shared_dict.rs" +crate-type = ["cdylib"] + [features] default = ["export-modules", "ngx/vendored"] # Generate `ngx_modules` table with module exports diff --git a/examples/config b/examples/config index 5f1ce60b..6b763652 100644 --- a/examples/config +++ b/examples/config @@ -39,6 +39,14 @@ if [ $HTTP = YES ]; then ngx_rust_module fi + if :; then + ngx_module_name=ngx_http_shared_dict_module + ngx_module_libs= + ngx_rust_target_name=shared_dict + + ngx_rust_module + fi + if :; then ngx_module_name=ngx_http_upstream_custom_module ngx_module_libs= diff --git a/examples/shared_dict.rs b/examples/shared_dict.rs new file mode 100644 index 00000000..5f293e9a --- /dev/null +++ b/examples/shared_dict.rs @@ -0,0 +1,443 @@ +#![no_std] +use ::core::ffi::{c_char, c_void}; +use ::core::{mem, ptr, slice}; + +use nginx_sys::{ + ngx_command_t, ngx_conf_t, ngx_http_add_variable, ngx_http_compile_complex_value_t, + ngx_http_complex_value, ngx_http_complex_value_t, ngx_http_module_t, ngx_http_request_t, + ngx_http_variable_t, ngx_http_variable_value_t, ngx_int_t, ngx_module_t, ngx_parse_size, + ngx_shared_memory_add, ngx_shm_zone_t, ngx_str_t, ngx_uint_t, NGX_CONF_TAKE2, NGX_HTTP_DELETE, + NGX_HTTP_MAIN_CONF, 
NGX_HTTP_MAIN_CONF_OFFSET, NGX_HTTP_MODULE, NGX_HTTP_VAR_CHANGEABLE, + NGX_HTTP_VAR_NOCACHEABLE, NGX_LOG_EMERG, +}; +use ngx::core::{ + NgxStr, NgxString, Pool, RbTreeMap, SlabPool, Status, NGX_CONF_ERROR, NGX_CONF_OK, +}; +use ngx::http::{HttpModule, HttpModuleMainConf}; +use ngx::{ngx_conf_log_error, ngx_log_debug, ngx_string}; + +struct HttpSharedDictModule; + +impl HttpModule for HttpSharedDictModule { + fn module() -> &'static ngx_module_t { + unsafe { &*ptr::addr_of!(ngx_http_shared_dict_module) } + } + + unsafe extern "C" fn preconfiguration(cf: *mut ngx_conf_t) -> ngx_int_t { + for mut v in NGX_HTTP_SHARED_DICT_VARS { + let var = ngx_http_add_variable(cf, &mut v.name, v.flags); + if var.is_null() { + return Status::NGX_ERROR.into(); + } + (*var).get_handler = v.get_handler; + (*var).set_handler = v.set_handler; + (*var).data = v.data; + } + Status::NGX_OK.into() + } +} + +unsafe impl HttpModuleMainConf for HttpSharedDictModule { + type MainConf = SharedDictMainConfig; +} + +static mut NGX_HTTP_SHARED_DICT_COMMANDS: [ngx_command_t; 3] = [ + ngx_command_t { + name: ngx_string!("shared_dict_zone"), + type_: (NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE2) as ngx_uint_t, + set: Some(ngx_http_shared_dict_add_zone), + conf: NGX_HTTP_MAIN_CONF_OFFSET, + offset: 0, + post: ptr::null_mut(), + }, + ngx_command_t { + name: ngx_string!("shared_dict"), + type_: (NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE2) as ngx_uint_t, + set: Some(ngx_http_shared_dict_add_variable), + conf: NGX_HTTP_MAIN_CONF_OFFSET, + offset: 0, + post: ptr::null_mut(), + }, + ngx_command_t::empty(), +]; + +static mut NGX_HTTP_SHARED_DICT_VARS: [ngx_http_variable_t; 1] = [ngx_http_variable_t { + name: ngx_string!("shared_dict_entries"), + set_handler: Some(ngx_http_shared_dict_set_entries), + get_handler: Some(ngx_http_shared_dict_get_entries), + data: 0, + flags: (NGX_HTTP_VAR_CHANGEABLE | NGX_HTTP_VAR_NOCACHEABLE) as ngx_uint_t, + index: 0, +}]; + +static NGX_HTTP_SHARED_DICT_MODULE_CTX: ngx_http_module_t = 
ngx_http_module_t { + preconfiguration: Some(HttpSharedDictModule::preconfiguration), + postconfiguration: None, + create_main_conf: Some(HttpSharedDictModule::create_main_conf), + init_main_conf: None, + create_srv_conf: None, + merge_srv_conf: None, + create_loc_conf: None, + merge_loc_conf: None, +}; + +// Generate the `ngx_modules` table with exported modules. +// This feature is required to build a 'cdylib' dynamic module outside of the NGINX buildsystem. +#[cfg(feature = "export-modules")] +ngx::ngx_modules!(ngx_http_shared_dict_module); + +#[used] +#[allow(non_upper_case_globals)] +#[cfg_attr(not(feature = "export-modules"), no_mangle)] +pub static mut ngx_http_shared_dict_module: ngx_module_t = ngx_module_t { + ctx: ptr::addr_of!(NGX_HTTP_SHARED_DICT_MODULE_CTX) as _, + commands: unsafe { ptr::addr_of_mut!(NGX_HTTP_SHARED_DICT_COMMANDS[0]) }, + type_: NGX_HTTP_MODULE as _, + ..ngx_module_t::default() +}; + +type SharedData = ngx::sync::RwLock, NgxString, SlabPool>>; + +#[derive(Debug)] +struct SharedDictMainConfig { + shm_zone: *mut ngx_shm_zone_t, +} + +impl Default for SharedDictMainConfig { + fn default() -> Self { + Self { + shm_zone: ptr::null_mut(), + } + } +} + +extern "C" fn ngx_http_shared_dict_add_zone( + cf: *mut ngx_conf_t, + _cmd: *mut ngx_command_t, + conf: *mut c_void, +) -> *mut c_char { + // SAFETY: configuration handlers always receive a valid `cf` pointer. + let cf = unsafe { cf.as_mut().unwrap() }; + let smcf = unsafe { + conf.cast::() + .as_mut() + .expect("shared dict main config") + }; + + // SAFETY: + // - `cf.args` is guaranteed to be a pointer to an array with 3 elements (NGX_CONF_TAKE2). + // - The pointers are well-aligned by construction method (`ngx_palloc`). 
+ debug_assert!(!cf.args.is_null() && unsafe { (*cf.args).nelts >= 3 }); + let args = + unsafe { slice::from_raw_parts_mut((*cf.args).elts as *mut ngx_str_t, (*cf.args).nelts) }; + + let name: ngx_str_t = args[1]; + let size = unsafe { ngx_parse_size(&mut args[2]) }; + if size == -1 { + return NGX_CONF_ERROR; + } + + smcf.shm_zone = unsafe { + ngx_shared_memory_add( + cf, + ptr::addr_of!(name).cast_mut(), + size as usize, + ptr::addr_of_mut!(ngx_http_shared_dict_module).cast(), + ) + }; + + let Some(shm_zone) = (unsafe { smcf.shm_zone.as_mut() }) else { + return NGX_CONF_ERROR; + }; + + shm_zone.init = Some(ngx_http_shared_dict_zone_init); + shm_zone.data = ptr::from_mut(smcf).cast(); + + NGX_CONF_OK +} + +fn ngx_http_shared_dict_get_shared(shm_zone: &mut ngx_shm_zone_t) -> Result<&SharedData, Status> { + let mut alloc = unsafe { SlabPool::from_shm_zone(shm_zone) }.ok_or(Status::NGX_ERROR)?; + + if alloc.as_mut().data.is_null() { + let shared: RbTreeMap, NgxString, SlabPool> = + RbTreeMap::try_new_in(alloc.clone()).map_err(|_| Status::NGX_ERROR)?; + + let shared = ngx::sync::RwLock::new(shared); + + alloc.as_mut().data = ngx::allocator::allocate(shared, &alloc) + .map_err(|_| Status::NGX_ERROR)? + .as_ptr() + .cast(); + } + + unsafe { + alloc + .as_ref() + .data + .cast::() + .as_ref() + .ok_or(Status::NGX_ERROR) + } +} + +extern "C" fn ngx_http_shared_dict_zone_init( + shm_zone: *mut ngx_shm_zone_t, + _data: *mut c_void, +) -> ngx_int_t { + let shm_zone = unsafe { &mut *shm_zone }; + + match ngx_http_shared_dict_get_shared(shm_zone) { + Err(e) => e.into(), + Ok(_) => Status::NGX_OK.into(), + } +} + +extern "C" fn ngx_http_shared_dict_add_variable( + cf: *mut ngx_conf_t, + _cmd: *mut ngx_command_t, + _conf: *mut c_void, +) -> *mut c_char { + // SAFETY: configuration handlers always receive a valid `cf` pointer. 
+ let cf = unsafe { cf.as_mut().unwrap() }; + let mut pool = unsafe { Pool::from_ngx_pool(cf.pool) }; + + let key = pool.calloc_type::(); + if key.is_null() { + return NGX_CONF_ERROR; + } + + // SAFETY: + // - `cf.args` is guaranteed to be a pointer to an array with 3 elements (NGX_CONF_TAKE2). + // - The pointers are well-aligned by construction method (`ngx_palloc`). + debug_assert!(!cf.args.is_null() && unsafe { (*cf.args).nelts >= 3 }); + let args = + unsafe { slice::from_raw_parts_mut((*cf.args).elts as *mut ngx_str_t, (*cf.args).nelts) }; + + let mut ccv: ngx_http_compile_complex_value_t = unsafe { mem::zeroed() }; + ccv.cf = cf; + ccv.value = &mut args[1]; + ccv.complex_value = key; + + if unsafe { nginx_sys::ngx_http_compile_complex_value(&mut ccv) } != Status::NGX_OK.into() { + return NGX_CONF_ERROR; + } + + let mut name = args[2]; + + if name.as_bytes()[0] != b'$' { + ngx_conf_log_error!(NGX_LOG_EMERG, cf, "invalid variable name \"{name}\""); + return NGX_CONF_ERROR; + } + + name.data = unsafe { name.data.add(1) }; + name.len -= 1; + + let var = unsafe { + ngx_http_add_variable( + cf, + &mut name, + (NGX_HTTP_VAR_CHANGEABLE | NGX_HTTP_VAR_NOCACHEABLE) as ngx_uint_t, + ) + }; + if var.is_null() { + return NGX_CONF_ERROR; + } + + unsafe { + (*var).get_handler = Some(ngx_http_shared_dict_get_variable); + (*var).set_handler = Some(ngx_http_shared_dict_set_variable); + (*var).data = key as usize; + } + + NGX_CONF_OK +} + +extern "C" fn ngx_http_shared_dict_get_variable( + r: *mut ngx_http_request_t, + v: *mut ngx_http_variable_value_t, + data: usize, +) -> ngx_int_t { + let r = unsafe { &mut *r }; + let v = unsafe { &mut *v }; + let smcf = HttpSharedDictModule::main_conf_mut(r).expect("shared dict main config"); + + let mut key = ngx_str_t::empty(); + if unsafe { ngx_http_complex_value(r, data as _, &mut key) } != Status::NGX_OK.into() { + return Status::NGX_ERROR.into(); + } + + let key = unsafe { NgxStr::from_ngx_str(key) }; + + let Ok(shared) = 
ngx_http_shared_dict_get_shared(unsafe { &mut *smcf.shm_zone }) else { + return Status::NGX_ERROR.into(); + }; + + let value = shared + .read() + .get(key) + .and_then(|x| unsafe { ngx_str_t::from_bytes(r.pool, x.as_bytes()) }); + + ngx_log_debug!( + unsafe { (*r.connection).log }, + "shared dict: get \"{}\" -> {:?} w:{} p:{}", + key, + value.as_ref().map(|x| unsafe { NgxStr::from_ngx_str(*x) }), + unsafe { nginx_sys::ngx_worker }, + unsafe { nginx_sys::ngx_pid }, + ); + + let Some(value) = value else { + v.set_not_found(1); + return Status::NGX_ERROR.into(); + }; + + v.data = value.data; + v.set_len(value.len as _); + + v.set_valid(1); + v.set_no_cacheable(0); + v.set_not_found(0); + + Status::NGX_OK.into() +} + +extern "C" fn ngx_http_shared_dict_set_variable( + r: *mut ngx_http_request_t, + v: *mut ngx_http_variable_value_t, + data: usize, +) { + let r = unsafe { &mut *r }; + let v = unsafe { &mut *v }; + let smcf = HttpSharedDictModule::main_conf_mut(r).expect("shared dict main config"); + let mut key = ngx_str_t::empty(); + + if unsafe { ngx_http_complex_value(r, data as _, &mut key) } != Status::NGX_OK.into() { + return; + } + + let Ok(shared) = ngx_http_shared_dict_get_shared(unsafe { &mut *smcf.shm_zone }) else { + return; + }; + + if r.method == NGX_HTTP_DELETE as _ { + let key = unsafe { NgxStr::from_ngx_str(key) }; + + ngx_log_debug!( + unsafe { (*r.connection).log }, + "shared dict: delete \"{}\" w:{} p:{}", + key, + unsafe { nginx_sys::ngx_worker }, + unsafe { nginx_sys::ngx_pid }, + ); + + let _ = shared.write().remove(key); + } else { + let alloc = unsafe { SlabPool::from_shm_zone(&*smcf.shm_zone).expect("slab pool") }; + + let Ok(key) = NgxString::try_from_bytes_in(key.as_bytes(), alloc.clone()) else { + return; + }; + + let Ok(value) = NgxString::try_from_bytes_in(v.as_bytes(), alloc.clone()) else { + return; + }; + + ngx_log_debug!( + unsafe { (*r.connection).log }, + "shared dict: set \"{}\" -> \"{}\" w:{} p:{}", + key, + value, + unsafe { 
nginx_sys::ngx_worker }, + unsafe { nginx_sys::ngx_pid }, + ); + + let _ = shared.write().try_insert(key, value); + } +} + +extern "C" fn ngx_http_shared_dict_get_entries( + r: *mut ngx_http_request_t, + v: *mut ngx_http_variable_value_t, + _data: usize, +) -> ngx_int_t { + use core::fmt::Write; + + let r = unsafe { &mut *r }; + let v = unsafe { &mut *v }; + let pool = unsafe { Pool::from_ngx_pool(r.pool) }; + let smcf = HttpSharedDictModule::main_conf_mut(r).expect("shared dict main config"); + + ngx_log_debug!( + unsafe { (*r.connection).log }, + "shared dict: get all entries" + ); + + let Ok(shared) = ngx_http_shared_dict_get_shared(unsafe { &mut *smcf.shm_zone }) else { + return Status::NGX_ERROR.into(); + }; + + let mut str = NgxString::new_in(pool); + { + let dict = shared.read(); + + let mut len: usize = 0; + let mut values: usize = 0; + + for (key, value) in dict.iter() { + len += key.len() + value.len() + b" = ; ".len(); + values += 1; + } + + len += values.checked_ilog10().unwrap_or(0) as usize + b"0; ".len(); + + if str.try_reserve(len).is_err() { + return Status::NGX_ERROR.into(); + } + + if write!(str, "{values}; ").is_err() { + return Status::NGX_ERROR.into(); + } + + for (key, value) in dict.iter() { + if write!(str, "{key} = {value}; ").is_err() { + return Status::NGX_ERROR.into(); + } + } + } + + // The string is allocated on the `ngx_pool_t` and will be freed with the request. 
+ let (data, len, _, _) = str.into_raw_parts(); + + v.data = data; + v.set_len(len as _); + + v.set_valid(1); + v.set_no_cacheable(1); + v.set_not_found(0); + + Status::NGX_OK.into() +} + +extern "C" fn ngx_http_shared_dict_set_entries( + r: *mut ngx_http_request_t, + _v: *mut ngx_http_variable_value_t, + _data: usize, +) { + let r = unsafe { &mut *r }; + let smcf = HttpSharedDictModule::main_conf_mut(r).expect("shared dict main config"); + + ngx_log_debug!(unsafe { (*r.connection).log }, "shared dict: clear"); + + let Ok(shared) = ngx_http_shared_dict_get_shared(unsafe { &mut *smcf.shm_zone }) else { + return; + }; + + let Ok(tree) = RbTreeMap::try_new_in(shared.read().allocator().clone()) else { + return; + }; + + // This would check both .clear() and the drop implementation + *shared.write() = tree; + // shared.write().clear() +} diff --git a/examples/t/shared_dict.t b/examples/t/shared_dict.t new file mode 100644 index 00000000..fdb9fc65 --- /dev/null +++ b/examples/t/shared_dict.t @@ -0,0 +1,129 @@ +#!/usr/bin/perl + +# (C) Nginx, Inc + +# Tests for ngx-rust example modules. 
+ +############################################################################### + +use warnings; +use strict; + +use Test::More; + +BEGIN { use FindBin; chdir($FindBin::Bin); } + +use lib 'lib'; +use Test::Nginx; + +############################################################################### + +select STDERR; $| = 1; +select STDOUT; $| = 1; + +my $t = Test::Nginx->new()->has(qw/http rewrite/)->plan(12) + ->write_file_expand('nginx.conf', <<'EOF'); + +%%TEST_GLOBALS%% + +daemon off; + +worker_processes 2; + +events { +} + +http { + %%TEST_GLOBALS_HTTP%% + + shared_dict_zone z 64k; + shared_dict $arg_key $foo; + + server { + listen 127.0.0.1:8080; + server_name localhost; + + add_header X-Value $foo; + add_header X-Process $pid; + + location /set/ { + add_header X-Process $pid; + set $foo $arg_value; + return 200; + } + + location /entries/ { + add_header X-Process $pid; + return 200 $shared_dict_entries; + } + + location /clear/ { + add_header X-Process $pid; + set $shared_dict_entries ""; + return 200; + } + } +} + +EOF + +$t->write_file('index.html', ''); +$t->run(); + +############################################################################### + +like(http_get('/set/?key=fst&value=hello'), qr/200 OK/, 'set value 1'); +like(http_get('/set/?key=snd&value=world'), qr/200 OK/, 'set value 2'); + +ok(check('/?key=fst', qr/X-Value: hello/i), 'check value 1'); +ok(check('/?key=snd', qr/X-Value: world/i), 'check value 2'); + +like(http_get('/set/?key=fst&value=new_value'), qr/200 OK/, 'update value 1'); +ok(check('/?key=fst', qr/X-Value: new_value/i), 'check updated value'); + +like(http_get('/entries/'), qr/^2; ((?:fst = new_value|snd = world); ){2}$/ms, + 'get entries'); + +like(http_delete('/set/?key=snd'), qr/200 OK/, 'delete value 2'); + +unlike(http_get('/?key=snd'), qr/X-Value:/i, 'check deleted value'); + +like(http_get('/entries/'), qr/^1; fst = new_value; $/ms, + 'get entries - deleted'); + +like(http_get('/clear/'), qr/200 OK/, 'clear'); + 
+like(http_get('/entries/'), qr/^0; $/ms, 'get entries - clear'); + +############################################################################### + +sub check { + my ($uri, $like) = @_; + + my $r = http_get($uri); + + return unless ($r =~ $like && $r =~ /X-Process: (\d+)/); + + return 1 if $^O eq 'MSWin32'; # only one active worker process + + my $pid = $1; + + for (1 .. 25) { + $r = http_get($uri); + + return unless ($r =~ $like && $r =~ /X-Process: (\d+)/); + return 1 if $pid != $1; + } +} + +sub http_delete { + my ($url, %extra) = @_; + return http(< &[u8] { + match self.len() { + 0 => &[], + // SAFETY: data for non-empty value must be a valid well-aligned pointer. + len => unsafe { core::slice::from_raw_parts(self.data, len as usize) }, + } + } +} + +impl AsRef<[u8]> for ngx_variable_value_t { + fn as_ref(&self) -> &[u8] { + self.as_bytes() + } +} + /// Returns the error code of the last failed operation (`errno`). #[inline] pub fn ngx_errno() -> ngx_err_t { @@ -163,6 +182,23 @@ pub fn ngx_random() -> core::ffi::c_long { } } +/// Causes the calling thread to relinquish the CPU. +#[inline] +pub fn ngx_sched_yield() { + #[cfg(windows)] + unsafe { + SwitchToThread() + }; + #[cfg(all(not(windows), ngx_feature = "have_sched_yield"))] + unsafe { + sched_yield() + }; + #[cfg(not(any(windows, ngx_feature = "have_sched_yield")))] + unsafe { + usleep(1) + } +} + /// Returns cached timestamp in seconds, updated at the start of the event loop iteration. /// /// Can be stale when accessing from threads, see [ngx_time_update]. diff --git a/nginx-sys/src/rbtree.rs b/nginx-sys/src/rbtree.rs new file mode 100644 index 00000000..88b94274 --- /dev/null +++ b/nginx-sys/src/rbtree.rs @@ -0,0 +1,84 @@ +use core::ptr; + +use crate::bindings::{ngx_rbtree_insert_pt, ngx_rbtree_node_t, ngx_rbtree_t}; + +/// Get a reference to the beginning of a tree element data structure, +/// considering the link field offset in it. 
+/// +/// # Safety +/// +/// `$node` must be a valid pointer to the field `$link` in the struct `$type` +#[macro_export] +macro_rules! ngx_rbtree_data { + ($node:expr, $type:path, $link:ident) => { + $node + .byte_sub(::core::mem::offset_of!($type, $link)) + .cast::<$type>() + }; +} + +/// Initializes the RbTree with specified sentinel and insert function. +/// +/// # Safety +/// +/// All of the pointers passed must be valid. +/// `sentinel` is expected to be valid for the whole lifetime of the `tree`. +/// +pub unsafe fn ngx_rbtree_init( + tree: *mut ngx_rbtree_t, + sentinel: *mut ngx_rbtree_node_t, + insert: ngx_rbtree_insert_pt, +) { + ngx_rbtree_sentinel_init(sentinel); + (*tree).root = sentinel; + (*tree).sentinel = sentinel; + (*tree).insert = insert; +} + +/// Marks the tree node as red. +/// +/// # Safety +/// +/// `node` must be a valid pointer to a [ngx_rbtree_node_t]. +#[inline] +pub unsafe fn ngx_rbt_red(node: *mut ngx_rbtree_node_t) { + (*node).color = 1 +} + +/// Marks the tree node as black. +/// +/// # Safety +/// +/// `node` must be a valid pointer to a [ngx_rbtree_node_t]. +#[inline] +pub unsafe fn ngx_rbt_black(node: *mut ngx_rbtree_node_t) { + (*node).color = 0 +} + +/// Initializes the sentinel node. +/// +/// # Safety +/// +/// `node` must be a valid pointer to a [ngx_rbtree_node_t]. +#[inline] +pub unsafe fn ngx_rbtree_sentinel_init(node: *mut ngx_rbtree_node_t) { + ngx_rbt_black(node) +} + +/// Returns the least (leftmost) node of the tree. +/// +/// # Safety +/// +/// `node` must be a valid pointer to a [ngx_rbtree_node_t]. +/// `sentinel` must be a valid pointer to the sentinel node in the same Red-Black tree. 
+#[inline] +pub unsafe fn ngx_rbtree_min( + mut node: *mut ngx_rbtree_node_t, + sentinel: *mut ngx_rbtree_node_t, +) -> *mut ngx_rbtree_node_t { + while !ptr::addr_eq((*node).left, sentinel) { + node = (*node).left; + } + + node +} diff --git a/src/allocator.rs b/src/allocator.rs new file mode 100644 index 00000000..15fa4d62 --- /dev/null +++ b/src/allocator.rs @@ -0,0 +1,83 @@ +//! The allocator module. +//! +//! The module provides custom memory allocator support traits and utilities based on the unstable +//! [feature(allocator_api)]. +//! +//! Currently implemented as a reexport of parts of the [allocator_api2]. +//! +//! [feature(allocator_api)]: https://github.com/rust-lang/rust/issues/32838 + +use ::core::alloc::Layout; +use ::core::mem; +use ::core::ptr::{self, NonNull}; + +pub use allocator_api2::alloc::{AllocError, Allocator, Global}; + +#[cfg(feature = "alloc")] +pub use allocator_api2::{boxed, collections, vec}; + +/// Explicitly duplicate an object using the specified Allocator. +pub trait TryCloneIn: Sized { + /// Target type, generic over an allocator. + type Target; + + /// Attempts to copy the value using `alloc` as an underlying Allocator. + fn try_clone_in(&self, alloc: A) -> Result, AllocError>; +} + +/// Moves `value` to the memory backed by `alloc` and returns a pointer. +/// +/// This should be similar to `Box::into_raw(Box::try_new_in(value, alloc)?)`, except without +/// `alloc` requirement and intermediate steps. +/// +/// # Note +/// +/// The resulting pointer has no owner. The caller is responsible for destroying `T` and releasing +/// the memory. 
+pub fn allocate(value: T, alloc: &A) -> Result, AllocError> +where + A: Allocator, +{ + let layout = Layout::for_value(&value); + let ptr: NonNull = alloc.allocate(layout)?.cast(); + + // SAFETY: the allocator succeeded and gave us a correctly aligned pointer to an uninitialized + // data + unsafe { ptr.cast::>().as_mut().write(value) }; + + Ok(ptr) +} +/// +/// Creates a [NonNull] that is dangling, but well-aligned for this [Layout]. +/// +/// See also [::core::alloc::Layout::dangling()] +#[inline(always)] +pub(crate) const fn dangling_for_layout(layout: &Layout) -> NonNull { + unsafe { + let ptr = ptr::null_mut::().byte_add(layout.align()); + NonNull::new_unchecked(ptr) + } +} + +#[cfg(feature = "alloc")] +mod impls { + use allocator_api2::boxed::Box; + + use super::*; + + impl TryCloneIn for Box + where + T: TryCloneIn, + OA: Allocator, + { + type Target = Box<::Target, A>; + + fn try_clone_in( + &self, + alloc: A, + ) -> Result, AllocError> { + let x = self.as_ref().try_clone_in(alloc.clone())?; + Box::try_new_in(x, alloc) + } + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index a5b87a98..31768df1 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,10 +1,14 @@ mod buffer; mod pool; +pub mod rbtree; +pub mod slab; mod status; mod string; pub use buffer::*; pub use pool::*; +pub use rbtree::RbTreeMap; +pub use slab::SlabPool; pub use status::*; pub use string::*; diff --git a/src/core/pool.rs b/src/core/pool.rs index c17ec3b3..e8294f55 100644 --- a/src/core/pool.rs +++ b/src/core/pool.rs @@ -1,13 +1,89 @@ +use core::alloc::Layout; use core::ffi::c_void; -use core::{mem, ptr}; +use core::mem; +use core::ptr::{self, NonNull}; +use nginx_sys::{ + ngx_buf_t, ngx_create_temp_buf, ngx_palloc, ngx_pcalloc, ngx_pfree, ngx_pmemalign, ngx_pnalloc, + ngx_pool_cleanup_add, ngx_pool_t, NGX_ALIGNMENT, +}; + +use crate::allocator::{dangling_for_layout, AllocError, Allocator}; use crate::core::buffer::{Buffer, MemoryBuffer, TemporaryBuffer}; -use crate::ffi::*; -/// 
Wrapper struct for an [`ngx_pool_t`] pointer, providing methods for working with memory pools. +/// Non-owning wrapper for an [`ngx_pool_t`] pointer, providing methods for working with memory pools. /// /// See -pub struct Pool(*mut ngx_pool_t); +#[derive(Clone, Debug)] +#[repr(transparent)] +pub struct Pool(NonNull); + +unsafe impl Allocator for Pool { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + // SAFETY: + // * This wrapper should be constructed with a valid pointer to ngx_pool_t. + // * The Pool type is !Send, thus we expect exclusive access for this call. + // * Pointers are considered mutable unless obtained from an immutable reference. + let ptr = if layout.size() == 0 { + // We can guarantee alignment <= NGX_ALIGNMENT for allocations of size 0 made with + // ngx_palloc_small. Any other cases are implementation-defined, and we can't tell which + // one will be used internally. + return Ok(NonNull::slice_from_raw_parts( + dangling_for_layout(&layout), + layout.size(), + )); + } else if layout.align() == 1 { + unsafe { ngx_pnalloc(self.0.as_ptr(), layout.size()) } + } else if layout.align() <= NGX_ALIGNMENT { + unsafe { ngx_palloc(self.0.as_ptr(), layout.size()) } + } else if cfg!(any( + ngx_feature = "have_posix_memalign", + ngx_feature = "have_memalign" + )) { + // ngx_pmemalign is always defined, but does not guarantee the requested alignment + // unless memalign/posix_memalign exists. + unsafe { ngx_pmemalign(self.0.as_ptr(), layout.size(), layout.align()) } + } else { + return Err(AllocError); + }; + + // Verify the alignment of the result + debug_assert_eq!(ptr.align_offset(layout.align()), 0); + + let ptr = NonNull::new(ptr.cast()).ok_or(AllocError)?; + Ok(NonNull::slice_from_raw_parts(ptr, layout.size())) + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + // ngx_pfree is noop for small allocations unless NGX_DEBUG_PALLOC is set. 
+ // + // Note: there should be no cleanup handlers for the allocations made using this API. + // Violating that could result in the following issues: + // - use-after-free on large allocation + // - multiple cleanup handlers attached to a dangling ptr (these are not unique) + if layout.size() > 0 // 0 is dangling ptr + && (layout.size() > self.as_ref().max || layout.align() > NGX_ALIGNMENT) + { + ngx_pfree(self.0.as_ptr(), ptr.as_ptr().cast()); + } + } +} + +impl AsRef for Pool { + #[inline] + fn as_ref(&self) -> &ngx_pool_t { + // SAFETY: this wrapper should be constructed with a valid pointer to ngx_pool_t + unsafe { self.0.as_ref() } + } +} + +impl AsMut for Pool { + #[inline] + fn as_mut(&mut self) -> &mut ngx_pool_t { + // SAFETY: this wrapper should be constructed with a valid pointer to ngx_pool_t + unsafe { self.0.as_mut() } + } +} impl Pool { /// Creates a new `Pool` from an `ngx_pool_t` pointer. @@ -16,8 +92,9 @@ impl Pool { /// The caller must ensure that a valid `ngx_pool_t` pointer is provided, pointing to valid /// memory and non-null. A null argument will cause an assertion failure and panic. pub unsafe fn from_ngx_pool(pool: *mut ngx_pool_t) -> Pool { - assert!(!pool.is_null()); - Pool(pool) + debug_assert!(!pool.is_null()); + debug_assert!(pool.is_aligned()); + Pool(NonNull::new_unchecked(pool)) } /// Creates a buffer of the specified size in the memory pool. @@ -25,7 +102,7 @@ impl Pool { /// Returns `Some(TemporaryBuffer)` if the buffer is successfully created, or `None` if /// allocation fails. pub fn create_buffer(&mut self, size: usize) -> Option { - let buf = unsafe { ngx_create_temp_buf(self.0, size) }; + let buf = unsafe { ngx_create_temp_buf(self.as_mut(), size) }; if buf.is_null() { return None; } @@ -80,7 +157,7 @@ impl Pool { /// # Safety /// This function is marked as unsafe because it involves raw pointer manipulation. 
unsafe fn add_cleanup_for_value(&mut self, value: *mut T) -> Result<(), ()> { - let cln = ngx_pool_cleanup_add(self.0, 0); + let cln = ngx_pool_cleanup_add(self.0.as_ptr(), 0); if cln.is_null() { return Err(()); } @@ -95,7 +172,7 @@ impl Pool { /// /// Returns a raw pointer to the allocated memory. pub fn alloc(&mut self, size: usize) -> *mut c_void { - unsafe { ngx_palloc(self.0, size) } + unsafe { ngx_palloc(self.0.as_ptr(), size) } } /// Allocates memory for a type from the pool. @@ -111,7 +188,7 @@ impl Pool { /// /// Returns a raw pointer to the allocated memory. pub fn calloc(&mut self, size: usize) -> *mut c_void { - unsafe { ngx_pcalloc(self.0, size) } + unsafe { ngx_pcalloc(self.0.as_ptr(), size) } } /// Allocates zeroed memory for a type from the pool. @@ -126,7 +203,7 @@ impl Pool { /// /// Returns a raw pointer to the allocated memory. pub fn alloc_unaligned(&mut self, size: usize) -> *mut c_void { - unsafe { ngx_pnalloc(self.0, size) } + unsafe { ngx_pnalloc(self.0.as_ptr(), size) } } /// Allocates unaligned memory for a type from the pool. diff --git a/src/core/rbtree.rs b/src/core/rbtree.rs new file mode 100644 index 00000000..9d824f73 --- /dev/null +++ b/src/core/rbtree.rs @@ -0,0 +1,379 @@ +//! Types and utilities for working with [ngx_rbtree_t]. +//! +//! This module provides both the tools for interaction with the existing `ngx_rbtree_t` objects in +//! the NGINX, and useful high-level types built on top of the `ngx_rbtree_t`. +//! +//! See . + +use core::alloc::Layout; +use core::cmp::Ordering; +use core::hash::{self, BuildHasher, Hash}; +use core::marker::PhantomData; +use core::ptr::{self, NonNull}; +use core::{borrow, mem}; + +use nginx_sys::{ + ngx_rbt_red, ngx_rbtree_data, ngx_rbtree_delete, ngx_rbtree_init, ngx_rbtree_insert, + ngx_rbtree_key_t, ngx_rbtree_min, ngx_rbtree_next, ngx_rbtree_node_t, ngx_rbtree_t, +}; + +use crate::allocator::{self, AllocError, Allocator}; + +/// Raw iterator over the `ngx_rbtree_t` nodes. 
+/// +/// This iterator type can be used to access elements of any correctly initialized `ngx_rbtree_t` +/// instance, including those already embedded in the nginx structures. The iterator stores pointer +/// to the next node and thus remains valid and usable even if the last returned item is removed +/// from the tree. +pub struct NgxRbTreeIter<'a> { + tree: NonNull, + node: *mut ngx_rbtree_node_t, + _lifetime: PhantomData<&'a ()>, +} + +impl<'a> NgxRbTreeIter<'a> { + /// Creates an iterator for the `ngx_rbtree_t`. + /// + /// # Safety + /// + /// The tree must outlive the iterator. + pub unsafe fn new(tree: NonNull) -> Self { + let t = unsafe { tree.as_ref() }; + let node = if ptr::addr_eq(t.root, t.sentinel) { + // empty tree + ptr::null_mut() + } else { + unsafe { ngx_rbtree_min(t.root, t.sentinel) } + }; + + Self { + tree, + node, + _lifetime: PhantomData, + } + } +} + +impl<'a> Iterator for NgxRbTreeIter<'a> { + type Item = NonNull; + + fn next(&mut self) -> Option { + let item = NonNull::new(self.node)?; + // ngx_rbtree_next does not mutate the tree + self.node = unsafe { ngx_rbtree_next(self.tree.as_mut(), self.node) }; + Some(item) + } +} + +#[allow(deprecated)] +type BuildMapHasher = core::hash::BuildHasherDefault; + +/// A map type based on the `ngx_rbtree_t`. +/// +/// This map implementation owns the stored keys and values and ensures that the data is dropped. +/// The order of the elements is an undocumented implementation detail. +/// +/// This is a `ngx`-specific high-level type with no direct counterpart in the NGINX code. +#[derive(Debug)] +pub struct RbTreeMap +where + A: Allocator, +{ + tree: ngx_rbtree_t, + sentinel: NonNull, + alloc: A, + _kv_type: PhantomData<(K, V)>, +} + +/// Entry type for the [RbTreeMap]. +/// +/// The struct is used from the Rust code only and thus does not need to be compatible with C. 
+struct MapEntry { + node: ngx_rbtree_node_t, + key: K, + value: V, +} + +impl MapEntry +where + K: Hash, +{ + fn new(key: K, value: V) -> Self { + let mut node: ngx_rbtree_node_t = unsafe { mem::zeroed() }; + node.key = BuildMapHasher::default().hash_one(&key) as ngx_rbtree_key_t; + + Self { node, key, value } + } + + fn into_kv(self) -> (K, V) { + (self.key, self.value) + } +} + +/// An iterator for the [RbTreeMap]. +pub struct Iter<'a, K: 'a, V: 'a>(NgxRbTreeIter<'a>, PhantomData<(K, V)>); + +impl<'a, K: 'a, V: 'a> Iter<'a, K, V> { + /// Creates an iterator for the [RbTreeMap]. + pub fn new(tree: &'a RbTreeMap) -> Self { + // msrv(1.89.0): NonNull::from_ref() + let rbtree = NonNull::from(&tree.tree); + // SAFETY: Iter borrows from the tree, ensuring that the tree would outlive it. + Self(unsafe { NgxRbTreeIter::new(rbtree) }, Default::default()) + } +} + +impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { + type Item = (&'a K, &'a V); + + fn next(&mut self) -> Option { + let item = self.0.next()?; + let item = unsafe { ngx_rbtree_data!(item, MapEntry, node).as_ref() }; + Some((&item.key, &item.value)) + } +} + +/// A mutable iterator for the [RbTreeMap]. +pub struct IterMut<'a, K: 'a, V: 'a>(NgxRbTreeIter<'a>, PhantomData<(K, V)>); + +impl<'a, K: 'a, V: 'a> IterMut<'a, K, V> { + /// Creates an iterator for the [RbTreeMap]. + pub fn new(tree: &'a mut RbTreeMap) -> Self { + // msrv(1.89.0): NonNull::from_mut() + let rbtree = NonNull::from(&mut tree.tree); + // SAFETY: IterMut borrows from the tree, ensuring that the tree would outlive it. 
+ Self(unsafe { NgxRbTreeIter::new(rbtree) }, Default::default()) + } +} + +impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + fn next(&mut self) -> Option { + let item = self.0.next()?; + let item = unsafe { ngx_rbtree_data!(item, MapEntry, node).as_mut() }; + Some((&item.key, &mut item.value)) + } +} + +impl RbTreeMap +where + A: Allocator, +{ + /// Returns a reference to the underlying allocator. + pub fn allocator(&self) -> &A { + &self.alloc + } + + /// Clears the tree, removing all elements. + pub fn clear(&mut self) { + // SAFETY: the iter lives until the end of the scope + let iter = unsafe { NgxRbTreeIter::new(NonNull::from(&self.tree)) }; + let layout = Layout::new::>(); + + for node in iter { + unsafe { + let mut data = ngx_rbtree_data!(node, MapEntry, node); + + ngx_rbtree_delete(&mut self.tree, &mut data.as_mut().node); + ptr::drop_in_place(data.as_mut()); + self.allocator().deallocate(data.cast(), layout) + } + } + } + + /// Returns true if the tree contains no entries. + pub fn is_empty(&self) -> bool { + ptr::addr_eq(self.tree.root, self.tree.sentinel) + } + + /// Returns an iterator over the entries of the tree. + #[inline] + pub fn iter(&self) -> Iter<'_, K, V> { + Iter::new(self) + } + + /// Returns a mutable iterator over the entries of the tree. + #[inline] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + IterMut::new(self) + } +} + +impl RbTreeMap +where + A: Allocator, + K: Hash + Ord, +{ + /// Attempts to create and initialize a new RbTreeMap with specified allocator. + pub fn try_new_in(alloc: A) -> Result { + let layout = Layout::new::(); + let sentinel: NonNull = alloc.allocate_zeroed(layout)?.cast(); + + let mut this = RbTreeMap { + tree: unsafe { mem::zeroed() }, + sentinel, + alloc, + _kv_type: PhantomData, + }; + + unsafe { ngx_rbtree_init(&mut this.tree, this.sentinel.as_ptr(), Some(Self::insert)) }; + + Ok(this) + } + + /// Returns a reference to the value corresponding to the key. 
+ pub fn get(&self, key: &Q) -> Option<&V> + where + K: borrow::Borrow, + Q: Hash + Ord + ?Sized, + { + self.lookup(key).map(|x| unsafe { &x.as_ref().value }) + } + + /// Returns a mutable reference to the value corresponding to the key. + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + K: borrow::Borrow, + Q: Hash + Ord + ?Sized, + { + self.lookup(key) + .map(|mut x| unsafe { &mut x.as_mut().value }) + } + + /// Removes a key from the tree, returning the value at the key if the key was previously in the + /// tree. + pub fn remove(&mut self, key: &Q) -> Option + where + K: borrow::Borrow, + Q: Hash + Ord + ?Sized, + { + self.remove_entry(key).map(|(_, v)| v) + } + + /// Removes a key from the tree, returning the stored key and value if the key was previously in + /// the tree. + pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + K: borrow::Borrow, + Q: Hash + Ord + ?Sized, + { + let mut node = self.lookup(key)?; + unsafe { + ngx_rbtree_delete(&mut self.tree, &mut node.as_mut().node); + let layout = Layout::for_value(node.as_ref()); + // SAFETY: we make a bitwise copy of the node and dispose of the original value without + // dropping it. + let copy = node.as_ptr().read(); + self.allocator().deallocate(node.cast(), layout); + Some(copy.into_kv()) + } + } + + /// Attempts to insert a new element into the tree. 
+ pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, AllocError> { + let mut node = if let Some(mut node) = self.lookup(&key) { + unsafe { node.as_mut().value = value }; + node + } else { + let node = MapEntry::new(key, value); + let mut node = allocator::allocate(node, self.allocator())?; + unsafe { ngx_rbtree_insert(&mut self.tree, &mut node.as_mut().node) }; + node + }; + + Ok(unsafe { &mut node.as_mut().value }) + } + + extern "C" fn insert( + mut temp: *mut ngx_rbtree_node_t, + node: *mut ngx_rbtree_node_t, + sentinel: *mut ngx_rbtree_node_t, + ) { + let n = unsafe { &mut *ngx_rbtree_data!(node, MapEntry, node) }; + + loop { + let t = unsafe { &mut *ngx_rbtree_data!(temp, MapEntry, node) }; + let p = match Ord::cmp(&n.node.key, &t.node.key) { + Ordering::Less => &mut t.node.left, + Ordering::Greater => &mut t.node.right, + Ordering::Equal => match Ord::cmp(&n.key, &t.key) { + Ordering::Less => &mut t.node.left, + Ordering::Greater => &mut t.node.right, + // should be handled in try_insert + Ordering::Equal => &mut t.node.right, + }, + }; + + if ptr::addr_eq(*p, sentinel) { + *p = node; + break; + } + + temp = *p; + } + + n.node.parent = temp; + n.node.left = sentinel; + n.node.right = sentinel; + unsafe { ngx_rbt_red(node) }; + } + + fn lookup(&self, key: &Q) -> Option>> + where + K: borrow::Borrow, + Q: Hash + Ord + ?Sized, + { + let mut node = self.tree.root; + let hash = BuildMapHasher::default().hash_one(key) as ngx_rbtree_key_t; + + while !ptr::addr_eq(node, self.tree.sentinel) { + let n = unsafe { NonNull::new_unchecked(ngx_rbtree_data!(node, MapEntry, node)) }; + let nr = unsafe { n.as_ref() }; + + node = match Ord::cmp(&hash, &nr.node.key) { + Ordering::Less => nr.node.left, + Ordering::Greater => nr.node.right, + Ordering::Equal => match Ord::cmp(key, nr.key.borrow()) { + Ordering::Less => nr.node.left, + Ordering::Greater => nr.node.right, + Ordering::Equal => return Some(n), + }, + } + } + + None + } +} + +impl Drop for RbTreeMap 
+where + A: Allocator, +{ + fn drop(&mut self) { + self.clear(); + + unsafe { + self.allocator().deallocate( + self.sentinel.cast(), + Layout::for_value(self.sentinel.as_ref()), + ) + }; + } +} + +unsafe impl Send for RbTreeMap +where + A: Send + Allocator, + K: Send, + V: Send, +{ +} + +unsafe impl Sync for RbTreeMap +where + A: Sync + Allocator, + K: Sync, + V: Sync, +{ +} diff --git a/src/core/slab.rs b/src/core/slab.rs new file mode 100644 index 00000000..3dadf0d8 --- /dev/null +++ b/src/core/slab.rs @@ -0,0 +1,154 @@ +//! Wrapper for the nginx slab pool allocator. +//! +//! See . +use core::alloc::Layout; +use core::cmp; +use core::ptr::{self, NonNull}; + +use nginx_sys::{ + ngx_shm_zone_t, ngx_shmtx_lock, ngx_shmtx_unlock, ngx_slab_alloc_locked, ngx_slab_free_locked, + ngx_slab_pool_t, +}; + +use crate::allocator::{dangling_for_layout, AllocError, Allocator}; + +/// Non-owning wrapper for an [`ngx_slab_pool_t`] pointer, providing methods for working with +/// shared memory slab pools. +/// +/// See . 
+#[derive(Clone, Debug)]
+pub struct SlabPool(NonNull<ngx_slab_pool_t>);
+
+unsafe impl Send for SlabPool {}
+unsafe impl Sync for SlabPool {}
+
+unsafe impl Allocator for SlabPool {
+    #[inline]
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        self.lock().allocate(layout)
+    }
+
+    #[inline]
+    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+        self.lock().deallocate(ptr, layout)
+    }
+
+    #[inline]
+    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        self.lock().allocate_zeroed(layout)
+    }
+
+    #[inline]
+    unsafe fn grow(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        self.lock().grow(ptr, old_layout, new_layout)
+    }
+
+    #[inline]
+    unsafe fn grow_zeroed(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        self.lock().grow_zeroed(ptr, old_layout, new_layout)
+    }
+
+    #[inline]
+    unsafe fn shrink(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        self.lock().shrink(ptr, old_layout, new_layout)
+    }
+}
+
+impl AsRef<ngx_slab_pool_t> for SlabPool {
+    #[inline]
+    fn as_ref(&self) -> &ngx_slab_pool_t {
+        // SAFETY: this wrapper should be constructed with a valid pointer to ngx_slab_pool_t
+        unsafe { self.0.as_ref() }
+    }
+}
+
+impl AsMut<ngx_slab_pool_t> for SlabPool {
+    #[inline]
+    fn as_mut(&mut self) -> &mut ngx_slab_pool_t {
+        // SAFETY: this wrapper should be constructed with a valid pointer to ngx_slab_pool_t
+        unsafe { self.0.as_mut() }
+    }
+}
+
+impl SlabPool {
+    /// Creates a new `SlabPool` from an initialized shared zone.
+    ///
+    /// # Safety
+    ///
+    /// Shared zones are initialized and safe to use:
+    /// * between the zone init callback and configuration reload in the master process
+    /// * during the whole lifetime of a worker process.
+    ///
+    /// After the configuration reload (notably, in the cycle pool cleanup handlers), zone addresses
+    /// in the old cycle may become unmapped.
+ pub unsafe fn from_shm_zone(shm_zone: &ngx_shm_zone_t) -> Option { + let ptr = NonNull::new(shm_zone.shm.addr)?.cast(); + Some(Self(ptr)) + } + + /// Locks the slab pool mutex. + #[inline] + pub fn lock(&self) -> LockedSlabPool { + let shpool = self.0.as_ptr(); + unsafe { ngx_shmtx_lock(ptr::addr_of_mut!((*shpool).mutex)) }; + LockedSlabPool(self.0) + } +} + +/// Wrapper for a locked [`ngx_slab_pool_t`] pointer. +pub struct LockedSlabPool(NonNull); + +unsafe impl Allocator for LockedSlabPool { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + if layout.size() == 0 { + return Ok(NonNull::slice_from_raw_parts( + dangling_for_layout(&layout), + layout.size(), + )); + } + + // Small slab allocations (size <= ngx_pagesize / 2) are always aligned to the size rounded + // up to the nearest power of 2. + // If the requested alignment exceeds size, we can guarantee the alignment by allocating + // `align()` bytes. + let size = cmp::max(layout.size(), layout.align()); + + let ptr = unsafe { ngx_slab_alloc_locked(self.0.as_ptr(), size) }; + let ptr = NonNull::new(ptr.cast()).ok_or(AllocError)?; + + if ptr.align_offset(layout.align()) != 0 { + unsafe { self.deallocate(ptr, layout) }; + return Err(AllocError); + } + + Ok(NonNull::slice_from_raw_parts(ptr, layout.size())) + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + if layout.size() != 0 { + ngx_slab_free_locked(self.0.as_ptr(), ptr.as_ptr().cast()) + } + } +} + +impl Drop for LockedSlabPool { + fn drop(&mut self) { + let shpool = unsafe { self.0.as_mut() }; + unsafe { ngx_shmtx_unlock(&mut shpool.mutex) } + } +} diff --git a/src/core/string.rs b/src/core/string.rs index 1db73f8a..48bf042f 100644 --- a/src/core/string.rs +++ b/src/core/string.rs @@ -23,6 +23,9 @@ macro_rules! ngx_string { }}; } +#[cfg(feature = "alloc")] +pub use self::_alloc::NgxString; + /// Representation of a borrowed [Nginx string]. 
/// /// [Nginx string]: https://nginx.org/en/docs/dev/development_guide.html#string_overview @@ -196,8 +199,344 @@ impl_partial_ord_eq_from!(NgxStr, &'a [u8; N]; const N: usize); impl_partial_ord_eq_from!(NgxStr, &'a str); #[cfg(feature = "alloc")] -mod _alloc_impls { +mod _alloc { + use core::borrow::Borrow; + use core::hash; + use core::ops; + use core::ptr; + use super::*; + + use crate::allocator::collections::TryReserveError; + use crate::allocator::vec::Vec; + use crate::allocator::{self, Allocator}; + + /// Owned byte string type with Allocator support. + /// + /// Inspired by [bstr] and unstable [feature(bstr)], with two important differences: + /// - Allocator always have to be specified, + /// - any allocating methods are failible and require explicit handling of the result. + /// + /// [bstr]: https://docs.rs/bstr/latest/bstr/ + /// [feature(bstr)]: https://github.com/rust-lang/rust/issues/134915 + #[derive(Clone)] + #[repr(transparent)] + pub struct NgxString(Vec) + where + A: Allocator + Clone; + + impl NgxString + where + A: Allocator + Clone, + { + /// Constructs a new, empty `NgxString`. + /// + /// No allocations will be made until data is added to the string. + pub fn new_in(alloc: A) -> Self { + Self(Vec::new_in(alloc)) + } + + /// Tries to construct a new `NgxString` from a byte slice. + #[inline] + pub fn try_from_bytes_in( + bytes: impl AsRef<[u8]>, + alloc: A, + ) -> Result { + let mut this = Self::new_in(alloc); + this.try_reserve_exact(bytes.as_ref().len())?; + this.0.extend_from_slice(bytes.as_ref()); + Ok(this) + } + + /// Returns a reference to the underlying allocator + #[inline] + pub fn allocator(&self) -> &A { + self.0.allocator() + } + + /// Returns this `NgxString`'s capacity, in bytes. + #[inline] + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// Returns `true` if this `NgxString` has a length of zero, and `false` otherwise. 
+ #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Return this `NgxString`'s length, in bytes. + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Appends bytes if there is sufficient spare capacity. + /// + /// Returns the number of remaining bytes on overflow. + #[inline] + pub fn append_within_capacity(&mut self, other: impl AsRef<[u8]>) -> Result<(), usize> { + let other = other.as_ref(); + if self.0.len() == self.0.capacity() { + return Err(other.len()); + } + + let n = cmp::min(self.0.capacity() - self.0.len(), other.len()); + unsafe { + // SAFETY: + // - self.0 has at least n writable bytes allocated past self.0.len(), + // - other has at least n bytes available for reading. + // - self.0 internal buffer will be initialized until len + n after this operation + // - other is not borrowed from `self` + let p = self.0.as_mut_ptr().add(self.0.len()); + ptr::copy_nonoverlapping(other.as_ptr(), p, n); + self.0.set_len(self.0.len() + n); + } + + match other.len() - n { + 0 => Ok(()), + x => Err(x), + } + } + + /// Tries to append the bytes to the `NgxString`. + #[inline] + pub fn try_append(&mut self, other: impl AsRef<[u8]>) -> Result<(), TryReserveError> { + let other = other.as_ref(); + self.0.try_reserve_exact(other.len())?; + self.0.extend_from_slice(other); + Ok(()) + } + + /// Tries to reserve capacity for at least `additional` more bytes. + #[inline] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.0.try_reserve(additional) + } + + /// Tries to reserve the minimum capacity for at least `additional` more bytes. 
+ #[inline] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.0.try_reserve_exact(additional) + } + + #[inline] + pub(crate) fn as_bytes(&self) -> &[u8] { + &self.0 + } + + #[inline] + pub(crate) fn as_bytes_mut(&mut self) -> &mut [u8] { + &mut self.0 + } + + #[inline] + pub(crate) fn as_ngx_str(&self) -> &NgxStr { + NgxStr::from_bytes(self.0.as_slice()) + } + + #[inline] + pub(crate) fn as_ngx_str_mut(&mut self) -> &mut NgxStr { + NgxStr::from_bytes_mut(self.0.as_mut_slice()) + } + + /// Creates NgxString directly from a pointer, a capacity, a length and an allocator. + /// + /// # Safety + /// + /// See [Vec::from_raw_parts_in] + #[inline] + pub unsafe fn from_raw_parts( + ptr: *mut u8, + length: usize, + capacity: usize, + alloc: A, + ) -> Self { + Self(Vec::from_raw_parts_in(ptr, length, capacity, alloc)) + } + + /// Splits the NgxString into its raw components. + /// + /// The caller becomes responsible for the memory previously managed by this NgxString. 
+ #[inline] + pub fn into_raw_parts(self) -> (*mut u8, usize, usize, A) { + self.0.into_raw_parts_with_alloc() + } + } + + impl AsRef for NgxString + where + A: Allocator + Clone, + { + fn as_ref(&self) -> &NgxStr { + self.as_ngx_str() + } + } + + impl AsMut for NgxString + where + A: Allocator + Clone, + { + fn as_mut(&mut self) -> &mut NgxStr { + self.as_ngx_str_mut() + } + } + + impl AsRef<[u8]> for NgxString + where + A: Allocator + Clone, + { + #[inline] + fn as_ref(&self) -> &[u8] { + self.as_bytes() + } + } + + impl AsMut<[u8]> for NgxString + where + A: Allocator + Clone, + { + #[inline] + fn as_mut(&mut self) -> &mut [u8] { + self.as_bytes_mut() + } + } + + impl Borrow for NgxString + where + A: Allocator + Clone, + { + fn borrow(&self) -> &NgxStr { + self.as_ngx_str() + } + } + + impl Borrow<[u8]> for NgxString + where + A: Allocator + Clone, + { + fn borrow(&self) -> &[u8] { + self.0.as_slice() + } + } + + impl fmt::Debug for NgxString + where + A: Allocator + Clone, + { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // XXX: Use debug_tuple() and feature(debug_closure_helpers) once it's stabilized + f.write_str("NgxString(")?; + nginx_sys::detail::debug_bytes(f, &self.0)?; + f.write_str(")") + } + } + + impl ops::Deref for NgxString + where + A: Allocator + Clone, + { + type Target = NgxStr; + + fn deref(&self) -> &Self::Target { + self.as_ngx_str() + } + } + + impl ops::DerefMut for NgxString + where + A: Allocator + Clone, + { + fn deref_mut(&mut self) -> &mut Self::Target { + self.as_ngx_str_mut() + } + } + + impl fmt::Display for NgxString + where + A: Allocator + Clone, + { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_ngx_str(), f) + } + } + + impl hash::Hash for NgxString + where + A: Allocator + Clone, + { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } + } + + // `NgxString`'s with different allocators should be comparable + + impl PartialEq> for NgxString + where 
+ A1: Allocator + Clone, + A2: Allocator + Clone, + { + fn eq(&self, other: &NgxString) -> bool { + PartialEq::eq(self.as_bytes(), other.as_bytes()) + } + } + + impl Eq for NgxString where A: Allocator + Clone {} + + impl PartialOrd> for NgxString + where + A1: Allocator + Clone, + A2: Allocator + Clone, + { + fn partial_cmp(&self, other: &NgxString) -> Option { + Some(Ord::cmp(self.as_bytes(), other.as_bytes())) + } + } + + impl Ord for NgxString + where + A: Allocator + Clone, + { + fn cmp(&self, other: &Self) -> cmp::Ordering { + Ord::cmp(self.as_bytes(), other.as_bytes()) + } + } + + impl allocator::TryCloneIn for NgxString { + type Target = NgxString; + + fn try_clone_in( + &self, + alloc: A, + ) -> Result, allocator::AllocError> { + NgxString::try_from_bytes_in(self.as_bytes(), alloc).map_err(|_| allocator::AllocError) + } + } + + impl fmt::Write for NgxString + where + A: Allocator + Clone, + { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.append_within_capacity(s).map_err(|_| fmt::Error) + } + } + + // Implement byte comparisons directly, leave the rest to Deref. 
+ + impl_partial_eq!(NgxString, &'a [u8]; A: Allocator + Clone); + impl_partial_eq!(NgxString, &'a [u8; N]; A: Allocator + Clone, const N: usize); + impl_partial_eq!(NgxString, &'a NgxStr; A: Allocator + Clone); + impl_partial_eq!(NgxString, ngx_str_t; A: Allocator + Clone); + + impl_partial_ord!(NgxString, &'a [u8]; A: Allocator + Clone); + impl_partial_ord!(NgxString, &'a [u8; N]; A: Allocator + Clone, const N: usize); + impl_partial_ord!(NgxString, &'a NgxStr; A: Allocator + Clone); + impl_partial_ord!(NgxString, ngx_str_t; A: Allocator + Clone); + impl_partial_eq!(NgxStr, String); impl_partial_eq!(&'a NgxStr, String); impl_partial_ord!(NgxStr, String); @@ -214,7 +553,7 @@ mod tests { use super::*; #[test] - fn test_comparisons() { + fn test_str_comparisons() { let string = "test".to_string(); let ngx_string = ngx_str_t { data: string.as_ptr().cast_mut(), @@ -239,6 +578,59 @@ mod tests { assert_eq!(ns, "test"); } + #[test] + #[cfg(feature = "alloc")] + fn test_string_comparisons() { + use crate::allocator::Global; + + let string = "test".to_string(); + let ngx_string = ngx_str_t { + data: string.as_ptr().cast_mut(), + len: string.len(), + }; + let borrowed: &NgxStr = string.as_bytes().into(); + let owned = NgxString::try_from_bytes_in(&string, Global).unwrap(); + + assert_eq!(string.as_bytes(), owned); + assert_eq!(ngx_string, owned); + assert_eq!(borrowed, owned); + assert_eq!(b"test", owned); + assert_eq!(owned, string.as_bytes()); + assert_eq!(owned, ngx_string); + assert_eq!(owned, borrowed); + assert_eq!(owned, b"test"); + + // String comparisons via Deref + assert_eq!(string, *owned); + assert_eq!(string.as_str(), *owned); + assert_eq!("test", *owned); + assert_eq!(*owned, string); + assert_eq!(*owned, string.as_str()); + assert_eq!(*owned, "test"); + } + + #[test] + #[cfg(feature = "alloc")] + fn test_string_write() { + use core::fmt::Write; + + use crate::allocator::Global; + + let h = NgxStr::from_bytes(b"Hello"); + let w = NgxStr::from_bytes(b"world"); 
+ + let mut s = NgxString::new_in(Global); + s.try_reserve(16).expect("reserve"); + + // Remember ptr and len of internal buffer + let saved = (s.as_bytes().as_ptr(), s.capacity()); + + write!(s, "{h} {w}!").expect("write"); + + assert_eq!(s, b"Hello world!"); + assert_eq!((s.as_bytes().as_ptr(), s.capacity()), saved); + } + #[test] fn test_lifetimes() { let a: &NgxStr = "Hello World!".into(); diff --git a/src/lib.rs b/src/lib.rs index 32a16f41..adecb784 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,6 +40,8 @@ extern crate alloc; #[cfg(feature = "std")] extern crate std; +pub mod allocator; + /// The core module. /// /// This module provides fundamental utilities needed to interface with many NGINX primitives. @@ -63,6 +65,8 @@ pub mod http; /// This module provides an interface into the NGINX logger framework. pub mod log; +pub mod sync; + /// Define modules exported by this library. /// /// These are normally generated by the Nginx module system, but need to be diff --git a/src/sync.rs b/src/sync.rs new file mode 100644 index 00000000..6bdbd6f2 --- /dev/null +++ b/src/sync.rs @@ -0,0 +1,123 @@ +//! Synchronization primitives over shared memory. +//! +//! This module provides an alternative implementation for the `ngx_atomic_t` type, +//! `ngx_atomic_*`/`ngx_rwlock_*` family of functions and related usage patterns from nginx. +//! +//! `` contains a wide variety of implementation variants for different platforms and +//! build configurations. It's not feasible to properly expose all of these to the Rust code, and we +//! are not going to. The implementation here uses similar logic on the foundation of the +//! [core::sync::atomic] types and is intentionally _not interoperable_ with the nginx atomics. +//! Thus, it's only suitable for use for new shared memory structures instead of, for example, +//! interacting with the upstream zones. +//! +//! One potential pitfall here is that atomics in Rust are specified in terms of threads, and we use +//! 
the types in this module for interprocess synchronization. This should not be an issue though,
+//! as Rust refers to the C/C++11 memory model for atomics, and there's a following note in
+//! [atomics.lockfree]:
+//!
+//! > [Note: Operations that are lock-free should also be address-free. That is, atomic operations
+//! > on the same memory location via two different addresses will communicate atomically. The
+//! > implementation should not depend on any per-process state. This restriction enables
+//! > communication via memory that is mapped into a process more than once and by memory that is
+//! > shared between two processes. — end note]
+//!
+//! In practice, this recommendation is applied in all the implementations that matter to us.
+use core::sync::atomic::{self, Ordering};
+
+use nginx_sys::ngx_sched_yield;
+
+const NGX_RWLOCK_SPIN: usize = 2048;
+const NGX_RWLOCK_WLOCK: usize = usize::MAX;
+
+type NgxAtomic = atomic::AtomicUsize;
+
+/// Raw lock type.
+///
+pub struct RawSpinlock(NgxAtomic);
+
+/// Reader-writer lock over an atomic variable, based on the nginx rwlock implementation.
+pub type RwLock<T> = lock_api::RwLock<RawSpinlock, T>;
+
+/// RAII structure used to release the shared read access of a lock when dropped.
+pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawSpinlock, T>;
+
+/// RAII structure used to release the exclusive write access of a lock when dropped.
+pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawSpinlock, T>; + +unsafe impl lock_api::RawRwLock for RawSpinlock { + // Only used for initialization, will not be mutated + #[allow(clippy::declare_interior_mutable_const)] + const INIT: RawSpinlock = RawSpinlock(NgxAtomic::new(0)); + + type GuardMarker = lock_api::GuardNoSend; + + fn lock_shared(&self) { + loop { + if self.try_lock_shared() { + return; + } + + if unsafe { nginx_sys::ngx_ncpu > 1 } { + for n in 0..NGX_RWLOCK_SPIN { + for _ in 0..n { + core::hint::spin_loop() + } + + if self.try_lock_shared() { + return; + } + } + } + + ngx_sched_yield() + } + } + + fn try_lock_shared(&self) -> bool { + let value = self.0.load(Ordering::Acquire); + + if value == NGX_RWLOCK_WLOCK { + return false; + } + + self.0 + .compare_exchange(value, value + 1, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + } + + unsafe fn unlock_shared(&self) { + self.0.fetch_sub(1, Ordering::Release); + } + + fn lock_exclusive(&self) { + loop { + if self.try_lock_exclusive() { + return; + } + + if unsafe { nginx_sys::ngx_ncpu > 1 } { + for n in 0..NGX_RWLOCK_SPIN { + for _ in 0..n { + core::hint::spin_loop() + } + + if self.try_lock_exclusive() { + return; + } + } + } + + ngx_sched_yield() + } + } + + fn try_lock_exclusive(&self) -> bool { + self.0 + .compare_exchange(0, NGX_RWLOCK_WLOCK, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + } + + unsafe fn unlock_exclusive(&self) { + self.0.store(0, Ordering::Release) + } +}