diff --git a/default.nix b/default.nix index 55748a9a482..04317d75165 100644 --- a/default.nix +++ b/default.nix @@ -167,7 +167,7 @@ rec { name = "motoko-rts-deps"; src = subpath ./rts; sourceRoot = "rts/motoko-rts-tests"; - sha256 = "13pcsik4nq9w7dwi3srhn1ddm30zi5japwn5q50vxknaj0fixi14"; + sha256 = "1vr9mvjrddjv7xga6hhzq39x8qzdqsnhwic76apv7ksfkh0psfx2"; copyLockfile = true; }; in diff --git a/rts/motoko-rts-macros/Cargo.lock b/rts/motoko-rts-macros/Cargo.lock new file mode 100644 index 00000000000..3786e0d7395 --- /dev/null +++ b/rts/motoko-rts-macros/Cargo.lock @@ -0,0 +1,45 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "motoko-rts-macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "syn" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" diff --git a/rts/motoko-rts-macros/Cargo.toml b/rts/motoko-rts-macros/Cargo.toml new file mode 100644 index 00000000000..219c1220ccc --- /dev/null +++ b/rts/motoko-rts-macros/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "motoko-rts-macros" +version = "0.1.0" +authors = ["dfinity "] +edition = "2018" + +[lib] +proc_macro = true + +[dependencies] +proc-macro2 = "1.0.27" +syn = { version = "1.0.73", features = ["full"] } +quote = "1.0.9" diff --git a/rts/motoko-rts-macros/src/lib.rs b/rts/motoko-rts-macros/src/lib.rs new file mode 100644 index 00000000000..49ac0e18d3e --- /dev/null +++ b/rts/motoko-rts-macros/src/lib.rs @@ -0,0 +1,138 @@ +use proc_macro::TokenStream; +use quote::quote; + +/// This macro is used to generate monomorphic versions of allocating RTS functions, to allow +/// calling such functions in generated code. Example: +/// +/// ``` +/// #[ic_mem_fn] +/// pub unsafe fn text_concat(mem: &mut M, s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr { +/// ... +/// } +/// ``` +/// +/// This functions has a `Memory` parameter to be able to allocate on heap. To compile this +/// function to use in generated code we need a monomorphic version, without a `Memory` parameter. +/// This macro generates the monomorphic version. Macro expansion looks like this: +/// +/// ``` +/// // Original function generated directly, to allow use from the test suite +/// pub unsafe fn text_concat(mem: &mut M, s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr { +/// ... 
+/// } +/// +/// // New, monomorphic version +/// #[cfg(feature = "ic")] +/// #[export_name = "text_concat"] +/// unsafe extern "C" fn ic_text_concat(s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr { +/// text_concat(crate::memory::ic::IcMemory, s1, s2) +/// } +/// ``` +/// +/// Reminder: `ic` feature is used when compiling the RTS to be linked with generated code. It's +/// disabled when compiling for testing. +/// +/// `ic_mem_fn` takes an optional `ic_only` attribute which adds a `cfg(feature = "ic")` guard to +/// the original function: +/// +/// ``` +/// #[ic_mem_fn(ic_only)] +/// fn my_function(mem: &mut M) { ... } +/// ``` +/// +/// Expansion: +/// +/// ``` +/// #[cfg(feature = "ic")] +/// fn my_function(mem: &mut M) { ... } +/// +/// #[cfg(feature = "ic")] +/// #[export_name = "text_concat"] +/// unsafe extern "C" fn ic_my_function() { +/// my_function(crate::memory::ic::IcMemory) +/// } +/// ``` +/// +/// This is useful when the function won't be used when compiling the RTS for testing. +#[proc_macro_attribute] +pub fn ic_mem_fn(attr: TokenStream, input: TokenStream) -> TokenStream { + let ic_only = if attr.is_empty() { + false + } else if attr.to_string() == "ic_only" { + true + } else { + panic!("Unknown attribute: {:?}", attr.to_string()); + }; + + let fun = syn::parse_macro_input!(input as syn::ItemFn); + let fun_sig = &fun.sig; + + // Some sanity checks + assert!(fun_sig.asyncness.is_none(), "IC functions cannot be async"); + assert_eq!( + fun_sig.generics.params.len(), + 1, + "IC memory functions should have one generic argument for the memory implementation" + ); + assert!( + fun_sig.abi.is_none(), + "Functions with #[ic_fn] attribute cannot have ABI annotations" + ); + assert!( + fun_sig.variadic.is_none(), + "IC functions cannot have variadic arguments" + ); + + let fn_ident = &fun_sig.ident; + let fn_wrapper_ident = syn::Ident::new(&format!("ic_{}", fn_ident), fn_ident.span()); + let fn_name = fn_ident.to_string(); + let wrapper_ret = fun_sig.output.clone(); + let wrapper_args: Vec<(syn::Ident, syn::Type)> = fun_sig + .inputs + .iter() + .enumerate() + .filter_map(|(i, arg)| match arg { + syn::FnArg::Receiver(_) => { + panic!("IC functions can't have receivers (`&self`, `&mut self`, etc.)") + } + syn::FnArg::Typed(pat) => { + if i == 0 { + // First argument should be `memory`, skip + None + } else { + Some(( + syn::Ident::new(&format!("arg{}", i), proc_macro2::Span::call_site()), + (*pat.ty).clone(), + )) + } + } + }) + .collect(); + + // Parameters of the wrapper function + let wrapper_params_syn: Vec = wrapper_args + .iter() + .map(|(ident, ty)| quote!(#ident: #ty)) + .collect(); + + // Arguments passed to the original function + let wrapper_args_syn: Vec<&syn::Ident> = wrapper_args.iter().map(|(ident, _)| ident).collect(); + + let fun_attr = if ic_only { + quote!(#[cfg(feature = "ic")]) + } else { + quote!() + }; + + quote!( + #fun_attr + #fun + + #[cfg(feature = "ic")] + #[export_name = #fn_name] + unsafe extern "C" fn #fn_wrapper_ident(#(#wrapper_params_syn,)*) #wrapper_ret { + #fn_ident(&mut crate::memory::ic::IcMemory, #(#wrapper_args_syn,)*) + } + ) + .into() +} diff --git a/rts/motoko-rts-tests/Cargo.lock b/rts/motoko-rts-tests/Cargo.lock index 5308fbd4a3b..77d5a9d785a 100644 --- a/rts/motoko-rts-tests/Cargo.lock +++ b/rts/motoko-rts-tests/Cargo.lock @@ -1,19 +1,28 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
[[package]] -name = "aho-corasick" -version = "0.7.15" +name = "autocfg" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" -dependencies = [ - "memchr", -] +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "cfg-if" -version = "0.1.10" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "compiler_builtins" @@ -21,21 +30,11 @@ version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3748f82c7d366a0b4950257d19db685d4958d2fa27c6d164a3f069fec42b748b" -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "log", - "regex", -] - [[package]] name = "getrandom" -version = "0.1.15" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ "cfg-if", "libc", @@ -55,19 +54,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" [[package]] -name = "log" -version = "0.4.11" +name = "maplit" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "memchr" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "motoko-rts" @@ -75,14 +65,36 @@ version = "0.1.0" dependencies = [ "compiler_builtins", "libc", + "motoko-rts-macros", +] + +[[package]] +name = "motoko-rts-macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] name = "motoko-rts-tests" version = "0.1.0" dependencies = [ + "byteorder", + "libc", + "maplit", "motoko-rts", - "quickcheck", + "proptest", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", ] [[package]] @@ -92,35 +104,62 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] -name = "quickcheck" -version = "0.9.2" +name = "proc-macro2" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ - "env_logger", - "log", + "unicode-xid", +] + +[[package]] +name = "proptest" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5" +dependencies = [ + "bitflags", + "byteorder", + "lazy_static", + "num-traits", + "quick-error", "rand", - "rand_core", + "rand_chacha", + "rand_xorshift", + "regex-syntax", +] + +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + +[[package]] +name = "quote" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", ] [[package]] name = "rand" -version = "0.7.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ - "getrandom", "libc", "rand_chacha", "rand_core", - "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.2.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", @@ -128,34 +167,22 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.5.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "rand_xorshift" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ "rand_core", ] -[[package]] -name = "regex" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - [[package]] name = "regex-syntax" version = "0.6.21" @@ -163,16 +190,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] -name = "thread_local" -version = "1.0.1" +name = "syn" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ - "lazy_static", + "proc-macro2", + "quote", + "unicode-xid", ] +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" + [[package]] name = "wasi" 
-version = "0.9.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" diff --git a/rts/motoko-rts-tests/Cargo.toml b/rts/motoko-rts-tests/Cargo.toml index 7bfec1740a0..e31651998f1 100644 --- a/rts/motoko-rts-tests/Cargo.toml +++ b/rts/motoko-rts-tests/Cargo.toml @@ -5,5 +5,8 @@ authors = ["dfinity ) -> *mut libc::c_void { + bigint::mp_calloc(&mut *HEAP, n_elems, elem_size) +} + +#[no_mangle] +unsafe extern "C" fn mp_realloc( + ptr: *mut libc::c_void, + old_size: Bytes, + new_size: Bytes, +) -> *mut libc::c_void { + bigint::mp_realloc(&mut *HEAP, ptr, old_size, new_size) +} pub unsafe fn test() { println!("Testing BigInt ..."); + // Not sure how much we will need in these tests but 1G should be enough + let mut heap = TestMemory::new(Words(1024 * 1024)); + HEAP = &mut heap; + assert!(bigint_eq( bigint_pow(bigint_of_word32(70), bigint_of_word32(32)), bigint_mul( @@ -35,6 +61,9 @@ pub unsafe fn test() { test_bigint_sleb128(bigint_neg(two_pow_i)); test_bigint_sleb128(bigint_neg(plus_one)); } + + HEAP = std::ptr::null_mut(); + drop(heap); } // Check leb128 encode/decode roundtrip diff --git a/rts/motoko-rts-tests/src/bitmap.rs b/rts/motoko-rts-tests/src/bitmap.rs index eecda532272..aee40a8d18a 100644 --- a/rts/motoko-rts-tests/src/bitmap.rs +++ b/rts/motoko-rts-tests/src/bitmap.rs @@ -1,36 +1,75 @@ -use motoko_rts::bitmap::{alloc_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END}; +use crate::memory::TestMemory; + use motoko_rts::constants::WORD_SIZE; +use motoko_rts::gc::mark_compact::bitmap::{ + alloc_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END, +}; +use motoko_rts::memory::Memory; use motoko_rts::types::{Bytes, Words}; -use quickcheck::{quickcheck, TestResult}; - use std::collections::HashSet; +use proptest::strategy::Strategy; +use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner}; + pub unsafe fn test() { println!("Testing bitmap ..."); println!(" Testing set_bit/get_bit"); - test_set_get(vec![0, 33]).unwrap(); - quickcheck(test_set_get_qc as fn(Vec) -> TestResult); + + { + let mut mem = TestMemory::new(Words(1024)); + test_set_get(&mut mem, vec![0, 33]).unwrap(); + } + + let mut proptest_runner = TestRunner::new(Config { + cases: 100, + failure_persistence: None, + ..Default::default() + }); + + proptest_runner + .run(&bit_index_vec_strategy(), |bits| { + // Max bit idx = 65,534, requires 2048 words. Add 2 words for Blob header (header + + // length). 
+ let mut mem = TestMemory::new(Words(2051)); + test_set_get_proptest(&mut mem, bits) + }) + .unwrap(); + println!(" Testing bit iteration"); - quickcheck(test_bit_iter as fn(HashSet) -> TestResult); + proptest_runner + .run(&bit_index_set_strategy(), |bits| { + // Same as above + let mut mem = TestMemory::new(Words(2051)); + test_bit_iter(&mut mem, bits) + }) + .unwrap(); } -fn test_set_get_qc(bits: Vec) -> TestResult { - match test_set_get(bits) { - Ok(()) => TestResult::passed(), - Err(err) => TestResult::error(&err), - } +/// Generates vectors of bit indices +fn bit_index_vec_strategy() -> impl Strategy> { + proptest::collection::vec(0u16..u16::MAX, 0..1_000) +} + +/// Same as `bit_index_vec_strategy`, but generates sets +fn bit_index_set_strategy() -> impl Strategy> { + proptest::collection::hash_set(0u16..u16::MAX, 0..1_000) } -fn test_set_get(mut bits: Vec) -> Result<(), String> { +fn test_set_get_proptest(mem: &mut M, bits: Vec) -> TestCaseResult { + test_set_get(mem, bits).map_err(|err| TestCaseError::Fail(err.into())) +} + +fn test_set_get(mem: &mut M, mut bits: Vec) -> Result<(), String> { if bits.is_empty() { return Ok(()); } unsafe { - alloc_bitmap(Bytes( - u32::from(*bits.iter().max().unwrap() + 1) * WORD_SIZE, - )); + alloc_bitmap( + mem, + Bytes((u32::from(*bits.iter().max().unwrap()) + 1) * WORD_SIZE), + ); for bit in &bits { set_bit(u32::from(*bit)); @@ -64,7 +103,7 @@ fn test_set_get(mut bits: Vec) -> Result<(), String> { Ok(()) } -fn test_bit_iter(bits: HashSet) -> TestResult { +fn test_bit_iter(mem: &mut M, bits: HashSet) -> TestCaseResult { // If the max bit is N, the heap size is at least N+1 words let heap_size = Words(u32::from( bits.iter().max().map(|max_bit| max_bit + 1).unwrap_or(0), @@ -72,7 +111,7 @@ fn test_bit_iter(bits: HashSet) -> TestResult { .to_bytes(); unsafe { - alloc_bitmap(heap_size); + alloc_bitmap(mem, heap_size); for bit in bits.iter() { set_bit(u32::from(*bit)); @@ -87,15 +126,18 @@ fn test_bit_iter(bits: HashSet) -> TestResult { while let Some(vec_bit) = bit_vec_iter.next() { match bit_map_iter.next() { BITMAP_ITER_END => { - return TestResult::error( - "bitmap iterator didn't yield but there are more bits", - ); + return Err(TestCaseError::Fail( + "bitmap iterator didn't yield but there are more bits".into(), + )); } map_bit => { if map_bit != u32::from(vec_bit) { - return TestResult::error(&format!( - "bitmap iterator yields {}, but actual bit is {}", - map_bit, vec_bit + return Err(TestCaseError::Fail( + format!( + "bitmap iterator yields {}, but actual bit is {}", + map_bit, vec_bit + ) + .into(), )); } } @@ -104,12 +146,15 @@ fn test_bit_iter(bits: HashSet) -> TestResult { let map_bit = bit_map_iter.next(); if map_bit != BITMAP_ITER_END { - return TestResult::error(&format!( - "bitmap iterator yields {}, but there are no more bits left", - map_bit + return Err(TestCaseError::Fail( + format!( + "bitmap iterator yields {}, but there are no more bits left", + map_bit + ) + .into(), )); } } - TestResult::passed() + Ok(()) } diff --git a/rts/motoko-rts-tests/src/closure_table.rs b/rts/motoko-rts-tests/src/closure_table.rs index 013fdeda588..49ed9d673f4 100644 --- a/rts/motoko-rts-tests/src/closure_table.rs +++ b/rts/motoko-rts-tests/src/closure_table.rs @@ -1,5 +1,7 @@ +use crate::memory::TestMemory; + use motoko_rts::closure_table::{closure_count, recall_closure, remember_closure}; -use motoko_rts::types::SkewedPtr; +use motoko_rts::types::{SkewedPtr, Words}; pub unsafe fn test() { println!("Testing closure table ..."); @@ -8,9 +10,13 @@ pub 
unsafe fn test() { const N: usize = 2000; // >256, to exercise `double_closure_table` + // Array will be doubled 3 times, so 256 + 512 + 1024 + 2048 = 3840 words, plus each array will + // have 2 word header. + let mut heap = TestMemory::new(Words(3848)); + let mut references: [u32; N] = [0; N]; for i in 0..N { - references[i] = remember_closure(SkewedPtr((i << 2).wrapping_sub(1))); + references[i] = remember_closure(&mut heap, SkewedPtr((i << 2).wrapping_sub(1))); assert_eq!(closure_count(), (i + 1) as u32); } @@ -21,7 +27,7 @@ pub unsafe fn test() { } for i in 0..N / 2 { - references[i] = remember_closure(SkewedPtr((i << 2).wrapping_sub(1))); + references[i] = remember_closure(&mut heap, SkewedPtr((i << 2).wrapping_sub(1))); assert_eq!(closure_count(), (N / 2 + i + 1) as u32); } @@ -29,6 +35,4 @@ pub unsafe fn test() { assert_eq!(recall_closure(references[i]).0, (i << 2).wrapping_sub(1)); assert_eq!(closure_count(), i as u32); } - - println!("OK"); } diff --git a/rts/motoko-rts-tests/src/crc32.rs b/rts/motoko-rts-tests/src/crc32.rs index 9d38c2cfd6a..167c46d6fc1 100644 --- a/rts/motoko-rts-tests/src/crc32.rs +++ b/rts/motoko-rts-tests/src/crc32.rs @@ -1,26 +1,36 @@ +use crate::memory::TestMemory; + use motoko_rts::principal_id::{base32_of_checksummed_blob, base32_to_blob}; use motoko_rts::text::{text_compare, text_of_ptr_size}; -use motoko_rts::types::Bytes; +use motoko_rts::types::{Bytes, Words}; pub unsafe fn test() { println!("Testing crc32 ..."); + let mut heap = TestMemory::new(Words(1024 * 1024)); + // // Encoding // - assert_eq!( - text_compare( - base32_of_checksummed_blob(text_of_ptr_size(b"abcdefghijklmnop".as_ptr(), Bytes(16))), - text_of_ptr_size(b"SQ5MBE3BMJRWIZLGM5UGS2TLNRWW433Q".as_ptr(), Bytes(32)) - ), - 0 + let text = text_of_ptr_size(&mut heap, b"abcdefghijklmnop".as_ptr(), Bytes(16)); + let text1 = base32_of_checksummed_blob(&mut heap, text); + let text2 = text_of_ptr_size( + &mut heap, + b"SQ5MBE3BMJRWIZLGM5UGS2TLNRWW433Q".as_ptr(), + Bytes(32), ); + assert_eq!(text_compare(text1, text2), 0); + let text = text_of_ptr_size(&mut heap, b"abcdefghijklmnop".as_ptr(), Bytes(16)); assert_eq!( text_compare( - base32_of_checksummed_blob(text_of_ptr_size(b"abcdefghijklmnop".as_ptr(), Bytes(16))), - text_of_ptr_size(b"SQ5MBE3BMJRWIZLGM5UGS2TLNRWW433Q".as_ptr(), Bytes(32)) + base32_of_checksummed_blob(&mut heap, text,), + text_of_ptr_size( + &mut heap, + b"SQ5MBE3BMJRWIZLGM5UGS2TLNRWW433Q".as_ptr(), + Bytes(32) + ) ), 0 ); @@ -29,37 +39,42 @@ pub unsafe fn test() { // Decoding // + let text = text_of_ptr_size(&mut heap, b"".as_ptr(), Bytes(0)); assert_eq!( text_compare( - base32_to_blob(text_of_ptr_size(b"".as_ptr(), Bytes(0))), - text_of_ptr_size(b"".as_ptr(), Bytes(0)) + base32_to_blob(&mut heap, text), + text_of_ptr_size(&mut heap, b"".as_ptr(), Bytes(0)) ), 0 ); + let text = text_of_ptr_size(&mut heap, b"GEZDGNBVGY3TQOI".as_ptr(), Bytes(15)); assert_eq!( text_compare( - base32_to_blob(text_of_ptr_size(b"GEZDGNBVGY3TQOI".as_ptr(), Bytes(15))), - text_of_ptr_size(b"123456789".as_ptr(), Bytes(9)) + base32_to_blob(&mut heap, text), + text_of_ptr_size(&mut heap, b"123456789".as_ptr(), Bytes(9)) ), 0 ); + let text = text_of_ptr_size(&mut heap, b"MFRGGZDFMZTWQ2LKNNWG23TPOA".as_ptr(), Bytes(26)); assert_eq!( text_compare( - base32_to_blob(text_of_ptr_size( - b"MFRGGZDFMZTWQ2LKNNWG23TPOA".as_ptr(), - Bytes(26) - )), - text_of_ptr_size(b"abcdefghijklmnop".as_ptr(), Bytes(16)) + base32_to_blob(&mut heap, text), + text_of_ptr_size(&mut heap, b"abcdefghijklmnop".as_ptr(), Bytes(16)) ), 0 
); + let text = text_of_ptr_size(&mut heap, b"em77e-bvlzu-aq".as_ptr(), Bytes(14)); assert_eq!( text_compare( - base32_to_blob(text_of_ptr_size(b"em77e-bvlzu-aq".as_ptr(), Bytes(14))), - text_of_ptr_size(b"\x23\x3f\xf2\x06\xab\xcd\x01".as_ptr(), Bytes(7)) + base32_to_blob(&mut heap, text), + text_of_ptr_size( + &mut heap, + b"\x23\x3f\xf2\x06\xab\xcd\x01".as_ptr(), + Bytes(7) + ) ), 0 ); diff --git a/rts/motoko-rts-tests/src/gc.rs b/rts/motoko-rts-tests/src/gc.rs new file mode 100644 index 00000000000..4f0b9bc7682 --- /dev/null +++ b/rts/motoko-rts-tests/src/gc.rs @@ -0,0 +1,274 @@ +// Naming conventions: +// +// - offset = index in the "heap" array/slice/vector +// - address = address in the process's address space +// +// To convert an offset into an address, add heap array's address to the offset. + +mod heap; +mod utils; + +use heap::MotokoHeap; +use utils::{read_word, ObjectIdx, GC, GC_IMPLS, WORD_SIZE}; + +use motoko_rts::gc::copying::copying_gc_internal; +use motoko_rts::gc::mark_compact::compacting_gc_internal; +use motoko_rts::types::*; + +use std::collections::{HashMap, HashSet}; +use std::fmt::Write; + +pub fn test() { + println!("Testing garbage collection ..."); + + // TODO: Add more tests + + let heap = hashmap! { + 0 => vec![0, 2], + 2 => vec![0], + 3 => vec![3], + }; + + let roots = vec![0, 2, 3]; + + test_gcs(&TestHeap { heap, roots }); +} + +#[derive(Debug)] +struct TestHeap { + heap: HashMap>, + roots: Vec, +} + +/// Test all GC implementations with the given heap +fn test_gcs(heap_descr: &TestHeap) { + for gc in &GC_IMPLS { + test_gc(*gc, &heap_descr.heap, &heap_descr.roots); + } +} + +fn test_gc(gc: GC, refs: &HashMap>, roots: &[u32]) { + let heap = MotokoHeap::new(refs, roots, gc); + + // Check `check_dynamic_heap` sanity + check_dynamic_heap( + refs, + &roots, + &**heap.heap(), + heap.heap_base_offset(), + heap.heap_ptr_offset(), + ); + + for _ in 0..3 { + gc.run(heap.clone()); + + let heap_base_offset = heap.heap_base_offset(); + let heap_ptr_offset = heap.heap_ptr_offset(); + check_dynamic_heap( + refs, + &roots, + &**heap.heap(), + heap_base_offset, + heap_ptr_offset, + ); + } +} + +/// Check the dynamic heap: +/// +/// - All and only reachable objects should be in the heap. Reachable objects are those in the +/// transitive closure of roots. +/// +/// - Objects should point to right objects. E.g. if object with index X points to objects with +/// indices Y and Z in the `objects` map, it should point to objects with indices Y and Z on the +/// heap. +/// +fn check_dynamic_heap( + objects: &HashMap>, + roots: &[ObjectIdx], + heap: &[u8], + heap_base_offset: usize, + heap_ptr_offset: usize, +) { + // Current offset in the heap + let mut offset = heap_base_offset; + + // Maps objects to their addresses (not offsets!). Used when debugging duplicate objects. + let mut seen: HashMap = Default::default(); + + while offset < heap_ptr_offset { + // Address of the current object. Used for debugging. 
+ let address = offset as usize + heap.as_ptr() as usize; + + let tag = read_word(heap, offset); + offset += WORD_SIZE; + + if tag == 0 { + // Found closure table + continue; + } + + assert_eq!(tag, TAG_ARRAY); + + let n_fields = read_word(heap, offset); + offset += WORD_SIZE; + + // There should be at least one field for the index + assert!(n_fields >= 1); + + let object_idx = read_word(heap, offset) >> 1; + offset += WORD_SIZE; + let old = seen.insert(object_idx, address); + if let Some(old) = old { + panic!( + "Object with index {} seen multiple times: {:#x}, {:#x}", + object_idx, old, address + ); + } + + let object_expected_pointees = objects.get(&object_idx).unwrap_or_else(|| { + panic!("Object with index {} is not in the objects map", object_idx) + }); + + for field_idx in 1..n_fields { + let field = read_word(heap, offset); + offset += WORD_SIZE; + // Get index of the object pointed by the field + let pointee_address = field.wrapping_add(1); // unskew + let pointee_offset = (pointee_address as usize) - (heap.as_ptr() as usize); + let pointee_idx_offset = pointee_offset as usize + 2 * WORD_SIZE; // skip header + length + let pointee_idx = read_word(heap, pointee_idx_offset) >> 1; + let expected_pointee_idx = object_expected_pointees[(field_idx - 1) as usize]; + assert_eq!( + pointee_idx, + expected_pointee_idx, + "Object with index {} points to {} in field {}, but expected to point to {}", + object_idx, + pointee_idx, + field_idx - 1, + expected_pointee_idx, + ); + } + } + + // At this point we've checked that all seen objects point to the expected objects (as + // specified by `objects`). Check that we've seen the reachable objects and only the reachable + // objects. + let reachable_objects = compute_reachable_objects(roots, objects); + + // Objects we've seen in the heap + let seen_objects: HashSet = seen.keys().copied().collect(); + + // Reachable objects that we haven't seen in the heap + let missing_objects: Vec = reachable_objects + .difference(&seen_objects) + .copied() + .collect(); + + // Unreachable objects that we've seen in the heap + let extra_objects: Vec = seen_objects + .difference(&reachable_objects) + .copied() + .collect(); + + let mut error_message = String::new(); + + if !missing_objects.is_empty() { + write!( + &mut error_message, + "Reachable objects missing in the post-GC heap: {:?}", + missing_objects, + ) + .unwrap(); + } + + if !extra_objects.is_empty() { + if !error_message.is_empty() { + error_message.push('\n'); + } + + write!( + &mut error_message, + "Unreachable objects seen in the post-GC heap: {:?}", + extra_objects, + ) + .unwrap(); + } + + if !error_message.is_empty() { + panic!("{}", error_message); + } +} + +fn compute_reachable_objects( + roots: &[ObjectIdx], + heap: &HashMap>, +) -> HashSet { + let mut closure: HashSet = roots.iter().copied().collect(); + let mut work_list: Vec = roots.iter().copied().collect(); + + while let Some(next) = work_list.pop() { + let pointees = heap + .get(&next) + .unwrap_or_else(|| panic!("Object {} is in the work list, but not in heap", next)); + + for pointee in pointees { + if closure.insert(*pointee) { + work_list.push(*pointee); + } + } + } + + closure +} + +impl GC { + fn run(&self, mut heap: MotokoHeap) { + let heap_base = heap.heap_base_address() as u32; + let static_roots = skew(heap.static_root_array_address()); + let closure_table_address = heap.closure_table_address() as *mut SkewedPtr; + + let heap_1 = heap.clone(); + let heap_2 = heap.clone(); + + match self { + GC::Copying => { + unsafe { + 
copying_gc_internal( + &mut heap, + heap_base, + // get_hp + || heap_1.heap_ptr_address(), + // set_hp + move |hp| heap_2.set_heap_ptr_address(hp as usize), + static_roots, + closure_table_address, + // note_live_size + |_live_size| {}, + // note_reclaimed + |_reclaimed| {}, + ); + } + } + + GC::MarkCompact => { + unsafe { + compacting_gc_internal( + &mut heap, + heap_base, + // get_hp + || heap_1.heap_ptr_address(), + // set_hp + move |hp| heap_2.set_heap_ptr_address(hp as usize), + static_roots, + closure_table_address, + // note_live_size + |_live_size| {}, + // note_reclaimed + |_reclaimed| {}, + ); + } + } + } + } +} diff --git a/rts/motoko-rts-tests/src/gc/heap.rs b/rts/motoko-rts-tests/src/gc/heap.rs new file mode 100644 index 00000000000..93cd3c7d700 --- /dev/null +++ b/rts/motoko-rts-tests/src/gc/heap.rs @@ -0,0 +1,345 @@ +use super::utils::{ + make_pointer, make_scalar, write_word, ObjectIdx, GC, MAX_MARK_STACK_SIZE, WORD_SIZE, +}; + +use motoko_rts::gc::mark_compact::mark_stack::INIT_STACK_SIZE; +use motoko_rts::memory::Memory; +use motoko_rts::types::*; + +use std::cell::{Ref, RefCell}; +use std::collections::HashMap; +use std::convert::TryFrom; +use std::rc::Rc; + +/// Represents Motoko heaps. Reference counted (implements `Clone`) so we can clone and move values +/// of this type to GC callbacks. +#[derive(Clone)] +pub struct MotokoHeap { + inner: Rc>, +} + +impl Memory for MotokoHeap { + unsafe fn alloc_words(&mut self, n: Words) -> SkewedPtr { + self.inner.borrow_mut().alloc_words(n) + } +} + +impl MotokoHeap { + /// Create a new Motoko heap from the given object graph and roots. `GC` argument is used to + /// allocate as little space as possible for the dynamic heap. + /// + /// Note that for `GC::MarkCompact` we limit the upper bound on mark stack size as + /// `super::MAX_MARK_STACK_SIZE`. In the worst case the size would be the same as the heap + /// size, but that's not a realistic scenario. + pub fn new( + map: &HashMap>, + roots: &[ObjectIdx], + gc: GC, + ) -> MotokoHeap { + MotokoHeap { + inner: Rc::new(RefCell::new(MotokoHeapInner::new(map, roots, gc))), + } + } + + /// Get the beginning of dynamic heap, as offset in the heap array + pub fn heap_base_offset(&self) -> usize { + self.inner.borrow().heap_base_offset + } + + /// Get the heap pointer, as offset in the heap array + pub fn heap_ptr_offset(&self) -> usize { + self.inner.borrow().heap_ptr_offset + } + + /// Get the heap pointer, as address in the current process. The address can be used to mutate + /// the heap. + pub fn heap_ptr_address(&self) -> usize { + self.inner.borrow().heap_ptr_address() + } + + /// Update the heap pointer given as an address in the current process. + pub fn set_heap_ptr_address(&self, address: usize) { + self.inner.borrow_mut().set_heap_ptr_address(address) + } + + /// Get the beginning of dynamic heap, as an address in the current process + pub fn heap_base_address(&self) -> usize { + self.inner.borrow().heap_base_address() + } + + /// Get the address of the static root array + pub fn static_root_array_address(&self) -> usize { + self.inner.borrow().static_root_array_address() + } + + /// Get the address of the closure table + pub fn closure_table_address(&self) -> usize { + self.inner.borrow().closure_table_address() + } + + /// Get the heap as an array. Use `offset` values returned by the methods above to read. + pub fn heap(&self) -> Ref> { + Ref::map(self.inner.borrow(), |heap| &heap.heap) + } +} + +struct MotokoHeapInner { + /// The heap. 
This is a boxed slice instead of a vector as growing this wouldn't make sense + /// (all pointers would have to be updated). + heap: Box<[u8]>, + + /// Where the dynamic heap starts + heap_base_offset: usize, + + /// Where the dynamic heap ends, i.e. the heap pointer + heap_ptr_offset: usize, + + /// Offset of the static root array: an array of pointers below `heap_base` + static_root_array_offset: usize, + + /// Offset of the closure table. Currently we write a tagged scalar to this location and + /// effectively skip closure table evacuation. + closure_table_offset: usize, +} + +impl MotokoHeapInner { + fn address_to_offset(&self, address: usize) -> usize { + address - self.heap.as_ptr() as usize + } + + fn offset_to_address(&self, offset: usize) -> usize { + offset + self.heap.as_ptr() as usize + } + + /// Get heap base in the process's address space + fn heap_base_address(&self) -> usize { + self.offset_to_address(self.heap_base_offset) + } + + /// Get heap pointer (i.e. where the dynamic heap ends) in the process's address space + fn heap_ptr_address(&self) -> usize { + self.offset_to_address(self.heap_ptr_offset) + } + + /// Set heap pointer + fn set_heap_ptr_address(&mut self, address: usize) { + self.heap_ptr_offset = self.address_to_offset(address); + } + + /// Get static root array address in the process's address space + fn static_root_array_address(&self) -> usize { + self.offset_to_address(self.static_root_array_offset) + } + + /// Get closure table address in the process's address space + fn closure_table_address(&self) -> usize { + self.offset_to_address(self.closure_table_offset) + } + + fn new( + map: &HashMap>, + roots: &[ObjectIdx], + gc: GC, + ) -> MotokoHeapInner { + // Each object will be 3 words per object + one word for each reference. Static heap will + // have an array (header + length) with one element, one MutBox for each root. + let static_heap_size_bytes = (2 + roots.len() + (roots.len() * 2)) * WORD_SIZE; + let dynamic_heap_size_bytes = { + let object_headers_words = map.len() * 3; + let references_words = map.values().map(Vec::len).sum::(); + let closure_table_words = 1; + (object_headers_words + references_words + closure_table_words) * WORD_SIZE + }; + let total_heap_size_bytes = static_heap_size_bytes + dynamic_heap_size_bytes; + + let heap_size = heap_size_for_gc( + gc, + static_heap_size_bytes, + dynamic_heap_size_bytes, + map.len(), + ); + + let mut heap: Vec = vec![0; heap_size]; + + // Maps `ObjectIdx`s into their offsets in the heap + let object_addrs: HashMap = + create_dynamic_heap(map, &mut heap[static_heap_size_bytes..]); + + create_static_heap(roots, &object_addrs, &mut heap[..static_heap_size_bytes]); + + // Add closure table at the end of the heap. Currently closure table is just a scalar. 
+ let closure_table_offset = static_heap_size_bytes + dynamic_heap_size_bytes - WORD_SIZE; + write_word(&mut heap, closure_table_offset, 0); + + MotokoHeapInner { + heap: heap.into_boxed_slice(), + heap_base_offset: static_heap_size_bytes, + heap_ptr_offset: total_heap_size_bytes, + static_root_array_offset: 0, + closure_table_offset, + } + } + + unsafe fn alloc_words(&mut self, n: Words) -> SkewedPtr { + let bytes = n.to_bytes(); + + // Update heap pointer + let old_hp = self.heap_ptr_address(); + let new_hp = old_hp + bytes.0 as usize; + self.heap_ptr_offset = new_hp - self.heap.as_ptr() as usize; + + // Grow memory if needed + self.grow_memory(new_hp as usize); + + skew(old_hp) + } + + unsafe fn grow_memory(&mut self, ptr: usize) { + let heap_end = self.heap.as_ptr() as usize + self.heap.len(); + if ptr > heap_end { + // We don't allow growing memory in tests, allocate large enough for the test + panic!( + "MotokoHeap::grow_memory called: heap_end={:#x}, grow_memory argument={:#x}", + heap_end, ptr + ); + } + } +} + +/// Compute the size of the heap to be allocated for the GC test. +fn heap_size_for_gc( + gc: GC, + static_heap_size_bytes: usize, + dynamic_heap_size_bytes: usize, + n_objects: usize, +) -> usize { + let total_heap_size_bytes = static_heap_size_bytes + dynamic_heap_size_bytes; + match gc { + GC::Copying => { + let to_space_bytes = dynamic_heap_size_bytes; + total_heap_size_bytes + to_space_bytes + } + GC::MarkCompact => { + let bitmap_size_bytes = { + let dynamic_heap_bytes = Bytes(dynamic_heap_size_bytes as u32); + // `...to_words().to_bytes()` below effectively rounds up heap size to word size + // then gets the bytes + let dynamic_heap_words = dynamic_heap_bytes.to_words(); + let mark_bit_bytes = dynamic_heap_words.to_bytes(); + + // The bitmap implementation rounds up to 64-bits to be able to read as many + // bits as possible in one instruction and potentially skip 64 words in the + // heap with single 64-bit comparison + (((mark_bit_bytes.0 + 7) / 8) * 8) + size_of::().to_bytes().0 + }; + // In the worst case the entire heap will be pushed to the mark stack, but in tests + // we limit the size + let mark_stack_words = n_objects.clamp(INIT_STACK_SIZE.0 as usize, MAX_MARK_STACK_SIZE) + + size_of::().0 as usize; + + total_heap_size_bytes + bitmap_size_bytes as usize + (mark_stack_words * WORD_SIZE) + } + } +} + +/// Given a heap description (as a map from objects to objects), and the dynamic part of the heap +/// (as an array), initialize the dynamic heap with objects. +/// +/// Returns a mapping from object indices (`ObjectIdx`) to their addresses (see module +/// documentation for "offset" and "address" definitions). 
+fn create_dynamic_heap( + refs: &HashMap>, + dynamic_heap: &mut [u8], +) -> HashMap { + let heap_start = dynamic_heap.as_ptr() as usize; + + // Maps objects to their addresses + let mut object_addrs: HashMap = HashMap::new(); + + // First pass allocates objects without fields + { + let mut heap_offset = 0; + for (obj, refs) in refs { + object_addrs.insert(*obj, heap_start + heap_offset); + + // Store object header + write_word(dynamic_heap, heap_offset, TAG_ARRAY); + heap_offset += WORD_SIZE; + + // Store length: idx + refs + write_word( + dynamic_heap, + heap_offset, + u32::try_from(refs.len() + 1).unwrap(), + ); + heap_offset += WORD_SIZE; + + // Store object value (idx) + write_word(dynamic_heap, heap_offset, make_scalar(*obj)); + heap_offset += WORD_SIZE; + + // Leave space for the fields + heap_offset += refs.len() * WORD_SIZE; + } + } + + // println!("object addresses={:#?}", object_addrs); + + // Second pass adds fields + for (obj, refs) in refs { + let obj_offset = object_addrs.get(obj).unwrap() - heap_start; + for (ref_idx, ref_) in refs.iter().enumerate() { + // -1 for skewing + let ref_addr = make_pointer(*object_addrs.get(ref_).unwrap() as u32); + let field_offset = obj_offset + (3 + ref_idx) * WORD_SIZE; + write_word(dynamic_heap, field_offset, u32::try_from(ref_addr).unwrap()); + } + } + + object_addrs +} + +/// Given a root set (`roots`, may contain duplicates), a mapping from object indices to addresses +/// (`object_addrs`), and the static part of the heap, initialize the static heap with the static +/// root array. +fn create_static_heap( + roots: &[ObjectIdx], + object_addrs: &HashMap, + heap: &mut [u8], +) { + let root_addresses: Vec = roots + .iter() + .map(|obj| *object_addrs.get(obj).unwrap()) + .collect(); + + // Create static root array. Each element of the array is a MutBox pointing to the actual + // root. + write_word(heap, 0, TAG_ARRAY); + write_word(heap, WORD_SIZE, u32::try_from(roots.len()).unwrap()); + + // Current offset in the heap for the next static roots array element + let mut root_addr_offset = size_of::().to_bytes().0 as usize; + + // Current offset in the heap for the MutBox of the next root + let mut mutbox_offset = (size_of::().0 as usize + roots.len()) * WORD_SIZE; + + for root_address in root_addresses { + // Add a MutBox for the object + write_word(heap, mutbox_offset, TAG_MUTBOX); + write_word( + heap, + mutbox_offset + WORD_SIZE, + make_pointer(u32::try_from(root_address).unwrap()), + ); + + let mutbox_addr = heap.as_ptr() as usize + mutbox_offset; + write_word( + heap, + root_addr_offset, + make_pointer(u32::try_from(mutbox_addr).unwrap()), + ); + + root_addr_offset += WORD_SIZE; + mutbox_offset += size_of::().to_bytes().0 as usize; + } +} diff --git a/rts/motoko-rts-tests/src/gc/utils.rs b/rts/motoko-rts-tests/src/gc/utils.rs new file mode 100644 index 00000000000..5c76c838a2b --- /dev/null +++ b/rts/motoko-rts-tests/src/gc/utils.rs @@ -0,0 +1,45 @@ +use byteorder::{ReadBytesExt, WriteBytesExt, LE}; + +/// A unique object index, used in heap descriptions. +/// +/// These are written as scalar values in object payloads, so they can be at most 31 bits. Larger +/// values will cause test failure in `make_scalar` below. +pub type ObjectIdx = u32; + +/// Same as RTS `WORD_SIZE`, but `usize` +pub const WORD_SIZE: usize = motoko_rts::constants::WORD_SIZE as usize; + +// Max allowed size for the mark stack in mark-compact GC tests +pub const MAX_MARK_STACK_SIZE: usize = 100; + +/// Enum for the GC implementations. 
GC functions are generic so we can't put them into arrays or +/// other data types, we use this type instead. +#[derive(Debug, Clone, Copy)] +pub enum GC { + Copying, + MarkCompact, +} + +pub static GC_IMPLS: [GC; 2] = [GC::Copying, GC::MarkCompact]; + +/// Read a little-endian (Wasm) word from given offset +pub fn read_word(heap: &[u8], offset: usize) -> u32 { + (&heap[offset..]).read_u32::().unwrap() +} + +/// Write a little-endian (Wasm) word to given offset +pub fn write_word(heap: &mut [u8], offset: usize, word: u32) { + (&mut heap[offset..]).write_u32::(word).unwrap() +} + +/// Make a scalar value to be used in heap object payload +pub fn make_scalar(value: u32) -> u32 { + // Scalar values can be at most 31 bits + assert_eq!(value >> 31, 0); + value << 1 +} + +/// Make a pointer value to be used in heap object payload +pub fn make_pointer(addr: u32) -> u32 { + addr.wrapping_sub(1) +} diff --git a/rts/motoko-rts-tests/src/leb128.rs b/rts/motoko-rts-tests/src/leb128.rs index af827fd4145..8dcd53c4fe9 100644 --- a/rts/motoko-rts-tests/src/leb128.rs +++ b/rts/motoko-rts-tests/src/leb128.rs @@ -1,22 +1,37 @@ use motoko_rts::buf::Buf; use motoko_rts::leb128::{ - leb128_decode, leb128_decode_checked, leb128_encode, sleb128_decode, sleb128_decode_checked, - sleb128_encode, + leb128_decode_checked, leb128_encode, sleb128_decode_checked, sleb128_encode, }; -use quickcheck::{quickcheck, TestResult}; +use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner}; pub unsafe fn test() { println!("Testing (s)leb128 encode-decode roundtrip ..."); - assert!(!roundtrip_signed(i32::MIN).is_failure()); - assert!(!roundtrip_signed(i32::MAX).is_failure()); + let mut proptest_runner = TestRunner::new(Config { + cases: 10_000, + failure_persistence: None, + ..Default::default() + }); - assert!(!roundtrip_unsigned(u32::MIN).is_failure()); - assert!(!roundtrip_unsigned(u32::MAX).is_failure()); + roundtrip_signed(1).unwrap(); + roundtrip_signed(0).unwrap(); + roundtrip_signed(-1).unwrap(); + roundtrip_signed(i32::MIN).unwrap(); // -2147483648 + roundtrip_signed(i32::MAX).unwrap(); // 2147483647 - quickcheck(roundtrip_signed as fn(i32) -> TestResult); - quickcheck(roundtrip_unsigned as fn(u32) -> TestResult); + proptest_runner + .run(&proptest::num::i32::ANY, roundtrip_signed) + .unwrap(); + + roundtrip_unsigned(1).unwrap(); + roundtrip_unsigned(0).unwrap(); + roundtrip_unsigned(u32::MIN).unwrap(); + roundtrip_unsigned(u32::MAX).unwrap(); + + proptest_runner + .run(&proptest::num::u32::ANY, roundtrip_unsigned) + .unwrap(); // Check overflows check_signed_decode_overflow(&[ @@ -44,7 +59,7 @@ pub unsafe fn test() { ]); // u32::MAX + 1 } -fn roundtrip_signed(val: i32) -> TestResult { +fn roundtrip_signed(val: i32) -> TestCaseResult { unsafe { let mut buf = [0u8; 100]; sleb128_encode(val, buf.as_mut_ptr()); @@ -54,11 +69,24 @@ fn roundtrip_signed(val: i32) -> TestResult { end: buf.as_mut_ptr().add(100), }; - TestResult::from_bool(sleb128_decode(&mut buf_) == val) + match sleb128_decode_checked(&mut buf_) { + None => Err(TestCaseError::Fail( + format!("sleb128 decoding of {} overflowed", val).into(), + )), + Some(val_) => { + if val_ == val { + Ok(()) + } else { + Err(TestCaseError::Fail( + format!("Encode-decode roundtrip gives different value for {}", val).into(), + )) + } + } + } } } -fn roundtrip_unsigned(val: u32) -> TestResult { +fn roundtrip_unsigned(val: u32) -> TestCaseResult { unsafe { let mut buf = [0u8; 100]; leb128_encode(val, buf.as_mut_ptr()); @@ -68,7 +96,20 @@ fn 
roundtrip_unsigned(val: u32) -> TestResult { end: buf.as_mut_ptr().add(100), }; - TestResult::from_bool(leb128_decode(&mut buf_) == val) + match leb128_decode_checked(&mut buf_) { + None => Err(TestCaseError::Fail( + format!("leb128 decoding of {} overflowed", val).into(), + )), + Some(val_) => { + if val_ == val { + Ok(()) + } else { + Err(TestCaseError::Fail( + format!("Encode-decode roundtrip gives different value for {}", val).into(), + )) + } + } + } } } diff --git a/rts/motoko-rts-tests/src/main.rs b/rts/motoko-rts-tests/src/main.rs index ea220522696..8e3c0ddbb10 100644 --- a/rts/motoko-rts-tests/src/main.rs +++ b/rts/motoko-rts-tests/src/main.rs @@ -1,16 +1,21 @@ -#![feature(ptr_offset_from)] +#![feature(ptr_offset_from, map_first_last, clamp)] mod bigint; mod bitmap; mod closure_table; mod crc32; +mod gc; mod leb128; mod mark_stack; +mod memory; mod principal_id; mod text; mod utf8; -use motoko_rts::types::*; +use motoko_rts::types::Bytes; + +#[macro_use] +extern crate maplit; fn main() { if std::mem::size_of::() != 4 { @@ -19,25 +24,30 @@ fn main() { } unsafe { - closure_table::test(); bigint::test(); - utf8::test(); + bitmap::test(); + closure_table::test(); crc32::test(); + gc::test(); + leb128::test(); + mark_stack::test(); principal_id::test(); text::test(); - leb128::test(); - bitmap::test(); - // Mark stack requires that nothing will be allocated since the mark stack allocation until - // we're done with it, which is difficult to guarantee in a test environment as the test - // code can allocate stuff. - // mark_stack::test(); + utf8::test(); } } // Called by the RTS to panic #[no_mangle] -extern "C" fn rts_trap(_msg: *const u8, _len: Bytes) -> ! { - panic!("rts_trap_with called"); +extern "C" fn rts_trap(ptr: *const u8, len: Bytes) -> ! { + let msg = unsafe { std::slice::from_raw_parts(ptr, len.0 as usize) }; + match core::str::from_utf8(msg) { + Err(err) => panic!( + "rts_trap_with called with non-UTF8 string (error={:?}, string={:?})", + err, msg + ), + Ok(str) => panic!("rts_trap_with: {:?}", str), + } } // Called by RTS BigInt functions to panic. 
Normally generated by the compiler diff --git a/rts/motoko-rts-tests/src/mark_stack.rs b/rts/motoko-rts-tests/src/mark_stack.rs index 0499a31bf46..a123003ae57 100644 --- a/rts/motoko-rts-tests/src/mark_stack.rs +++ b/rts/motoko-rts-tests/src/mark_stack.rs @@ -1,40 +1,56 @@ -use motoko_rts::mark_stack::{alloc_mark_stack, free_mark_stack, pop_mark_stack, push_mark_stack}; +use crate::memory::TestMemory; -use quickcheck::{quickcheck, TestResult}; +use motoko_rts::gc::mark_compact::mark_stack::{ + alloc_mark_stack, free_mark_stack, pop_mark_stack, push_mark_stack, +}; +use motoko_rts::memory::Memory; +use motoko_rts::types::Words; -use std::cmp::min; +use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner}; pub unsafe fn test() { println!("Testing mark stack ..."); - quickcheck(test_ as fn(Vec) -> TestResult); + + let mut proptest_runner = TestRunner::new(Config { + cases: 100, + failure_persistence: None, + ..Default::default() + }); + + proptest_runner + .run(&(0u32..1000u32), |n_objs| { + let mut mem = TestMemory::new(Words(1024 * 1024)); + test_(&mut mem, n_objs) + }) + .unwrap(); } -fn test_(objs: Vec) -> TestResult { - // We can't test grow_stack as it requires the new allocation to be next to the old allocation, - // so cap the limit to 1024 - let objs = &objs[0..min(objs.len(), 1024)]; +fn test_(mem: &mut M, n_objs: u32) -> TestCaseResult { + let objs: Vec = (0..n_objs).collect(); unsafe { - // Leaks, but OK - alloc_mark_stack(); + alloc_mark_stack(mem); - for obj in objs { - push_mark_stack(*obj); + for obj in &objs { + push_mark_stack(mem, *obj as usize); } for obj in objs.iter().rev() { let popped = pop_mark_stack(); - if popped != Some(*obj) { - free_mark_stack(); // TODO: Does not really free - return TestResult::error(format!( - "Unexpected object popped, expected={:?}, popped={:?}", - obj, popped + if popped != Some(*obj as usize) { + free_mark_stack(); + return Err(TestCaseError::Fail( + format!( + "Unexpected object popped, expected={:?}, popped={:?}", + obj, popped + ) + .into(), )); } } - free_mark_stack(); // TODO: Does not really free + free_mark_stack(); } - TestResult::passed() + Ok(()) } diff --git a/rts/motoko-rts-tests/src/memory.rs b/rts/motoko-rts-tests/src/memory.rs new file mode 100644 index 00000000000..b1a98a6c868 --- /dev/null +++ b/rts/motoko-rts-tests/src/memory.rs @@ -0,0 +1,43 @@ +use motoko_rts::memory::Memory; +use motoko_rts::types::{skew, SkewedPtr, Words}; + +pub struct TestMemory { + heap: Box<[u8]>, + hp: usize, +} + +impl TestMemory { + pub fn new(size: Words) -> TestMemory { + let bytes = size.to_bytes().0; + let heap = vec![0u8; bytes as usize].into_boxed_slice(); + let hp = heap.as_ptr() as usize; + TestMemory { heap, hp } + } + + unsafe fn grow_memory(&mut self, ptr: usize) { + let heap_end = self.heap.as_ptr() as usize + self.heap.len(); + if ptr > heap_end { + // We don't allow growing memory in tests, allocate large enough for the test + panic!( + "TestMemory::grow_memory called: heap_end={:#x}, grow_memory argument={:#x}", + heap_end, ptr + ); + } + } +} + +impl Memory for TestMemory { + unsafe fn alloc_words(&mut self, n: Words) -> SkewedPtr { + let bytes = n.to_bytes(); + + // Update heap pointer + let old_hp = self.hp; + let new_hp = old_hp + bytes.0 as usize; + self.hp = new_hp; + + // Grow memory if needed + self.grow_memory(new_hp as usize); + + skew(old_hp) + } +} diff --git a/rts/motoko-rts-tests/src/principal_id.rs b/rts/motoko-rts-tests/src/principal_id.rs index 58e0bf36626..be37cb33d5b 100644 --- 
a/rts/motoko-rts-tests/src/principal_id.rs +++ b/rts/motoko-rts-tests/src/principal_id.rs @@ -1,24 +1,32 @@ +use crate::memory::TestMemory; + use motoko_rts::principal_id::{blob_of_principal, principal_of_blob}; use motoko_rts::text::{text_compare, text_of_ptr_size, text_of_str}; -use motoko_rts::types::Bytes; +use motoko_rts::types::{Bytes, Words}; pub unsafe fn test() { println!("Testing principal id encoding ..."); + let mut heap = TestMemory::new(Words(1024 * 1024)); + // // Encoding // + let text = text_of_str(&mut heap, ""); assert_eq!( - text_compare(principal_of_blob(text_of_str("")), text_of_str("aaaaa-aa"),), + text_compare( + principal_of_blob(&mut heap, text), + text_of_str(&mut heap, "aaaaa-aa"), + ), 0, ); + let text = text_of_ptr_size(&mut heap, b"\xC0\xFE\xFE\xD0\x0D".as_ptr(), Bytes(5)); + let principal = principal_of_blob(&mut heap, text); + assert_eq!( - text_compare( - principal_of_blob(text_of_ptr_size(b"\xC0\xFE\xFE\xD0\x0D".as_ptr(), Bytes(5))), - text_of_str("bfozs-kwa73-7nadi"), - ), + text_compare(principal, text_of_str(&mut heap, "bfozs-kwa73-7nadi"),), 0 ); @@ -26,15 +34,15 @@ pub unsafe fn test() { // Decoding // - assert_eq!( - text_compare(blob_of_principal(text_of_str("aaaaa-aa")), text_of_str(""),), - 0 - ); + let text = text_of_str(&mut heap, "aaaaa-aa"); + let principal = blob_of_principal(&mut heap, text); + assert_eq!(text_compare(principal, text_of_str(&mut heap, ""),), 0); + let text = text_of_str(&mut heap, "bfozs-kwa73-7nadi"); assert_eq!( text_compare( - blob_of_principal(text_of_str("bfozs-kwa73-7nadi")), - text_of_ptr_size(b"\xC0\xFE\xFE\xD0\x0D".as_ptr(), Bytes(5)) + blob_of_principal(&mut heap, text), + text_of_ptr_size(&mut heap, b"\xC0\xFE\xFE\xD0\x0D".as_ptr(), Bytes(5)) ), 0 ); diff --git a/rts/motoko-rts-tests/src/proptest_utils.rs b/rts/motoko-rts-tests/src/proptest_utils.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/rts/motoko-rts-tests/src/text.rs b/rts/motoko-rts-tests/src/text.rs index 530ea9170aa..caf12f7ecbe 100644 --- a/rts/motoko-rts-tests/src/text.rs +++ b/rts/motoko-rts-tests/src/text.rs @@ -1,31 +1,36 @@ //! 
Text and text iterator tests +use crate::memory::TestMemory; + +use motoko_rts::memory::Memory; use motoko_rts::text::{ blob_of_text, decode_code_point, text_compare, text_concat, text_len, text_of_str, text_singleton, text_size, }; use motoko_rts::text_iter::{text_iter, text_iter_done, text_iter_next}; -use motoko_rts::types::{Bytes, SkewedPtr, TAG_BLOB}; +use motoko_rts::types::{Bytes, SkewedPtr, Words, TAG_BLOB}; use std::convert::TryFrom; -use quickcheck::{quickcheck, TestResult}; +use proptest::test_runner::{Config, TestCaseError, TestCaseResult, TestRunner}; static STR: &str = "abcdefgh"; -struct TextIter { +struct TextIter<'a, M: Memory> { obj: SkewedPtr, + mem: &'a mut M, } -impl TextIter { - fn from_text(text: SkewedPtr) -> Self { +impl<'a, M: Memory> TextIter<'a, M> { + fn from_text(mem: &'a mut M, text: SkewedPtr) -> Self { TextIter { - obj: unsafe { text_iter(text) }, + obj: unsafe { text_iter(mem, text) }, + mem, } } } -impl Iterator for TextIter { +impl<'a, M: Memory> Iterator for TextIter<'a, M> { type Item = char; fn next(&mut self) -> Option { @@ -33,7 +38,8 @@ impl Iterator for TextIter { if text_iter_done(self.obj) == 1 { None } else { - Some(char::try_from(text_iter_next(self.obj)).unwrap()) + let next = text_iter_next(self.mem, self.obj); + Some(char::try_from(next).unwrap()) } } } @@ -42,6 +48,8 @@ impl Iterator for TextIter { pub unsafe fn test() { println!("Testing text and text iterators ..."); + let mut mem = TestMemory::new(Words(1024 * 1024)); + println!(" Testing decode_code_point and text_singleton for ASCII"); for i in 0..=255u32 { let char = char::try_from(i).unwrap(); @@ -52,32 +60,49 @@ pub unsafe fn test() { assert_eq!(out, str.len() as u32); assert_eq!(char::try_from(char_decoded).unwrap(), char); - let text = text_singleton(char as u32); - assert_eq!(TextIter::from_text(text).collect::(), str); + let text = text_singleton(&mut mem, char as u32); + assert_eq!(TextIter::from_text(&mut mem, text).collect::(), str); } println!(" Testing text blob iteration"); for i in 0..8 { let str = &STR[0..i + 1]; - let text = text_of_str(str); + let text = text_of_str(&mut mem, str); assert_eq!(text.tag(), TAG_BLOB); - let iter = TextIter::from_text(text); + let iter = TextIter::from_text(&mut mem, text); assert_eq!(iter.collect::(), str); } println!(" Testing concatenation"); - concat1(); - quickcheck(concat_prop as fn(Vec) -> TestResult); + concat1(&mut mem); + + drop(mem); + + let mut proptest_runner = TestRunner::new(Config { + cases: 1_000, + failure_persistence: None, + ..Default::default() + }); + + proptest_runner + .run( + &proptest::collection::vec(proptest::string::string_regex(".{0, 20}").unwrap(), 1..20), + |strs| { + let mut mem = TestMemory::new(Words(1024 * 1024)); + concat_prop(&mut mem, strs) + }, + ) + .unwrap(); } -unsafe fn concat1() { +unsafe fn concat1(mem: &mut M) { // A simple test extracted from a QuickCheck generated test case let strs = ["a", "öabcdef", "y"]; - let mut obj = text_of_str(""); + let mut obj = text_of_str(mem, ""); for str in &strs { - let str_obj = text_of_str(str); - obj = text_concat(obj, str_obj); + let str_obj = text_of_str(mem, str); + obj = text_concat(mem, obj, str_obj); } let expected = strs.concat(); @@ -89,7 +114,7 @@ unsafe fn concat1() { assert_eq!(text_size(obj), Bytes(expected.len() as u32)); // Generate blob - let text_blob = blob_of_text(obj); + let text_blob = blob_of_text(mem, obj); // Check number of characters in blob assert_eq!(text_len(text_blob), expected.chars().count() as u32); @@ -98,66 +123,65 @@ 
unsafe fn concat1() { assert_eq!(text_size(text_blob), Bytes(expected.len() as u32)); // Check blob iteration - assert_eq!( - TextIter::from_text(blob_of_text(obj)).collect::(), - expected - ); + let blob = blob_of_text(mem, obj); + assert_eq!(TextIter::from_text(mem, blob).collect::(), expected); // Check blob-concat comparison assert_eq!(text_compare(text_blob, obj), 0); // Check concat iteration - assert_eq!(TextIter::from_text(obj).collect::(), expected); + assert_eq!(TextIter::from_text(mem, obj).collect::(), expected); } -fn concat_prop(strs: Vec) -> TestResult { +fn concat_prop(mem: &mut M, strs: Vec) -> TestCaseResult { unsafe { - let mut obj = text_of_str(""); + let mut obj = text_of_str(mem, ""); for str in &strs { - let str_obj = text_of_str(str); - obj = text_concat(obj, str_obj); + let str_obj = text_of_str(mem, str); + obj = text_concat(mem, obj, str_obj); } let expected = strs.concat(); // Check number of characters if text_len(obj) != expected.chars().count() as u32 { - return TestResult::error("text_len"); + return Err(TestCaseError::Fail("text_len".into())); } // Check text size in bytes if text_size(obj) != Bytes(expected.len() as u32) { - return TestResult::error("text_size"); + return Err(TestCaseError::Fail("text_size".into())); } // Generate blob - let text_blob = blob_of_text(obj); + let text_blob = blob_of_text(mem, obj); // Check number of characters in blob if text_len(text_blob) != expected.chars().count() as u32 { - return TestResult::error("blob text_len"); + return Err(TestCaseError::Fail("blob text_len".into())); } // Check blob size in bytes if text_size(text_blob) != Bytes(expected.len() as u32) { - return TestResult::error("blob text_size"); + return Err(TestCaseError::Fail("blob text_size".into())); } // Check blob iteration - if TextIter::from_text(blob_of_text(obj)).collect::() != expected { - return TestResult::error("blob_of_text iteration"); + let blob = blob_of_text(mem, obj); + if TextIter::from_text(mem, blob).collect::() != expected { + return Err(TestCaseError::Fail("blob_of_text iteration".into())); } // Check blob-concat comparison if text_compare(text_blob, obj) != 0 { - return TestResult::error("text_compare of blob and text"); + return Err(TestCaseError::Fail("text_compare of blob and text".into())); } // Check concat iteration - if TextIter::from_text(obj).collect::() != expected { - return TestResult::error("iteration"); + if TextIter::from_text(mem, obj).collect::() != expected { + return Err(TestCaseError::Fail("iteration".into())); } - TestResult::passed() + Ok(()) } } diff --git a/rts/motoko-rts/Cargo.lock b/rts/motoko-rts/Cargo.lock index c076980202a..9b2cb23c250 100644 --- a/rts/motoko-rts/Cargo.lock +++ b/rts/motoko-rts/Cargo.lock @@ -18,4 +18,49 @@ version = "0.1.0" dependencies = [ "compiler_builtins", "libc", + "motoko-rts-macros", ] + +[[package]] +name = "motoko-rts-macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "syn" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "unicode-xid" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" diff --git a/rts/motoko-rts/Cargo.toml b/rts/motoko-rts/Cargo.toml index 0b39bd7945d..4f07d044f69 100644 --- a/rts/motoko-rts/Cargo.toml +++ b/rts/motoko-rts/Cargo.toml @@ -9,18 +9,18 @@ edition = "2018" crate-type = ["staticlib"] [features] -default = ["panic_handler", "gc"] +# This file is used to build the RTS to be linked with moc-generated code, so +# we enable the "ic" feature. `native/Cargo.toml` doesn't have this feature and +# is used in RTS tests. +default = ["ic"] -# Enables garbage collector and bump allocator. Without this the GC module is -# disabled (not compiled) and allocation routines use `malloc`. -gc = [] - -# Defines a panic handler. Required for generating `staticlib` as static -# libraries need to be self-contained. -panic_handler = [] +# This feature is used to enable stuff needed for the RTS linked with +# moc-generated code, but not when testing the RTS +ic = [] [dependencies] libc = { version = "0.2.81", default_features = false } +motoko-rts-macros = { path = "../motoko-rts-macros" } # Added here so that it ends up in Cargo.lock, so that nix will pre-fetch it [dependencies.compiler_builtins] diff --git a/rts/motoko-rts/native/Cargo.toml b/rts/motoko-rts/native/Cargo.toml index a25a85d332f..788ac1c4efb 100644 --- a/rts/motoko-rts/native/Cargo.toml +++ b/rts/motoko-rts/native/Cargo.toml @@ -10,6 +10,7 @@ path = "../src/lib.rs" [dependencies] libc = { version = "0.2.73", default_features = false } +motoko-rts-macros = { path = "../../motoko-rts-macros" } [dependencies.compiler_builtins] version = "0.1.39" diff --git a/rts/motoko-rts/src/alloc.rs b/rts/motoko-rts/src/alloc.rs deleted file mode 100644 index 3f8595b7230..00000000000 --- a/rts/motoko-rts/src/alloc.rs +++ /dev/null @@ -1,44 +0,0 @@ -#[cfg(feature = "gc")] -#[path = "alloc/gc.rs"] -pub(crate) mod alloc_impl; - -#[cfg(not(feature = "gc"))] -#[path = "alloc/nogc.rs"] -mod alloc_impl; - -pub use alloc_impl::alloc_words; - -#[cfg(feature = "gc")] -pub(crate) use alloc_impl::grow_memory; - -use crate::constants::WASM_HEAP_SIZE; -use crate::rts_trap_with; -use crate::types::{size_of, Array, Blob, Bytes, SkewedPtr, Words, TAG_ARRAY, TAG_BLOB}; - -#[no_mangle] -pub unsafe extern "C" fn alloc_array(len: u32) -> SkewedPtr { - // Array payload should not be larger than half of the memory - if Words(len) > WASM_HEAP_SIZE / 2 { - rts_trap_with("Array allocation too large"); - } - - let skewed_ptr = alloc_words(size_of::() + Words(len)); - - let ptr: *mut Array = skewed_ptr.unskew() as *mut Array; - (*ptr).header.tag = TAG_ARRAY; - (*ptr).len = len; - - skewed_ptr -} - -#[no_mangle] -pub(crate) unsafe extern "C" fn alloc_blob(size: Bytes) -> SkewedPtr { - // NOTE: We round the size up to the next word and allocates words, but we initialize blob - // length as `size` instead of `round_up_to_word(size)`. This is fine as as GC knows that we - // can only allocate words and looks for objects in word boundaries. 
- let ptr = alloc_words(size_of::() + size.to_words()); - let blob = ptr.unskew() as *mut Blob; - (*blob).header.tag = TAG_BLOB; - (*blob).len = size; - ptr -} diff --git a/rts/motoko-rts/src/alloc/gc.rs b/rts/motoko-rts/src/alloc/gc.rs deleted file mode 100644 index 55639784009..00000000000 --- a/rts/motoko-rts/src/alloc/gc.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Implements allocation routines used by the generated code and the GC. - -use core::arch::wasm32; - -use crate::constants::WASM_PAGE_SIZE; -use crate::gc; -use crate::rts_trap_with; -use crate::types::{skew, Bytes, SkewedPtr, Words}; - -#[no_mangle] -pub unsafe extern "C" fn alloc_words(n: Words) -> SkewedPtr { - let bytes = n.to_bytes(); - // Update ALLOCATED - gc::ALLOCATED += Bytes(bytes.0 as u64); - - // Update heap pointer - let old_hp = gc::HP; - let new_hp = old_hp + bytes.0; - gc::HP = new_hp; - - // Grow memory if needed - grow_memory(new_hp as usize); - - skew(old_hp as usize) -} - -/// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated. -pub(crate) unsafe fn grow_memory(ptr: usize) { - let page_size = u64::from(WASM_PAGE_SIZE.0); - let total_pages_needed = (((ptr as u64) + page_size - 1) / page_size) as usize; - let current_pages = wasm32::memory_size(0); - if total_pages_needed > current_pages { - if wasm32::memory_grow(0, total_pages_needed - current_pages) == core::usize::MAX { - rts_trap_with("Cannot grow memory"); - } - } -} diff --git a/rts/motoko-rts/src/alloc/nogc.rs b/rts/motoko-rts/src/alloc/nogc.rs deleted file mode 100644 index c109820e24f..00000000000 --- a/rts/motoko-rts/src/alloc/nogc.rs +++ /dev/null @@ -1,6 +0,0 @@ -use crate::types::{skew, SkewedPtr, Words}; - -#[no_mangle] -pub unsafe extern "C" fn alloc_words(n: Words) -> SkewedPtr { - skew(libc::malloc(n.to_bytes().0 as usize) as usize) -} diff --git a/rts/motoko-rts/src/bigint.rs b/rts/motoko-rts/src/bigint.rs index 9015b4b32a2..413d59540d7 100644 --- a/rts/motoko-rts/src/bigint.rs +++ b/rts/motoko-rts/src/bigint.rs @@ -31,15 +31,16 @@ This scheme makes the following assumptions: - libtommath uses mp_calloc() and mp_realloc() _only_ to allocate the `mp_digit *` array. */ -use crate::alloc::alloc_words; use crate::buf::{read_byte, Buf}; -use crate::mem::memcpy_bytes; +use crate::mem_utils::memcpy_bytes; +use crate::memory::Memory; +use crate::tommath_bindings::*; use crate::types::{size_of, skew, BigInt, Bytes, SkewedPtr, TAG_BIGINT}; -use crate::{rts_trap, tommath_bindings::*}; +use motoko_rts_macros::ic_mem_fn; -unsafe fn mp_alloc(size: Bytes) -> *mut u8 { - let ptr = alloc_words(size_of::() + size.to_words()); +unsafe fn mp_alloc(mem: &mut M, size: Bytes) -> *mut u8 { + let ptr = mem.alloc_words(size_of::() + size.to_words()); let blob = ptr.unskew() as *mut BigInt; (*blob).header.tag = TAG_BIGINT; // libtommath stores the size of the object in alloc @@ -49,15 +50,19 @@ unsafe fn mp_alloc(size: Bytes) -> *mut u8 { blob.payload_addr() as *mut u8 } -#[no_mangle] -unsafe extern "C" fn mp_calloc(n_elems: usize, elem_size: Bytes) -> *mut libc::c_void { +#[ic_mem_fn] +pub unsafe fn mp_calloc( + mem: &mut M, + n_elems: usize, + elem_size: Bytes, +) -> *mut libc::c_void { debug_assert_eq!(elem_size.0, core::mem::size_of::()); // Overflow check for the following multiplication if n_elems > 1 << 30 { bigint_trap(); } let size = Bytes((n_elems * elem_size.0) as u32); - let payload = mp_alloc(size) as *mut u32; + let payload = mp_alloc(mem, size) as *mut u32; // NB. 
alloc_bytes rounds up to words so we do the same here to set the whole buffer for i in 0..size.to_words().0 { @@ -67,8 +72,9 @@ unsafe extern "C" fn mp_calloc(n_elems: usize, elem_size: Bytes) -> *mut payload as *mut _ } -#[no_mangle] -unsafe extern "C" fn mp_realloc( +#[ic_mem_fn] +pub unsafe fn mp_realloc( + mem: &mut M, ptr: *mut libc::c_void, old_size: Bytes, new_size: Bytes, @@ -79,7 +85,7 @@ unsafe extern "C" fn mp_realloc( debug_assert_eq!(bigint.len(), old_size); if new_size > bigint.len() { - let new_ptr = mp_alloc(new_size); + let new_ptr = mp_alloc(mem, new_size); memcpy_bytes(new_ptr as usize, ptr as usize, old_size); new_ptr as *mut _ } else if new_size == bigint.len() { @@ -92,7 +98,7 @@ unsafe extern "C" fn mp_realloc( } #[no_mangle] -unsafe extern "C" fn mp_free(_ptr: *mut libc::c_void, _size: u32) {} +pub unsafe extern "C" fn mp_free(_ptr: *mut libc::c_void, _size: u32) {} /* Note on libtommath error handling @@ -128,6 +134,7 @@ unsafe fn mp_get_u32(p: *const mp_int) -> u32 { mp_get_i32(p) as u32 } +#[cfg(feature = "ic")] unsafe fn mp_get_u64(p: *const mp_int) -> u64 { mp_get_i64(p) as u64 } @@ -167,6 +174,7 @@ pub unsafe extern "C" fn bigint_of_word32(w: u32) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_of_int32(j: i32) -> SkewedPtr { let mut i = tmp_bigint(); @@ -174,6 +182,7 @@ unsafe extern "C" fn bigint_of_int32(j: i32) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_to_word32_wrap(p: SkewedPtr) -> u32 { mp_get_u32(p.as_bigint().mp_int_ptr()) @@ -191,22 +200,25 @@ unsafe extern "C" fn bigint_to_word32_trap(p: SkewedPtr) -> u32 { } // a : BigInt, msg : Blob +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_to_word32_trap_with(p: SkewedPtr, msg: SkewedPtr) -> u32 { let mp_int = p.as_bigint().mp_int_ptr(); if mp_isneg(mp_int) || mp_count_bits(mp_int) > 32 { - rts_trap(msg.as_blob().payload_addr(), msg.as_blob().len()); + crate::rts_trap(msg.as_blob().payload_addr(), msg.as_blob().len()); } mp_get_u32(mp_int) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_to_word64_wrap(p: SkewedPtr) -> u64 { mp_get_u64(p.as_bigint().mp_int_ptr()) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_to_word64_trap(p: SkewedPtr) -> u64 { let mp_int = p.as_bigint().mp_int_ptr(); @@ -218,6 +230,7 @@ unsafe extern "C" fn bigint_to_word64_trap(p: SkewedPtr) -> u64 { mp_get_u64(mp_int) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_of_word64(w: u64) -> SkewedPtr { let mut i = tmp_bigint(); @@ -225,6 +238,7 @@ unsafe extern "C" fn bigint_of_word64(w: u64) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_of_int64(j: i64) -> SkewedPtr { let mut i = tmp_bigint(); @@ -237,21 +251,25 @@ pub unsafe extern "C" fn bigint_eq(a: SkewedPtr, b: SkewedPtr) -> bool { mp_cmp(a.as_bigint().mp_int_ptr(), b.as_bigint().mp_int_ptr()) == 0 } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_lt(a: SkewedPtr, b: SkewedPtr) -> bool { mp_cmp(a.as_bigint().mp_int_ptr(), b.as_bigint().mp_int_ptr()) < 0 } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_gt(a: SkewedPtr, b: SkewedPtr) -> bool { mp_cmp(a.as_bigint().mp_int_ptr(), b.as_bigint().mp_int_ptr()) > 0 } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_le(a: SkewedPtr, b: SkewedPtr) -> bool { mp_cmp(a.as_bigint().mp_int_ptr(), b.as_bigint().mp_int_ptr()) <= 0 } +#[cfg(feature = "ic")] 
#[no_mangle] unsafe extern "C" fn bigint_ge(a: SkewedPtr, b: SkewedPtr) -> bool { mp_cmp(a.as_bigint().mp_int_ptr(), b.as_bigint().mp_int_ptr()) >= 0 @@ -298,6 +316,7 @@ pub unsafe extern "C" fn bigint_pow(a: SkewedPtr, b: SkewedPtr) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_div(a: SkewedPtr, b: SkewedPtr) -> SkewedPtr { let mut i = tmp_bigint(); @@ -310,6 +329,7 @@ unsafe extern "C" fn bigint_div(a: SkewedPtr, b: SkewedPtr) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_rem(a: SkewedPtr, b: SkewedPtr) -> SkewedPtr { let mut i = tmp_bigint(); @@ -329,6 +349,7 @@ pub unsafe extern "C" fn bigint_neg(a: SkewedPtr) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_abs(a: SkewedPtr) -> SkewedPtr { let mut i = tmp_bigint(); @@ -336,11 +357,13 @@ unsafe extern "C" fn bigint_abs(a: SkewedPtr) -> SkewedPtr { persist_bigint(i) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_isneg(a: SkewedPtr) -> bool { mp_isneg(a.as_bigint().mp_int_ptr()) } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn bigint_lsh(a: SkewedPtr, b: i32) -> SkewedPtr { let mut i = tmp_bigint(); diff --git a/rts/motoko-rts/src/blob_iter.rs b/rts/motoko-rts/src/blob_iter.rs index 666c0e609ba..17f9426d55c 100644 --- a/rts/motoko-rts/src/blob_iter.rs +++ b/rts/motoko-rts/src/blob_iter.rs @@ -1,13 +1,18 @@ -use crate::alloc::alloc_words; +#[cfg(feature = "ic")] use crate::types::{size_of, Array, Bytes, SkewedPtr, Words, TAG_ARRAY}; +use motoko_rts_macros::ic_mem_fn; + +#[cfg(feature = "ic")] const ITER_BLOB_IDX: u32 = 0; + +#[cfg(feature = "ic")] const ITER_POS_IDX: u32 = 1; /// Returns iterator for the given blob -#[no_mangle] -unsafe extern "C" fn blob_iter(blob: SkewedPtr) -> SkewedPtr { - let iter_ptr = alloc_words(size_of::() + Words(2)); +#[ic_mem_fn(ic_only)] +unsafe fn blob_iter(mem: &mut M, blob: SkewedPtr) -> SkewedPtr { + let iter_ptr = mem.alloc_words(size_of::() + Words(2)); let iter_array = iter_ptr.unskew() as *mut Array; (*iter_array).header.tag = TAG_ARRAY; @@ -20,6 +25,7 @@ unsafe extern "C" fn blob_iter(blob: SkewedPtr) -> SkewedPtr { } /// Returns whether the iterator is finished +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn blob_iter_done(iter: SkewedPtr) -> u32 { let iter_array = iter.as_array(); @@ -31,6 +37,7 @@ unsafe extern "C" fn blob_iter_done(iter: SkewedPtr) -> u32 { } /// Reads next byte, advances the iterator +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn blob_iter_next(iter: SkewedPtr) -> u32 { let iter_array = iter.as_array(); diff --git a/rts/motoko-rts/src/buf.rs b/rts/motoko-rts/src/buf.rs index 7d258b6a51a..c0867ac6d0d 100644 --- a/rts/motoko-rts/src/buf.rs +++ b/rts/motoko-rts/src/buf.rs @@ -1,6 +1,6 @@ //! 
This module implements a simple buffer to be used by the compiler (in generated code) -use crate::idl::idl_trap_with; +use crate::idl_trap_with; #[repr(packed)] pub struct Buf { @@ -11,6 +11,7 @@ pub struct Buf { } impl Buf { + #[cfg(feature = "ic")] pub(crate) unsafe fn advance(self: *mut Self, n: u32) { advance(self, n) } @@ -28,6 +29,7 @@ pub(crate) unsafe fn read_byte(buf: *mut Buf) -> u8 { byte } +#[cfg(feature = "ic")] /// Read a little-endian word pub(crate) unsafe fn read_word(buf: *mut Buf) -> u32 { if (*buf).ptr.add(3) >= (*buf).end { @@ -42,6 +44,7 @@ pub(crate) unsafe fn read_word(buf: *mut Buf) -> u32 { word } +#[cfg(feature = "ic")] unsafe fn advance(buf: *mut Buf, n: u32) { if (*buf).ptr.add(n as usize) > (*buf).end { idl_trap_with("advance out of buffer"); @@ -51,6 +54,7 @@ unsafe fn advance(buf: *mut Buf, n: u32) { } /// Can also be used for sleb +#[cfg(feature = "ic")] #[no_mangle] pub(crate) unsafe extern "C" fn skip_leb128(buf: *mut Buf) { loop { diff --git a/rts/motoko-rts/src/char.rs b/rts/motoko-rts/src/char.rs index 2443b671dbd..61a92071243 100644 --- a/rts/motoko-rts/src/char.rs +++ b/rts/motoko-rts/src/char.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn char_to_upper(c: u32) -> u32 { let mut upper_chars = core::char::from_u32_unchecked(c).to_uppercase(); @@ -8,6 +9,7 @@ unsafe extern "C" fn char_to_upper(c: u32) -> u32 { } } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn char_to_lower(c: u32) -> u32 { let mut lower_chars = core::char::from_u32_unchecked(c).to_lowercase(); @@ -18,21 +20,25 @@ unsafe extern "C" fn char_to_lower(c: u32) -> u32 { } } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn char_is_whitespace(c: u32) -> u32 { core::char::from_u32_unchecked(c).is_whitespace().into() } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn char_is_uppercase(c: u32) -> u32 { core::char::from_u32_unchecked(c).is_uppercase().into() } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn char_is_lowercase(c: u32) -> u32 { core::char::from_u32_unchecked(c).is_lowercase().into() } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn char_is_alphabetic(c: u32) -> u32 { core::char::from_u32_unchecked(c).is_alphabetic().into() diff --git a/rts/motoko-rts/src/closure_table.rs b/rts/motoko-rts/src/closure_table.rs index 26eac3497c1..d6257bcb746 100644 --- a/rts/motoko-rts/src/closure_table.rs +++ b/rts/motoko-rts/src/closure_table.rs @@ -19,10 +19,12 @@ //! the free list. Since all indices are relative to the payload begin, they stay valid. We never //! shrink the table. -use crate::alloc::alloc_array; +use crate::memory::{alloc_array, Memory}; use crate::rts_trap_with; use crate::types::SkewedPtr; +use motoko_rts_macros::ic_mem_fn; + const INITIAL_SIZE: u32 = 256; // Skewed pointer to the `Array` object. 
This needs to be a skewed pointer to be able to pass its @@ -35,8 +37,8 @@ static mut N_CLOSURES: u32 = 0; // Next free slot static mut FREE_SLOT: u32 = 0; -unsafe fn crate_closure_table() { - TABLE = alloc_array(INITIAL_SIZE); +unsafe fn crate_closure_table(mem: &mut M) { + TABLE = alloc_array(mem, INITIAL_SIZE); FREE_SLOT = 0; N_CLOSURES = 0; @@ -46,7 +48,7 @@ unsafe fn crate_closure_table() { } } -unsafe fn double_closure_table() { +unsafe fn double_closure_table(mem: &mut M) { let old_array = TABLE.as_array(); let old_size = old_array.len(); @@ -54,7 +56,7 @@ unsafe fn double_closure_table() { let new_size = old_size * 2; - TABLE = alloc_array(new_size); + TABLE = alloc_array(mem, new_size); let new_array = TABLE.as_array(); for i in 0..old_size { @@ -66,14 +68,14 @@ unsafe fn double_closure_table() { } } -#[no_mangle] -pub unsafe extern "C" fn remember_closure(ptr: SkewedPtr) -> u32 { +#[ic_mem_fn] +pub unsafe fn remember_closure(mem: &mut M, ptr: SkewedPtr) -> u32 { if TABLE.0 == 0 { - crate_closure_table(); + crate_closure_table(mem); } if FREE_SLOT == TABLE.as_array().len() { - double_closure_table(); + double_closure_table(mem); } // Just as a sanity check make sure the ptr is really skewed @@ -121,10 +123,12 @@ pub unsafe extern "C" fn closure_count() -> u32 { N_CLOSURES } +#[cfg(feature = "ic")] pub(crate) unsafe fn closure_table_loc() -> *mut SkewedPtr { &mut TABLE } +#[cfg(feature = "ic")] #[no_mangle] unsafe extern "C" fn closure_table_size() -> u32 { if TABLE.0 == 0 { diff --git a/rts/motoko-rts/src/debug.rs b/rts/motoko-rts/src/debug.rs index d3b359849ca..8c395af836d 100644 --- a/rts/motoko-rts/src/debug.rs +++ b/rts/motoko-rts/src/debug.rs @@ -1,30 +1,44 @@ #![allow(dead_code)] -use crate::closure_table; use crate::print::*; use crate::types::*; -#[cfg(feature = "gc")] -use crate::gc; - use core::fmt::Write; -#[cfg(feature = "gc")] -pub(crate) unsafe fn dump_heap() { - print_closure_table(); - print_static_roots(); - print_heap(); +/// Print an object. The argument can be a skewed pointer to a boxed object, or a tagged scalar. 
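The `debug.rs` changes that follow rework the heap dumpers to take the heap layout (heap base, heap pointer, static roots, closure-table location) as explicit arguments instead of reading IC globals, and they keep formatting into a fixed stack buffer (`WriteBuf`) via `core::fmt::Write`, since the RTS is `no_std`. Below is a minimal, self-contained sketch of that fixed-buffer printing pattern; `StackBuf` here is a hypothetical stand-in, not the RTS `WriteBuf`.

```
use core::fmt::{self, Write};

/// Hypothetical stand-in for the RTS `WriteBuf`: formats into a fixed
/// stack buffer and silently truncates, so no heap allocation is needed.
struct StackBuf<'a> {
    buf: &'a mut [u8],
    len: usize,
}

impl<'a> Write for StackBuf<'a> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        let bytes = s.as_bytes();
        let n = bytes.len().min(self.buf.len() - self.len);
        self.buf[self.len..self.len + n].copy_from_slice(&bytes[..n]);
        self.len += n;
        Ok(())
    }
}

fn main() {
    let mut storage = [0u8; 1000];
    let mut out = StackBuf { buf: &mut storage, len: 0 };
    let _ = write!(out, "{}: {:#x} --> ", 3, 0x1234usize);
    println!("{}", core::str::from_utf8(&out.buf[..out.len]).unwrap());
}
```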
+#[cfg(feature = "ic")] +#[no_mangle] +unsafe extern "C" fn print_closure(p: usize) { + let mut buf = [0u8; 1000]; + let mut write_buf = WriteBuf::new(&mut buf); + + if SkewedPtr(p).is_tagged_scalar() { + print_tagged_scalar(&mut write_buf, p); + } else { + print_boxed_object(&mut write_buf, SkewedPtr(p).unskew()); + } + + print(&write_buf); } -pub(crate) unsafe fn print_closure_table() { - let closure_tbl = closure_table::closure_table_loc(); +pub unsafe fn dump_heap( + heap_base: u32, + hp: u32, + static_roots: SkewedPtr, + closure_table_loc: *mut SkewedPtr, +) { + print_closure_table(closure_table_loc); + print_static_roots(static_roots); + print_heap(heap_base, hp); +} - if (*closure_tbl).0 == 0 { +pub(crate) unsafe fn print_closure_table(closure_tbl_loc: *mut SkewedPtr) { + if (*closure_tbl_loc).0 == 0 { println!(100, "Closure table not initialized"); return; } - let arr = (*closure_tbl).unskew() as *mut Array; + let arr = (*closure_tbl_loc).unskew() as *mut Array; let len = (*arr).len; if len == 0 { @@ -40,7 +54,7 @@ pub(crate) unsafe fn print_closure_table() { for i in 0..len { let elem = arr.get(i); if !elem.is_tagged_scalar() { - let _ = write!(&mut write_buf, "{}: {:#x} --> ", i, elem.unskew()); + let _ = write!(&mut write_buf, "{}: ", i); print_boxed_object(&mut write_buf, elem.unskew()); print(&write_buf); write_buf.reset(); @@ -49,9 +63,10 @@ pub(crate) unsafe fn print_closure_table() { println!(50, "End of closure table"); } -#[cfg(feature = "gc")] -pub(crate) unsafe fn print_static_roots() { - let static_roots = gc::get_static_roots().unskew() as *mut Array; +pub(crate) unsafe fn print_static_roots(static_roots: SkewedPtr) { + let static_roots = static_roots.unskew() as *mut Array; + println!(100, "static roots at {:#x}", static_roots as usize); + let len = (*static_roots).len; if len == 0 { @@ -64,10 +79,11 @@ pub(crate) unsafe fn print_static_roots() { let mut buf = [0u8; 1000]; let mut write_buf = WriteBuf::new(&mut buf); + let payload_addr = static_roots.payload_addr(); for i in 0..len { - let obj = static_roots.get(i); - let _ = write!(&mut write_buf, "{}: {:#x} --> ", i, obj.unskew()); - print_boxed_object(&mut write_buf, obj.unskew()); + let field_addr = payload_addr.add(i as usize); + let _ = write!(&mut write_buf, "{}: {:#x} --> ", i, field_addr as usize); + print_boxed_object(&mut write_buf, (*field_addr).unskew()); print(&write_buf); write_buf.reset(); } @@ -75,31 +91,21 @@ pub(crate) unsafe fn print_static_roots() { println!(50, "End of static roots"); } -#[cfg(feature = "gc")] -unsafe fn print_heap() { - let heap_begin = gc::get_heap_base(); - let heap_end = gc::HP; - +unsafe fn print_heap(heap_start: u32, heap_end: u32) { println!( 200, - "Heap begin={:#x}, heap end={:#x}, size={} bytes", - heap_begin, + "Heap start={:#x}, heap end={:#x}, size={} bytes", + heap_start, heap_end, - heap_end - heap_begin + heap_end - heap_start ); let mut buf = [0u8; 1000]; let mut write_buf = WriteBuf::new(&mut buf); - let mut p = heap_begin; + let mut p = heap_start; let mut i: Words = Words(0); while p < heap_end { - if *(p as *const u8) == 0 { - p += 1; - continue; - } - - let _ = write!(&mut write_buf, "{}: ", i.0); print_boxed_object(&mut write_buf, p as usize); print(&write_buf); write_buf.reset(); diff --git a/rts/motoko-rts/src/float.rs b/rts/motoko-rts/src/float.rs index 3b50746dfdd..7fdd35af350 100644 --- a/rts/motoko-rts/src/float.rs +++ b/rts/motoko-rts/src/float.rs @@ -1,9 +1,12 @@ +use crate::memory::Memory; use crate::text::text_of_ptr_size; use 
crate::types::{Bytes, SkewedPtr}; +use motoko_rts_macros::ic_mem_fn; + // The meaning of the `mode` parameter is documented in motoko-base, function Float.format() -#[no_mangle] -unsafe extern "C" fn float_fmt(a: f64, prec: u32, mode: u32) -> SkewedPtr { +#[ic_mem_fn] +unsafe fn float_fmt(mem: &mut M, a: f64, prec: u32, mode: u32) -> SkewedPtr { // prec and mode are tagged (TODO (osa): what tag???) let mode = mode >> 24; let prec = core::cmp::min(prec >> 24, 100) as usize; @@ -30,5 +33,5 @@ unsafe extern "C" fn float_fmt(a: f64, prec: u32, mode: u32) -> SkewedPtr { assert!(n_written > 0); - text_of_ptr_size(buf.as_ptr(), Bytes(n_written as u32)) + text_of_ptr_size(mem, buf.as_ptr(), Bytes(n_written as u32)) } diff --git a/rts/motoko-rts/src/gc.rs b/rts/motoko-rts/src/gc.rs index 2ac5681d680..434c6b082b6 100644 --- a/rts/motoko-rts/src/gc.rs +++ b/rts/motoko-rts/src/gc.rs @@ -1,58 +1,2 @@ -mod copying; -mod mark_compact; - -use crate::types::*; - -extern "C" { - /// Get __heap_base. Provided by the code generator (src/codegen/compile.ml). - pub(crate) fn get_heap_base() -> u32; - - /// Get pointer to the static memory with an array to the static roots. Provided by the - /// generated code. - pub(crate) fn get_static_roots() -> SkewedPtr; -} - -/// Maximum live data retained in a GC. -static mut MAX_LIVE: Bytes = Bytes(0); - -/// Amount of garbage collected so far. -static mut RECLAIMED: Bytes = Bytes(0); - -/// Counter for total allocations -pub(crate) static mut ALLOCATED: Bytes = Bytes(0); - -/// Heap pointer -pub(crate) static mut HP: u32 = 0; - -#[no_mangle] -unsafe extern "C" fn init() { - HP = get_heap_base() as u32; -} - -unsafe fn note_live_size(live: Bytes) { - MAX_LIVE = ::core::cmp::max(MAX_LIVE, live); -} - -#[no_mangle] -unsafe extern "C" fn get_max_live_size() -> Bytes { - MAX_LIVE -} - -unsafe fn note_reclaimed(reclaimed: Bytes) { - RECLAIMED += Bytes(reclaimed.0 as u64); -} - -#[no_mangle] -unsafe extern "C" fn get_reclaimed() -> Bytes { - RECLAIMED -} - -#[no_mangle] -unsafe extern "C" fn get_total_allocations() -> Bytes { - ALLOCATED -} - -#[no_mangle] -unsafe extern "C" fn get_heap_size() -> Bytes { - Bytes(HP - get_heap_base()) -} +pub mod copying; +pub mod mark_compact; diff --git a/rts/motoko-rts/src/gc/copying.rs b/rts/motoko-rts/src/gc/copying.rs index c89e9ce0837..84312f8ec15 100644 --- a/rts/motoko-rts/src/gc/copying.rs +++ b/rts/motoko-rts/src/gc/copying.rs @@ -1,12 +1,93 @@ -use crate::alloc; -use crate::closure_table::closure_table_loc; -use crate::mem::{memcpy_bytes, memcpy_words}; +use crate::mem_utils::{memcpy_bytes, memcpy_words}; +use crate::memory::Memory; use crate::types::*; -use super::{get_heap_base, get_static_roots, note_live_size, note_reclaimed, HP}; +use motoko_rts_macros::ic_mem_fn; + +#[ic_mem_fn(ic_only)] +unsafe fn copying_gc(mem: &mut M) { + copying_gc_internal( + mem, + crate::memory::ic::get_heap_base(), + // get_hp + || crate::memory::ic::HP as usize, + // set_hp + |hp| crate::memory::ic::HP = hp, + crate::memory::ic::get_static_roots(), + crate::closure_table::closure_table_loc(), + // note_live_size + |live_size| { + crate::memory::ic::MAX_LIVE = ::core::cmp::max(crate::memory::ic::MAX_LIVE, live_size) + }, + // note_reclaimed + |reclaimed| crate::memory::ic::RECLAIMED += Bytes(reclaimed.0 as u64), + ); +} + +pub unsafe fn copying_gc_internal< + M: Memory, + GetHp: Fn() -> usize, + SetHp: FnMut(u32), + NoteLiveSize: Fn(Bytes), + NoteReclaimed: Fn(Bytes), +>( + mem: &mut M, + heap_base: u32, + get_hp: GetHp, + mut set_hp: SetHp, + 
static_roots: SkewedPtr, + closure_table_loc: *mut SkewedPtr, + note_live_size: NoteLiveSize, + note_reclaimed: NoteReclaimed, +) { + let begin_from_space = heap_base as usize; + let end_from_space = get_hp(); + let begin_to_space = end_from_space; + + let static_roots = static_roots.as_array(); + + // Evacuate roots + evac_static_roots(mem, begin_from_space, begin_to_space, static_roots); + + if (*closure_table_loc).unskew() >= begin_from_space { + evac( + mem, + begin_from_space, + begin_to_space, + closure_table_loc as usize, + ); + } + + // Scavenge to-space + let mut p = begin_to_space; + while p < get_hp() { + let size = object_size(p); + scav(mem, begin_from_space, begin_to_space, p); + p += size.to_bytes().0 as usize; + } + + let end_to_space = get_hp(); -/// Evacuate (copy) an object in from-space to to-space, update end_to_space. If the object was -/// already evacuated end_to_space is not changed. + // Note the stats + let new_live_size = end_to_space - begin_to_space; + note_live_size(Bytes(new_live_size as u32)); + + let reclaimed = (end_from_space - begin_from_space) - (end_to_space - begin_to_space); + note_reclaimed(Bytes(reclaimed as u32)); + + // Copy to-space to the beginning of from-space + memcpy_bytes( + begin_from_space, + begin_to_space, + Bytes((end_to_space - begin_to_space) as u32), + ); + + // Reset the heap pointer + let new_hp = begin_from_space + (end_to_space - begin_to_space); + set_hp(new_hp as u32); +} + +/// Evacuate (copy) an object in from-space to to-space. /// /// Arguments: /// @@ -18,18 +99,16 @@ use super::{get_heap_base, get_static_roots, note_live_size, note_reclaimed, HP} /// /// - After all objects are evacuated we move to-space to from-space, to be able to do that the /// pointers need to point to their (eventual) locations in from-space, which is calculated with -/// `end_to_space - begin_to_space + begin_from_space`. +/// `address_in_to_space - begin_to_space + begin_from_space`. /// /// - begin_to_space: Where to-space starts. See above for how this is used. /// -/// - end_to_space: Where the object in `ptr_loc` will be copied. -/// /// - ptr_loc: Location of the object to evacuate, e.g. an object field address. 
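The updated comment is the key to the copying collector's address arithmetic: objects are first copied into to-space, but to-space is later `memcpy`'d back to the start of from-space, so the forwarding pointer written into an evacuated object must already point at the object's eventual from-space location, `address_in_to_space - begin_to_space + begin_from_space`. A small worked example of that relocation (the concrete addresses are made up purely for illustration):

```
/// Where an object copied to `addr_in_to_space` will live after to-space is
/// moved back to the start of from-space.
fn final_location(addr_in_to_space: usize, begin_to_space: usize, begin_from_space: usize) -> usize {
    (addr_in_to_space - begin_to_space) + begin_from_space
}

fn main() {
    let begin_from_space = 0x1_0000; // heap base
    let begin_to_space = 0x4_0000;   // old heap pointer; to-space starts here
    let copied_to = 0x4_0120;        // address alloc_words returned in to-space

    let fwd = final_location(copied_to, begin_to_space, begin_from_space);
    assert_eq!(fwd, 0x1_0120);
    println!("forwarding pointer target: {:#x}", fwd);
}
```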
/// -unsafe fn evac( +unsafe fn evac( + mem: &mut M, begin_from_space: usize, begin_to_space: usize, - end_to_space: &mut usize, ptr_loc: usize, ) { // Field holds a skewed pointer to the object to evacuate @@ -45,16 +124,15 @@ unsafe fn evac( } let obj_size = object_size(obj as usize); - let obj_size_bytes = obj_size.to_bytes(); - // Grow memory if needed - alloc::grow_memory(*end_to_space + obj_size_bytes.0 as usize); + // Allocate space in to-space for the object + let obj_addr = mem.alloc_words(obj_size).unskew() as usize; // Copy object to to-space - memcpy_words(*end_to_space, obj as usize, obj_size); + memcpy_words(obj_addr, obj as usize, obj_size); // Final location of the object after copying to-space back to from-space - let obj_loc = (*end_to_space - begin_to_space) + begin_from_space; + let obj_loc = (obj_addr - begin_to_space) + begin_from_space; // Set forwarding pointer let fwd = obj as *mut FwdPtr; @@ -63,96 +141,28 @@ unsafe fn evac( // Update evacuated field *ptr_loc = skew(obj_loc); - - // Update end of to-space - *end_to_space += obj_size_bytes.0 as usize } -unsafe fn scav( - begin_from_space: usize, - begin_to_space: usize, - end_to_space: &mut usize, - obj: usize, -) { +unsafe fn scav(mem: &mut M, begin_from_space: usize, begin_to_space: usize, obj: usize) { let obj = obj as *mut Obj; crate::visitor::visit_pointer_fields(obj, begin_from_space, |field_addr| { - evac( - begin_from_space, - begin_to_space, - end_to_space, - field_addr as usize, - ); + evac(mem, begin_from_space, begin_to_space, field_addr as usize); }); } // We have a special evacuation routine for "static roots" array: we don't evacuate elements of // "static roots", we just scavenge them. -unsafe fn evac_static_roots( +unsafe fn evac_static_roots( + mem: &mut M, begin_from_space: usize, begin_to_space: usize, - end_to_space: &mut usize, roots: *mut Array, ) { // The array and the objects pointed by the array are all static so we don't evacuate them. We // only evacuate fields of objects in the array. 
for i in 0..roots.len() { let obj = roots.get(i); - scav(begin_from_space, begin_to_space, end_to_space, obj.unskew()); + scav(mem, begin_from_space, begin_to_space, obj.unskew()); } } - -#[no_mangle] -unsafe extern "C" fn copying_gc() { - let begin_from_space = get_heap_base() as usize; - let end_from_space = HP as usize; - let begin_to_space = end_from_space; - let mut end_to_space = begin_to_space; - - let static_roots = get_static_roots().as_array(); - - // Evacuate roots - evac_static_roots( - begin_from_space, - begin_to_space, - &mut end_to_space, - static_roots, - ); - - let closure_table_loc = closure_table_loc(); - if (*closure_table_loc).unskew() >= begin_from_space { - evac( - begin_from_space, - begin_to_space, - &mut end_to_space, - closure_table_loc as usize, - ); - } - - // Scavenge to-space - let mut p = begin_to_space; - while p < end_to_space { - // NB: end_to_space keeps changing within this loop - let size = object_size(p); - scav(begin_from_space, begin_to_space, &mut end_to_space, p); - p += size.to_bytes().0 as usize; - } - - // Note the stats - let new_live_size = end_to_space - begin_to_space; - note_live_size(Bytes(new_live_size as u32)); - - let reclaimed = (end_from_space - begin_from_space) - (end_to_space - begin_to_space); - note_reclaimed(Bytes(reclaimed as u32)); - - // Copy to-space to the beginning of from-space - memcpy_bytes( - begin_from_space, - begin_to_space, - Bytes((end_to_space - begin_to_space) as u32), - ); - - // Reset the heap pointer - let new_hp = begin_from_space + (end_to_space - begin_to_space); - HP = new_hp as u32; -} diff --git a/rts/motoko-rts/src/gc/mark_compact.rs b/rts/motoko-rts/src/gc/mark_compact.rs index e79e4955b95..cdebe6e1844 100644 --- a/rts/motoko-rts/src/gc/mark_compact.rs +++ b/rts/motoko-rts/src/gc/mark_compact.rs @@ -1,47 +1,93 @@ //! Implements "threaded compaction" as described in The Garbage Collection Handbook section 3.3. 
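Both `copying_gc_internal` above and `compacting_gc_internal` below now share the same shape: the collector core is generic over a `Memory` implementation plus small closures for reading/writing the heap pointer and recording statistics, so it never touches the IC globals directly. The `#[ic_mem_fn(ic_only)]` entry points pass thin closures over `memory::ic::HP`, `MAX_LIVE` and `RECLAIMED`, while the test suite can pass closures over state it owns. A stripped-down sketch of that calling convention (the body is a stand-in, not the real collector):

```
use std::cell::Cell;

// Sketch of the closure-parameterised GC entry point: the embedder decides
// where the heap pointer and the statistics live.
fn run_gc<GetHp, SetHp, NoteReclaimed>(get_hp: GetHp, mut set_hp: SetHp, note_reclaimed: NoteReclaimed)
where
    GetHp: Fn() -> usize,
    SetHp: FnMut(u32),
    NoteReclaimed: Fn(usize),
{
    let old_hp = get_hp();
    // ... the real collectors mark/evacuate/compact here ...
    let new_hp = old_hp - 0x100; // stand-in for the post-GC heap pointer
    note_reclaimed(old_hp - new_hp);
    set_hp(new_hp as u32);
}

fn main() {
    let hp = Cell::new(0x8000u32); // plays the role of memory::ic::HP in a test
    run_gc(
        || hp.get() as usize,
        |new_hp| hp.set(new_hp),
        |reclaimed| println!("reclaimed {:#x} bytes", reclaimed),
    );
    println!("heap pointer after GC: {:#x}", hp.get());
}
```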
-use crate::bitmap::{alloc_bitmap, free_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END}; -use crate::closure_table::closure_table_loc; +pub mod bitmap; +pub mod mark_stack; + +use bitmap::{alloc_bitmap, free_bitmap, get_bit, iter_bits, set_bit, BITMAP_ITER_END}; +use mark_stack::{alloc_mark_stack, free_mark_stack, pop_mark_stack}; + use crate::constants::WORD_SIZE; -use crate::mark_stack::{self, alloc_mark_stack, free_mark_stack, pop_mark_stack}; -use crate::mem::memcpy_words; +use crate::mem_utils::memcpy_words; +use crate::memory::Memory; use crate::types::*; use crate::visitor::visit_pointer_fields; -use super::{get_heap_base, get_static_roots, note_live_size, note_reclaimed, HP}; - -#[no_mangle] -unsafe extern "C" fn compacting_gc() { - let old_hp = HP; - let heap_base = get_heap_base(); - - mark_compact(heap_base, old_hp, get_static_roots(), closure_table_loc()); +use motoko_rts_macros::ic_mem_fn; + +#[ic_mem_fn(ic_only)] +unsafe fn compacting_gc(mem: &mut M) { + compacting_gc_internal( + mem, + crate::memory::ic::get_heap_base(), + // get_hp + || crate::memory::ic::HP as usize, + // set_hp + |hp| crate::memory::ic::HP = hp, + crate::memory::ic::get_static_roots(), + crate::closure_table::closure_table_loc(), + // note_live_size + |live_size| { + crate::memory::ic::MAX_LIVE = ::core::cmp::max(crate::memory::ic::MAX_LIVE, live_size) + }, + // note_reclaimed + |reclaimed| crate::memory::ic::RECLAIMED += Bytes(reclaimed.0 as u64), + ); +} - let reclaimed = old_hp - HP; +pub unsafe fn compacting_gc_internal< + M: Memory, + GetHp: Fn() -> usize, + SetHp: Fn(u32), + NoteLiveSize: Fn(Bytes), + NoteReclaimed: Fn(Bytes), +>( + mem: &mut M, + heap_base: u32, + get_hp: GetHp, + set_hp: SetHp, + static_roots: SkewedPtr, + closure_table_loc: *mut SkewedPtr, + note_live_size: NoteLiveSize, + note_reclaimed: NoteReclaimed, +) { + let old_hp = get_hp() as u32; + + mark_compact( + mem, + set_hp, + heap_base, + old_hp, + static_roots, + closure_table_loc, + ); + + let reclaimed = old_hp - (get_hp() as u32); note_reclaimed(Bytes(reclaimed)); - let new_live_size = HP - heap_base; - note_live_size(Bytes(new_live_size)); + let live = get_hp() as u32 - heap_base; + note_live_size(Bytes(live)); } -unsafe fn mark_compact( +unsafe fn mark_compact( + mem: &mut M, + set_hp: SetHp, heap_base: u32, - heap_end: u32, + mem_end: u32, static_roots: SkewedPtr, closure_table_loc: *mut SkewedPtr, ) { - let heap_size = Bytes(heap_end - heap_base); + let mem_size = Bytes(mem_end - heap_base); - alloc_bitmap(heap_size); - alloc_mark_stack(); + alloc_bitmap(mem, mem_size); + alloc_mark_stack(mem); - mark_static_roots(static_roots, heap_base); + mark_static_roots(mem, static_roots, heap_base); if (*closure_table_loc).unskew() >= heap_base as usize { - push_mark_stack(*closure_table_loc, heap_base); + push_mark_stack(mem, *closure_table_loc, heap_base); } - mark_stack(heap_base); + mark_stack(mem, heap_base); thread_roots(static_roots, heap_base); @@ -50,23 +96,23 @@ unsafe fn mark_compact( } update_fwd_refs(heap_base); - update_bwd_refs(heap_base); + update_bwd_refs(set_hp, heap_base); free_mark_stack(); free_bitmap(); } -unsafe fn mark_static_roots(static_roots: SkewedPtr, heap_base: u32) { +unsafe fn mark_static_roots(mem: &mut M, static_roots: SkewedPtr, heap_base: u32) { let root_array = static_roots.as_array(); // Static objects are not in the dynamic heap so don't need marking. 
for i in 0..root_array.len() { let obj = root_array.get(i).unskew() as *mut Obj; - mark_fields(obj, heap_base); + mark_fields(mem, obj, heap_base); } } -unsafe fn push_mark_stack(obj: SkewedPtr, heap_base: u32) { +unsafe fn push_mark_stack(mem: &mut M, obj: SkewedPtr, heap_base: u32) { let obj = obj.unskew() as u32; let obj_idx = (obj - heap_base) / WORD_SIZE; @@ -77,18 +123,18 @@ unsafe fn push_mark_stack(obj: SkewedPtr, heap_base: u32) { } set_bit(obj_idx); - mark_stack::push_mark_stack(obj as usize); + mark_stack::push_mark_stack(mem, obj as usize); } -unsafe fn mark_stack(heap_base: u32) { +unsafe fn mark_stack(mem: &mut M, heap_base: u32) { while let Some(obj) = pop_mark_stack() { - mark_fields(obj as *mut Obj, heap_base); + mark_fields(mem, obj as *mut Obj, heap_base); } } -unsafe fn mark_fields(obj: *mut Obj, heap_base: u32) { +unsafe fn mark_fields(mem: &mut M, obj: *mut Obj, heap_base: u32) { visit_pointer_fields(obj, heap_base as usize, |field_addr| { - push_mark_stack(*field_addr, heap_base); + push_mark_stack(mem, *field_addr, heap_base); }); } @@ -129,7 +175,7 @@ unsafe fn update_fwd_refs(heap_base: u32) { /// Expects all fields to be threaded. Updates backward references and moves objects to their new /// locations. -unsafe fn update_bwd_refs(heap_base: u32) { +unsafe fn update_bwd_refs(set_hp: SetHp, heap_base: u32) { let mut free = heap_base; let mut bitmap_iter = iter_bits(); @@ -151,7 +197,7 @@ unsafe fn update_bwd_refs(heap_base: u32) { bit = bitmap_iter.next(); } - crate::gc::HP = free; + set_hp(free); } unsafe fn thread_obj_fields(obj: *mut Obj, heap_base: u32) { diff --git a/rts/motoko-rts/src/bitmap.rs b/rts/motoko-rts/src/gc/mark_compact/bitmap.rs similarity index 95% rename from rts/motoko-rts/src/bitmap.rs rename to rts/motoko-rts/src/gc/mark_compact/bitmap.rs index 30976da1325..eb0b357b3bc 100644 --- a/rts/motoko-rts/src/bitmap.rs +++ b/rts/motoko-rts/src/gc/mark_compact/bitmap.rs @@ -1,11 +1,11 @@ -use crate::alloc::alloc_blob; -use crate::mem::memzero; +use crate::mem_utils::memzero; +use crate::memory::{alloc_blob, Memory}; use crate::types::{size_of, Blob, Bytes, Obj}; /// Current bitmap static mut BITMAP_PTR: *mut u8 = core::ptr::null_mut(); -pub unsafe fn alloc_bitmap(heap_size: Bytes) { +pub unsafe fn alloc_bitmap(mem: &mut M, heap_size: Bytes) { // We will have at most this many objects in the heap, each requiring a bit let n_bits = heap_size.to_words().0; // Each byte will hold 8 bits. @@ -14,7 +14,7 @@ pub unsafe fn alloc_bitmap(heap_size: Bytes) { // 64 bits in a single read and check as many bits as possible with a single `word != 0`. let bitmap_bytes = Bytes(((bitmap_bytes + 7) / 8) * 8); // Allocating an actual object here as otherwise dump_heap gets confused - let blob = alloc_blob(bitmap_bytes).unskew() as *mut Blob; + let blob = alloc_blob(mem, bitmap_bytes).unskew() as *mut Blob; memzero(blob.payload_addr() as usize, bitmap_bytes.to_words()); BITMAP_PTR = blob.payload_addr() diff --git a/rts/motoko-rts/src/mark_stack.rs b/rts/motoko-rts/src/gc/mark_compact/mark_stack.rs similarity index 74% rename from rts/motoko-rts/src/mark_stack.rs rename to rts/motoko-rts/src/gc/mark_compact/mark_stack.rs index e42ba1e8f12..9e015c311ad 100644 --- a/rts/motoko-rts/src/mark_stack.rs +++ b/rts/motoko-rts/src/gc/mark_compact/mark_stack.rs @@ -1,13 +1,13 @@ //! A stack for marking heap objects (for GC). There should be no allocation after the stack -//! otherwise things will break as we push. +//! otherwise things will break as we push. 
This invariant is checked in debug builds. -use crate::alloc::{alloc_blob, alloc_words}; +use crate::memory::{alloc_blob, Memory}; use crate::types::{Blob, Words}; use core::ptr::null_mut; /// Initial stack size -const INIT_STACK_SIZE: Words = Words(64); +pub const INIT_STACK_SIZE: Words = Words(64); /// Pointer to the `blob` object for the mark stack. Used to get the capacity of the stack. static mut STACK_BLOB_PTR: *mut Blob = null_mut(); @@ -21,11 +21,11 @@ static mut STACK_TOP: *mut usize = null_mut(); /// Next free slot in the mark stack static mut STACK_PTR: *mut usize = null_mut(); -pub unsafe fn alloc_mark_stack() { +pub unsafe fn alloc_mark_stack(mem: &mut M) { debug_assert!(STACK_BLOB_PTR.is_null()); // Allocating an actual object here to not break dump_heap - STACK_BLOB_PTR = alloc_blob(INIT_STACK_SIZE.to_bytes()).unskew() as *mut Blob; + STACK_BLOB_PTR = alloc_blob(mem, INIT_STACK_SIZE.to_bytes()).unskew() as *mut Blob; STACK_BASE = STACK_BLOB_PTR.payload_addr() as *mut usize; STACK_PTR = STACK_BASE; STACK_TOP = STACK_BASE.add(INIT_STACK_SIZE.0 as usize); @@ -39,9 +39,9 @@ pub unsafe fn free_mark_stack() { } /// Doubles the stack size -unsafe fn grow_stack() { +unsafe fn grow_stack(mem: &mut M) { let stack_cap: Words = STACK_BLOB_PTR.len().to_words(); - let p = alloc_words(stack_cap).unskew() as *mut usize; + let p = mem.alloc_words(stack_cap).unskew() as *mut usize; // Make sure nothing was allocated after the stack debug_assert_eq!(STACK_TOP, p); @@ -50,9 +50,9 @@ unsafe fn grow_stack() { (*STACK_BLOB_PTR).len = new_cap.to_bytes(); } -pub unsafe fn push_mark_stack(obj: usize) { +pub unsafe fn push_mark_stack(mem: &mut M, obj: usize) { if STACK_PTR == STACK_TOP { - grow_stack(); + grow_stack(mem); } *STACK_PTR = obj; diff --git a/rts/motoko-rts/src/idl.rs b/rts/motoko-rts/src/idl.rs index 92738a4c06e..1f94fe8ab7e 100644 --- a/rts/motoko-rts/src/idl.rs +++ b/rts/motoko-rts/src/idl.rs @@ -1,13 +1,16 @@ #![allow(non_upper_case_globals)] -use crate::alloc::alloc_blob; use crate::buf::{read_byte, read_word, skip_leb128, Buf}; +use crate::idl_trap_with; use crate::leb128::{leb128_decode, sleb128_decode}; -use crate::trap_with_prefix; +use crate::memory::{alloc_blob, Memory}; use crate::types::Words; use crate::utf8::utf8_validate; + use core::cmp::min; +use motoko_rts_macros::ic_mem_fn; + // // IDL constants // @@ -43,10 +46,6 @@ const IDL_CON_alias: i32 = 1; const IDL_PRIM_lowest: i32 = -17; -pub(crate) unsafe fn idl_trap_with(msg: &str) -> ! { - trap_with_prefix("IDL error: ", msg); -} - unsafe fn is_primitive_type(ty: i32) -> bool { ty < 0 && (ty >= IDL_PRIM_lowest || ty == IDL_REF_principal) } @@ -72,8 +71,8 @@ unsafe fn parse_fields(buf: *mut Buf, n_types: u32) { } // NB. This function assumes the allocation does not need to survive GC -unsafe fn alloc(size: Words) -> *mut u8 { - alloc_blob(size.to_bytes()).as_blob().payload_addr() +unsafe fn alloc(mem: &mut M, size: Words) -> *mut u8 { + alloc_blob(mem, size.to_bytes()).as_blob().payload_addr() } /// This function parses the IDL magic header and type description. 
It @@ -91,8 +90,9 @@ unsafe fn alloc(size: Words) -> *mut u8 { /// /// * returns a pointer to the beginning of the list of main types /// (again via pointer argument, for lack of multi-value returns in C ABI) -#[no_mangle] -unsafe extern "C" fn parse_idl_header( +#[ic_mem_fn] +unsafe fn parse_idl_header( + mem: &mut M, extended: bool, buf: *mut Buf, typtbl_out: *mut *mut *mut u8, @@ -120,7 +120,7 @@ unsafe extern "C" fn parse_idl_header( *typtbl_size_out = n_types; // Allocate the type table to be passed out - let typtbl: *mut *mut u8 = alloc(Words(n_types)) as *mut _; + let typtbl: *mut *mut u8 = alloc(mem, Words(n_types)) as *mut _; // Go through the table for i in 0..n_types { diff --git a/rts/motoko-rts/src/lib.rs b/rts/motoko-rts/src/lib.rs index 23d3ad4bbb4..dffe486b34b 100644 --- a/rts/motoko-rts/src/lib.rs +++ b/rts/motoko-rts/src/lib.rs @@ -13,39 +13,45 @@ #[macro_use] mod print; -#[cfg(feature = "gc")] -mod gc; - #[cfg(debug_assertions)] pub mod debug; -mod alloc; pub mod bigint; -pub mod bitmap; mod blob_iter; pub mod buf; mod char; pub mod closure_table; pub mod constants; -mod float; -mod idl; +pub mod gc; pub mod leb128; -pub mod mark_stack; -mod mem; +mod mem_utils; +pub mod memory; pub mod principal_id; pub mod text; pub mod text_iter; -#[allow(non_camel_case_types)] mod tommath_bindings; pub mod types; pub mod utf8; mod visitor; -use types::{Bytes, SkewedPtr}; +#[cfg(feature = "ic")] +mod idl; + +#[cfg(feature = "ic")] +mod float; + +use types::Bytes; + +use motoko_rts_macros::ic_mem_fn; + +#[ic_mem_fn(ic_only)] +unsafe fn version(mem: &mut M) -> types::SkewedPtr { + text::text_of_str(mem, "0.1") +} -#[no_mangle] -unsafe extern "C" fn version() -> SkewedPtr { - text::text_of_str("0.1") +#[ic_mem_fn(ic_only)] +unsafe fn alloc_words(mem: &mut M, n: types::Words) -> types::SkewedPtr { + mem.alloc_words(n) } extern "C" { @@ -83,11 +89,15 @@ pub(crate) unsafe fn trap_with_prefix(prefix: &str, msg: &str) -> ! { rts_trap(c_str.as_ptr(), Bytes(b_idx as u32)); } +pub(crate) unsafe fn idl_trap_with(msg: &str) -> ! { + trap_with_prefix("IDL error: ", msg); +} + pub(crate) unsafe fn rts_trap_with(msg: &str) -> ! { trap_with_prefix("RTS error: ", msg) } -#[cfg(feature = "panic_handler")] +#[cfg(feature = "ic")] #[panic_handler] fn panic(info: &core::panic::PanicInfo) -> ! { unsafe { diff --git a/rts/motoko-rts/src/mem.rs b/rts/motoko-rts/src/mem_utils.rs similarity index 100% rename from rts/motoko-rts/src/mem.rs rename to rts/motoko-rts/src/mem_utils.rs diff --git a/rts/motoko-rts/src/memory.rs b/rts/motoko-rts/src/memory.rs new file mode 100644 index 00000000000..9d6611abd01 --- /dev/null +++ b/rts/motoko-rts/src/memory.rs @@ -0,0 +1,57 @@ +#[cfg(feature = "ic")] +pub mod ic; + +use crate::rts_trap_with; +use crate::types::*; + +use motoko_rts_macros::ic_mem_fn; + +/// A trait for heap allocation. RTS functions allocate in heap via this trait. +/// +/// To be able to link the RTS with moc-generated code, we implement wrappers around allocating +/// functions that pass `ic::IcMemory` for the `Memory` arguments, and export these functions with +/// the expected names for the generated code. For example, for a function like +/// +/// ``` +/// unsafe fn allocating_function(mem: &mut M) { ... } +/// ``` +/// +/// we implement (or generate with a macro) +/// +/// ``` +/// #[no_mangle] +/// unsafe extern "C" fn export_name() { allocating_function(crate::memory::ic::IcMemory) } +/// ``` +/// +/// This function does not take any `Memory` arguments can be used by the generated code. 
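In other words, `Memory` is a plain bump-allocation interface: the IC build implements it over the Wasm heap in `memory/ic.rs`, while the test suite can implement it over a buffer it owns (the `TestMemory` used in the test diffs above comes from the test crate's `memory` module and works with the crate's `Words`/`SkewedPtr` types). The following is only a simplified, self-contained sketch of what such a test-side implementation could look like, with plain `usize`s instead of the real types:

```
// Simplified sketch (not the crate's actual trait or TestMemory): a test
// heap that hands out word-aligned bump allocations from a Vec it owns.
trait Memory {
    unsafe fn alloc_words(&mut self, n_words: usize) -> usize;
}

struct TestHeap {
    storage: Vec<u32>,
    next: usize, // index of the next free word
}

impl TestHeap {
    fn new(words: usize) -> Self {
        TestHeap { storage: vec![0; words], next: 0 }
    }
}

impl Memory for TestHeap {
    unsafe fn alloc_words(&mut self, n_words: usize) -> usize {
        assert!(self.next + n_words <= self.storage.len(), "TestHeap out of space");
        let addr = unsafe { self.storage.as_mut_ptr().add(self.next) } as usize;
        self.next += n_words;
        addr
    }
}

fn main() {
    let mut heap = TestHeap::new(1024 * 1024);
    let a = unsafe { heap.alloc_words(4) };
    let b = unsafe { heap.alloc_words(2) };
    assert_eq!(b - a, 4 * 4); // second allocation starts 4 words later
    println!("allocated at {:#x} and {:#x}", a, b);
}
```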
+pub trait Memory { + unsafe fn alloc_words(&mut self, n: Words) -> SkewedPtr; +} + +/// Helper for allocating blobs +#[ic_mem_fn] +pub unsafe fn alloc_blob(mem: &mut M, size: Bytes) -> SkewedPtr { + let ptr = mem.alloc_words(size_of::() + size.to_words()); + let blob = ptr.unskew() as *mut Blob; + (*blob).header.tag = TAG_BLOB; + (*blob).len = size; + ptr +} + +/// Helper for allocating arrays +#[ic_mem_fn] +pub unsafe fn alloc_array(mem: &mut M, len: u32) -> SkewedPtr { + // Array payload should not be larger than half of the memory + if len > 1 << (32 - 2 - 1) { + // 2 for word size, 1 to divide by two + rts_trap_with("Array allocation too large"); + } + + let skewed_ptr = mem.alloc_words(size_of::() + Words(len)); + + let ptr: *mut Array = skewed_ptr.unskew() as *mut Array; + (*ptr).header.tag = TAG_ARRAY; + (*ptr).len = len; + + skewed_ptr +} diff --git a/rts/motoko-rts/src/memory/ic.rs b/rts/motoko-rts/src/memory/ic.rs new file mode 100644 index 00000000000..0fa01d14ded --- /dev/null +++ b/rts/motoko-rts/src/memory/ic.rs @@ -0,0 +1,87 @@ +// This module is only enabled when compiling the RTS for IC or WASI. + +use super::Memory; +use crate::constants::WASM_PAGE_SIZE; +use crate::rts_trap_with; +use crate::types::*; + +use core::arch::wasm32; + +/// Maximum live data retained in a GC. +pub(crate) static mut MAX_LIVE: Bytes = Bytes(0); + +/// Amount of garbage collected so far. +pub(crate) static mut RECLAIMED: Bytes = Bytes(0); + +/// Counter for total allocations +pub(crate) static mut ALLOCATED: Bytes = Bytes(0); + +/// Heap pointer +pub(crate) static mut HP: u32 = 0; + +// Provided by generated code +extern "C" { + pub(crate) fn get_heap_base() -> u32; + pub(crate) fn get_static_roots() -> SkewedPtr; +} + +#[no_mangle] +unsafe extern "C" fn init() { + HP = get_heap_base() as u32; +} + +#[no_mangle] +unsafe extern "C" fn get_max_live_size() -> Bytes { + MAX_LIVE +} + +#[no_mangle] +unsafe extern "C" fn get_reclaimed() -> Bytes { + RECLAIMED +} + +#[no_mangle] +unsafe extern "C" fn get_total_allocations() -> Bytes { + ALLOCATED +} + +#[no_mangle] +unsafe extern "C" fn get_heap_size() -> Bytes { + Bytes(HP - get_heap_base()) +} + +/// Provides a `Memory` implementation, to be used in functions compiled for IC or WASI. The +/// `Memory` implementation allocates in Wasm heap with Wasm `memory.grow` instruction. +pub struct IcMemory; + +impl Memory for IcMemory { + #[inline] + unsafe fn alloc_words(&mut self, n: Words) -> SkewedPtr { + let bytes = n.to_bytes(); + // Update ALLOCATED + ALLOCATED += Bytes(bytes.0 as u64); + + // Update heap pointer + let old_hp = HP; + let new_hp = old_hp + bytes.0; + HP = new_hp; + + // Grow memory if needed + grow_memory(new_hp as usize); + + skew(old_hp as usize) + } +} + +/// Page allocation. Ensures that the memory up to, but excluding, the given pointer is allocated. +#[inline(never)] +unsafe fn grow_memory(ptr: usize) { + let page_size = u64::from(WASM_PAGE_SIZE.0); + let total_pages_needed = (((ptr as u64) + page_size - 1) / page_size) as usize; + let current_pages = wasm32::memory_size(0); + if total_pages_needed > current_pages { + if wasm32::memory_grow(0, total_pages_needed - current_pages) == core::usize::MAX { + rts_trap_with("Cannot grow memory"); + } + } +} diff --git a/rts/motoko-rts/src/principal_id.rs b/rts/motoko-rts/src/principal_id.rs index d82cf18ac0e..ed2ea2aee19 100644 --- a/rts/motoko-rts/src/principal_id.rs +++ b/rts/motoko-rts/src/principal_id.rs @@ -1,11 +1,13 @@ //! 
Principal ID encoding and decoding, with integrity checking
 
-use crate::alloc::alloc_blob;
-use crate::mem::memcpy_bytes;
+use crate::mem_utils::memcpy_bytes;
+use crate::memory::{alloc_blob, Memory};
 use crate::rts_trap_with;
 use crate::text::{blob_compare, blob_of_text};
 use crate::types::{Bytes, SkewedPtr, TAG_BLOB};
 
+use motoko_rts_macros::ic_mem_fn;
+
 // CRC32 for blobs. Loosely based on https://rosettacode.org/wiki/CRC-32#Implementation_2
 
 #[no_mangle]
@@ -90,12 +92,12 @@ unsafe fn enc_stash(pump: &mut Pump, data: u8) {
 }
 
 /// Encode a blob into an checksum-prepended base32 representation
-pub unsafe fn base32_of_checksummed_blob(b: SkewedPtr) -> SkewedPtr {
+pub unsafe fn base32_of_checksummed_blob<M: Memory>(mem: &mut M, b: SkewedPtr) -> SkewedPtr {
     let checksum = compute_crc32(b);
     let n = b.as_blob().len();
     let mut data = b.as_blob().payload_addr();
 
-    let r = alloc_blob(Bytes((n.0 + 4 + 4) / 5 * 8)); // contains padding
+    let r = alloc_blob(mem, Bytes((n.0 + 4 + 4) / 5 * 8)); // contains padding
     let blob = r.as_blob();
     let dest = blob.payload_addr();
 
@@ -182,12 +184,12 @@ unsafe fn dec_stash(pump: &mut Pump, data: u8) {
     }
 }
 
-pub unsafe fn base32_to_blob(b: SkewedPtr) -> SkewedPtr {
+pub unsafe fn base32_to_blob<M: Memory>(mem: &mut M, b: SkewedPtr) -> SkewedPtr {
     let n = b.as_blob().len();
     let mut data = b.as_blob().payload_addr();
 
     // Every group of 8 characters will yield 5 bytes
-    let r = alloc_blob(Bytes(((n.0 + 7) / 8) * 5)); // we deal with padding later
+    let r = alloc_blob(mem, Bytes(((n.0 + 7) / 8) * 5)); // we deal with padding later
     let blob = r.as_blob();
     let dest = blob.payload_addr();
 
@@ -216,21 +218,22 @@ pub unsafe fn base32_to_blob(b: SkewedPtr) -> SkewedPtr {
 }
 
 /// Encode a blob into its textual representation
-#[no_mangle]
-pub unsafe extern "C" fn principal_of_blob(b: SkewedPtr) -> SkewedPtr {
-    base32_to_principal(base32_of_checksummed_blob(b))
+#[ic_mem_fn]
+pub unsafe fn principal_of_blob<M: Memory>(mem: &mut M, b: SkewedPtr) -> SkewedPtr {
+    let base32 = base32_of_checksummed_blob(mem, b);
+    base32_to_principal(mem, base32)
 }
 
 /// Convert a checksum-prepended base32 representation blob into the public principal name format
 /// by hyphenating and lowercasing
-unsafe fn base32_to_principal(b: SkewedPtr) -> SkewedPtr {
+unsafe fn base32_to_principal<M: Memory>(mem: &mut M, b: SkewedPtr) -> SkewedPtr {
     let blob = b.as_blob();
     let n = blob.len();
     let mut data = blob.payload_addr();
 
     // Every group of 5 characters will yield 6 bytes (due to the hypen)
-    let r = alloc_blob(Bytes(((n.0 + 4) / 5) * 6));
+    let r = alloc_blob(mem, Bytes(((n.0 + 4) / 5) * 6));
     let blob = r.as_blob();
     let mut dest = blob.payload_addr();
 
@@ -268,10 +271,10 @@ unsafe fn base32_to_principal(b: SkewedPtr) -> SkewedPtr {
 }
 
 // Decode an textual principal representation into a blob
-#[no_mangle]
-pub unsafe extern "C" fn blob_of_principal(t: SkewedPtr) -> SkewedPtr {
-    let b0 = blob_of_text(t);
-    let bytes = base32_to_blob(b0);
+#[ic_mem_fn]
+pub unsafe fn blob_of_principal<M: Memory>(mem: &mut M, t: SkewedPtr) -> SkewedPtr {
+    let b0 = blob_of_text(mem, t);
+    let bytes = base32_to_blob(mem, b0);
 
     // Strip first four bytes
     let bytes_len = bytes.as_blob().len();
@@ -279,7 +282,7 @@ pub unsafe extern "C" fn blob_of_principal(t: SkewedPtr) -> SkewedPtr {
         rts_trap_with("blob_of_principal: principal too short");
     }
 
-    let stripped = alloc_blob(bytes_len - Bytes(4));
+    let stripped = alloc_blob(mem, bytes_len - Bytes(4));
     memcpy_bytes(
         stripped.as_blob().payload_addr() as usize,
         bytes.as_blob().payload_addr().add(4) as usize,
@@ -287,7 +290,7 @@ pub unsafe extern "C" fn blob_of_principal(t: SkewedPtr) -> SkewedPtr {
     );
 
     // Check encoding
-    let expected = principal_of_blob(stripped);
+    let expected = principal_of_blob(mem, stripped);
     if blob_compare(b0, expected) != 0 {
         rts_trap_with("blob_of_principal: invalid principal");
     }
diff --git a/rts/motoko-rts/src/text.rs b/rts/motoko-rts/src/text.rs
index 7fbaff2260e..e3abe81d8c5 100644
--- a/rts/motoko-rts/src/text.rs
+++ b/rts/motoko-rts/src/text.rs
@@ -25,41 +25,43 @@
 // Note that `CONCAT_LEN` and `BLOB_LEN` are identical, so no need to check the tag to know the
 // size of the text.
 
-use crate::alloc::{alloc_blob, alloc_words};
-use crate::mem::memcpy_bytes;
+use crate::mem_utils::memcpy_bytes;
+use crate::memory::{alloc_blob, Memory};
 use crate::rts_trap_with;
 use crate::types::{size_of, Blob, Bytes, Concat, SkewedPtr, TAG_BLOB, TAG_CONCAT};
 
 use core::cmp::{min, Ordering};
 use core::{slice, str};
 
+use motoko_rts_macros::ic_mem_fn;
+
 const MAX_STR_SIZE: Bytes = Bytes((1 << 30) - 1);
 
 // Strings smaller than this must be blobs
 // Make this MAX_STR_SIZE to disable the use of ropes completely, e.g. for debugging
 const MIN_CONCAT_SIZE: Bytes = Bytes(9);
 
-unsafe fn alloc_text_blob(size: Bytes) -> SkewedPtr {
+unsafe fn alloc_text_blob<M: Memory>(mem: &mut M, size: Bytes) -> SkewedPtr {
     if size > MAX_STR_SIZE {
         rts_trap_with("alloc_text_bloc: Text too large");
     }
-    alloc_blob(size)
+    alloc_blob(mem, size)
 }
 
-#[no_mangle]
-pub unsafe extern "C" fn text_of_ptr_size(buf: *const u8, n: Bytes) -> SkewedPtr {
-    let blob = alloc_text_blob(n);
+#[ic_mem_fn]
+pub unsafe fn text_of_ptr_size<M: Memory>(mem: &mut M, buf: *const u8, n: Bytes) -> SkewedPtr {
+    let blob = alloc_text_blob(mem, n);
     let payload_addr = blob.as_blob().payload_addr();
     memcpy_bytes(payload_addr as usize, buf as usize, n);
     blob
 }
 
-pub unsafe fn text_of_str(s: &str) -> SkewedPtr {
-    text_of_ptr_size(s.as_ptr(), Bytes(s.len() as u32))
+pub unsafe fn text_of_str<M: Memory>(mem: &mut M, s: &str) -> SkewedPtr {
+    text_of_ptr_size(mem, s.as_ptr(), Bytes(s.len() as u32))
 }
 
-#[no_mangle]
-pub unsafe extern "C" fn text_concat(s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr {
+#[ic_mem_fn]
+pub unsafe fn text_concat<M: Memory>(mem: &mut M, s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr {
     let blob1_len = text_size(s1);
     let blob2_len = text_size(s2);
 
@@ -80,7 +82,7 @@ pub unsafe extern "C" fn text_concat(s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr
         let blob1 = s1.as_blob();
         let blob2 = s2.as_blob();
 
-        let r = alloc_text_blob(new_len);
+        let r = alloc_text_blob(mem, new_len);
         let r_payload: *const u8 = r.as_blob().payload_addr();
         memcpy_bytes(r_payload as usize, blob1.payload_addr() as usize, blob1_len);
         memcpy_bytes(
@@ -98,7 +100,7 @@ pub unsafe extern "C" fn text_concat(s1: SkewedPtr, s2: SkewedPtr) -> SkewedPtr
     }
 
     // Create concat node
-    let r = alloc_words(size_of::<Concat>());
+    let r = mem.alloc_words(size_of::<Concat>());
     let r_concat = r.unskew() as *mut Concat;
     (*r_concat).header.tag = TAG_CONCAT;
     (*r_concat).n_bytes = new_len;
@@ -160,14 +162,14 @@ unsafe extern "C" fn text_to_buf(mut s: SkewedPtr, mut buf: *mut u8) {
 }
 
 // Straighten into contiguous memory, if needed (e.g. for system calls)
-#[no_mangle]
-pub unsafe extern "C" fn blob_of_text(s: SkewedPtr) -> SkewedPtr {
+#[ic_mem_fn]
+pub unsafe fn blob_of_text<M: Memory>(mem: &mut M, s: SkewedPtr) -> SkewedPtr {
     let obj = s.as_obj();
     if obj.tag() == TAG_BLOB {
         s
     } else {
         let concat = obj.as_concat();
-        let r = alloc_text_blob((*concat).n_bytes);
+        let r = alloc_text_blob(mem, (*concat).n_bytes);
         text_to_buf(s, r.as_blob().payload_addr());
         r
     }
@@ -378,12 +380,12 @@ pub unsafe fn decode_code_point(s: *const u8, size: *mut u32) -> u32 {
 }
 
 /// Allocate a text from a character
-#[no_mangle]
-pub unsafe extern "C" fn text_singleton(char: u32) -> SkewedPtr {
+#[ic_mem_fn]
+pub unsafe fn text_singleton<M: Memory>(mem: &mut M, char: u32) -> SkewedPtr {
     let mut buf = [0u8; 4];
     let str_len = char::from_u32_unchecked(char).encode_utf8(&mut buf).len() as u32;
 
-    let blob_ptr = alloc_text_blob(Bytes(str_len));
+    let blob_ptr = alloc_text_blob(mem, Bytes(str_len));
     let blob = blob_ptr.as_blob();
 
diff --git a/rts/motoko-rts/src/text_iter.rs b/rts/motoko-rts/src/text_iter.rs
index 88714f37cf2..560f0557cc0 100644
--- a/rts/motoko-rts/src/text_iter.rs
+++ b/rts/motoko-rts/src/text_iter.rs
@@ -10,22 +10,28 @@
 //! 1. A pointer to the text
 //! 2. 0, or a pointer to the next list entry
 
-use crate::alloc::alloc_array;
+use crate::memory::{alloc_array, Memory};
 use crate::rts_trap_with;
 use crate::text::decode_code_point;
 use crate::types::{SkewedPtr, TAG_BLOB, TAG_CONCAT};
 
+use motoko_rts_macros::ic_mem_fn;
+
 const TODO_TEXT_IDX: u32 = 0;
 const TODO_LINK_IDX: u32 = 1;
 
 /// Find the left-most leaf of a text, putting all the others onto a list. Used to enforce the
 /// invariant about TEXT_ITER_BLOB to be a blob.
-unsafe fn find_leaf(mut text: SkewedPtr, todo: *mut SkewedPtr) -> SkewedPtr {
+unsafe fn find_leaf<M: Memory>(
+    mem: &mut M,
+    mut text: SkewedPtr,
+    todo: *mut SkewedPtr,
+) -> SkewedPtr {
     while text.tag() == TAG_CONCAT {
         let concat = text.as_concat();
 
         // Add right node to TODOs
-        let new_todo = alloc_array(2);
+        let new_todo = alloc_array(mem, 2);
         let new_todo_array = new_todo.as_array();
         new_todo_array.set(TODO_TEXT_IDX, (*concat).text2);
         new_todo_array.set(TODO_LINK_IDX, *todo);
@@ -44,9 +50,9 @@ const ITER_POS_IDX: u32 = 1;
 const ITER_TODO_IDX: u32 = 2;
 
 /// Returns a new iterator for the text
-#[no_mangle]
-pub unsafe extern "C" fn text_iter(text: SkewedPtr) -> SkewedPtr {
-    let iter = alloc_array(3);
+#[ic_mem_fn]
+pub unsafe fn text_iter<M: Memory>(mem: &mut M, text: SkewedPtr) -> SkewedPtr {
+    let iter = alloc_array(mem, 3);
     let array = iter.as_array();
 
     // Initialize the TODO field first, to be able to use it use the location to `find_leaf`
@@ -57,7 +63,7 @@ pub unsafe extern "C" fn text_iter(text: SkewedPtr) -> SkewedPtr {
     array.set(ITER_POS_IDX, SkewedPtr(0));
 
     // Initialize blob field
-    array.set(ITER_BLOB_IDX, find_leaf(text, todo_addr as *mut _));
+    array.set(ITER_BLOB_IDX, find_leaf(mem, text, todo_addr as *mut _));
 
     iter
 }
@@ -78,8 +84,8 @@ pub unsafe extern "C" fn text_iter_done(iter: SkewedPtr) -> u32 {
 }
 
 /// Returns next character in the iterator, advances the iterator
-#[no_mangle]
-pub unsafe extern "C" fn text_iter_next(iter: SkewedPtr) -> u32 {
+#[ic_mem_fn]
+pub unsafe fn text_iter_next<M: Memory>(mem: &mut M, iter: SkewedPtr) -> u32 {
     let iter_array = iter.as_array();
 
     let blob = iter_array.get(ITER_BLOB_IDX).as_blob();
@@ -105,15 +111,15 @@ pub unsafe extern "C" fn text_iter_next(iter: SkewedPtr) -> u32 {
             todo_array.set(TODO_TEXT_IDX, (*concat).text2);
             iter_array.set(ITER_POS_IDX, SkewedPtr(0));
             let todo_addr = iter_array.payload_addr().add(ITER_TODO_IDX as usize);
-            iter_array.set(ITER_BLOB_IDX, find_leaf((*concat).text1, todo_addr));
-            text_iter_next(iter)
+            iter_array.set(ITER_BLOB_IDX, find_leaf(mem, (*concat).text1, todo_addr));
+            text_iter_next(mem, iter)
         } else {
             // Otherwise remove the entry from the chain
             debug_assert_eq!(text.tag(), TAG_BLOB);
             iter_array.set(ITER_BLOB_IDX, text);
             iter_array.set(ITER_POS_IDX, SkewedPtr(0));
             iter_array.set(ITER_TODO_IDX, todo_array.get(TODO_LINK_IDX));
-            text_iter_next(iter)
+            text_iter_next(mem, iter)
         }
     } else {
         // We are not at the end, read the next character from the blob
diff --git a/rts/motoko-rts/src/tommath_bindings.rs b/rts/motoko-rts/src/tommath_bindings.rs
index e2fc5359a78..705acbf9739 100644
--- a/rts/motoko-rts/src/tommath_bindings.rs
+++ b/rts/motoko-rts/src/tommath_bindings.rs
@@ -1 +1,2 @@
+#![allow(unused, non_camel_case_types)]
 include!("../../_build/tommath_bindings.rs");
diff --git a/rts/motoko-rts/src/types.rs b/rts/motoko-rts/src/types.rs
index 01f448f1c08..71ca9640dfe 100644
--- a/rts/motoko-rts/src/types.rs
+++ b/rts/motoko-rts/src/types.rs
@@ -248,6 +248,7 @@ impl Object {
         (*self).size
     }
 
+    #[cfg(debug_assertions)]
     pub(crate) unsafe fn get(self: *mut Self, idx: u32) -> SkewedPtr {
         *self.payload_addr().add(idx as usize)
     }
@@ -275,10 +276,6 @@ impl Closure {
     pub(crate) unsafe fn size(self: *mut Self) -> u32 {
         (*self).size
     }
-
-    pub(crate) unsafe fn get(self: *mut Self, idx: u32) -> SkewedPtr {
-        *self.payload_addr().add(idx as usize)
-    }
 }
 
 #[repr(packed)]
diff --git a/rts/motoko-rts/src/utf8.rs b/rts/motoko-rts/src/utf8.rs
index 8d880ee4e5e..913206194e6 100644
--- a/rts/motoko-rts/src/utf8.rs
+++ b/rts/motoko-rts/src/utf8.rs
@@ -1,15 +1,13 @@
-use crate::rts_trap_with;
-
 /// Panics if the string is not valid UTF-8
 #[no_mangle]
 pub(crate) unsafe extern "C" fn utf8_validate(str: *const libc::c_char, len: u32) {
     if !utf8_valid(str, len) {
-        rts_trap_with("utf8_validate: string is not UTF-8");
+        crate::rts_trap_with("utf8_validate: string is not UTF-8");
     }
 }
 
 /// Returns whether the string is valid UTF-8
 #[no_mangle]
-pub unsafe fn utf8_valid(str: *const libc::c_char, len: u32) -> bool {
+pub unsafe extern "C" fn utf8_valid(str: *const libc::c_char, len: u32) -> bool {
     core::str::from_utf8(core::slice::from_raw_parts(str as *const _, len as usize)).is_ok()
 }
diff --git a/test/run/big-array-access.mo b/test/run/big-array-access.mo
index 3e2414723d5..8c64552ac36 100644
--- a/test/run/big-array-access.mo
+++ b/test/run/big-array-access.mo
@@ -1,3 +1,7 @@
 let array = [1,2,3];
 
 array[10000000000000];
+
+// wasm-run output includes a backtrace printed by wasmrun, which changes
+// depending on RTS compile flags
+//SKIP wasm-run
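The net effect for callers inside the RTS crate: every allocating function now threads an explicit `mem: &mut M` (with `M: Memory`) down to the allocator, and `#[ic_mem_fn]` takes the place of the former `#[no_mangle] extern "C"` exports. A minimal sketch of the new calling convention follows, assuming only what the hunks above show; the `greeting` helper is hypothetical and not part of this patch.

```rust
use crate::memory::Memory;
use crate::text::{text_concat, text_of_str};
use crate::types::SkewedPtr;

/// Hypothetical helper, for illustration only (not in the patch): builds two
/// text values and concatenates them, passing the same `Memory` implementation
/// to every allocating call instead of relying on a global allocator.
unsafe fn greeting<M: Memory>(mem: &mut M) -> SkewedPtr {
    let hello = text_of_str(mem, "Hello, ");
    let world = text_of_str(mem, "Motoko!");
    text_concat(mem, hello, world)
}
```

Because the allocator is now an explicit parameter, the same code path can be driven either by the IC heap in canister builds or by a test-only `Memory` implementation, which is presumably what lets the RTS test suite exercise these functions outside the IC.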